@article {pmid36990988, year = {2023}, author = {Kusunose, M and Muto, K}, title = {Public attitudes toward cloud computing and willingness to share personal health records (PHRs) and genome data for health care research in Japan.}, journal = {Human genome variation}, volume = {10}, number = {1}, pages = {11}, pmid = {36990988}, issn = {2054-345X}, abstract = {Japan's government aims to promote the linkage of medical records, including medical genomic testing data and personal health records (PHRs), via cloud computing (the cloud). However, linking national medical records and using them for health care research can be controversial. Additionally, many ethical issues with using cloud networks with health care and genome data have been noted. However, no research has yet explored the Japanese public's opinions about their PHRs, including genome data, being shared for health care research or the use of the cloud for storing and analyzing such data. Therefore, we conducted a survey in March 2021 to clarify the public's attitudes toward sharing their PHRs, including genome data and using the cloud for health care research. We analyzed data to experimentally create digital health basic literacy scores (BLSs). Our results showed that the Japanese public had concerns about data sharing that overlapped with structural cloud computing issues. The effect of incentives on changes in participants' willingness to share data (WTSD) was limited. Instead, there could be a correlation between WTSD and BLSs. Finally, we argue that it is vital to consider not only researchers but also research participants as value cocreators in health care research conducted through the cloud to overcome both parties' vulnerability.}, } @article {pmid36977690, year = {2023}, author = {Rogers, DM and Agarwal, R and Vermaas, JV and Smith, MD and Rajeshwar, RT and Cooper, C and Sedova, A and Boehm, S and Baker, M and Glaser, J and Smith, JC}, title = {SARS-CoV2 billion-compound docking.}, journal = {Scientific data}, volume = {10}, number = {1}, pages = {173}, pmid = {36977690}, issn = {2052-4463}, abstract = {This dataset contains ligand conformations and docking scores for 1.4 billion molecules docked against 6 structural targets from SARS-CoV2, representing 5 unique proteins: MPro, NSP15, PLPro, RDRP, and the Spike protein. Docking was carried out using the AutoDock-GPU platform on the Summit supercomputer and Google Cloud. The docking procedure employed the Solis Wets search method to generate 20 independent ligand binding poses per compound. Each compound geometry was scored using the AutoDock free energy estimate, and rescored using RFScore v3 and DUD-E machine-learned rescoring models. Input protein structures are included, suitable for use by AutoDock-GPU and other docking programs. As the result of an exceptionally large docking campaign, this dataset represents a valuable resource for discovering trends across small molecule and protein binding sites, training AI models, and comparing to inhibitor compounds targeting SARS-CoV-2. 
The work also gives an example of how to organize and process data from ultra-large docking screens.}, } @article {pmid36970305, year = {2022}, author = {Christensen, JR and Golden, HE and Alexander, LC and Pickard, BR and Fritz, KM and Lane, CR and Weber, MH and Kwok, RM and Keefer, MN}, title = {Headwater streams and inland wetlands: Status and advancements of geospatial datasets and maps across the United States.}, journal = {Earth-science reviews}, volume = {235}, number = {}, pages = {1-24}, pmid = {36970305}, issn = {0012-8252}, support = {EPA999999/ImEPA/Intramural EPA/United States ; }, abstract = {Headwater streams and inland wetlands provide essential functions that support healthy watersheds and downstream waters. However, scientists and aquatic resource managers lack a comprehensive synthesis of national and state stream and wetland geospatial datasets and emerging technologies that can further improve these data. We conducted a review of existing United States (US) federal and state stream and wetland geospatial datasets, focusing on their spatial extent, permanence classifications, and current limitations. We also examined recent peer-reviewed literature for emerging methods that can potentially improve the estimation, representation, and integration of stream and wetland datasets. We found that federal and state datasets rely heavily on the US Geological Survey's National Hydrography Dataset for stream extent and duration information. Only eleven states (22%) had additional stream extent information and seven states (14%) provided additional duration information. Likewise, federal and state wetland datasets primarily use the US Fish and Wildlife Service's National Wetlands Inventory (NWI) Geospatial Dataset, with only two states using non-NWI datasets. Our synthesis revealed that LiDAR-based technologies hold promise for advancing stream and wetland mapping at limited spatial extents. While machine learning techniques may help to scale up these LiDAR-derived estimates, challenges related to preprocessing and data workflows remain. High-resolution commercial imagery, supported by public imagery and cloud computing, may further aid characterization of the spatial and temporal dynamics of streams and wetlands, especially using multi-platform and multi-temporal machine learning approaches. Models integrating both stream and wetland dynamics are limited, and field-based efforts must remain a key component in developing improved headwater stream and wetland datasets. Continued financial and partnership support of existing databases is also needed to enhance mapping and inform water resources research and policy decisions.}, } @article {pmid36969371, year = {2023}, author = {Islam, MJ and Datta, R and Iqbal, A}, title = {Actual rating calculation of the zoom cloud meetings app using user reviews on google play store with sentiment annotation of BERT and hybridization of RNN and LSTM.}, journal = {Expert systems with applications}, volume = {223}, number = {}, pages = {119919}, pmid = {36969371}, issn = {0957-4174}, abstract = {The recent COVID-19 outbreaks forced people to work from home, and educational institutes ran their academic activities online. The online meeting app "Zoom Cloud Meetings" provided much of the support needed for this purpose. To deliver the functionality required in this situation, developers must release new versions of the application frequently, which increases the chance of introducing bugs with each release. To fix these bugs, developers need user feedback on each new release. However, ratings and reviews often contradict each other because users are careless in assigning ratings, and this mismatch is the main obstacle to fixing bugs on the basis of user ratings alone. For this reason, we conducted this average rating calculation based on the sentiment of user reviews to help software developers. We use BERT-based sentiment annotation to create unbiased datasets and a hybrid of RNN and LSTM to compute ratings from the unbiased review dataset. Of the four models trained on four different datasets, we found promising performance on the two datasets containing a sufficiently large number of unbiased reviews. The results show that the reviews carry more positive sentiment than the actual ratings: we computed an average rating of 3.60 stars, whereas the actual average rating in the dataset is 3.08 stars. We used reviews of more than 250 apps from the Google Play store. Our results could be even more promising with a large dataset containing only reviews of the Zoom Cloud Meetings app.}, } @article {pmid36967390, year = {2023}, author = {Camacho, C and Boratyn, GM and Joukov, V and Vera Alvarez, R and Madden, TL}, title = {ElasticBLAST: accelerating sequence search via cloud computing.}, journal = {BMC bioinformatics}, volume = {24}, number = {1}, pages = {117}, pmid = {36967390}, issn = {1471-2105}, abstract = {BACKGROUND: Biomedical researchers use alignments produced by BLAST (Basic Local Alignment Search Tool) to categorize their query sequences. Producing such alignments is an essential bioinformatics task that is well suited for the cloud. The cloud can perform many calculations quickly as well as store and access large volumes of data. Bioinformaticians can also use it to collaborate with other researchers, sharing their results, datasets and even their pipelines on a common platform.

RESULTS: We present ElasticBLAST, a cloud native application to perform BLAST alignments in the cloud. ElasticBLAST can handle anywhere from a few to many thousands of queries and run the searches on thousands of virtual CPUs (if desired), deleting resources when it is done. It uses cloud native tools for orchestration and can request discounted instances, lowering cloud costs for users. It is supported on Amazon Web Services and Google Cloud Platform. It can search BLAST databases that are user provided or from the National Center for Biotechnology Information.
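
To make the usage pattern concrete, here is a minimal sketch of driving such a search from Python through the elastic-blast command-line interface; the results bucket, query file, and node count are hypothetical placeholders:

```python
import subprocess

# Hypothetical results bucket and query file; assumes the documented
# elastic-blast subcommands (submit/status/delete) are installed and on PATH.
results = "s3://my-elb-bucket/results"

# Submit a protein search against the NCBI-provided nr database.
subprocess.run(["elastic-blast", "submit",
                "--program", "blastp",
                "--db", "nr",
                "--query", "queries.fa",
                "--results", results,
                "--num-nodes", "4"], check=True)

# Check progress; rerun until all batches report as finished.
subprocess.run(["elastic-blast", "status", "--results", results], check=True)

# Tear down remaining cloud resources once results are downloaded.
subprocess.run(["elastic-blast", "delete", "--results", results], check=True)
```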

CONCLUSION: We show that ElasticBLAST is a useful application that can efficiently perform BLAST searches for the user in the cloud, demonstrating this with two examples. At the same time, it hides much of the complexity of working in the cloud, lowering the threshold to move work to the cloud.}, } @article {pmid36961920, year = {2023}, author = {Goldnadel Monteiro, M and Pantani, D and Pinsky, I and Hernandes Rocha, TA}, title = {Using the Pan American Health Organization digital conversational agent to educate the public on alcohol use and health: a preliminary analysis.}, journal = {JMIR formative research}, volume = {}, number = {}, pages = {}, doi = {10.2196/43165}, pmid = {36961920}, issn = {2561-326X}, abstract = {BACKGROUND: There is widespread misinformation about the effects of alcohol consumption on health, which was amplified during the COVID-19 pandemic through social media and internet channels. Chatbots and conversational agents became an important piece of the WHO response during the COVID-19 pandemic, quickly disseminating evidence-based information on COVID-19 and tobacco to the public. PAHO seized the opportunity to develop a conversational agent to talk about alcohol-related topics, thereby complementing traditional forms of health education promoted in the past.

OBJECTIVE: To develop and deploy a digital conversational agent that interacts with an unlimited number of users, 24 hours a day, anonymously, about alcohol topics, in several languages, including ways to reduce risks from drinking, at no cost and accessible through various devices.

METHODS: The content development was based on the latest scientific evidence on the impacts of alcohol on health, social norms about drinking, and data from the World Health Organization and PAHO. The agent itself was developed through a non-exclusive license agreement with a private company and included Google Dialogflow ES as the natural language processing software and AWS for cloud services. Another company was contracted to program all the conversations, following the technical advice of PAHO staff.
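
For context, the detect-intent round trip that a Dialogflow ES agent like this relies on can be sketched with the google-cloud-dialogflow client; the project and session identifiers below are hypothetical, and Pahola's own agent is not publicly scriptable:

```python
from google.cloud import dialogflow  # pip install google-cloud-dialogflow

def ask_agent(project_id: str, session_id: str, text: str, language: str = "en") -> str:
    """Send one user utterance to a Dialogflow ES agent and return its reply."""
    client = dialogflow.SessionsClient()
    session = client.session_path(project_id, session_id)
    query_input = dialogflow.QueryInput(
        text=dialogflow.TextInput(text=text, language_code=language)
    )
    response = client.detect_intent(
        request={"session": session, "query_input": query_input}
    )
    return response.query_result.fulfillment_text

# Hypothetical project and session; shown only to illustrate the API shape.
print(ask_agent("my-gcp-project", "demo-session-1", "How does alcohol affect sleep?"))
```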

RESULTS: The conversational agent, named Pahola, was deployed on November 19, 2021, through the PAHO website after a launch event with high publicity. No identifiable data were used and all interactions were anonymous; therefore, this was not considered human subjects research. Pahola speaks English, Spanish, and Portuguese and interacts anonymously with a potentially unlimited number of users through various digital devices. Users were required to accept terms and conditions to enable access to their camera and microphone to interact with Pahola. Pahola attracted good attention from the media and reached 1.6 million people, leading to 236,000 clicks on its landing page, mostly through mobile devices. Only 1,532 users had a conversation after clicking to talk to Pahola. The average time users spent talking to Pahola was five minutes. Major dropouts were observed at different steps of the conversation flow. Some questions asked by users were not anticipated during programming and could not be answered.

CONCLUSIONS: Our findings revealed several limitations of using a conversational agent for alcohol education of the general public. Improvements are needed to expand the content to make it more meaningful and engaging to the public. The potential of chatbots to educate the public on alcohol-related topics seems enormous but requires a long-term investment of resources and research to be useful and reach many more people.}, } @article {pmid36958108, year = {2023}, author = {Menghani, RR and Das, A and Kraft, RH}, title = {A sensor-enabled cloud-based computing platform for computational brain biomechanics.}, journal = {Computer methods and programs in biomedicine}, volume = {233}, number = {}, pages = {107470}, doi = {10.1016/j.cmpb.2023.107470}, pmid = {36958108}, issn = {1872-7565}, abstract = {BACKGROUND AND OBJECTIVES: Driven by the risk of repetitive head trauma, sensors have been integrated into mouthguards to measure head impacts in contact sports and military activities. These wearable devices, referred to as "instrumented" or "smart" mouthguards, are being actively developed by various research groups and organizations. These instrumented mouthguards provide an opportunity to further study and understand brain biomechanics due to impact. In this study, we present a brain modeling service that can use information from these sensors to predict brain injury metrics in an automated fashion.

METHODS: We have built a brain modeling platform using several of Amazon's Web Services (AWS) to enable cloud computing and scalability. We use a custom-built cloud-based finite element modeling code to compute the physics-based nonlinear response of the intracranial brain tissue and provide a frontend web application and an application programming interface for groups working on head impact sensor technology to include simulated injury predictions into their research pipeline.
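
As a rough illustration of how sensor groups might call such an application programming interface, the sketch below posts mouthguard kinematics to a simulation endpoint; the URL and payload fields are hypothetical stand-ins, since the paper's API is not described in detail here:

```python
import requests  # pip install requests

# Illustrative only: the endpoint and payload layout below are hypothetical.
API_URL = "https://brain-sim.example.com/api/v1/simulations"

impact = {
    "sensor_id": "mouthguard-042",
    "sample_rate_hz": 1000,
    # Short time series of head kinematics recorded by the mouthguard.
    "linear_acceleration_g": [[0.0, 0.1, 9.8], [35.4, 12.0, 8.1]],
    "angular_velocity_rad_s": [[0.0, 0.1, 0.0], [21.7, 3.2, 0.4]],
}

resp = requests.post(API_URL, json=impact, timeout=30)
resp.raise_for_status()
print("queued finite element job:", resp.json().get("job_id"))
```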

RESULTS: The platform results have been validated against experimental data available in the literature for brain-skull relative displacements, brain strains, and intracranial pressure. The parallel processing capability of the platform has also been tested and verified. We also studied the accuracy of the custom head surfaces generated by Avatar 3D.

CONCLUSION: We present a validated cloud-based computational brain modeling platform that uses sensor data as input for numerical brain models and outputs a quantitative description of brain tissue strains and injury metrics. The platform is expected to generate transparent, reproducible, and traceable brain computing results.}, } @article {pmid36950362, year = {2023}, author = {Gonzalez, EM and Zarei, A and Hendler, N and Simmons, T and Zarei, A and Demieville, J and Strand, R and Rozzi, B and Calleja, S and Ellingson, H and Cosi, M and Davey, S and Lavelle, DO and Truco, MJ and Swetnam, TL and Merchant, N and Michelmore, RW and Lyons, E and Pauli, D}, title = {PhytoOracle: Scalable, modular phenomics data processing pipelines.}, journal = {Frontiers in plant science}, volume = {14}, number = {}, pages = {1112973}, pmid = {36950362}, issn = {1664-462X}, abstract = {As phenomics data volume and dimensionality increase due to advancements in sensor technology, there is an urgent need to develop and implement scalable data processing pipelines. Current phenomics data processing pipelines lack modularity, extensibility, and processing distribution across sensor modalities and phenotyping platforms. To address these challenges, we developed PhytoOracle (PO), a suite of modular, scalable pipelines for processing large volumes of field phenomics RGB, thermal, PSII chlorophyll fluorescence 2D images, and 3D point clouds. PhytoOracle aims to (i) improve data processing efficiency; (ii) provide an extensible, reproducible computing framework; and (iii) enable data fusion of multi-modal phenomics data. PhytoOracle integrates open-source distributed computing frameworks for parallel processing on high-performance computing, cloud, and local computing environments. Each pipeline component is available as a standalone container, providing transferability, extensibility, and reproducibility. The PO pipeline extracts and associates individual plant traits across sensor modalities and collection time points, representing a unique multi-system approach to addressing the genotype-phenotype gap. To date, PO supports lettuce and sorghum phenotypic trait extraction, with a goal of widening the range of supported species in the future. At the maximum number of cores tested in this study (1,024 cores), PO processing times were: 235 minutes for 9,270 RGB images (140.7 GB), 235 minutes for 9,270 thermal images (5.4 GB), and 13 minutes for 39,678 PSII images (86.2 GB). These processing times represent end-to-end processing, from raw data to fully processed numerical phenotypic trait data. Repeatability values of 0.39-0.95 (bounding area), 0.81-0.95 (axis-aligned bounding volume), 0.79-0.94 (oriented bounding volume), 0.83-0.95 (plant height), and 0.81-0.95 (number of points) were observed in Field Scanalyzer data. 
We also show the ability of PO to process drone data with a repeatability of 0.55-0.95 (bounding area).}, } @article {pmid36949901, year = {2023}, author = {Cossío, F and Schurz, H and Engström, M and Barck-Holst, C and Tsirikoglou, A and Lundström, C and Gustafsson, H and Smith, K and Zackrisson, S and Strand, F}, title = {VAI-B: a multicenter platform for the external validation of artificial intelligence algorithms in breast imaging.}, journal = {Journal of medical imaging (Bellingham, Wash.)}, volume = {10}, number = {6}, pages = {061404}, pmid = {36949901}, issn = {2329-4302}, abstract = {PURPOSE: Multiple vendors are currently offering artificial intelligence (AI) computer-aided systems for triage detection, diagnosis, and risk prediction of breast cancer based on screening mammography. There is an imminent need to establish validation platforms that enable fair and transparent testing of these systems against external data.

APPROACH: We developed validation of artificial intelligence for breast imaging (VAI-B), a platform for independent validation of AI algorithms in breast imaging. The platform is a hybrid solution, with one part implemented in the cloud and another in an on-premises environment at Karolinska Institute. Cloud services provide the flexibility of scaling the computing power during inference time, while secure on-premises storage of clinical data preserves patient privacy. A MongoDB database and a Python package were developed to store and manage the data on-premises. VAI-B requires four data components: radiological images, AI inferences, radiologist assessments, and cancer outcomes.
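
A minimal sketch of the kind of on-premises bookkeeping this describes, using pymongo; the collection and field names are hypothetical, not VAI-B's actual schema:

```python
from pymongo import MongoClient  # pip install pymongo

# Illustrative only. Of the four data components the paper names (images,
# AI inferences, radiologist assessments, cancer outcomes), two are sketched.
client = MongoClient("mongodb://localhost:27017")
db = client["vai_b_demo"]

db.inferences.insert_one({
    "exam_id": "exam-000123",          # hypothetical examination identifier
    "vendor": "vendor-A",
    "abnormality_score": 0.83,         # AI score for signs of cancer
})
db.outcomes.insert_one({
    "exam_id": "exam-000123",
    "cancer_within_24_months": False,  # registry-derived ground truth
})

# Join inferences with outcomes, the basic step behind downstream evaluation.
pipeline = [{"$lookup": {"from": "outcomes", "localField": "exam_id",
                         "foreignField": "exam_id", "as": "outcome"}}]
for doc in db.inferences.aggregate(pipeline):
    for outcome in doc["outcome"]:
        print(doc["vendor"], doc["abnormality_score"],
              outcome["cancer_within_24_months"])
```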

RESULTS: To pilot test VAI-B, we defined a case-control population of 8080 patients diagnosed with breast cancer and 36,339 healthy women, drawn from the Swedish national quality registry for breast cancer. Images and radiological assessments from more than 100,000 mammography examinations were extracted from hospitals in three regions of Sweden. The images were processed by AI systems from three vendors in a virtual private cloud to produce abnormality scores related to signs of cancer in the images. A total of 105,706 examinations have been processed and stored in the database.

CONCLUSIONS: We have created a platform that will allow downstream evaluation of AI systems for breast cancer detection, which enables faster development cycles for participating vendors and safer AI adoption for participating hospitals. The platform was designed to be scalable and ready to be expanded should a new vendor want to evaluate their system or should a new hospital wish to obtain an evaluation of different AI systems on their images.}, } @article {pmid36947346, year = {2023}, author = {Abler, D and Schaer, R and Oreiller, V and Verma, H and Reichenbach, J and Aidonopoulos, O and Evéquoz, F and Jreige, M and Prior, JO and Depeursinge, A}, title = {QuantImage v2: a comprehensive and integrated physician-centered cloud platform for radiomics and machine learning research.}, journal = {European radiology experimental}, volume = {7}, number = {1}, pages = {16}, pmid = {36947346}, issn = {2509-9280}, abstract = {BACKGROUND: Radiomics, the field of image-based computational medical biomarker research, has experienced rapid growth over the past decade due to its potential to revolutionize the development of personalized decision support models. However, despite its research momentum and important advances toward methodological standardization, the translation of radiomics prediction models into clinical practice only progresses slowly. The lack of physicians leading the development of radiomics models and insufficient integration of radiomics tools in the clinical workflow contributes to this slow uptake.

METHODS: We propose a physician-centered vision of radiomics research and derive minimal functional requirements for radiomics research software to support this vision. Free-to-access radiomics tools and frameworks were reviewed to identify best practices and reveal the shortcomings of existing software solutions to optimally support physician-driven radiomics research in a clinical environment.

RESULTS: Support for user-friendly development and evaluation of radiomics prediction models via machine learning was found to be missing in most tools. QuantImage v2 (QI2) was designed and implemented to address these shortcomings. QI2 relies on well-established existing tools and open-source libraries to realize and concretely demonstrate the potential of a one-stop tool for physician-driven radiomics research. It provides web-based access to cohort management, feature extraction, and visualization and supports "no-code" development and evaluation of machine learning models against patient-specific outcome data.

CONCLUSIONS: QI2 fills a gap in the radiomics software landscape by enabling "no-code" radiomics research, including model validation, in a clinical environment. Further information about QI2, a public instance of the system, and its source code is available at https://medgift.github.io/quantimage-v2-info/. Key points: As domain experts, physicians play a key role in the development of radiomics models. Existing software solutions do not support physician-driven research optimally. QuantImage v2 implements a physician-centered vision for radiomics research. QuantImage v2 is a web-based, "no-code" radiomics research platform.}, } @article {pmid36944981, year = {2023}, author = {Varesio, C and De Giorgis, V and Veggiotti, P and Nardocci, N and Granata, T and Ragona, F and Pasca, L and Mensi, MM and Borgatti, R and Olivotto, S and Previtali, R and Riva, A and Mancardi, MM and Striano, P and Cavallin, M and Guerrini, R and Operto, FF and Pizzolato, A and Di Maulo, R and Martino, F and Lodi, A and Marini, C}, title = {GLUT1-DS Italian registry: past, present, and future: a useful tool for rare disorders.}, journal = {Orphanet journal of rare diseases}, volume = {18}, number = {1}, pages = {63}, pmid = {36944981}, issn = {1750-1172}, abstract = {BACKGROUND: GLUT1 deficiency syndrome is a rare, genetically determined neurological disorder for which Ketogenic Dietary Treatment represents the gold standard and lifelong treatment. Patient registries are powerful tools providing insights and real-world data on rare diseases.

OBJECTIVE: To describe the implementation of a national web-based registry for GLUT1-DS.

METHODS: This is a retrospective and prospective, multicenter, observational registry developed in collaboration with the Italian GLUT1-DS association and based on an innovative, flexible, and configurable cloud computing technology platform, structured according to the most rigorous requirements for the management of patients' sensitive data. The Glut1 Registry collects baseline and follow-up data on the patients' demographics, history, symptoms, genotype, clinical and instrumental evaluations, and therapies.
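
As an illustration of the kind of record such a registry collects, here is a hypothetical sketch; the field names are illustrative and not the registry's actual schema (SLC2A1 is the gene affected in GLUT1-DS, but the variant shown is invented):

```python
from dataclasses import dataclass, field
from datetime import date
from typing import List, Optional

@dataclass
class Glut1Patient:
    """Hypothetical registry record: demographics, history, symptoms,
    genotype, evaluations, and therapies."""
    patient_id: str
    sex: str
    birth_year: int
    age_at_onset_months: Optional[int] = None
    onset_symptoms: List[str] = field(default_factory=list)  # e.g. "epileptic seizure"
    slc2a1_variant: Optional[str] = None                     # genotype
    on_ketogenic_diet: bool = False
    follow_up_visits: List[date] = field(default_factory=list)

p = Glut1Patient("IT-001", "F", 2015, age_at_onset_months=8,
                 onset_symptoms=["paroxysmal ocular movement disorder"],
                 slc2a1_variant="c.377G>A",  # invented example variant
                 on_ketogenic_diet=True)
print(p.patient_id, p.on_ketogenic_diet)
```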

RESULTS: Five Centers in Italy joined the registry, and two more Centers are currently joining. In the first two years of running, data from 67 patients (40 females and 27 males) have been collected. Age at symptom onset was within the first year of life in most (40, 60%) patients. The diagnosis was formulated in infancy in almost half of the cases (34, 51%). Symptoms at onset were mainly paroxysmal (mostly epileptic seizure and paroxysmal ocular movement disorder) or mixed paroxysmal and fixed symptoms (mostly psychomotor delay). Most patients (53, 79%) are currently under Ketogenic dietary treatments.

CONCLUSIONS: We describe the principles behind the design, development, and deployment of the web-based nationwide GLUT1-DS registry. It represents a stepping stone towards a more comprehensive understanding of the disease from onset to adulthood. It is also a virtuous model from a technical, legal, and organizational point of view, and may serve as a paradigmatic example for other rare disease registry implementations.}, } @article {pmid36939746, year = {2021}, author = {Zhang, G and Zhang, Y and Jin, J}, title = {The Ultrafast and Accurate Mapping Algorithm FANSe3: Mapping a Human Whole-Genome Sequencing Dataset Within 30 Minutes.}, journal = {Phenomics (Cham, Switzerland)}, volume = {1}, number = {1}, pages = {22-30}, pmid = {36939746}, issn = {2730-5848}, abstract = {Aligning billions of reads generated by next-generation sequencing (NGS) to reference sequences, termed "mapping", is the time-consuming and computationally intensive process in most NGS applications. A fast, accurate, and robust mapping algorithm is highly needed. Therefore, we developed the FANSe3 mapping algorithm, which can map a 30× human whole-genome sequencing (WGS) dataset within 30 min, a 50× human whole exome sequencing (WES) dataset within 30 s, and a typical mRNA-seq dataset within seconds in a single server node without the need for any hardware acceleration feature. Like its predecessor FANSe2, the error rate of FANSe3 can be kept as low as 10^-9 in most cases, which is more robust than the Burrows-Wheeler transform-based algorithms. Error allowance hardly affected the identification of a driver somatic mutation in clinically relevant WGS data and provided robust gene expression profiles regardless of the parameter settings and sequencer used. The novel algorithm, designed for high-performance cloud-computing infrastructures, will break the bottleneck of speed and accuracy in NGS data analysis and promote NGS applications in various fields. The FANSe3 algorithm can be downloaded from the website: http://www.chi-biotech.com/fanse3/.}, } @article {pmid36937654, year = {2023}, author = {Selvarajan, S and Srivastava, G and Khadidos, AO and Khadidos, AO and Baza, M and Alshehri, A and Lin, JC}, title = {An artificial intelligence lightweight blockchain security model for security and privacy in IIoT systems.}, journal = {Journal of cloud computing (Heidelberg, Germany)}, volume = {12}, number = {1}, pages = {38}, pmid = {36937654}, issn = {2192-113X}, abstract = {The Industrial Internet of Things (IIoT) promises to deliver innovative business models across multiple domains by providing ubiquitous connectivity, intelligent data, predictive analytics, and decision-making systems for improved market performance. However, traditional IIoT architectures are highly susceptible to many security vulnerabilities and network intrusions, which bring challenges such as lack of privacy, integrity, trust, and centralization. This research aims to implement an Artificial Intelligence-based Lightweight Blockchain Security Model (AILBSM) to ensure the privacy and security of IIoT systems. This novel model is meant to address issues that can occur with security and privacy when dealing with Cloud-based IIoT systems that handle data in the Cloud or on the Edge of Networks (on-device). The novel contribution of this paper is that it combines the advantages of both lightweight blockchain and Convivial Optimized Sprinter Neural Network (COSNN) based AI mechanisms with simplified and improved security operations. 
Here, the significant impact of attacks is reduced by transforming features into encoded data using an Authentic Intrinsic Analysis (AIA) model. Extensive experiments are conducted to validate this system using various attack datasets. In addition, the results of privacy protection and AI mechanisms are evaluated separately and compared using various indicators. By using the proposed AILBSM framework, the execution time is minimized to 0.6 seconds, the overall classification accuracy is improved to 99.8%, and detection performance is increased to 99.7%. Due to the inclusion of auto-encoder based transformation and blockchain authentication, the anomaly detection performance of the proposed model is highly improved when compared to other techniques.}, } @article {pmid36937168, year = {2023}, author = {Sadasivan, H and Maric, M and Dawson, E and Iyer, V and Israeli, J and Narayanasamy, S}, title = {Accelerating Minimap2 for Accurate Long Read Alignment on GPUs.}, journal = {Journal of biotechnology and biomedicine}, volume = {6}, number = {1}, pages = {13-23}, pmid = {36937168}, issn = {2642-9128}, abstract = {Long read sequencing technology is becoming increasingly popular for Precision Medicine applications like Whole Genome Sequencing (WGS) and microbial abundance estimation. Minimap2 is the state-of-the-art aligner and mapper used by the leading long read sequencing technologies today. However, Minimap2 on CPUs is very slow for long noisy reads. ~60-70% of the run-time on a CPU comes from the highly sequential chaining step in Minimap2. On the other hand, most Point-of-Care computational workflows in long read sequencing use Graphics Processing Units (GPUs). We present minimap2-accelerated (mm2-ax), a heterogeneous design for sequence mapping and alignment where minimap2's compute-intensive chaining step is sped up on the GPU and demonstrate its time and cost benefits. We extract better intra-read parallelism from chaining without losing mapping accuracy by forward transforming Minimap2's chaining algorithm. Moreover, we better utilize the high memory available on modern cloud instances apart from better workload balancing, data locality and minimal branch divergence on the GPU. We show mm2-ax on an NVIDIA A100 GPU improves the chaining step with a 5.41-2.57X speedup and a 4.07-1.93X speedup:costup over the fastest version of Minimap2, mm2-fast, benchmarked on a Google Cloud Platform instance of 30 SIMD cores.}, } @article {pmid36936667, year = {2023}, author = {Namoun, A and Tufail, A and Nawas, W and BenRhouma, O and Alshanqiti, A}, title = {A Systematic Literature Review on Service Composition for People with Disabilities: Taxonomies, Solutions, and Open Research Challenges.}, journal = {Computational intelligence and neuroscience}, volume = {2023}, number = {}, pages = {5934548}, pmid = {36936667}, issn = {1687-5273}, abstract = {Integrating smart heterogeneous objects, IoT devices, data sources, and software services to produce new business processes and functionalities continues to attract considerable attention from the research community due to its unrivaled advantages, including reusability, adaptation, distribution, and pervasiveness. However, the exploitation of service-oriented computing technologies (e.g., SOC, SOA, and microservice architectures) by people with special needs is underexplored and often overlooked. Furthermore, the existing challenges in this area are yet to be identified clearly. 
This research study presents a rigorous literature survey of the recent advances in service-oriented composition approaches and solutions for disabled people, their domains of application, and the major challenges, covering studies published between January 2010 and October 2022. To this end, we applied the systematic literature review (SLR) methodology to retrieve and collate only the articles presenting and discussing service composition solutions tailored to produce digitally accessible services for consumption by people who suffer from an impairment or loss of some physical or mental functions. We searched six renowned bibliographic databases, particularly IEEE Xplore, Web of Science, Springer Link, ACM Library, ScienceDirect, and Google Scholar, to synthesize a final pool of 38 related articles. Our survey contributes a comprehensive taxonomy of service composition solutions, techniques, and practices that are utilized to create assistive technologies and services. The seven-facet taxonomy helps researchers and practitioners to quickly understand and analyze the fundamental conceptualizations and characteristics of accessible service composition for people with disabilities. Key findings showed that services are fused to assist disabled persons to carry out their daily activities, mainly in smart homes and ambient intelligent environments. Despite the emergence of immersive technologies (e.g., wearable computing), user-service interactions are enabled primarily through tactile and speech modalities. Service descriptions mainly incorporate functional features (e.g., performance, latency, and cost) of service quality, largely ignoring accessibility features. Moreover, the outstanding research problems revolve around (1) the unavailability of assistive services datasets, (2) the underspecification of accessibility aspects of disabilities, (3) the weak adoption of accessible and universal design practices, (4) the abstraction of service composition approaches, and (5) the rare experimental testing of composition approaches with disabled users. We conclude our survey with a set of guidelines to realize effective assistive service composition in IoT and cloud environments. Researchers and practitioners are advised to create assistive services that support the social relationships of disabled users and model their accessibility needs as part of the quality of service (QoS). Moreover, they should exploit AI/ML models to address the evolving requirements of disabled users in their unique environments. Furthermore, weaknesses of service composition solutions and research challenges are exposed as notable opportunities for future research.}, } @article {pmid36923109, year = {2023}, author = {Wu, H}, title = {Sharing and Cooperation of Improved Cross-Entropy Optimization Algorithm in Telemedicine Multimedia Information Processing.}, journal = {International journal of telemedicine and applications}, volume = {2023}, number = {}, pages = {7353489}, pmid = {36923109}, issn = {1687-6415}, abstract = {In order to improve the efficiency of medical multimedia information sharing, this paper combines cloud computing technology and SOA (service-oriented architecture) technology to build a medical multimedia information sharing system. Building a medical information sharing platform requires integrating information resources stored in information systems of medical institutions and nonmedical information systems related to medical information and forming a huge resource pool. 
It is important to mine and analyze the information resources in the resource pool to realize the sharing and interaction of medical information. To this end, this paper proposes a gain-adaptive control algorithm with online adjustable parameters and investigates the extension of the mutual entropy optimization algorithm in the control domain and its integrated processing capability in the process of medical multimedia information processing. In addition, this paper constructs a medical multimedia information sharing and collaboration platform with medical multimedia information sharing and telemedicine as the core and verifies the effectiveness of the platform through experiments. The simulation results and comparison results with other systems prove that the system in this paper can realize fast data processing, retrieve and analyze massive data, and meet the demand of remote intelligent diagnosis under the premise of safety and stability. Meanwhile, the system in this paper can help hospitals achieve fast and accurate diagnosis, which has strong theoretical and practical values.}, } @article {pmid36937228, year = {2017}, author = {Navas-Molina, JA and Hyde, ER and Sanders, J and Knight, R}, title = {The Microbiome and Big Data.}, journal = {Current opinion in systems biology}, volume = {4}, number = {}, pages = {92-96}, pmid = {36937228}, issn = {2452-3100}, abstract = {Microbiome datasets have expanded rapidly in recent years. Advances in DNA sequencing, as well as the rise of shotgun metagenomics and metabolomics, are producing datasets that exceed the ability of researchers to analyze them on their personal computers. Here we describe what Big Data is in the context of microbiome research, how this data can be transformed into knowledge about microbes and their functions in their environments, and how the knowledge can be applied to move microbiome research forward. In particular, the development of new high-resolution tools to assess strain-level variability (moving away from OTUs), the advent of cloud computing and centralized analysis resources such as Qiita (for sequences) and GNPS (for mass spectrometry), and better methods for curating and describing "metadata" (contextual information about the sequence or chemical information) are rapidly assisting the use of microbiome data in fields ranging from human health to environmental studies.}, } @article {pmid36914133, year = {2023}, author = {Aman, MA and Chu, HJ}, title = {Long-term river extent dynamics and transition detection using remote sensing: Case studies of Mekong and Ganga River.}, journal = {The Science of the total environment}, volume = {}, number = {}, pages = {162774}, doi = {10.1016/j.scitotenv.2023.162774}, pmid = {36914133}, issn = {1879-1026}, abstract = {Currently, understanding river dynamics is limited to either bankline or reach-wise scale studies. Monitoring large-scale and long-term river extent dynamics provides fundamental insights relevant to the impact of climatic factors and anthropogenic activities on fluvial geomorphology. This study analyzed the two most populous rivers, Ganga and Mekong, to understand the river extent dynamics using 32 years of Landsat satellite data (1990-2022) in a cloud computing platform. This study categorizes river dynamics and transitions using the combination of pixel-wise water frequency and temporal trends. This approach can demarcate the river channel stability, areas affected by erosion and sedimentation, and the seasonal transitions in the river. 
The results show that the Ganga river channel is relatively unstable and very prone to meandering and migration, as almost 40 % of the river channel has been altered in the past 32 years. The seasonal transitions, such as lost seasonal and seasonal-to-permanent changes, are more prominent in the Ganga river, and the dominance of meandering and sedimentation in the lower course is also illustrated. In contrast, the Mekong river has a more stable course, with erosion and sedimentation observed at sparse locations in the lower course. However, the lost seasonal and seasonal-to-permanent changes are also dominant in the Mekong river. Since 1990, the Ganga and Mekong rivers have lost approximately 13.3 % and 4.7 % of their seasonal water, respectively, as compared to the other transitions and categories. Factors such as climate change, floods, and man-made reservoirs could all be critical in triggering these morphological changes.}, } @article {pmid36913423, year = {2023}, author = {Paulraj, D and Sethukarasi, T and Neelakandan, S and Prakash, M and Baburaj, E}, title = {An Efficient Hybrid Job Scheduling Optimization (EHJSO) approach to enhance resource search using Cuckoo and Grey Wolf Job Optimization for cloud environment.}, journal = {PloS one}, volume = {18}, number = {3}, pages = {e0282600}, doi = {10.1371/journal.pone.0282600}, pmid = {36913423}, issn = {1932-6203}, abstract = {Cloud computing has now evolved as an unavoidable technology in the fields of finance, education, internet business, and nearly all organisations. Cloud resources are practically accessible to cloud users over the internet to accomplish their desired tasks. The effectiveness and efficacy of cloud computing services depend on the tasks that cloud users submit and the time taken to complete them. By optimising resource allocation and utilisation, task scheduling is crucial to enhancing the effectiveness and performance of a cloud system. In this context, cloud computing offers a wide range of advantages, such as cost savings, security, flexibility, mobility, quality control, disaster recovery, automatic software upgrades, and sustainability. According to a recent research survey, more and more tech-savvy companies and industry executives are recognizing and utilizing the advantages of cloud computing. Hence, as the number of cloud users increases, so does the need to regulate resource allocation. However, the scheduling of jobs in the cloud necessitates a smart and fast algorithm that can discover the resources that are accessible and schedule the jobs requested by different users. Consequently, for better resource allocation and job scheduling, a fast, efficient, and fault-tolerant job scheduling algorithm is required. Efficient Hybrid Job Scheduling Optimization (EHJSO) utilises Cuckoo Search Optimization and Grey Wolf Job Optimization (GWO). The cuckoo search optimization approach was inspired by the obligate brood parasitism of some cuckoo species, which lay their eggs in the nests of other species. Grey wolf optimization (GWO) is a population-oriented AI technique inspired by the social structure and hunting strategies of grey wolves. Makespan, computation time, fitness, iteration-based performance, and success rate were utilised for comparison with previous studies. 
Experiments show that the recommended method is superior.}, } @article {pmid36910722, year = {2023}, author = {Yang, M and Ge, C and Zhao, X and Kou, H}, title = {FSPLO: a fast sensor placement location optimization method for cloud-aided inspection of smart buildings.}, journal = {Journal of cloud computing (Heidelberg, Germany)}, volume = {12}, number = {1}, pages = {31}, pmid = {36910722}, issn = {2192-113X}, abstract = {With the awakening of health awareness, people are raising a series of health-related requirements for the buildings they live in, with a view to improving their living conditions. In this context, BIM (Building Information Modeling) makes full use of cutting-edge theories and technologies in many domains, such as health, environment, and information technology, to provide a new way for engineers to design and build various healthy and green buildings. Specifically, sensors are playing an important role in achieving smart building goals by monitoring the surroundings of buildings, objects, and people with the help of cloud computing technology. In addition, it is necessary to quickly determine the optimal sensor placement to save energy and minimize the number of sensors for a building, which is a nontrivial task for the cloud platform due to the limited number of sensors available and the massive candidate locations for each sensor. In this paper, we propose a Fast Sensor Placement Location Optimization approach (FSPLO) to solve the BIM problem in cloud-aided smart buildings. In particular, FSPLO quickly filters out repeated candidate locations of sensors using Locality Sensitive Hashing (LSH) techniques to maintain only a small number of optimized locations for deploying sensors around buildings. In this way, we can significantly reduce the number of sensors used for healthy and green buildings. Finally, a set of simulation experiments demonstrates the excellent performance of our proposed FSPLO method.}, } @article {pmid36904959, year = {2023}, author = {Salat, L and Davis, M and Khan, N}, title = {DNS Tunnelling, Exfiltration and Detection over Cloud Environments.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {5}, pages = {}, pmid = {36904959}, issn = {1424-8220}, abstract = {The domain name system (DNS) protocol is fundamental to the operation of the internet; however, in recent years various methodologies have been developed that enable DNS attacks on organisations. In the last few years, the increased use of cloud services by organisations has created further security challenges as cyber criminals use numerous methodologies to exploit cloud services, configurations, and the DNS protocol. In this paper, two different DNS tunnelling methods, Iodine and DNScat, have been conducted in the cloud environment (Google and AWS) and positive results of exfiltration have been achieved under different firewall configurations. Detection of malicious use of the DNS protocol can be a challenge for organisations with limited cybersecurity support and expertise. In this study, various DNS tunnelling detection techniques were utilised in a cloud environment to create an effective monitoring system with a reliable detection rate, low implementation cost, and ease of use for organisations with limited detection capabilities. The Elastic stack (an open-source framework) was used to configure a DNS monitoring system and to analyse the collected DNS logs. Furthermore, payload and traffic analysis techniques were implemented to identify different tunnelling methods. 
This cloud-based monitoring system offers various detection techniques that can be used to monitor the DNS activities of any network and is especially accessible to small organisations. Moreover, the Elastic stack is open-source and has no limitation with regard to the amount of data that can be uploaded daily.}, } @article {pmid36904927, year = {2023}, author = {Saban, M and Bekkour, M and Amdaouch, I and El Gueri, J and Ait Ahmed, B and Chaari, MZ and Ruiz-Alzola, J and Rosado-Muñoz, A and Aghzout, O}, title = {A Smart Agricultural System Based on PLC and a Cloud Computing Web Application Using LoRa and LoRaWan.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {5}, pages = {}, doi = {10.3390/s23052725}, pmid = {36904927}, issn = {1424-8220}, abstract = {The increasing challenges of agricultural processes and the growing demand for food globally are driving the industrial agriculture sector to adopt the concept of 'smart farming'. Smart farming systems, with their real-time management and high level of automation, can greatly improve productivity, food safety, and efficiency in the agri-food supply chain. This paper presents a customized smart farming system that uses a low-cost, low-power, and wide-range wireless sensor network based on Internet of Things (IoT) and Long Range (LoRa) technologies. In this system, LoRa connectivity is integrated with existing Programmable Logic Controllers (PLCs), which are commonly used in industry and farming to control multiple processes, devices, and machinery through the Simatic IOT2040. The system also includes a newly developed web-based monitoring application hosted on a cloud server, which processes data collected from the farm environment and allows for remote visualization and control of all connected devices. A Telegram bot is included for automated communication with users through this mobile messaging app. The proposed network structure has been tested, and the path loss in the wireless LoRa network is evaluated.}, } @article {pmid36904909, year = {2023}, author = {Lin, HY and Tsai, TT and Ting, PY and Fan, YR}, title = {Identity-Based Proxy Re-Encryption Scheme Using Fog Computing and Anonymous Key Generation.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {5}, pages = {}, doi = {10.3390/s23052706}, pmid = {36904909}, issn = {1424-8220}, abstract = {In the fog computing architecture, a fog is a node closer to clients that is responsible for responding to users' requests as well as forwarding messages to clouds. In some medical applications, such as remote healthcare, a patient's sensor will first send encrypted data of sensed information to a nearby fog so that the fog, acting as a re-encryption proxy, can generate a re-encrypted ciphertext designated for requested data users in the cloud. Specifically, a data user can request access to cloud ciphertexts by sending a query to the fog node, which will forward this query to the corresponding data owner, who preserves the right to grant or deny permission to access his/her data. When the access request is granted, the fog node will obtain a unique re-encryption key for carrying out the re-encryption process. Although some previous concepts have been proposed to fulfill these application requirements, they either have known security flaws or incur higher computational complexity. In this work, we present an identity-based proxy re-encryption scheme on the basis of the fog computing architecture. 
Our identity-based mechanism uses public channels for key distribution and avoids the troublesome problem of key escrow. We also formally prove that the proposed protocol is secure in the IND-PrID-CPA notion. Furthermore, we show that our work exhibits better performance in terms of computational complexity.}, } @article {pmid36904869, year = {2023}, author = {Lin, HY}, title = {Secure Data Transfer Based on a Multi-Level Blockchain for Internet of Vehicles.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {5}, pages = {}, doi = {10.3390/s23052664}, pmid = {36904869}, issn = {1424-8220}, abstract = {Because the blockchain and the Internet of vehicles are both decentralized, each is well suited to the architecture of the other. This study proposes a multi-level blockchain framework to protect information security on the Internet of vehicles. The main motivation of this study is to propose a new transaction block that ensures the identity of traders and the non-repudiation of transactions through the elliptic curve digital signature algorithm (ECDSA). The designed multi-level blockchain architecture distributes operations between the intra-cluster blockchain and the inter-cluster blockchain to improve the efficiency of the entire block. On the cloud computing platform, we exploit a threshold key management protocol, and the system can recover the system key as long as a threshold number of partial keys is collected. This avoids a PKI single point of failure. Thus, the proposed architecture ensures the security of OBU-RSU-BS-VM. The proposed multi-level blockchain framework consists of a block, an intra-cluster blockchain, and an inter-cluster blockchain. The roadside unit (RSU) is responsible for the communication of vehicles in the vicinity, similar to a cluster head on the Internet of vehicles. This study exploits the RSU to manage the block, the base station is responsible for managing the intra-cluster blockchain named intra_clusterBC, and the cloud server at the back end is responsible for the entire system blockchain named inter_clusterBC. Finally, the RSUs, base stations, and cloud servers cooperatively construct the multi-level blockchain framework, improving both the security and the efficiency of the blockchain's operation. Overall, to protect the security of the blockchain's transaction data, we propose a new transaction block structure and adopt the elliptic curve digital signature algorithm (ECDSA) to ensure that the Merkle tree root value is not changed and to ensure the identity and non-repudiation of transaction data. Finally, this study considers information security in a cloud environment, and we therefore propose a secret-sharing and secure-map-reducing architecture based on the identity confirmation scheme. The proposed scheme with decentralization is very suitable for distributed connected vehicles and can also improve the execution efficiency of the blockchain.}, } @article {pmid36904852, year = {2023}, author = {Vitali, G and Arru, M and Magnanini, E}, title = {A Scalable Device for Undisturbed Measurement of Water and CO2 Fluxes through Natural Surfaces.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {5}, pages = {}, doi = {10.3390/s23052647}, pmid = {36904852}, issn = {1424-8220}, abstract = {In a climate change scenario and under the growing interest in Precision Agriculture, it is increasingly important to map and record seasonal trends in the respiration of cropland and natural surfaces. 
Ground-level sensors to be placed in the field or integrated into autonomous vehicles are of growing interest. In this scope, a low-power IoT-compliant device for the measurement of multiple surface CO2 and water vapor (WV) concentrations has been designed and developed. The device is described and tested under controlled and field conditions, showing ready and easy access to collected values typical of a cloud-computing-based approach. The device proved to be usable in indoor and open-air environments for a long time, and the sensors were arranged in multiple configurations to evaluate simultaneous concentrations and flows, while the low-cost, low-power (LP IoT-compliant) design is achieved by a specific design of the printed circuit board and a firmware code fitting the characteristics of the controller.}, } @article {pmid36904779, year = {2023}, author = {Kwon, Y and Kim, W and Jung, I}, title = {Neural Network Models for Driving Control of Indoor Autonomous Vehicles in Mobile Edge Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {5}, pages = {}, doi = {10.3390/s23052575}, pmid = {36904779}, issn = {1424-8220}, abstract = {Mobile edge computing has been proposed as a solution for solving the latency problem of traditional cloud computing. In particular, mobile edge computing is needed in areas such as autonomous driving, which requires large amounts of data to be processed without latency for safety. Indoor autonomous driving is attracting attention as one of the mobile edge computing services. Furthermore, it relies on its sensors for location recognition because indoor autonomous driving cannot use a GPS device, as is the case with outdoor driving. However, while the autonomous vehicle is being driven, the real-time processing of external events and the correction of errors are required for safety. Furthermore, an efficient autonomous driving system is required because it is a mobile environment with resource constraints. This study proposes neural network models as a machine-learning method for autonomous driving in an indoor environment. The neural network model predicts the most appropriate driving command for the current location based on the range data measured with the LiDAR sensor. We designed six neural network models to be evaluated according to the number of input data points. In addition, we made an autonomous vehicle based on the Raspberry Pi for driving and learning and an indoor circular driving track for collecting data and performance evaluation. Finally, we evaluated six neural network models in terms of confusion matrix, response time, battery consumption, and driving command accuracy. In addition, when neural network learning was applied, the effect of the number of inputs was confirmed in the usage of resources.}, } @article {pmid36904650, year = {2023}, author = {Kumar, MS and Karri, GR}, title = {EEOA: Cost and Energy Efficient Task Scheduling in a Cloud-Fog Framework.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {5}, pages = {}, doi = {10.3390/s23052445}, pmid = {36904650}, issn = {1424-8220}, abstract = {Cloud-fog computing is a wide range of service environments created to provide quick, flexible services to customers, and the phenomenal growth of the Internet of Things (IoT) has produced an immense amount of data on a daily basis. 
To complete tasks and meet service-level agreement (SLA) commitments, the provider assigns appropriate resources and employs scheduling techniques to efficiently manage the execution of received IoT tasks in fog or cloud systems. The effectiveness of cloud services is directly impacted by some other important criteria, such as energy usage and cost, which are not taken into account by many of the existing methodologies. To resolve the aforementioned problems, an effective scheduling algorithm is required to schedule the heterogeneous workload and enhance the quality of service (QoS). Therefore, a nature-inspired multi-objective task scheduling algorithm called the electric earthworm optimization algorithm (EEOA) is proposed in this paper for IoT requests in a cloud-fog framework. This method was created using the combination of the earthworm optimization algorithm (EOA) and the electric fish optimization algorithm (EFO) to improve EFO's potential to be exploited while looking for the best solution to the problem at hand. Concerning execution time, cost, makespan, and energy consumption, the suggested scheduling technique's performance was assessed using significant instances of real-world workloads such as CEA-CURIE and HPC2N. Based on simulation results, our proposed approach improves efficiency by 89%, reduces energy consumption by 94%, and lowers total cost by 87% compared with existing algorithms for the scenarios considered using different benchmarks. Detailed simulations demonstrate that the suggested approach provides a superior scheduling scheme with better results than the existing scheduling techniques.}, } @article {pmid36904580, year = {2023}, author = {Kalinagac, O and Gür, G and Alagöz, F}, title = {Prioritization Based Task Offloading in UAV-Assisted Edge Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {5}, pages = {}, doi = {10.3390/s23052375}, pmid = {36904580}, issn = {1424-8220}, abstract = {Under demanding operational conditions such as traffic surges, coverage issues, and low latency requirements, terrestrial networks may become inadequate to provide the expected service levels to users and applications. Moreover, when natural disasters or physical calamities occur, the existing network infrastructure may collapse, leading to formidable challenges for emergency communications in the area served. In order to provide wireless connectivity as well as facilitate a capacity boost under transient high service load situations, a substitute or auxiliary fast-deployable network is needed. Unmanned Aerial Vehicle (UAV) networks are well suited for such needs thanks to their high mobility and flexibility. In this work, we consider an edge network consisting of UAVs equipped with wireless access points. These software-defined network nodes serve a latency-sensitive workload of mobile users in an edge-to-cloud continuum setting. We investigate prioritization-based task offloading to support prioritized services in this on-demand aerial network. To serve this end, we construct an offloading management optimization model to minimize the overall penalty due to priority-weighted delay against task deadlines. Since the defined assignment problem is NP-hard, we also propose three heuristic algorithms as well as a branch and bound style quasi-optimal task offloading algorithm and investigate how the system performs under different operating conditions by conducting simulation-based experiments. 
Moreover, we contributed an open-source extension to Mininet-WiFi that provides independent Wi-Fi mediums, which are required for simultaneous packet transfers on different Wi-Fi mediums.}, } @article {pmid36900055, year = {2023}, author = {Barany, L and Hore, N and Stadlbauer, A and Buchfelder, M and Brandner, S}, title = {Prediction of the Topography of the Corticospinal Tract on T1-Weighted MR Images Using Deep-Learning-Based Segmentation.}, journal = {Diagnostics (Basel, Switzerland)}, volume = {13}, number = {5}, pages = {}, doi = {10.3390/diagnostics13050911}, pmid = {36900055}, issn = {2075-4418}, abstract = {INTRODUCTION: Tractography is an invaluable tool in the planning of tumor surgery in the vicinity of functionally eloquent areas of the brain as well as in the research of normal development or of various diseases. The aim of our study was to compare the performance of deep-learning-based image segmentation with that of manual segmentation in predicting the topography of white matter tracts on T1-weighted MR images.

METHODS: T1-weighted MR images of 190 healthy subjects from 6 different datasets were utilized in this study. Using deterministic diffusion tensor imaging, we first reconstructed the corticospinal tract on both sides. After training a segmentation model on 90 subjects of the PIOP2 dataset using the nnU-Net in a cloud-based environment with graphical processing unit (Google Colab), we evaluated its performance using 100 subjects from 6 different datasets.

RESULTS: Our algorithm created a segmentation model that predicted the topography of the corticospinal pathway on T1-weighted images in healthy subjects. The average Dice score on the validation dataset was 0.5479 (range: 0.3513-0.7184).
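For reference, the Dice score reported above is the standard overlap measure between a predicted and a reference mask, Dice = 2|A ∩ B| / (|A| + |B|). A minimal sketch of its computation on binary masks follows; the toy masks are assumptions for illustration.

```python
# Dice = 2|A ∩ B| / (|A| + |B|) between two binary segmentation masks.
import numpy as np

def dice_score(pred, ref):
    """Overlap between binary masks; 1.0 means a perfect match."""
    pred, ref = pred.astype(bool), ref.astype(bool)
    denom = pred.sum() + ref.sum()
    return 2.0 * np.logical_and(pred, ref).sum() / denom if denom else 1.0

pred = np.zeros((4, 4)); pred[1:3, 1:3] = 1   # toy predicted mask
ref  = np.zeros((4, 4)); ref[1:3, 1:4] = 1    # toy reference mask
print(round(dice_score(pred, ref), 4))        # -> 0.8
```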

CONCLUSIONS: Deep-learning-based segmentation could be applicable in the future to predict the location of white matter pathways in T1-weighted scans.}, } @article {pmid36899558, year = {2023}, author = {Zhang, H and Wang, P and Zhang, S and Wu, Z}, title = {An adaptive offloading framework for license plate detection in collaborative edge and cloud computing.}, journal = {Mathematical biosciences and engineering : MBE}, volume = {20}, number = {2}, pages = {2793-2814}, doi = {10.3934/mbe.2023131}, pmid = {36899558}, issn = {1551-0018}, abstract = {With the explosive growth of edge computing, huge amounts of data are being generated in billions of edge devices. Balancing detection efficiency and detection accuracy at the same time for object detection on multiple edge devices is very difficult. However, few studies have investigated and improved the collaboration between cloud computing and edge computing considering realistic challenges, such as limited computation capacities, network congestion and long latency. To tackle these challenges, we propose a new multi-model license plate detection hybrid methodology that trades off efficiency against accuracy to process the tasks of license plate detection at the edge nodes and the cloud server. We also design a new probability-based offloading initialization algorithm that not only obtains reasonable initial solutions but also facilitates the accuracy of license plate detection. In addition, we introduce an adaptive offloading framework by gravitational genetic searching algorithm (GGSA), which can comprehensively consider influential factors such as license plate detection time, queuing time, energy consumption, image quality, and accuracy. GGSA is helpful for Quality-of-Service (QoS) enhancement. Extensive experiments show that our proposed GGSA offloading framework exhibits good performance in collaborative edge and cloud computing of license plate detection compared with other methods. They demonstrate that, compared with the traditional scheme in which all tasks are executed on the cloud server (AC), GGSA improves the offloading effect by 50.31%. Besides, the offloading framework has strong portability when making real-time offloading decisions.}, } @article {pmid36878917, year = {2023}, author = {Grossman, RL}, title = {Ten lessons for data sharing with a data commons.}, journal = {Scientific data}, volume = {10}, number = {1}, pages = {120}, pmid = {36878917}, issn = {2052-4463}, abstract = {A data commons is a cloud-based data platform with a governance structure that allows a community to manage, analyze and share its data. Data commons provide a research community with the ability to manage and analyze large datasets using the elastic scalability provided by cloud computing and to share data securely and compliantly, and, in this way, accelerate the pace of research. Over the past decade, a number of data commons have been developed and we discuss some of the lessons learned from this effort.}, } @article {pmid36867158, year = {2023}, author = {Kumar, D and Mandal, N and Kumar, Y}, title = {Cloud-Based Advanced Shuffled Frog Leaping Algorithm for Tasks Scheduling.}, journal = {Big data}, volume = {}, number = {}, pages = {}, doi = {10.1089/big.2022.0095}, pmid = {36867158}, issn = {2167-647X}, abstract = {In recent years, the world has seen incremental growth in online activities, owing to which the volume of data in cloud servers has also been increasing exponentially.
This rapidly increasing volume of data has raised the load on cloud servers in the cloud computing environment. As technology has evolved, various cloud-based systems have been developed to enhance the user experience, but growing online activity around the globe has also increased the data load on these systems. To maintain the efficiency and performance of the applications hosted in cloud servers, task scheduling has become very important. The task scheduling process reduces makespan time and average cost by assigning incoming tasks to virtual machines (VMs) according to a scheduling algorithm, and many researchers have proposed different scheduling algorithms for the cloud computing environment. In this article, an advanced form of the shuffled frog optimization algorithm, which is modeled on the behavior of frogs searching for food, is proposed. The authors introduce a new algorithm to shuffle the positions of frogs in a memeplex to obtain the best result. By using this optimization technique, the cost function of the central processing unit, makespan, and fitness function were calculated. The fitness function is the sum of the budget cost function and the makespan time. The proposed method helps in reducing the makespan time as well as the average cost by scheduling the tasks to VMs effectively. Finally, the performance of the proposed advanced shuffled frog optimization method is compared with existing task scheduling methods such as whale optimization-based scheduler (W-Scheduler), sliced particle swarm optimization (SPSO-SA), inverted ant colony optimization algorithm, and static learning particle swarm optimization (SLPSO-SA) in terms of average cost and metric makespan. Experimentally, it was concluded that the proposed advanced frog optimization algorithm can schedule tasks to the VMs more effectively as compared with other scheduling methods with a makespan of 6, average cost of 4, and fitness of 10.}, } @article {pmid36860419, year = {2023}, author = {Singh, J and Chen, J and Singh, SP and Singh, MP and Hassan, MM and Hassan, MM and Awal, H}, title = {Load-Balancing Strategy: Employing a Capsule Algorithm for Cutting Down Energy Consumption in Cloud Data Centers for Next Generation Wireless Systems.}, journal = {Computational intelligence and neuroscience}, volume = {2023}, number = {}, pages = {6090282}, pmid = {36860419}, issn = {1687-5273}, mesh = {*Algorithms ; *Cloud Computing ; Data Accuracy ; Electric Power Supplies ; Happiness ; }, abstract = {Per-user pricing is possible with cloud computing, a relatively new technology. It provides remote testing and commissioning services through the web, and it utilizes virtualization to make computing resources available. In order to host and store firm data, cloud computing relies on data centers. Data centers are made up of networked computers, cables, power supplies, and other components. Cloud data centers have always had to prioritise high performance over energy efficiency. The biggest obstacle is finding a happy medium between system performance and energy consumption, namely, lowering energy use without compromising system performance or service quality. The results reported here were obtained using the PlanetLab dataset. In order to implement the strategy we recommend, it is crucial to get a complete picture of how energy is being consumed in the cloud.
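To make the memeplex shuffling in the Kumar, Mandal, and Kumar record (pmid 36867158) concrete, the following is a minimal sketch of one classical shuffled-frog-leaping step: sort the population, deal frogs into memeplexes round-robin, and leap each memeplex's worst frog toward its best. The quadratic toy objective stands in for the paper's makespan-plus-cost fitness; population sizes and iteration counts are assumptions.

```python
# Minimal shuffled-frog-leaping sketch; all parameters are assumed.
import numpy as np

rng = np.random.default_rng(1)
n_frogs, dim, n_memeplexes, iters = 30, 10, 3, 50

def fitness(x):
    """Toy stand-in for the paper's makespan + budget-cost objective."""
    return float(np.sum(x ** 2))

frogs = rng.uniform(-5, 5, (n_frogs, dim))
for _ in range(iters):
    order = np.argsort([fitness(f) for f in frogs])      # best first
    frogs = frogs[order]
    # Deal sorted frogs into memeplexes round-robin, as in classical SFLA.
    for m in range(n_memeplexes):
        idx = np.arange(m, n_frogs, n_memeplexes)
        best, worst = idx[0], idx[-1]
        step = rng.random() * (frogs[best] - frogs[worst])
        cand = frogs[worst] + step                       # leap toward local best
        if fitness(cand) < fitness(frogs[worst]):
            frogs[worst] = cand                          # keep only improvements
print(round(fitness(frogs[0]), 6))
```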
Using proper optimization criteria and guided by energy consumption models, this article offers the Capsule Significance Level of Energy Consumption (CSLEC) pattern, which demonstrates how to conserve more energy in cloud data centers. The capsule optimization prediction phase achieves an F1-score of 96.7 percent and a data accuracy of 97 percent, allowing more precise projections of future values.}, } @article {pmid36855338, year = {2023}, author = {Calcaterra, D and Tomarchio, O}, title = {Policy-Based Holistic Application Management with BPMN and TOSCA.}, journal = {SN computer science}, volume = {4}, number = {3}, pages = {232}, pmid = {36855338}, issn = {2661-8907}, abstract = {With the wide adoption of cloud computing across technology industries and research institutions, an ever-growing interest in cloud orchestration frameworks has emerged over the past few years. These orchestration frameworks enable the automated provisioning and decommissioning of cloud applications in a timely and efficient manner, but they offer limited or no support for application management. While management functionalities, such as configuring, monitoring and scaling single components, can be directly covered by cloud providers and configuration management tools, holistic management features, such as backing up, testing and updating multiple components, cannot be automated using these approaches. In this paper, we propose a concept to automatically generate executable holistic management workflows based on the TOSCA standard. The practical feasibility of the approach is validated through a prototype implementation and a case study.}, } @article {pmid36852030, year = {2023}, author = {Manconi, A and Gnocchi, M and Milanesi, L and Marullo, O and Armano, G}, title = {Framing Apache Spark in life sciences.}, journal = {Heliyon}, volume = {9}, number = {2}, pages = {e13368}, pmid = {36852030}, issn = {2405-8440}, abstract = {Advances in high-throughput and digital technologies have required the adoption of big data for handling complex tasks in life sciences. However, the shift to big data has confronted researchers with technical and infrastructural challenges in storing, sharing, and analysing it. Such tasks require distributed computing systems and algorithms able to ensure efficient processing. Cutting-edge distributed programming frameworks make it possible to implement flexible algorithms that adapt the computation to the data on on-premise HPC clusters or cloud architectures. In this context, Apache Spark is a very powerful HPC engine for large-scale data processing on clusters. Thanks also to specialised libraries for working with structured and relational data, it supports machine learning, graph-based computation, and stream processing. This review article is aimed at helping life sciences researchers to ascertain the features of Apache Spark and to assess whether it can be successfully used in their research activities.}, } @article {pmid36850940, year = {2023}, author = {Antonini, M and Pincheira, M and Vecchio, M and Antonelli, F}, title = {An Adaptable and Unsupervised TinyML Anomaly Detection System for Extreme Industrial Environments.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {4}, pages = {}, pmid = {36850940}, issn = {1424-8220}, abstract = {Industrial assets often feature multiple sensing devices to keep track of their status by monitoring certain physical parameters.
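As an illustration of the distributed processing the Manconi et al. review (pmid 36852030) surveys, the following is a minimal PySpark job: a grouped aggregation over a large tabular dataset that Spark executes in parallel across a cluster. The file name and column names are assumptions for illustration.

```python
# Minimal PySpark sketch: distributed aggregation over tabular data.
# "variants.csv" and its columns ("chrom", "qual") are assumed names.
from pyspark.sql import SparkSession
from pyspark.sql import functions as F

spark = SparkSession.builder.appName("life-sciences-demo").getOrCreate()

variants = spark.read.csv("variants.csv", header=True, inferSchema=True)

# Count records per chromosome and average quality, computed in parallel.
summary = (variants
           .groupBy("chrom")
           .agg(F.count("*").alias("n_variants"),
                F.avg("qual").alias("mean_qual"))
           .orderBy("chrom"))
summary.show()
spark.stop()
```

The same code runs unchanged on a laptop, an on-premise HPC cluster, or a cloud-managed Spark service, which is the portability argument the review makes.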
These readings can be analyzed with machine learning (ML) tools to identify potential failures through anomaly detection, allowing operators to take appropriate corrective actions. Typically, these analyses are conducted on servers located in data centers or the cloud. However, this approach increases system complexity and is susceptible to failure in cases where connectivity is unavailable. Furthermore, this communication restriction limits the approach's applicability in extreme industrial environments where operating conditions affect communication and access to the system. This paper proposes and evaluates an end-to-end adaptable and configurable anomaly detection system that uses the Internet of Things (IoT), edge computing, and Tiny-MLOps methodologies in an extreme industrial environment such as submersible pumps. The system runs on an IoT sensing kit, based on an ESP32 microcontroller and MicroPython firmware, located near the data source. The processing pipeline on the sensing device collects data, trains an anomaly detection model, and alerts an external gateway in the event of an anomaly. The anomaly detection model uses the isolation forest algorithm, which can be trained on the microcontroller in just 1.2 to 6.4 s and detect an anomaly in less than 16 milliseconds with an ensemble of 50 trees and 80 KB of RAM. Additionally, the system employs blockchain technology to provide a transparent and irrefutable repository of anomalies.}, } @article {pmid36850847, year = {2023}, author = {Luo, G and He, B and Xiong, Y and Wang, L and Wang, H and Zhu, Z and Shi, X}, title = {An Optimized Convolutional Neural Network for the 3D Point-Cloud Compression.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {4}, pages = {}, pmid = {36850847}, issn = {1424-8220}, abstract = {Because 3D point-cloud models occupy tremendous volumes of data, balancing a high compression ratio, a low distortion rate, and computing cost is a significant issue in point-cloud compression for virtual reality (VR). Convolutional neural networks have been used in numerous point-cloud compression research approaches during the past few years in an effort to advance the state of the art. In this work, we evaluated the effects of different network parameters, including neural network depth, stride, and activation function, on point-cloud compression, resulting in an optimized convolutional neural network for compression. We first analyzed earlier research on point-cloud compression based on convolutional neural networks before designing our own convolutional neural network, and then tuned the model parameters using the experimental data to further enhance the effect of point-cloud compression. Based on the experimental results, we found that a neural network with 4 layers and a stride of 2, using the Sigmoid activation function, outperforms the default configuration by 208% in terms of the compression-distortion rate.
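To make the anomaly-detection core of the Antonini et al. record (pmid 36850940) concrete, the following is a desktop-scale sketch using scikit-learn's isolation forest with the 50-tree ensemble size quoted in the abstract. The on-device MicroPython implementation the paper describes necessarily differs; the simulated sensor readings here are assumptions.

```python
# Isolation-forest anomaly detection, desktop sketch of the on-device idea.
import numpy as np
from sklearn.ensemble import IsolationForest

rng = np.random.default_rng(2)
# Simulated vibration/temperature readings from a healthy pump (assumed).
normal = rng.normal(loc=[0.5, 40.0], scale=[0.05, 1.0], size=(500, 2))

# 50 trees, matching the ensemble size quoted in the abstract.
detector = IsolationForest(n_estimators=50, random_state=0).fit(normal)

new = np.array([[0.52, 40.5],    # plausible reading
                [0.95, 55.0]])   # clearly off-distribution reading
print(detector.predict(new))     # 1 = normal, -1 = anomaly
```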
The experimental results indicate that our findings are effective and generalizable, and they contribute substantially to research on point-cloud compression using convolutional neural networks.}, } @article {pmid36850846, year = {2023}, author = {Liu, S and Yang, S and Zhang, H and Wu, W}, title = {A Federated Learning and Deep Reinforcement Learning-Based Method with Two Types of Agents for Computation Offload.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {4}, pages = {}, pmid = {36850846}, issn = {1424-8220}, abstract = {With the rise of latency-sensitive and computationally intensive applications in mobile edge computing (MEC) environments, the computation offloading strategy has been widely studied to meet the low-latency demands of these applications. However, the uncertainty of various tasks and the time-varying conditions of wireless networks make it difficult for mobile devices to make efficient decisions. The existing methods also face the problems of long-delay decisions and user data privacy disclosures. In this paper, we present the FDRT, a federated learning and deep reinforcement learning-based method with two types of agents for computation offloading, to minimize the system latency. FDRT uses a multi-agent collaborative computation offloading strategy, namely, DRT. DRT divides the offloading decision into whether to compute tasks locally and whether to offload tasks to MEC servers. The designed DDQN agent considers the task information, its own resources, and the network status conditions of mobile devices, and the designed D3QN agent considers these conditions of all MEC servers in the collaborative cloud-edge-end MEC system; both jointly learn the optimal decision. FDRT also applies federated learning to reduce communication overhead and optimize the model training of DRT by designing a new parameter aggregation method, while protecting user data privacy. The simulation results showed that DRT effectively reduced the average task execution delay by up to 50% compared with several baselines and state-of-the-art offloading strategies. FDRT also accelerates the convergence rate of multi-agent training and reduces the training time of DRT by 61.7%.}, } @article {pmid36850813, year = {2023}, author = {Vaño, R and Lacalle, I and Sowiński, P and S-Julián, R and Palau, CE}, title = {Cloud-Native Workload Orchestration at the Edge: A Deployment Review and Future Directions.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {4}, pages = {}, pmid = {36850813}, issn = {1424-8220}, abstract = {Cloud-native computing principles such as virtualization and orchestration are key to the transition to the promising paradigm of edge computing. Challenges of containerization, operative models and scarce availability of established tools make a thorough review indispensable. Therefore, the authors have described the practical methods and tools found in the literature as well as in current community-led development projects, and have thoroughly laid out the future directions of the field. Container virtualization and its orchestration through Kubernetes have dominated the cloud computing domain, while major efforts have been recently recorded focused on the adaptation of these technologies to the edge. Such initiatives have addressed either the reduction of container engines and the development of specific tailored operating systems or the development of smaller K8s distributions and edge-focused adaptations (such as KubeEdge).
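The Liu et al. record (pmid 36850846) designs its own parameter aggregation method; for orientation, the following is a minimal sketch of the standard FedAvg aggregation that such methods build on, averaging each client's parameters weighted by local dataset size. The two-client toy parameters are assumptions, and this is not the paper's aggregation rule.

```python
# Standard FedAvg aggregation sketch (not the paper's custom method).
import numpy as np

def fedavg(client_weights, client_sizes):
    """Average per-parameter values weighted by each client's data size."""
    total = sum(client_sizes)
    return {name: sum(w[name] * (n / total)
                      for w, n in zip(client_weights, client_sizes))
            for name in client_weights[0]}

# Two clients sharing a toy two-parameter model (assumed values).
w1 = {"layer.w": np.array([1.0, 2.0]), "layer.b": np.array([0.0])}
w2 = {"layer.w": np.array([3.0, 4.0]), "layer.b": np.array([1.0])}
print(fedavg([w1, w2], client_sizes=[100, 300]))
# layer.w -> [2.5, 3.5], layer.b -> [0.75]
```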
Finally, new workload virtualization approaches, such as WebAssembly modules together with the joint orchestration of these heterogeneous workloads, seem to be the topics to pay attention to in the short to medium term.}, } @article {pmid36850794, year = {2023}, author = {Lee, S}, title = {Distributed Detection of Malicious Android Apps While Preserving Privacy Using Federated Learning.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {4}, pages = {}, pmid = {36850794}, issn = {1424-8220}, abstract = {Recently, deep learning has been widely used to solve existing computing problems through large-scale data mining. Conventional training of the deep learning model is performed on a central (cloud) server that is equipped with high computing power, by integrating data via high computational intensity. However, integrating raw data from multiple clients raises privacy concerns that are receiving increasing attention. In federated learning (FL), clients train deep learning models in a distributed fashion using their local data; instead of sending raw data to a central server, they send parameter values of the trained local model to a central server for integration. Because FL does not transmit raw data to the outside, it is free from privacy issues. In this paper, we perform an experimental study that explores the dynamics of the FL-based Android malicious app detection method under three data distributions across clients, i.e., (i) independent and identically distributed (IID), (ii) non-IID, and (iii) non-IID and unbalanced. Our experiments demonstrate that the application of FL is feasible and efficient in detecting malicious Android apps in a distributed manner on cellular networks.}, } @article {pmid36850785, year = {2023}, author = {Chang, RC and Wang, CY and Li, YH and Chiu, CD}, title = {Design of Low-Complexity Convolutional Neural Network Accelerator for Finger Vein Identification System.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {4}, pages = {}, pmid = {36850785}, issn = {1424-8220}, abstract = {In the biometric field, vein identification is a vital process that is constrained by the invisibility of veins as well as other unique features. Moreover, users generally do not wish to have their personal information uploaded to the cloud, so edge computing has become popular for the sake of protecting user privacy. In this paper, we propose a low-complexity, lightweight convolutional neural network (CNN) and design an intellectual property (IP) core to shorten the inference time in finger vein recognition. This neural network system can operate independently in client mode. After fetching the user's finger vein image via a near-infrared (NIR) camera mounted on an embedded system, vein features can be efficiently extracted by vein curving algorithms and user identification can be completed quickly. Better image quality and higher recognition accuracy can be obtained by combining several preprocessing techniques and the modified CNN. Experimental data were collected by the finger vein image capture equipment developed in our laboratory based on the specifications of similar products currently on the market.
Extensive experiments demonstrated the practicality and robustness of the proposed finger vein identification system.}, } @article {pmid36850784, year = {2023}, author = {Mohamed, AA and Abualigah, L and Alburaikan, A and Khalifa, HAE}, title = {AOEHO: A New Hybrid Data Replication Method in Fog Computing for IoT Application.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {4}, pages = {}, pmid = {36850784}, issn = {1424-8220}, abstract = {Recently, the concept of the internet of things and its services has emerged alongside cloud computing. Cloud computing is a modern technology for dealing with big data to perform specified operations. The cloud addresses the problem of selecting and placing replicas across nodes in fog computing. Previous studies focused on classical swarm intelligence and mathematical models; we therefore propose a novel hybrid method based on two modern metaheuristic algorithms. This paper combines the Aquila Optimizer (AO) algorithm with the elephant herding optimization (EHO) for solving dynamic data replication problems in the fog computing environment. In the proposed method, we present a set of objectives that determine data transmission paths, choose the least-cost path, reduce network bottlenecks and bandwidth usage, balance load, and speed up data transfer rates between nodes in cloud computing. The hybrid method, AOEHO, finds the optimal and least expensive path, determines the best replication via cloud computing, and selects optimal nodes for placing data replicas near users. Moreover, we developed a multi-objective optimization based on the proposed AOEHO to decrease the bandwidth and enhance load balancing and cloud throughput. The proposed method is evaluated based on data replication using seven criteria. These criteria are data replication access, distance, costs, availability, SBER, popularity, and the Floyd algorithm. The experimental results show the superiority of the proposed AOEHO strategy over other algorithms in terms of bandwidth, distance, load balancing, data transmission, and least-cost path.}, } @article {pmid36850763, year = {2023}, author = {da Silva, JCF and Silva, MC and Luz, EJS and Delabrida, S and Oliveira, RAR}, title = {Using Mobile Edge AI to Detect and Map Diseases in Citrus Orchards.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {4}, pages = {}, pmid = {36850763}, issn = {1424-8220}, abstract = {Deep Learning models have presented promising results when applied to Agriculture 4.0. Among other applications, these models can be used in disease detection and fruit counting. Deep Learning models usually have many layers in the architecture and millions of parameters. This aspect hinders the use of Deep Learning on mobile devices as they require a large amount of processing power for inference. In addition, the lack of high-quality Internet connectivity in the field impedes the usage of cloud computing, pushing the processing towards edge devices. This work describes the proposal of an edge AI application to detect and map diseases in citrus orchards. The proposed system has low computational demand, enabling the use of low-footprint models for both detection and classification tasks. We initially compared AI algorithms to detect fruits on trees. Specifically, we analyzed and compared YOLO and Faster R-CNN. Then, we studied lean AI models to perform the classification task. In this context, we tested and compared the performance of MobileNetV2, EfficientNetV2-B0, and NASNet-Mobile.
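The Mohamed et al. record (pmid 36850784) lists the Floyd algorithm among its evaluation criteria; as a reference point, the following is a minimal Floyd-Warshall all-pairs least-cost sketch over a small fog/cloud node graph. The link costs are assumed values for illustration.

```python
# Classic Floyd-Warshall all-pairs shortest paths; link costs are assumed.
import math

INF = math.inf
cost = [[0,   4,   INF, 5],     # symmetric link costs between 4 nodes;
        [4,   0,   1,   INF],   # INF marks absent links
        [INF, 1,   0,   2],
        [5,   INF, 2,   0]]

n = len(cost)
dist = [row[:] for row in cost]
for k in range(n):              # relax every pair through intermediate k
    for i in range(n):
        for j in range(n):
            if dist[i][k] + dist[k][j] < dist[i][j]:
                dist[i][j] = dist[i][k] + dist[k][j]

print(dist[0][2])               # least cost from node 0 to node 2 -> 5
```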
In the detection task, YOLO and Faster R-CNN had similar AI performance metrics, but YOLO was significantly faster. In the image classification task, MobileNetV2 and EfficientNetV2-B0 obtained an accuracy of 100%, while NASNet-Mobile achieved 98%. As for the timing performance, MobileNetV2 and EfficientNetV2-B0 were the best candidates, while NASNet-Mobile was significantly worse. Furthermore, MobileNetV2's timing performance was 10% better than EfficientNetV2-B0's. Finally, we provide a method for evaluating the results of these algorithms to describe disease spread, using statistical parametric models and a genetic algorithm for parameter regression. With these results, we validated the proposed pipeline, enabling the usage of adequate AI models to develop a mobile edge AI solution.}, } @article {pmid36850711, year = {2023}, author = {Chen, Z and Amani, AM and Yu, X and Jalili, M}, title = {Control and Optimisation of Power Grids Using Smart Meter Data: A Review.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {4}, pages = {}, pmid = {36850711}, issn = {1424-8220}, abstract = {This paper provides a comprehensive review of the applications of smart meters in the control and optimisation of power grids to support a smooth energy transition towards the renewable energy future. The smart grids become more complicated due to the presence of small-scale low inertia generators and the implementation of electric vehicles (EVs), which are mainly based on intermittent and variable renewable energy resources. Optimal and reliable operation of this environment using conventional model-based approaches is very difficult. Advancements in measurement and communication technologies have brought the opportunity of collecting temporal or real-time data from prosumers through Advanced Metering Infrastructure (AMI). Smart metering brings the potential of applying data-driven algorithms for different power system operations and planning services, such as infrastructure sizing and upgrade and generation forecasting. It can also be used for demand-side management, especially in the presence of new technologies such as EVs, 5G/6G networks and cloud computing. These algorithms face privacy-preserving and cybersecurity challenges that need to be well addressed. This article surveys the state-of-the-art of each of these topics, reviewing applications, challenges and opportunities of using smart meters to address them. It also stipulates the challenges that smart grids present to smart meters and the benefits that smart meters can bring to smart grids. Furthermore, the paper is concluded with some expected future directions and potential research questions for smart meters, smart grids and their interplay.}, } @article {pmid36850688, year = {2023}, author = {Fathy, C and Ali, HM}, title = {A Secure IoT-Based Irrigation System for Precision Agriculture Using the Expeditious Cipher.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {4}, pages = {}, pmid = {36850688}, issn = {1424-8220}, abstract = {Due to the recent advances in the domain of smart agriculture as a result of integrating traditional agriculture and the latest information technologies including the Internet of Things (IoT), cloud computing, and artificial intelligence (AI), there is an urgent need to address the information security-related issues and challenges in this field.
In this article, we propose the integration of lightweight cryptography techniques into the IoT ecosystem for smart agriculture to meet the requirements of resource-constrained IoT devices. Moreover, we investigate the adoption of a lightweight encryption protocol, namely, the Expeditious Cipher (X-cipher), to create a secure channel between the sensing layer and the broker in the Message Queue Telemetry Transport (MQTT) protocol as well as a secure channel between the broker and its subscribers. Our case study focuses on smart irrigation systems, and the MQTT protocol is deployed as the application messaging protocol in these systems. Smart irrigation strives to decrease the misuse of natural resources by enhancing the efficiency of agricultural irrigation. This secure channel is utilized to eliminate the main security threat in precision agriculture by protecting sensors' published data from eavesdropping and theft, as well as from unauthorized changes to sensitive data that can negatively impact crops' development. In addition, the secure channel protects from any alteration the irrigation decisions, regarding irrigation time and water quantity, that the data analytics (DA) entity returns to the actuators. Performance evaluation of our chosen lightweight encryption protocol revealed an improvement in terms of power consumption, execution time, and required memory usage when compared with the Advanced Encryption Standard (AES). Moreover, the selected lightweight encryption protocol outperforms the PRESENT lightweight encryption protocol in terms of throughput and memory usage.}, } @article {pmid36850563, year = {2023}, author = {Shahid, MA and Alam, MM and Su'ud, MM}, title = {Achieving Reliability in Cloud Computing by a Novel Hybrid Approach.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {4}, pages = {}, pmid = {36850563}, issn = {1424-8220}, abstract = {Cloud computing (CC) benefits and opportunities are among the fastest growing technologies in the computer industry. Cloud computing's challenges include resource allocation, security, quality of service, availability, privacy, data management, performance compatibility, and fault tolerance. Fault tolerance (FT) refers to a system's ability to continue performing its intended task in the presence of defects. Fault-tolerance challenges include heterogeneity and a lack of standards, the need for automation, cloud downtime reliability, consideration of recovery point objectives, recovery time objectives, and cloud workload. The proposed research includes machine learning (ML) algorithms such as naïve Bayes (NB), library support vector machine (LibSVM), multinomial logistic regression (MLR), sequential minimal optimization (SMO), K-nearest neighbor (KNN), and random forest (RF) as well as a fault-tolerance method known as delta-checkpointing to achieve higher accuracy, lesser fault prediction error, and reliability. Furthermore, the secondary data were collected from the homonymous, experimental high-performance computing (HPC) system at the Swiss Federal Institute of Technology (ETH), Zurich, and the primary data were generated using virtual machines (VMs) to select the best machine learning classifier. In this article, the secondary and primary data were divided into two split ratios of 80/20 and 70/30, respectively, and 5-fold cross-validation was used to achieve higher accuracy and lower fault-prediction error in terms of the true, false, repair, and failure states of virtual machines.
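To make the secure-channel idea in the Fathy and Ali record (pmid 36850688) concrete, the following is a minimal sketch of encrypting a sensor payload before publishing it over MQTT. Since no widely available Python implementation of the X-cipher exists, AES-GCM stands in for it here; the broker host, topic, and sensor fields are assumptions, and the client construction follows the paho-mqtt 1.x style.

```python
# Encrypt-then-publish sketch for MQTT sensor data. AES-GCM stands in for
# the X-cipher; broker, topic, and payload fields are assumed.
import json
import os
from cryptography.hazmat.primitives.ciphers.aead import AESGCM
import paho.mqtt.client as mqtt

key = AESGCM.generate_key(bit_length=128)   # pre-shared with subscribers
aead = AESGCM(key)

reading = json.dumps({"soil_moisture": 0.31, "temp_c": 24.8}).encode()
nonce = os.urandom(12)                      # fresh nonce per message
payload = nonce + aead.encrypt(nonce, reading, None)

client = mqtt.Client()                      # paho-mqtt 1.x style
client.connect("broker.example.org")        # assumed broker host
client.publish("farm/plot1/sensors", payload)
client.disconnect()
```

Subscribers holding the pre-shared key split off the 12-byte nonce and decrypt; the broker only ever sees ciphertext, which is the eavesdropping protection the abstract describes.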
Secondary data results show that naïve Bayes performed exceptionally well on CPU-Mem mono and multi blocks, and sequential minimal optimization performed very well on HDD mono and multi blocks in terms of accuracy and fault prediction. For the primary data, random forest performed very well in terms of accuracy and fault-prediction error but had poor time complexity, while sequential minimal optimization had good time complexity with only minor differences from random forest in accuracy and fault prediction. We therefore decided to modify sequential minimal optimization. Finally, the modified sequential minimal optimization (MSMO) algorithm with the fault-tolerance delta-checkpointing (D-CP) method is proposed to improve accuracy, fault prediction error, and reliability in cloud computing.}, } @article {pmid36850350, year = {2023}, author = {Alsokhiry, F and Annuk, A and Mohamed, MA and Marinho, M}, title = {An Innovative Cloud-Fog-Based Smart Grid Scheme for Efficient Resource Utilization.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {4}, pages = {}, pmid = {36850350}, issn = {1424-8220}, abstract = {Smart grids (SGs) enhance the effectiveness, reliability, resilience, and energy-efficient operation of electrical networks. Nonetheless, SGs suffer from big data transactions, which limit their capabilities and can cause delays in the optimal operation and management tasks. Therefore, it is clear that a fast and reliable architecture is needed to make big data management in SGs more efficient. This paper assesses the optimal operation of the SGs using cloud computing (CC), fog computing, and resource allocation to enhance the management problem. Technically, big data management makes SGs more efficient if cloud and fog computing (CFC) are integrated. The integration of fog computing (FC) with CC minimizes cloud burden and maximizes resource allocation. The proposed fog layer has three key features: position awareness, low latency, and mobility. Moreover, a CFC-driven framework is proposed to manage data among different agents. In order to make the system more efficient, FC allocates virtual machines (VMs) according to load-balancing techniques. In addition, the present study proposes a hybrid gray wolf differential evolution optimization algorithm (HGWDE) that brings gray wolf optimization (GWO) and improved differential evolution (IDE) together. Simulations conducted in MATLAB verify the efficiency of the suggested algorithm in terms of high data-transaction volume and computational time. According to the results, the response time of HGWDE is 54 ms, 82.1 ms, and 81.6 ms faster than particle swarm optimization (PSO), differential evolution (DE), and GWO. HGWDE's processing time is 53 ms, 81.2 ms, and 80.6 ms faster than PSO, DE, and GWO. Although GWO is a bit more efficient than HGWDE, the difference is not very significant.}, } @article {pmid36847779, year = {2023}, author = {Krog, D and Enghoff, MB and Köhn, C}, title = {A Monte Carlo approach to study the effect of ions on the nucleation of sulfuric acid-water clusters.}, journal = {Journal of computational chemistry}, volume = {}, number = {}, pages = {}, doi = {10.1002/jcc.27076}, pmid = {36847779}, issn = {1096-987X}, abstract = {The nucleation of sulfuric acid-water clusters is a significant contribution to the formation of aerosols as precursors of cloud condensation nuclei (CCN).
Depending on the temperature, there is an interplay between the clustering of particles and their evaporation controlling the efficiency of cluster growth. For typical temperatures in the atmosphere, the evaporation of $\mathrm{H_2SO_4}\cdot\mathrm{H_2O}$ clusters is more efficient than the clustering of the first, small clusters, and thus their growth is dampened at its early stages. Since the evaporation rates of small clusters containing an $\mathrm{HSO}_4^-$ ion are much smaller than for purely neutral sulfuric acid clusters, they can serve as a central body for the further attachment of $\mathrm{H_2SO_4}\cdot\mathrm{H_2O}$ molecules. We here present an innovative Monte Carlo model to study the growth of aqueous sulfuric acid clusters around central ions. Unlike classical thermodynamic nucleation theory or kinetic models, this model makes it possible to trace individual particles and thus to determine the properties of each individual particle. As a benchmarking case, we have performed simulations at $T = 300\ \mathrm{K}$, a relative humidity of 50%, and dipole and ion concentrations of $c_{\mathrm{dipole}} = 5\times 10^{8}$-$10^{9}\ \mathrm{cm}^{-3}$ and $c_{\mathrm{ion}} = 0$-$10^{7}\ \mathrm{cm}^{-3}$. We discuss the runtime of our simulations and present the velocity distribution of ionic clusters, the size distribution of the clusters as well as the formation rate of clusters with radii $R \ge 0.85\ \mathrm{nm}$. Simulations give reasonable velocity and size distributions, and the formation rates agree well with previous results, including the relevance of ions for the initial growth of sulfuric acid-water clusters. In conclusion, we present a computational method which allows studying detailed particle properties during the growth of aerosols as a precursor of CCN.}, } @article {pmid36846250, year = {2023}, author = {Gustafsson, W and Dórea, FC and Widgren, S and Frössling, J and Vidal, G and Kim, H and Cha, W and Comin, A and Rodriguez Ewerlöf, I and Rosendal, T}, title = {Data workflows and visualization in support of surveillance practice.}, journal = {Frontiers in veterinary science}, volume = {10}, number = {}, pages = {1129863}, pmid = {36846250}, issn = {2297-1769}, abstract = {The Swedish National Veterinary Institute (SVA) is working on implementing reusable and adaptable workflows for epidemiological analysis and dynamic report generation to improve disease surveillance. Important components of this work include: data access, development environment, computational resources and cloud-based management. The development environment relies on Git for code collaboration and version control and the R language for statistical computing and data visualization. The computational resources include both local and cloud-based systems, with automatic workflows managed in the cloud.
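To give a feel for the attachment-versus-evaporation competition described in the Krog et al. record (pmid 36847779), the following is a toy stochastic sketch, not the paper's model: a cluster's size random-walks up on attachment and down on evaporation, with ionic clusters given a lower assumed evaporation rate. All rate values are invented for illustration.

```python
# Toy attachment/evaporation random walk of cluster size; all rates assumed.
import numpy as np

rng = np.random.default_rng(3)
ATTACH, EVAP_NEUTRAL, EVAP_ION = 1.0, 1.5, 0.3   # per-step rates, assumed

def grow(is_ion, steps=10_000):
    """Cluster size n under competing attachment and evaporation."""
    evap = EVAP_ION if is_ion else EVAP_NEUTRAL
    p_attach = ATTACH / (ATTACH + evap)          # chance next event adds a molecule
    n = 1
    for _ in range(steps):
        n += 1 if rng.random() < p_attach else -1
        n = max(n, 1)                            # the central molecule/ion remains
    return n

sizes_ion = [grow(True) for _ in range(200)]
sizes_neutral = [grow(False) for _ in range(200)]
print(np.mean(sizes_ion), np.mean(sizes_neutral))  # ionic clusters grow larger
```

Because the assumed ionic evaporation rate is below the attachment rate, ionic clusters drift to large sizes while neutral ones stay small, mirroring the qualitative role of ions in the abstract.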
The workflows are designed to be flexible and adaptable to changing data sources and stakeholder demands, with the ultimate goal of creating a robust infrastructure for the delivery of actionable epidemiological information.}, } @article {pmid36842917, year = {2023}, author = {Johnson, E and Campos-Cerqueira, M and Jumail, A and Yusni, ASA and Salgado-Lynn, M and Fornace, K}, title = {Applications and advances in acoustic monitoring for infectious disease epidemiology.}, journal = {Trends in parasitology}, volume = {}, number = {}, pages = {}, doi = {10.1016/j.pt.2023.01.008}, pmid = {36842917}, issn = {1471-5007}, abstract = {Emerging infectious diseases continue to pose a significant burden on global public health, and there is a critical need to better understand transmission dynamics arising at the interface of human activity and wildlife habitats. Passive acoustic monitoring (PAM), more typically applied to questions of biodiversity and conservation, provides an opportunity to collect and analyse audio data in relative real time and at low cost. Acoustic methods are increasingly accessible, with the expansion of cloud-based computing, low-cost hardware, and machine learning approaches. Paired with purposeful experimental design, acoustic data can complement existing surveillance methods and provide a novel toolkit to investigate the key biological parameters and ecological interactions that underpin infectious disease epidemiology.}, } @article {pmid36842572, year = {2023}, author = {Andaryani, S and Nourani, V and Abbasnejad, H and Koch, J and Stisen, S and Klöve, B and Haghighi, AT}, title = {Spatio-temporal analysis of climate and irrigated vegetation cover changes and their role in lake water level depletion using a pixel-based approach and canonical correlation analysis.}, journal = {The Science of the total environment}, volume = {}, number = {}, pages = {162326}, doi = {10.1016/j.scitotenv.2023.162326}, pmid = {36842572}, issn = {1879-1026}, abstract = {Lake Urmia, located in northwest Iran, was among the world's largest hypersaline lakes but has now experienced a 7 m decrease in water level, from 1278 m to 1271 m between 1996 and 2019. Whether the lake's drying is a natural process or a result of human intervention remains in doubt, a question a pixel-based analysis (PBA) approach can help answer. Here, a non-parametric Mann-Kendall trend test was applied to a 21-year record (2000-2020) of satellite data products, i.e., temperature, precipitation, snow cover, and irrigated vegetation cover (IVC). The Google Earth Engine (GEE) cloud-computing platform was used to obtain the products and compute pixel-based monthly and seasonal values over 10 sub-basins in the three provinces surrounding Lake Urmia. Canonical correlation analysis was employed in order to understand the correlation between variables and lake water level (LWL). The trend analysis results show significant increases in temperature (from 1 to 2 °C during 2000-2020) over May-September, i.e., in 87 %-25 % of the basin. However, precipitation has seen an insignificant decrease (from 3 to 9 mm during 2000-2019) in the rainy months (April and May). Snow cover has also decreased and, when compared with precipitation, shows a change in precipitation patterns from snow to rain. IVC has increased significantly in all sub-basins, especially the southern parts of the lake, with the West province making the largest contribution to the development of IVC.
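The trend testing in the Andaryani et al. record (pmid 36842572) rests on the Mann-Kendall statistic; the following is a minimal implementation of the test's S statistic and normal-approximation z-score (no tie correction, for clarity). The 21-value series is an invented stand-in for a pixel's annual record.

```python
# Minimal Mann-Kendall trend test; the input series is an assumed example.
import math
import numpy as np

def mann_kendall_z(x):
    """Return the Mann-Kendall S statistic and its z-score (no tie correction)."""
    x = np.asarray(x, dtype=float)
    n = len(x)
    s = sum(np.sign(x[j] - x[i])            # count up- vs down-moving pairs
            for i in range(n - 1) for j in range(i + 1, n))
    var_s = n * (n - 1) * (2 * n + 5) / 18.0
    if s > 0:
        z = (s - 1) / math.sqrt(var_s)      # continuity correction
    elif s < 0:
        z = (s + 1) / math.sqrt(var_s)
    else:
        z = 0.0
    return s, z

temps = [14.1, 14.3, 14.2, 14.6, 14.8, 14.7, 15.0, 15.2, 15.1, 15.4, 15.6,
         15.5, 15.8, 16.0, 16.1, 16.2, 16.1, 16.4, 16.6, 16.5, 16.8]
s, z = mann_kendall_z(temps)
print(s, round(z, 2))   # |z| > 1.96 -> significant trend at the 5% level
```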
The PBA thus underpins, in more detail, the very high contribution of IVC to the drying of the lake, although the contribution of climate change is also apparent. The development of IVC leads to increased water consumption through evapotranspiration and excess evaporation caused by the storage of water for irrigation. Due to the decreased runoff caused by consumption exceeding the basin's capacity, the lake cannot be fed sufficiently.}, } @article {pmid36832716, year = {2023}, author = {Yuan, L and Wang, Z and Sun, P and Wei, Y}, title = {An Efficient Virtual Machine Consolidation Algorithm for Cloud Computing.}, journal = {Entropy (Basel, Switzerland)}, volume = {25}, number = {2}, pages = {}, doi = {10.3390/e25020351}, pmid = {36832716}, issn = {1099-4300}, abstract = {With the rapid development of integration in blockchain and IoT, virtual machine consolidation (VMC) has become a heated topic because it can effectively improve the energy efficiency and service quality of cloud computing in the blockchain. Current VMC algorithms are not effective enough because they do not treat the load of the virtual machine (VM) as a time series to be analyzed. Therefore, we proposed a VMC algorithm based on load forecast to improve efficiency. First, we proposed a migration VM selection strategy based on load increment prediction called LIP. Combined with the current load and load increment, this strategy can effectively improve the accuracy of selecting VMs from the overloaded physical machines (PMs). Then, we proposed a VM migration point selection strategy based on the load sequence prediction called SIR. We merged VMs with complementary load series into the same PM, effectively improving the stability of the PM load, thereby reducing service level agreement violations (SLAV) and the number of VM migrations caused by resource competition on the PM. Finally, we proposed an improved VMC algorithm based on the load predictions of LIP and SIR. The experimental results show that our VMC algorithm can effectively improve energy efficiency.}, } @article {pmid36832692, year = {2023}, author = {Tsuruyama, T}, title = {Kullback-Leibler Divergence of an Open-Queuing Network of a Cell-Signal-Transduction Cascade.}, journal = {Entropy (Basel, Switzerland)}, volume = {25}, number = {2}, pages = {}, doi = {10.3390/e25020326}, pmid = {36832692}, issn = {1099-4300}, abstract = {Queuing networks (QNs) are essential models in operations research, with applications in cloud computing and healthcare systems. However, few studies have analyzed the cell's biological signal transduction using QN theory. This study entailed the modeling of signal transduction as an open Jackson's QN (JQN) to theoretically determine cell signal transduction, under the assumption that the signal mediator queues in the cytoplasm, and the mediator is exchanged from one signaling molecule to another through interactions between the signaling molecules. Each signaling molecule was regarded as a network node in the JQN. The JQN Kullback-Leibler divergence (KLD) was defined using the ratio of the queuing time (λ) to the exchange time (μ), λ/μ. The mitogen-activated protein kinase (MAPK) signal-cascade model was applied, and the KLD rate per signal-transduction-period was shown to be conserved when the KLD was maximized. Our experimental study on MAPK cascade supported this conclusion.
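As one hedged, concrete reading of the λ/μ ratio in the Tsuruyama record (pmid 36832692): in an open Jackson network, each node behaves like an M/M/1 queue whose stationary queue-length distribution is geometric with utilization ρ = λ/μ, and a KL divergence can be computed between the distributions of two such nodes. The sketch below illustrates only this textbook construction, not the paper's specific KLD definition; the two ρ values are assumptions.

```python
# KL divergence between stationary M/M/1 queue-length distributions,
# an illustrative construction only; rho values are assumed.
import numpy as np

def mm1_pmf(rho, n_max=2000):
    """Stationary pmf of an M/M/1 node: P(n) = (1 - rho) * rho**n."""
    n = np.arange(n_max)
    return (1.0 - rho) * rho ** n

def kld(p, q):
    """Kullback-Leibler divergence D(p || q) in nats."""
    mask = p > 0
    return float(np.sum(p[mask] * np.log(p[mask] / q[mask])))

p = mm1_pmf(rho=0.5)   # node with lambda/mu = 0.5
q = mm1_pmf(rho=0.8)   # node with lambda/mu = 0.8
print(round(kld(p, q), 4))
```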
This result is similar to the entropy-rate conservation of chemical kinetics and entropy coding reported in our previous studies. Thus, JQN can be used as a novel framework to analyze signal transduction.}, } @article {pmid36832652, year = {2023}, author = {Chen, D and Zhang, Y}, title = {Diversity-Aware Marine Predators Algorithm for Task Scheduling in Cloud Computing.}, journal = {Entropy (Basel, Switzerland)}, volume = {25}, number = {2}, pages = {}, doi = {10.3390/e25020285}, pmid = {36832652}, issn = {1099-4300}, abstract = {With the increase in cloud users and internet of things (IoT) applications, advanced task scheduling (TS) methods are required to reasonably schedule tasks in cloud computing. This study proposes a diversity-aware marine predators algorithm (DAMPA) for solving TS in cloud computing. In DAMPA, to enhance the premature convergence avoidance ability, the predator crowding degree ranking and comprehensive learning strategies were adopted in the second stage to maintain the population diversity and thereby inhibit premature convergence. Additionally, a stage-independent control of the stepsize-scaling strategy that uses different control parameters in three stages was designed to balance the exploration and exploitation abilities. Two case experiments were conducted to evaluate the proposed algorithm. Compared with the latest algorithm, in the first case, DAMPA reduced the makespan and energy consumption by 21.06% and 23.47% at most, respectively. In the second case, the makespan and energy consumption are reduced by 34.35% and 38.60% on average, respectively. Meanwhile, the algorithm achieved greater throughput in both cases.}, } @article {pmid36832648, year = {2023}, author = {Liu, Y and Luo, J and Yang, Y and Wang, X and Gheisari, M and Luo, F}, title = {ShrewdAttack: Low Cost High Accuracy Model Extraction.}, journal = {Entropy (Basel, Switzerland)}, volume = {25}, number = {2}, pages = {}, doi = {10.3390/e25020282}, pmid = {36832648}, issn = {1099-4300}, abstract = {Machine learning as a service (MLaaS) plays an essential role in the current ecosystem. Enterprises do not need to train models by themselves separately. Instead, they can use well-trained models provided by MLaaS to support business activities. However, such an ecosystem could be threatened by model extraction attacks-an attacker steals the functionality of a trained model provided by MLaaS and builds a substitute model locally. In this paper, we proposed a model extraction method with low query costs and high accuracy. In particular, we use pre-trained models and task-relevant data to decrease the size of query data. We use instance selection to reduce query samples. In addition, we divided query data into two categories, namely low-confidence data and high-confidence data, to reduce the budget and improve accuracy. We then conducted attacks on two models provided by Microsoft Azure as our experiments. The results show that our scheme achieves high accuracy at low cost, with the substitution models achieving 96.10% and 95.24% substitution while querying only 7.32% and 5.30% of their training data on the two models, respectively. This new attack approach creates additional security challenges for models deployed on cloud platforms. It raises the need for novel mitigation strategies to secure the models. 
In future work, generative adversarial networks and model inversion attacks can be used to generate more diverse data to be applied to the attacks.}, } @article {pmid36832632, year = {2023}, author = {Byrne, E and Gnilke, OW and Kliewer, J}, title = {Straggler- and Adversary-Tolerant Secure Distributed Matrix Multiplication Using Polynomial Codes.}, journal = {Entropy (Basel, Switzerland)}, volume = {25}, number = {2}, pages = {}, doi = {10.3390/e25020266}, pmid = {36832632}, issn = {1099-4300}, abstract = {Large matrix multiplications commonly take place in large-scale machine-learning applications. Often, the sheer size of these matrices prevents carrying out the multiplication at a single server. Therefore, these operations are typically offloaded to a distributed computing platform with a master server and a large number of workers in the cloud, operating in parallel. For such distributed platforms, it has recently been shown that coding over the input data matrices can reduce the computational delay by introducing a tolerance against straggling workers, i.e., workers for which execution time significantly lags with respect to the average. In addition to exact recovery, we impose a security constraint on both matrices to be multiplied. Specifically, we assume that workers can collude and eavesdrop on the content of these matrices. For this problem, we introduce a new class of polynomial codes with fewer non-zero coefficients than the degree plus one. We provide closed-form expressions for the recovery threshold and show that our construction improves the recovery threshold of existing schemes in the literature, in particular for larger matrix dimensions and a moderate to large number of colluding workers. In the absence of any security constraints, we show that our construction is optimal in terms of recovery threshold.}, } @article {pmid36819757, year = {2023}, author = {Borhani, F and Shafiepour Motlagh, M and Ehsani, AH and Rashidi, Y and Ghahremanloo, M and Amani, M and Moghimi, A}, title = {Current Status and Future Forecast of Short-lived Climate-Forced Ozone in Tehran, Iran, derived from Ground-Based and Satellite Observations.}, journal = {Water, air, and soil pollution}, volume = {234}, number = {2}, pages = {134}, pmid = {36819757}, issn = {0049-6979}, abstract = {In this study, the distribution and alterations of ozone concentrations in Tehran, Iran, in 2021 were investigated. The impacts of precursors (i.e., CO, NO2, and NO) on ozone were examined using the data collected over 12 months (i.e., January 2021 to December 2021) from 21 stations of the Air Quality Control Company (AQCC). The results of monthly heat mapping of tropospheric ozone concentrations indicated the lowest value in December and the highest value in July. The lowest and highest seasonal concentrations were in winter and summer, respectively. Moreover, there was a negative correlation between ozone and its precursors. The Inverse Distance Weighting (IDW) method was then implemented to obtain air pollution zoning maps. Then, ozone concentration modeled by the IDW method was compared with the average monthly change of total column density of ozone derived from Sentinel-5 satellite data in the Google Earth Engine (GEE) cloud platform. A good agreement was discovered despite the harsh circumstances that both ground-based and satellite measurements were subjected to. The results obtained from both datasets showed that the west of the city of Tehran had the highest average O3 concentration.
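To make the threat in the Liu et al. record (pmid 36832648) concrete, the following is a generic model-extraction sketch, not the paper's ShrewdAttack method: the attacker queries a "victim" model on an unlabeled pool and trains a cheaper substitute on the returned labels. The victim architecture and dataset here are assumptions standing in for an MLaaS endpoint.

```python
# Generic model-extraction sketch; victim and data are assumed stand-ins.
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier

X, y = make_classification(n_samples=3000, n_features=20, random_state=0)
X_victim, X_attacker, y_victim, _ = train_test_split(
    X, y, train_size=0.5, random_state=0)

# "Victim" model standing in for an MLaaS endpoint.
victim = MLPClassifier(hidden_layer_sizes=(64,), max_iter=500,
                       random_state=0).fit(X_victim, y_victim)

# Attacker queries the victim and trains a substitute on the stolen labels.
stolen_labels = victim.predict(X_attacker)
substitute = LogisticRegression(max_iter=1000).fit(X_attacker, stolen_labels)

agreement = (substitute.predict(X_attacker) == stolen_labels).mean()
print(f"substitute agrees with victim on {agreement:.1%} of queries")
```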
In this study, the status of the concentration of ozone precursors and tropospheric ozone in 2022 was also predicted. For this purpose, the Box-Jenkins Seasonal Autoregressive Integrated Moving Average (SARIMA) approach was implemented to predict the monthly air quality parameters. Overall, it was observed that the SARIMA approach was an efficient tool for forecasting air quality. Finally, the results showed that the trends of ozone obtained from terrestrial and satellite observations throughout 2021 were slightly different due to the contribution of the tropospheric ozone precursor concentration and meteorology conditions.}, } @article {pmid36818051, year = {2023}, author = {Stewart, CA and Costa, CM and Wernert, JA and Snapp-Childs, W and Bland, M and Blood, P and Campbell, T and Couvares, P and Fischer, J and Hancock, DY and Hart, DL and Jankowski, H and Knepper, R and McMullen, DF and Mehringer, S and Pierce, M and Rogers, G and Sinkovits, RS and Towns, J}, title = {Use of accounting concepts to study research: return on investment in XSEDE, a US cyberinfrastructure service.}, journal = {Scientometrics}, volume = {}, number = {}, pages = {1-31}, pmid = {36818051}, issn = {0138-9130}, abstract = {This paper uses accounting concepts-particularly the concept of Return on Investment (ROI)-to reveal the quantitative value of scientific research pertaining to a major US cyberinfrastructure project (XSEDE-the eXtreme Science and Engineering Discovery Environment). XSEDE provides operational and support services for advanced information technology systems, cloud systems, and supercomputers supporting non-classified US research, with an average budget for XSEDE of US$20M+ per year over the period studied (2014-2021). To assess the financial effectiveness of these services, we calculated a proxy for ROI, and converted quantitative measures of XSEDE service delivery into financial values using costs for service from the US marketplace. We calculated two estimates of ROI: a Conservative Estimate, functioning as a lower bound and using publicly available data for a lower valuation of XSEDE services; and a Best Available Estimate, functioning as a more accurate estimate, but using some unpublished valuation data. Using the largest dataset assembled for analysis of ROI for a cyberinfrastructure project, we found a Conservative Estimate of ROI of 1.87, and a Best Available Estimate of ROI of 3.24. Through accounting methods, we show that XSEDE services offer excellent value to the US government, that the services offered uniquely by XSEDE (that is, not otherwise available for purchase) were the most valuable to the facilitation of US research activities, and that accounting-based concepts hold great value for understanding the mechanisms of scientific research generally.}, } @article {pmid36812648, year = {2022}, author = {Jiang, P and Gao, F and Liu, S and Zhang, S and Zhang, X and Xia, Z and Zhang, W and Jiang, T and Zhu, JL and Zhang, Z and Shu, Q and Snyder, M and Li, J}, title = {Longitudinally tracking personal physiomes for precision management of childhood epilepsy.}, journal = {PLOS digital health}, volume = {1}, number = {12}, pages = {e0000161}, pmid = {36812648}, issn = {2767-3170}, abstract = {Our current understanding of human physiology and activities is largely derived from sparse and discrete individual clinical measurements. 
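The 2022 forecasts in the Borhani et al. record (pmid 36819757) use a Box-Jenkins SARIMA model; the following is a minimal statsmodels sketch of that approach on a synthetic monthly series with an annual cycle. The series and the (1,0,1)x(1,1,1,12) order are assumptions, not the paper's fitted specification.

```python
# SARIMA forecasting sketch with statsmodels; series and order are assumed.
import numpy as np
import pandas as pd
from statsmodels.tsa.statespace.sarimax import SARIMAX

rng = np.random.default_rng(4)
months = pd.date_range("2016-01-01", periods=72, freq="MS")
ozone = (30 + 10 * np.sin(2 * np.pi * months.month / 12)   # annual cycle
         + rng.normal(0, 2, len(months)))                   # noise
series = pd.Series(ozone, index=months)

model = SARIMAX(series, order=(1, 0, 1),
                seasonal_order=(1, 1, 1, 12)).fit(disp=False)
print(model.forecast(steps=12))   # forecast the next 12 months
```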
To achieve precise, proactive, and effective health management of an individual, longitudinal and dense tracking of personal physiomes and activities is required, which is feasible only by utilizing wearable biosensors. As a pilot study, we implemented a cloud computing infrastructure to integrate wearable sensors, mobile computing, digital signal processing, and machine learning to improve early detection of seizure onsets in children. We recruited 99 children diagnosed with epilepsy and longitudinally tracked them at single-second resolution using a wearable wristband, and prospectively acquired more than one billion data points. This unique dataset offered us an opportunity to quantify physiological dynamics (e.g., heart rate, stress response) across age groups and to identify physiological irregularities upon epilepsy onset. The high-dimensional personal physiome and activity profiles displayed a clustering pattern anchored by patient age groups. These signature patterns included strong age- and sex-specific effects on varying circadian rhythms and stress responses across major childhood developmental stages. For each patient, we further compared the physiological and activity profiles associated with seizure onsets with the personal baseline and developed a machine learning framework to accurately capture these onset moments. The performance of this framework was further replicated in another independent patient cohort. We next compared our predictions with the electroencephalogram (EEG) signals of selected patients and demonstrated that our approach could detect subtle seizures not recognized by humans and could detect seizures prior to clinical onset. Our work demonstrated the feasibility of a real-time mobile infrastructure in a clinical setting, which has the potential to be valuable in caring for epileptic patients. Extension of such a system has the potential to be leveraged as a health management device or longitudinal phenotyping tool in clinical cohort studies.}, } @article {pmid36812592, year = {2023}, author = {Tabata, K and Mihara, H and Nanjo, S and Motoo, I and Ando, T and Teramoto, A and Fujinami, H and Yasuda, I}, title = {Artificial intelligence model for analyzing colonic endoscopy images to detect changes associated with irritable bowel syndrome.}, journal = {PLOS digital health}, volume = {2}, number = {2}, pages = {e0000058}, doi = {10.1371/journal.pdig.0000058}, pmid = {36812592}, issn = {2767-3170}, abstract = {IBS is not considered to be an organic disease and usually shows no abnormality on lower gastrointestinal endoscopy, although biofilm formation, dysbiosis, and histological microinflammation have recently been reported in patients with IBS. In this study, we investigated whether an artificial intelligence (AI) colorectal image model can identify minute endoscopic changes, which cannot typically be detected by human investigators, that are associated with IBS. Study subjects were identified based on electronic medical records and categorized as IBS (Group I; n = 11), IBS with predominant constipation (IBS-C; Group C; n = 12), and IBS with predominant diarrhea (IBS-D; Group D; n = 12). The study subjects had no other diseases. Colonoscopy images from IBS patients and from asymptomatic healthy subjects (Group N; n = 88) were obtained. Google Cloud Platform AutoML Vision (single-label classification) was used to construct AI image models to calculate sensitivity, specificity, predictive value, and AUC.
A total of 2479, 382, 538, and 484 images were randomly selected for Groups N, I, C, and D, respectively. The AUC of the model discriminating between Group N and I was 0.95. Sensitivity, specificity, positive predictive value, and negative predictive value of Group I detection were 30.8%, 97.6%, 66.7%, and 90.2%, respectively. The overall AUC of the model discriminating between Groups N, C, and D was 0.83; sensitivity, specificity, and positive predictive value of Group N were 87.5%, 46.2%, and 79.9%, respectively. Using the image AI model, colonoscopy images of IBS could be discriminated from those of healthy subjects at an AUC of 0.95. Prospective studies are needed to further validate whether this externally validated model has similar diagnostic capabilities at other facilities and whether it can be used to determine treatment efficacy.}, } @article {pmid36805192, year = {2023}, author = {Brinkhaus, HO and Rajan, K and Schaub, J and Zielesny, A and Steinbeck, C}, title = {Open data and algorithms for open science in AI-driven molecular informatics.}, journal = {Current opinion in structural biology}, volume = {79}, number = {}, pages = {102542}, doi = {10.1016/j.sbi.2023.102542}, pmid = {36805192}, issn = {1879-033X}, abstract = {Recent years have seen a sharp increase in the development of deep learning and artificial intelligence-based molecular informatics. There has been a growing interest in applying deep learning to several subfields, including the digital transformation of synthetic chemistry, extraction of chemical information from the scientific literature, and AI in natural product-based drug discovery. The application of AI to molecular informatics is still constrained by the fact that most of the data used for training and testing deep learning models are not available as FAIR and open data. As open science practices continue to grow in popularity, initiatives which support FAIR and open data as well as open-source software have emerged. It is becoming increasingly important for researchers in the field of molecular informatics to embrace open science and to submit data and software in open repositories. With the advent of open-source deep learning frameworks and cloud computing platforms, academic researchers are now able to deploy and test their own deep learning models with ease. With the development of new and faster hardware for deep learning and the increasing number of initiatives towards digital research data management infrastructures, as well as a culture promoting open data, open source, and open science, AI-driven molecular informatics will continue to grow. This review examines the current state of open data and open algorithms in molecular informatics, as well as ways in which they could be improved in future.}, } @article {pmid36797269, year = {2023}, author = {Lall, A and Tallur, S}, title = {Deep reinforcement learning-based pairwise DNA sequence alignment method compatible with embedded edge devices.}, journal = {Scientific reports}, volume = {13}, number = {1}, pages = {2773}, pmid = {36797269}, issn = {2045-2322}, mesh = {Sequence Alignment ; *Algorithms ; *Neural Networks, Computer ; Computers ; DNA ; }, abstract = {Sequence alignment is an essential component of bioinformatics, for identifying regions of similarity that may indicate functional, structural, or evolutionary relationships between the sequences.
Genome-based diagnostics relying on DNA sequencing have benefited hugely from the boom in computing power in recent decades, particularly due to cloud-computing and the rise of graphics processing units (GPUs) and other advanced computing platforms for running advanced algorithms. Translating the success of such breakthroughs in diagnostics to affordable solutions for low-cost healthcare requires development of algorithms that can operate on the edge instead of in the cloud, using low-cost and low-power electronic systems such as microcontrollers and field programmable gate arrays (FPGAs). In this work, we present EdgeAlign, a deep reinforcement learning based method for performing pairwise DNA sequence alignment on stand-alone edge devices. EdgeAlign uses deep reinforcement learning to train a deep Q-network (DQN) agent for performing sequence alignment on fixed length sub-sequences, using a sliding window that is scanned over the length of the entire sequence. The hardware resource-consumption for implementing this scheme is thus independent of the lengths of the sequences to be aligned, and is further optimized using a novel AutoML based method for neural network model size reduction. Unlike other algorithms for sequence alignment reported in literature, the model demonstrated in this work is highly compact and deployed on two edge devices (NVIDIA Jetson Nano Developer Kit and Digilent Arty A7-100T, containing Xilinx XC7A35T Artix-7 FPGA) for demonstration of alignment for sequences from the publicly available Influenza sequences at the National Center for Biotechnology Information (NCBI) Virus Data Hub.}, } @article {pmid36793418, year = {2023}, author = {A, A and Dahan, F and Alroobaea, R and Alghamdi, WY and Mustafa Khaja Mohammed, and Hajjej, F and Deema Mohammed Alsekait, and Raahemifar, K}, title = {A smart IoMT based architecture for E-healthcare patient monitoring system using artificial intelligence algorithms.}, journal = {Frontiers in physiology}, volume = {14}, number = {}, pages = {1125952}, pmid = {36793418}, issn = {1664-042X}, abstract = {Generally, cloud computing is integrated with wireless sensor networks to enable monitoring systems and improve the quality of service. The sensed patient data are monitored with biosensors without considering the patient datatype, which minimizes the work of hospitals and physicians. Wearable sensor devices and the Internet of Medical Things (IoMT) have changed the health service, resulting in faster monitoring, prediction, diagnosis, and treatment. Nevertheless, there have been difficulties that need to be resolved by the use of AI methods. The primary goal of this study is to introduce an AI-powered, IoMT telemedicine infrastructure for E-healthcare. In this paper, data are first collected from the patient's body using the sensing devices, transmitted through the gateway/Wi-Fi, and stored in the IoMT cloud repository. The stored information is then acquired and preprocessed to refine the collected data. Features are extracted from the preprocessed data by means of high-dimensional linear discriminant analysis (LDA), and the best optimal features are selected using a reconfigured multi-objective cuckoo search algorithm (CSA). The prediction of abnormal/normal data is made using a hybrid ResNet-18 and GoogleNet classifier (HRGC). A decision is then made on whether to send an alert to hospitals/healthcare personnel.
If the expected results are satisfactory, the participant's information is saved on the internet for later use. Finally, a performance analysis is carried out to validate the efficiency of the proposed mechanism.}, } @article {pmid36789435, year = {2023}, author = {Camacho, C and Boratyn, GM and Joukov, V and Alvarez, RV and Madden, TL}, title = {ElasticBLAST: Accelerating Sequence Search via Cloud Computing.}, journal = {bioRxiv : the preprint server for biology}, volume = {}, number = {}, pages = {}, doi = {10.1101/2023.01.04.522777}, pmid = {36789435}, abstract = {BACKGROUND: Biomedical researchers use alignments produced by BLAST (Basic Local Alignment Search Tool) to categorize their query sequences. Producing such alignments is an essential bioinformatics task that is well suited for the cloud. The cloud can perform many calculations quickly as well as store and access large volumes of data. Bioinformaticians can also use it to collaborate with other researchers, sharing their results, datasets and even their pipelines on a common platform.

RESULTS: We present ElasticBLAST, a cloud native application to perform BLAST alignments in the cloud. ElasticBLAST can handle anywhere from a few to many thousands of queries and run the searches on thousands of virtual CPUs (if desired), deleting resources when it is done. It uses cloud native tools for orchestration and can request discounted instances, lowering cloud costs for users. It is supported on Amazon Web Services and Google Cloud Platform. It can search BLAST databases that are user provided or from the National Center for Biotechnology Information.
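To make the workflow concrete, here is a sketch of driving ElasticBLAST from Python via its command-line interface. The subcommands and flags follow the public ElasticBLAST documentation as we recall it, and the results bucket is hypothetical; verify against `elastic-blast --help` before relying on them.

```python
# Sketch: submit an ElasticBLAST search, check it, and clean up resources.
import subprocess

results = "s3://my-elasticblast-results/run1"  # hypothetical bucket/prefix

# Submit a search: queries are split and fanned out over cloud instances.
subprocess.run(
    ["elastic-blast", "submit",
     "--program", "blastp",
     "--db", "nr",
     "--query", "queries.fa",
     "--results", results,
     "--num-nodes", "4"],
    check=True,
)

# Poll the search, then tear down the cloud resources when finished.
subprocess.run(["elastic-blast", "status", "--results", results], check=True)
subprocess.run(["elastic-blast", "delete", "--results", results], check=True)
```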

CONCLUSION: We show that ElasticBLAST is a useful application that can efficiently perform BLAST searches for the user in the cloud, as we demonstrate with two examples. At the same time, it hides much of the complexity of working in the cloud, lowering the threshold to move work to the cloud.}, } @article {pmid36789367, year = {2023}, author = {Guo, YG and Yin, Q and Wang, Y and Xu, J and Zhu, L}, title = {Efficiency and optimization of government service resource allocation in a cloud computing environment.}, journal = {Journal of cloud computing (Heidelberg, Germany)}, volume = {12}, number = {1}, pages = {18}, pmid = {36789367}, issn = {2192-113X}, abstract = {Based on the connotation and structure of government service resources, data on government service resources in L city from 2019 to 2021 are used to calculate the efficiency of government service resource allocation in each county and region in different periods, particularly by adding the government cloud platform and cloud computing resources to the government service resource data and applying the data envelopment analysis (DEA) method, which has practical significance for the development and innovation of government services. On this basis, patterns and evolutionary trends of government service resource allocation efficiency in each region during the study period are analyzed and discussed. Results are as follows. i) The overall efficiency of government service resource allocation in L city is not high, fluctuating from year to year around a generally increasing trend. ii) Relative differences in the allocation efficiency of government service resources are a common feature of regional development; their existence and evolution directly or indirectly reflect factors such as economic strength and reform effort. iii) Data analysis for specific points indicates that increased input does not necessarily lead to increased efficiency; some indicators show insufficient input or redundant output. Therefore, optimizing the allocation of physical, human, and financial resources, together with the intelligent online processing of government services enabled by the government cloud platform and cloud computing resources, is the current objective choice for maximizing the efficiency of government service resource allocation.}, } @article {pmid36788990, year = {2023}, author = {Shrestha, S and Stapp, J and Taylor, M and Leach, R and Carreiro, S and Indic, P}, title = {Towards Device Agnostic Detection of Stress and Craving in Patients with Substance Use Disorder.}, journal = {Proceedings of the ... Annual Hawaii International Conference on System Sciences. Annual Hawaii International Conference on System Sciences}, volume = {2023}, number = {}, pages = {3156-3163}, pmid = {36788990}, issn = {1530-1605}, abstract = {Novel technologies have great potential to improve the treatment of individuals with substance use disorder (SUD) and to reduce the current high rate of relapse (i.e. return to drug use). Wearable sensor-based systems that continuously measure physiology can provide information about behavior and opportunities for real-time interventions. We have previously developed an mHealth system which includes a wearable sensor, a mobile phone app, and a cloud-based server with embedded machine learning algorithms which detect stress and craving.
The system functions as a just-in-time intervention tool to help patients de-escalate and as a tool for clinicians to tailor treatment based on stress and craving patterns observed. However, in our pilot work we found that to deploy the system to diverse socioeconomic populations and to increase usability, the system must be able to work efficiently with cost-effective and popular commercial wearable devices. To make the system device agnostic, we propose methods to transform data from a commercially available wearable for use in algorithms developed on a research-grade wearable sensor. The accuracy of these transformations in detecting stress and craving in individuals with SUD is further explored.}, } @article {pmid36785195, year = {2023}, author = {Zhao, Y and Bu, JW and Liu, W and Ji, JH and Yang, QH and Lin, SF}, title = {Implementation of a full-color holographic system using RGB-D salient object detection and divided point cloud gridding.}, journal = {Optics express}, volume = {31}, number = {2}, pages = {1641-1655}, doi = {10.1364/OE.477666}, pmid = {36785195}, issn = {1094-4087}, abstract = {At present, a real objects-based full-color holographic system usually uses a digital single-lens reflex (DSLR) camera array or depth camera to collect data. It then relies on a spatial light modulator to modulate the input light source for the reconstruction of the 3-D scene of the real objects. However, the main challenges facing high-quality holographic 3-D display have been limited generation speed and the low accuracy of computer-generated holograms. This research generates more effective and accurate point cloud data by developing an RGB-D salient object detection model in the acquisition unit. In addition, a divided point cloud gridding method is proposed to enhance the computing speed of hologram generation. In the RGB channels, we categorized each object point into depth grids with identical depth values. The depth grids are divided into M × N parts, and only the effective parts are calculated. Compared with traditional methods, the calculation time is dramatically reduced. The feasibility of our proposed approach is established through experiments.}, } @article {pmid36776787, year = {2023}, author = {Zahid, MA and Shafiq, B and Vaidya, J and Afzal, A and Shamail, S}, title = {Collaborative Business Process Fault Resolution in the Services Cloud.}, journal = {IEEE transactions on services computing}, volume = {16}, number = {1}, pages = {162-176}, doi = {10.1109/tsc.2021.3112525}, pmid = {36776787}, issn = {1939-1374}, support = {R01 GM118574/GM/NIGMS NIH HHS/United States ; R35 GM134927/GM/NIGMS NIH HHS/United States ; }, abstract = {The emergence of cloud and edge computing has enabled rapid development and deployment of Internet-centric distributed applications. There are many platforms and tools that help users develop distributed business process (BP) applications by composing relevant service components in a plug-and-play manner. However, there is no guarantee that a BP application developed in this way is fault-free. In this paper, we formalize the problem of collaborative BP fault resolution which aims to utilize information from existing fault-free BPs that use similar services to resolve faults in a user developed BP. We present an approach based on association analysis of pairwise transformations between a faulty BP and existing BPs to identify the smallest possible set of transformations to resolve the fault(s) in the user developed BP.
An extensive experimental evaluation over both synthetically generated faulty BPs and real BPs developed by users shows the effectiveness of our approach.}, } @article {pmid36772751, year = {2023}, author = {Gayathri, R and Usharani, S and Mahdal, M and Vezhavendhan, R and Vincent, R and Rajesh, M and Elangovan, M}, title = {Detection and Mitigation of IoT-Based Attacks Using SNMP and Moving Target Defense Techniques.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {3}, pages = {}, pmid = {36772751}, issn = {1424-8220}, abstract = {This paper proposes a solution for ensuring the security of IoT devices in the cloud environment by protecting against distributed denial-of-service (DDoS) and false data injection attacks. The proposed solution is based on the integration of simple network management protocol (SNMP), Kullback-Leibler distance (KLD), access control rules (ACL), and moving target defense (MTD) techniques. The SNMP and KLD techniques are used to detect DDoS and false data sharing attacks, while the ACL and MTD techniques are applied to mitigate these attacks by hardening the target and reducing the attack surface. The effectiveness of the proposed framework is validated through experimental simulations on the Amazon Web Service (AWS) platform, which shows a significant reduction in attack probabilities and delays. The integration of IoT and cloud technologies is a powerful combination that can deliver customized and critical solutions to major business vendors. However, ensuring the confidentiality and security of data among IoT devices, storage, and access to the cloud is crucial to maintaining trust among internet users. This paper demonstrates the importance of implementing robust security measures to protect IoT devices in the cloud environment and highlights the potential of the proposed solution in protecting against DDoS and false data injection attacks.}, } @article {pmid36772680, year = {2023}, author = {Bourechak, A and Zedadra, O and Kouahla, MN and Guerrieri, A and Seridi, H and Fortino, G}, title = {At the Confluence of Artificial Intelligence and Edge Computing in IoT-Based Applications: A Review and New Perspectives.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {3}, pages = {}, doi = {10.3390/s23031639}, pmid = {36772680}, issn = {1424-8220}, abstract = {Given its advantages in low latency, fast response, context-aware services, mobility, and privacy preservation, edge computing has emerged as the key support for intelligent applications and 5G/6G Internet of things (IoT) networks. This technology extends the cloud by providing intermediate services at the edge of the network and improving the quality of service for latency-sensitive applications. Many AI-based solutions with machine learning, deep learning, and swarm intelligence have exhibited the high potential to perform intelligent cognitive sensing, intelligent network management, big data analytics, and security enhancement for edge-based smart applications. Despite its many benefits, there are still concerns about the required capabilities of intelligent edge computing to deal with the computational complexity of machine learning techniques for big IoT data analytics. Resource constraints of edge computing, distributed computing, efficient orchestration, and synchronization of resources are all factors that require attention for quality of service improvement and cost-effective development of edge-based smart applications. 
In this context, this paper aims to explore the confluence of AI and edge in many application domains in order to leverage the potential of the existing research around these factors and identify new perspectives. The confluence of edge computing and AI improves the quality of user experience in emergency situations, such as in the Internet of vehicles, where critical inaccuracies or delays can lead to damage and accidents. These are the same factors that most studies have used to evaluate the success of an edge-based application. In this review, we first provide an in-depth analysis of the state of the art of AI in edge-based applications with a focus on eight application areas: smart agriculture, smart environment, smart grid, smart healthcare, smart industry, smart education, smart transportation, and security and privacy. Then, we present a qualitative comparison that emphasizes the main objective of the confluence, the roles and the use of artificial intelligence at the network edge, and the key enabling technologies for edge analytics. Then, open challenges, future research directions, and perspectives are identified and discussed. Finally, some conclusions are drawn.}, } @article {pmid36772662, year = {2023}, author = {Witanto, EN and Stanley, B and Lee, SG}, title = {Distributed Data Integrity Verification Scheme in Multi-Cloud Environment.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {3}, pages = {}, doi = {10.3390/s23031623}, pmid = {36772662}, issn = {1424-8220}, abstract = {Most existing data integrity auditing protocols in cloud storage rely on proof of probabilistic data possession. Consequently, the sampling rate of data integrity verification is low to prevent expensive costs to the auditor. However, in the case of a multi-cloud environment, the amount of stored data will be huge. As a result, a higher sampling rate is needed. It will also have an increased cost for the auditor as a consequence. Therefore, this paper proposes a blockchain-based distributed data integrity verification protocol in multi-cloud environments that enables data verification using multi-verifiers. The proposed scheme aims to increase the sampling rate of data verification without increasing the costs significantly. The performance analysis shows that this protocol achieved a lower time consumption required for verification tasks using multi-verifiers than a single verifier. Furthermore, utilizing multi-verifiers also decreases each verifier's computation and communication costs.}, } @article {pmid36772584, year = {2023}, author = {Alexandrescu, A}, title = {Parallel Processing of Sensor Data in a Distributed Rules Engine Environment through Clustering and Data Flow Reconfiguration.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {3}, pages = {}, doi = {10.3390/s23031543}, pmid = {36772584}, issn = {1424-8220}, abstract = {An emerging reality is the development of smart buildings and cities, which improve residents' comfort. These environments employ multiple sensor networks, whose data must be acquired and processed in real time by multiple rule engines, which trigger events that enable specific actuators. The problem is how to handle those data in a scalable manner by using multiple processing instances to maximize the system throughput. This paper considers the types of sensors that are used in these scenarios and proposes a model for abstracting the information flow as a weighted dependency graph. 
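Before the methods are described, a rough illustration of that weighted-dependency-graph abstraction may help: sensors feed rules, edge weights are message rates, and the aggregate load per rule can be balanced across processing instances. The sensors, rules, and rates below are invented, and the greedy balancer is a generic stand-in, not the paper's k-means or genetic algorithms.

```python
# Sketch: sensor-to-rule information flow as weighted edges, plus a greedy
# longest-processing-time assignment of rules to processing instances.
from collections import defaultdict
import heapq

# (sensor, rule, messages_per_second) -- all names/rates are hypothetical.
edges = [
    ("temp1", "hvac_rule", 5.0), ("temp2", "hvac_rule", 5.0),
    ("motion1", "light_rule", 20.0), ("door1", "alarm_rule", 1.0),
    ("motion1", "alarm_rule", 20.0), ("co2_1", "hvac_rule", 2.0),
]

# Aggregate incoming weight per rule: the load each rule engine must absorb.
load = defaultdict(float)
for _sensor, rule, rate in edges:
    load[rule] += rate

def assign(rule_load: dict, n_instances: int) -> dict:
    """Assign heaviest rules first, always to the least-loaded instance."""
    heap = [(0.0, i) for i in range(n_instances)]  # (current load, instance)
    placement = {}
    for rule, w in sorted(rule_load.items(), key=lambda kv: -kv[1]):
        cur, idx = heapq.heappop(heap)
        placement[rule] = idx
        heapq.heappush(heap, (cur + w, idx))
    return placement

print(assign(load, n_instances=2))
```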
Two parallel computing methods are then proposed for obtaining an efficient data flow: a variation of the parallel k-means clustering algorithm and a custom genetic algorithm. Simulation results show that the two proposed flow reconfiguration algorithms reduce the rule processing times and provide an efficient solution for increasing the scalability of the considered environment. Another aspect being discussed is using an open-source cloud solution to manage the system and how to use the two algorithms to increase efficiency. These methods allow for a seamless increase in the number of sensors in the environment by making smart use of the available resources.}, } @article {pmid36772562, year = {2023}, author = {Kim, SH and Kim, T}, title = {Local Scheduling in KubeEdge-Based Edge Computing Environment.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {3}, pages = {}, doi = {10.3390/s23031522}, pmid = {36772562}, issn = {1424-8220}, abstract = {KubeEdge is an open-source platform that orchestrates containerized Internet of Things (IoT) application services in IoT edge computing environments. Based on Kubernetes, it supports heterogeneous IoT device protocols on edge nodes and provides various functions necessary to build edge computing infrastructure, such as network management between cloud and edge nodes. However, the resulting cloud-based systems are subject to several limitations. In this study, we evaluated the performance of KubeEdge in terms of the computational resource distribution and delay between edge nodes. We found that forwarding traffic between edge nodes degrades the throughput of clusters and causes service delay in edge computing environments. Based on these results, we proposed a local scheduling scheme that handles user traffic locally at each edge node. The performance evaluation results revealed that local scheduling outperforms the existing load-balancing algorithm in the edge computing environment.}, } @article {pmid36772506, year = {2023}, author = {Wang, M and Li, C and Wang, X and Piao, Z and Yang, Y and Dai, W and Zhang, Q}, title = {Research on Comprehensive Evaluation and Early Warning of Transmission Lines' Operation Status Based on Dynamic Cloud Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {3}, pages = {}, doi = {10.3390/s23031469}, pmid = {36772506}, issn = {1424-8220}, abstract = {The current methods for evaluating the operating condition of electricity transmission lines (ETLs) and providing early warning have several problems, such as the low correlation of data, ignoring the influence of seasonal factors, and strong subjectivity. This paper analyses the sensitive factors that influence dynamic key evaluation indices such as grounding resistance, sag, and wire corrosion, establishes the evaluation criteria of the ETL operation state, and proposes five ETL status levels and seven principles for selecting evaluation indices. Nine grade I evaluation indices and twenty-nine grade II evaluation indices, including passageway and meteorological environments, are determined. The cloud model theory is embedded and used to propose a warning technology for the operation state of ETLs based on inspection defect parameters and the cloud model. 
Combining the inspection defect parameters of a line in the Baicheng district of Jilin Province with critical evaluation index data such as grounding resistance, sag, and wire corrosion, which are used to calculate the timeliness of the data, an actual line is evaluated. The research shows that the dynamic evaluation model is correct and that the ETL status evaluation and early warning method has reasonable practicability.}, } @article {pmid36772424, year = {2023}, author = {Mangalampalli, S and Karri, GR and Elngar, AA}, title = {An Efficient Trust-Aware Task Scheduling Algorithm in Cloud Computing Using Firefly Optimization.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {3}, pages = {}, doi = {10.3390/s23031384}, pmid = {36772424}, issn = {1424-8220}, abstract = {Task scheduling in the cloud computing paradigm poses a challenge for researchers as the workloads that come onto cloud platforms are dynamic and heterogeneous. Therefore, scheduling these heterogeneous tasks to the appropriate virtual resources is a huge challenge. The inappropriate assignment of tasks to virtual resources leads to the degradation of the quality of services and thereby leads to a violation of the SLA metrics, ultimately leading to the degradation of trust in the cloud provider by the cloud user. Therefore, to preserve trust in the cloud provider and to improve the scheduling process in the cloud paradigm, we propose an efficient task scheduling algorithm that considers the priorities of tasks as well as virtual machines, thereby scheduling tasks accurately to appropriate VMs. This scheduling algorithm is modeled using firefly optimization. The workload for this approach was generated using fabricated datasets with different distributions, and the real-time worklogs of HPC2N and NASA were also considered. This algorithm was implemented using a Cloudsim simulation environment, and our proposed approach was compared against the baseline approaches of ACO, PSO, and GA. The simulation results revealed that our proposed approach significantly outperformed the baseline approaches, minimizing the makespan while improving availability, success rate, and turnaround efficiency.}, } @article {pmid36772335, year = {2023}, author = {Markus, A and Al-Haboobi, A and Kecskemeti, G and Kertesz, A}, title = {Simulating IoT Workflows in DISSECT-CF-Fog.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {3}, pages = {}, doi = {10.3390/s23031294}, pmid = {36772335}, issn = {1424-8220}, abstract = {The modelling of IoT applications utilising the resources of cloud and fog computing is not straightforward because they have to support various trigger-based events that make human life easier. The sequence of tasks, such as performing a service call, receiving a data packet in the form of a message sent by an IoT device, and managing actuators or executing a computational task on a virtual machine, are often associated with and composed of IoT workflows. The development and deployment of such IoT workflows and their management systems in real life, including communication and network operations, can be complicated due to high operation costs and access limitations. Therefore, simulation solutions are often applied for such purposes. In this paper, we introduce a novel simulator extension of the DISSECT-CF-Fog simulator that leverages the workflow scheduling and its execution capabilities to model real-life IoT use cases.
We also show that state-of-the-art simulators typically omit the IoT factor in the case of the scientific workflow evaluation. Therefore, we present a scalability study focusing on scientific workflows and on the interoperability of scientific and IoT workflows in DISSECT-CF-Fog.}, } @article {pmid36772304, year = {2023}, author = {Yu, L and He, M and Liang, H and Xiong, L and Liu, Y}, title = {A Blockchain-Based Authentication and Authorization Scheme for Distributed Mobile Cloud Computing Services.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {3}, pages = {}, doi = {10.3390/s23031264}, pmid = {36772304}, issn = {1424-8220}, abstract = {Authentication and authorization constitute the essential security component, access control, for preventing unauthorized access to cloud services in mobile cloud computing (MCC) environments. Traditional centralized access control models relying on third party trust face a critical challenge due to a high trust cost and single point of failure. Blockchain can achieve the distributed trust for access control designs in a mutual untrustworthy scenario, but it also leads to expensive storage overhead. Considering the above issues, this work constructed an authentication and authorization scheme based on blockchain that can provide a dynamic update of access permissions by utilizing the smart contract. Compared with the conventional authentication scheme, the proposed scheme integrates an extra authorization function without additional computation and communication costs in the authentication phase. To improve the storage efficiency and system scalability, only one transaction is required to be stored in blockchain to record a user's access privileges on different service providers (SPs). In addition, mobile users in the proposed scheme are able to register with an arbitrary SP once and then utilize the same credential to access different SPs with different access levels. The security analysis indicates that the proposed scheme is secure under the random oracle model. The performance analysis clearly shows that the proposed scheme possesses superior computation and communication efficiencies and requires a low blockchain storage capacity for accomplishing user registration and updates.}, } @article {pmid36772101, year = {2023}, author = {Yang, J and Zheng, J and Wang, H and Li, J and Sun, H and Han, W and Jiang, N and Tan, YA}, title = {Edge-Cloud Collaborative Defense against Backdoor Attacks in Federated Learning.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {3}, pages = {}, doi = {10.3390/s23031052}, pmid = {36772101}, issn = {1424-8220}, abstract = {Federated learning has a distributed collaborative training mode, widely used in IoT scenarios of edge computing intelligent services. However, federated learning is vulnerable to malicious attacks, mainly backdoor attacks. Once an edge node implements a backdoor attack, the embedded backdoor mode will rapidly expand to all relevant edge nodes, which poses a considerable challenge to security-sensitive edge computing intelligent services. In the traditional edge collaborative backdoor defense method, only the cloud server is trusted by default. However, edge computing intelligent services have limited bandwidth and unstable network connections, which make it impossible for edge devices to retrain their models or update the global model. Therefore, it is crucial to detect whether the data of edge nodes are polluted in time. 
This paper proposes a layered defense framework for edge-computing intelligent services. At the edge, we combine the gradient rising strategy and attention self-distillation mechanism to maximize the correlation between edge device data and edge object categories and train a clean model as much as possible. On the server side, we first implement a two-layer backdoor detection mechanism to eliminate backdoor updates and use the attention self-distillation mechanism to restore the model performance. Our results show that the two-stage defense mode is more suitable for the security protection of edge computing intelligent services. It can not only weaken the effectiveness of the backdoor at the edge end but also conduct this defense at the server end, making the model more secure. The precision of our model on the main task is almost the same as that of the clean model.}, } @article {pmid36770943, year = {2023}, author = {Kumar, A and Arantes, PR and Saha, A and Palermo, G and Wong, BM}, title = {GPU-Enhanced DFTB Metadynamics for Efficiently Predicting Free Energies of Biochemical Systems.}, journal = {Molecules (Basel, Switzerland)}, volume = {28}, number = {3}, pages = {}, doi = {10.3390/molecules28031277}, pmid = {36770943}, issn = {1420-3049}, support = {R01GM141329/NH/NIH HHS/United States ; }, abstract = {Metadynamics calculations of large chemical systems with ab initio methods are computationally prohibitive due to the extensive sampling required to simulate the large degrees of freedom in these systems. To address this computational bottleneck, we utilized a GPU-enhanced density functional tight binding (DFTB) approach on a massively parallelized cloud computing platform to efficiently calculate the thermodynamics and metadynamics of biochemical systems. To first validate our approach, we calculated the free-energy surfaces of alanine dipeptide and showed that our GPU-enhanced DFTB calculations qualitatively agree with computationally-intensive hybrid DFT benchmarks, whereas classical force fields give significant errors. Most importantly, we show that our GPU-accelerated DFTB calculations are significantly faster than previous approaches by up to two orders of magnitude. To further extend our GPU-enhanced DFTB approach, we also carried out a 10 ns metadynamics simulation of remdesivir, which is prohibitively out of reach for routine DFT-based metadynamics calculations. We find that the free-energy surfaces of remdesivir obtained from DFTB and classical force fields differ significantly, where the latter overestimates the internal energy contribution of high free-energy states. Taken together, our benchmark tests, analyses, and extensions to large biochemical systems highlight the use of GPU-enhanced DFTB simulations for efficiently predicting the free-energy surfaces/thermodynamics of large biochemical systems.}, } @article {pmid36768346, year = {2023}, author = {Sarkar, C and Das, B and Rawat, VS and Wahlang, JB and Nongpiur, A and Tiewsoh, I and Lyngdoh, NM and Das, D and Bidarolli, M and Sony, HT}, title = {Artificial Intelligence and Machine Learning Technology Driven Modern Drug Discovery and Development.}, journal = {International journal of molecular sciences}, volume = {24}, number = {3}, pages = {}, doi = {10.3390/ijms24032026}, pmid = {36768346}, issn = {1422-0067}, abstract = {The discovery and advances of medicines may be considered as the ultimate relevant translational science effort that adds to human invulnerability and happiness. 
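On the server-side filtering idea in the edge-cloud backdoor-defense entry above: one common ingredient of such defenses is to discard client updates whose direction deviates strongly from the majority before averaging. The sketch below illustrates only that generic filtering step; it is not the paper's two-layer detection or attention self-distillation mechanism.

```python
# Generic sketch: cosine-similarity filtering of federated client updates
# against the coordinate-wise median direction before aggregation.
import numpy as np

def cos(a: np.ndarray, b: np.ndarray) -> float:
    return float(a @ b / (np.linalg.norm(a) * np.linalg.norm(b) + 1e-12))

def filtered_mean(updates: list, threshold: float = 0.0):
    """Average only updates aligned with the median direction."""
    ref = np.median(np.vstack(updates), axis=0)
    kept = [u for u in updates if cos(u, ref) > threshold]
    return np.mean(kept, axis=0), len(kept)

rng = np.random.default_rng(0)
benign = [rng.normal(0.1, 0.01, 8) for _ in range(9)]
poisoned = [-5 * benign[0]]  # adversarial update pointing the other way
agg, n_kept = filtered_mean(benign + poisoned)
print(n_kept)  # 9: the flipped update is excluded from the average
```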
But advancing a fresh medication is a quite convoluted, costly, and protracted operation, normally costing USD ~2.6 billion and consuming a mean time span of 12 years. Methods to cut back expenditure and hasten new drug discovery have prompted an arduous and compelling brainstorming exercise in the pharmaceutical industry. The engagement of Artificial Intelligence (AI), including the deep-learning (DL) component in particular, has been facilitated by the employment of classified big data, in concert with strikingly reinforced computing prowess and cloud storage, across all fields. AI has energized computer-facilitated drug discovery. An unrestricted espousing of machine learning (ML), especially DL, in many scientific specialties, and the technological refinements in computing hardware and software, in concert with various aspects of the problem, sustain this progress. ML algorithms have been extensively engaged for computer-facilitated drug discovery. DL methods, such as artificial neural networks (ANNs) comprising multiple buried processing layers, have of late seen a resurgence due to their capability to power automatic attribute elicitations from the input data, coupled with their ability to obtain nonlinear input-output pertinencies. Such features of DL methods augment classical ML techniques which bank on human-contrived molecular descriptors. A major part of the early reluctance concerning utility of AI in pharmaceutical discovery has begun to melt, thereby advancing medicinal chemistry. AI, along with modern experimental technical knowledge, is anticipated to invigorate the quest for new and improved pharmaceuticals in an expeditious, economical, and increasingly compelling manner. DL-facilitated methods have just initiated kickstarting for some integral issues in drug discovery. Many technological advances, such as "message-passing paradigms", "spatial-symmetry-preserving networks", "hybrid de novo design", and other ingenious ML exemplars, will definitely come to be pervasively widespread and help dissect many of the biggest, and most intriguing inquiries. Open data allocation and model augmentation will exert a decisive hold during the progress of drug discovery employing AI. This review will address the impending utilizations of AI to refine and bolster the drug discovery operation.}, } @article {pmid36763944, year = {2023}, author = {Shahinyan, GK and Hu, MY and Jiang, T and Osadchiy, V and Sigalos, JT and Mills, JN and Kachroo, N and Eleswarapu, SV}, title = {Cannabis and male sexual health: contemporary qualitative review and insight into perspectives of young men on the internet.}, journal = {Sexual medicine reviews}, volume = {}, number = {}, pages = {}, doi = {10.1093/sxmrev/qeac010}, pmid = {36763944}, issn = {2050-0521}, abstract = {INTRODUCTION: Cannabis use is increasing across the United States, yet its short- and long-term effects on sexual function remain controversial. Currently, there is a paucity of studies exploring the relationship between cannabis and men's health.

OBJECTIVES: To summarize the available literature on cannabis and men's health and provide insight into lay perceptions of this topic.

METHODS: We performed a qualitative PubMed review of the existing literature on cannabis and men's health according to the PRISMA guidelines. Separately, we analyzed relevant themes in online men's health forums. We utilized a Google cloud-based platform (BigQuery) to extract relevant posts from 5 men's health Reddit forums from August 2018 to August 2019. We conducted a qualitative thematic analysis of the posts and quantitatively analyzed them using natural language processing and a meaning extraction method with principal component analysis.
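For readers curious how such a BigQuery extraction looks in code, here is a minimal sketch with the google-cloud-bigquery client. The project, dataset, table, and column names are assumptions for illustration (the public Reddit mirrors on BigQuery have varied over time), as is the keyword filter.

```python
# Sketch: pull candidate Reddit posts from a BigQuery table for analysis.
from google.cloud import bigquery

client = bigquery.Client()  # requires GCP credentials in the environment

sql = """
    SELECT id, subreddit, title, selftext, created_utc
    FROM `my-project.reddit_mirror.posts`               -- hypothetical table
    WHERE subreddit IN ('AskMen', 'sex', 'malehealth')  -- example forums
      AND created_utc BETWEEN '2018-08-01' AND '2019-08-31'
      AND REGEXP_CONTAINS(LOWER(CONCAT(title, ' ', selftext)),
                          r'cannabis|marijuana|weed|thc')
"""

posts = client.query(sql).to_dataframe()
print(len(posts), "candidate posts")
```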

RESULTS: Our literature review revealed a mix of animal and human studies demonstrating the negative effects of cannabis on semen parameters and varying effects on erectile function and hormone levels. In our analysis of 372,686 Reddit posts, 1190 (0.3%) included relevant discussion on cannabis and men's health. A total of 272 posts were manually analyzed, showing that online discussions revolve around seeking answers and sharing the effects of cannabis on various aspects of sexual health and quality of life, often with conflicting experiences. Quantitative analysis revealed one thematic cluster related to cannabis, insecurity, and mental/physical health.

CONCLUSIONS: There is a limited number of quality human studies investigating the effects of cannabis on men's health. Men online are uncertain about how cannabis affects their sexual health and seek more information. As the prevalence of cannabis use increases, so does the need for research in this area.}, } @article {pmid36761837, year = {2022}, author = {Tercan, B and Qin, G and Kim, TK and Aguilar, B and Phan, J and Longabaugh, W and Pot, D and Kemp, CJ and Chambwe, N and Shmulevich, I}, title = {SL-Cloud: A Cloud-based resource to support synthetic lethal interaction discovery.}, journal = {F1000Research}, volume = {11}, number = {}, pages = {493}, doi = {10.12688/f1000research.110903.1}, pmid = {36761837}, issn = {2046-1402}, abstract = {Synthetic lethal interactions (SLIs), genetic interactions in which the simultaneous inactivation of two genes leads to a lethal phenotype, are promising targets for therapeutic intervention in cancer, as exemplified by the recent success of PARP inhibitors in treating BRCA1/2-deficient tumors. We present SL-Cloud, a new component of the Institute for Systems Biology Cancer Gateway in the Cloud (ISB-CGC), that provides an integrated framework of cloud-hosted data resources and curated workflows to enable facile prediction of SLIs. This resource addresses two main challenges related to SLI inference: the need to wrangle and preprocess large multi-omic datasets and the availability of multiple comparable prediction approaches. SL-Cloud enables customizable computational inference of SLIs and testing of prediction approaches across multiple datasets. We anticipate that cancer researchers will find utility in this tool for discovery of SLIs to support further investigation into potential drug targets for anticancer therapies.}, } @article {pmid36757918, year = {2023}, author = {Pollak, DJ and Chawla, G and Andreev, A and Prober, DA}, title = {First steps into the cloud: Using Amazon data storage and computing with Python notebooks.}, journal = {PloS one}, volume = {18}, number = {2}, pages = {e0278316}, pmid = {36757918}, issn = {1932-6203}, abstract = {With the oncoming age of big data, biologists are encountering more use cases for cloud-based computing to streamline data processing and storage. Unfortunately, cloud platforms are difficult to learn, and there are few resources for biologists to demystify them. We have developed a guide for experimental biologists to set up cloud processing on Amazon Web Services to cheaply outsource data processing and storage. Here we provide a guide for setting up a computing environment in the cloud and showcase examples of using Python and Julia programming languages. We present example calcium imaging data in the zebrafish brain and corresponding analysis using suite2p software. Tools for budget and user management are further discussed in the attached protocol. 
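In the spirit of the cloud-computing guide described above, the following boto3 sketch shows the basic stage-and-fetch pattern for S3 that such workflows rest on. The bucket and key names are hypothetical; credentials are assumed to come from the environment or an IAM role.

```python
# Minimal sketch: stage imaging data in S3, then pull it into a cloud
# instance (e.g., one running suite2p) for analysis.
import boto3

s3 = boto3.client("s3")

# Upload a local recording once...
s3.upload_file("session01_calcium.tif", "my-lab-bucket",
               "zebrafish/session01_calcium.tif")

# ...then any cloud notebook or VM can fetch it for processing.
s3.download_file("my-lab-bucket", "zebrafish/session01_calcium.tif",
                 "/tmp/session01_calcium.tif")
```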
Using this guide, researchers with limited coding experience can get started with cloud-based computing or move existing coding infrastructure into the cloud environment.}, } @article {pmid36754821, year = {2023}, author = {Bosia, F and Zheng, P and Vaucher, A and Weymuth, T and Dral, PO and Reiher, M}, title = {Ultra-fast semi-empirical quantum chemistry for high-throughput computational campaigns with Sparrow.}, journal = {The Journal of chemical physics}, volume = {158}, number = {5}, pages = {054118}, doi = {10.1063/5.0136404}, pmid = {36754821}, issn = {1089-7690}, abstract = {Semi-empirical quantum chemical approaches are known to compromise accuracy for the feasibility of calculations on huge molecules. However, the need for ultrafast calculations in interactive quantum mechanical studies, high-throughput virtual screening, and data-driven machine learning has shifted the emphasis toward calculation runtimes recently. This comes with new constraints for the software implementation as many fast calculations would suffer from a large overhead of the manual setup and other procedures that are comparatively fast when studying a single molecular structure, but which become prohibitively slow for high-throughput demands. In this work, we discuss the effect of various well-established semi-empirical approximations on calculation speed and relate this to data transfer rates from the raw-data source computer to the results of the visualization front end. For the former, we consider desktop computers, local high performance computing, and remote cloud services in order to elucidate the effect on interactive calculations, for web and cloud interfaces in local applications, and in world-wide interactive virtual sessions. The models discussed in this work have been implemented into our open-source software SCINE Sparrow.}, } @article {pmid36753980, year = {2023}, author = {Cubillos, LH and Augenstein, TE and Ranganathan, R and Krishnan, C}, title = {Breaking the barriers to designing online experiments: A novel open-source platform for supporting procedural skill learning experiments.}, journal = {Computers in biology and medicine}, volume = {154}, number = {}, pages = {106627}, doi = {10.1016/j.compbiomed.2023.106627}, pmid = {36753980}, issn = {1879-0534}, abstract = {BACKGROUND: Motor learning experiments are typically performed in laboratory environments, which can be time-consuming and require dedicated equipment/personnel, thus limiting the ability to gather data from large samples. To address this problem, some researchers have transitioned to unsupervised online experiments, showing advantages in participant recruitment without losing validity. However, most online platforms require coding experience or time-consuming setups to create and run experiments, limiting their usage across the field.

METHOD: To tackle this issue, an open-source web-based platform was developed (https://experiments.neurro-lab.engin.umich.edu/) to create, run, and manage procedural skill learning experiments without coding or setup requirements. The feasibility of the platform and the comparability of the results between supervised (n = 17) and unsupervised (n = 24) conditions were tested in 41 naive right-handed participants using an established sequential finger tapping task. The study also tested whether a previously reported rapid form of offline consolidation (i.e., microscale learning) in procedural skill learning could be replicated with the developed platform and evaluated the extent of interlimb transfer associated with the finger tapping task.
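As a toy illustration of the outcome measure used in this task, the sketch below computes mean tapping speed in keypresses per second from keypress timestamps; the numbers are synthetic, and this is not the platform's actual scoring code.

```python
# Toy scoring sketch for a sequential finger-tapping trial.
import numpy as np

def tapping_speed(press_times_s: np.ndarray, trial_len_s: float = 10.0) -> float:
    """Keypresses per second for one trial of known duration."""
    return len(press_times_s) / trial_len_s

rng = np.random.default_rng(0)
pre = np.sort(rng.uniform(0, 10, 24))   # keypress timestamps before training
post = np.sort(rng.uniform(0, 10, 36))  # keypress timestamps after training
print(f"improvement: {tapping_speed(post) - tapping_speed(pre):.1f} keypresses/s")
```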

RESULTS: The results indicated that the performance metrics were comparable between the supervised and unsupervised groups (all p's > 0.05). The learning curves, mean tapping speeds, and microscale learning were similar to those of previous studies. Training led to significant improvements in mean tapping speed (2.22 ± 1.48 keypresses/s, p < 0.001) and a significant interlimb transfer of learning (1.22 ± 1.43 keypresses/s, p < 0.05).

CONCLUSIONS: The results show that the presented platform may serve as a valuable tool for conducting online procedural skill-learning experiments.}, } @article {pmid36750410, year = {2022}, author = {Raucci, U and Weir, H and Sakshuwong, S and Seritan, S and Hicks, CB and Vannucci, F and Rea, F and Martínez, TJ}, title = {Interactive Quantum Chemistry Enabled by Machine Learning, Graphical Processing Units, and Cloud Computing.}, journal = {Annual review of physical chemistry}, volume = {}, number = {}, pages = {}, doi = {10.1146/annurev-physchem-061020-053438}, pmid = {36750410}, issn = {1545-1593}, abstract = {Modern quantum chemistry algorithms are increasingly able to accurately predict molecular properties that are useful for chemists in research and education. Despite this progress, performing such calculations is currently unattainable to the wider chemistry community, as they often require domain expertise, computer programming skills, and powerful computer hardware. In this review, we outline methods to eliminate these barriers using cutting-edge technologies. We discuss the ingredients needed to create accessible platforms that can compute quantum chemistry properties in real time, including graphical processing units-accelerated quantum chemistry in the cloud, artificial intelligence-driven natural molecule input methods, and extended reality visualization. We end by highlighting a series of exciting applications that assemble these components to create uniquely interactive platforms for computing and visualizing spectra, 3D structures, molecular orbitals, and many other chemical properties. Expected final online publication date for the Annual Review of Physical Chemistry, Volume 74 is April 2023. Please see http://www.annualreviews.org/page/journal/pubdates for revised estimates.}, } @article {pmid36747613, year = {2023}, author = {Koenig, Z and Yohannes, MT and Nkambule, LL and Goodrich, JK and Kim, HA and Zhao, X and Wilson, MW and Tiao, G and Hao, SP and Sahakian, N and Chao, KR and , and Talkowski, ME and Daly, MJ and Brand, H and Karczewski, KJ and Atkinson, EG and Martin, AR}, title = {A harmonized public resource of deeply sequenced diverse human genomes.}, journal = {bioRxiv : the preprint server for biology}, volume = {}, number = {}, pages = {}, doi = {10.1101/2023.01.23.525248}, pmid = {36747613}, abstract = {Underrepresented populations are often excluded from genomic studies due in part to a lack of resources supporting their analysis. The 1000 Genomes Project (1kGP) and Human Genome Diversity Project (HGDP), which have recently been sequenced to high coverage, are valuable genomic resources because of the global diversity they capture and their open data sharing policies. Here, we harmonized a high quality set of 4,096 whole genomes from HGDP and 1kGP with data from gnomAD and identified over 155 million high-quality SNVs, indels, and SVs. We performed a detailed ancestry analysis of this cohort, characterizing population structure and patterns of admixture across populations, analyzing site frequency spectra, and measuring variant counts at global and subcontinental levels. We also demonstrate substantial added value from this dataset compared to the prior versions of the component resources, typically combined via liftover and variant intersection; for example, we catalog millions of new genetic variants, mostly rare, compared to previous releases. 
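For a sense of how one touches a joint callset like the harmonized HGDP+1kGP resource described above, here is a sketch using Hail, the toolkit commonly used for gnomAD-scale cloud analysis. The bucket path is a placeholder, not the resource's published location, and the QC step shown is just a typical first move from such tutorials.

```python
# Sketch: open a dense joint callset with Hail and run per-sample QC.
import hail as hl

hl.init()  # on a cloud cluster (e.g., Dataproc) this picks up Spark

mt = hl.read_matrix_table("gs://example-bucket/hgdp_1kgp_dense.mt")  # hypothetical path
print(mt.count())  # (n_variants, n_samples)

mt = hl.sample_qc(mt)                     # annotate per-sample quality metrics
mt.cols().select("sample_qc").show(5)     # inspect the first few samples
```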
In addition to unrestricted individual-level public release, we provide detailed tutorials for conducting many of the most common quality control steps and analyses with these data in a scalable cloud-computing environment and publicly release this new phased joint callset for use as a haplotype resource in phasing and imputation pipelines. This jointly called reference panel will serve as a key resource to support research of diverse ancestry populations.}, } @article {pmid36733938, year = {2023}, author = {Healthcare Engineering, JO}, title = {Retracted: Discussion on Health Service System of Mobile Medical Institutions Based on Internet of Things and Cloud Computing.}, journal = {Journal of healthcare engineering}, volume = {2023}, number = {}, pages = {9892481}, pmid = {36733938}, issn = {2040-2309}, abstract = {[This retracts the article DOI: 10.1155/2022/5235349.].}, } @article {pmid36723167, year = {2023}, author = {Klukowski, P and Riek, R and Güntert, P}, title = {NMRtist: an online platform for automated biomolecular NMR spectra analysis.}, journal = {Bioinformatics (Oxford, England)}, volume = {}, number = {}, pages = {}, doi = {10.1093/bioinformatics/btad066}, pmid = {36723167}, issn = {1367-4811}, abstract = {UNLABELLED: We present NMRtist, an online platform that combines deep learning, large-scale optimization, and cloud computing to automate protein NMR spectra analysis. Our website provides virtual storage for NMR spectra deposition together with a set of applications designed for automated peak picking, chemical shift assignment, and protein structure determination. The system can be used by non-experts and allows protein assignments and structures to be determined within hours after the measurements, strictly without any human intervention.

AVAILABILITY: NMRtist is freely available to non-commercial users at https://nmrtist.org.

SUPPLEMENTARY INFORMATION: Supplementary data are available at Bioinformatics online.}, } @article {pmid36721327, year = {2023}, author = {Batorsky, A and Bowden, AE and Darwin, J and Fields, AJ and Greco, CM and Harris, RE and Hue, TF and Kakyomya, J and Mehling, W and O'Neill, C and Patterson, CG and Piva, SR and Sollmann, N and Toups, V and Wasan, AD and Wasserman, R and Williams, DA and Vo, NV and Psioda, MA and McCumber, M}, title = {The BACPAC Research Program Data Harmonization: Rationale for Data Elements and Standards.}, journal = {Pain medicine (Malden, Mass.)}, volume = {}, number = {}, pages = {}, doi = {10.1093/pm/pnad008}, pmid = {36721327}, issn = {1526-4637}, abstract = {OBJECTIVE: One aim of the Back Pain Consortium (BACPAC) Research Program is to develop an integrated model of chronic low back pain that is informed by combined data from translational research and clinical trials. We describe efforts to maximize data harmonization and accessibility to facilitate Consortium-wide analyses.

METHODS: Consortium-wide working groups established harmonized data elements to be collected in all studies and developed standards for tabular and non-tabular data (e.g., imaging and omics). The BACPAC Data Portal was developed to facilitate research collaboration across the Consortium.

RESULTS: Clinical experts developed the BACPAC Minimum Dataset with required domains and outcome measures to be collected using questionnaires across projects. Other non-required domain-specific measures are collected by multiple studies. To optimize cross-study analyses, a modified data standard was developed based on the Clinical Data Interchange Standards Consortium Study Data Tabulation Model to harmonize data structures and facilitate integration of baseline characteristics, participant-reported outcomes, chronic low back pain treatments, clinical exam, functional performance, psychosocial characteristics, quantitative sensory testing, imaging and biomechanical data. Standards to accommodate the unique features of chronic low back pain data were adopted. Research units submit standardized study data to the BACPAC Data Portal, developed as a secure cloud-based central data repository and computing infrastructure for researchers to access and conduct analyses on data collected by or acquired for BACPAC.
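To illustrate the kind of tabular standardization this describes, the pandas sketch below reshapes a wide questionnaire export into a long, SDTM-style findings table. All variable names are hypothetical, not BACPAC's actual standard.

```python
# Sketch: wide questionnaire export -> long SDTM-style QS domain table.
import pandas as pd

raw = pd.DataFrame({
    "participant_id": ["P001", "P002"],
    "visit": ["baseline", "baseline"],
    "peg_pain": [6, 4],            # example item scores (hypothetical)
    "peg_interference": [5, 7],
})

qs = raw.melt(id_vars=["participant_id", "visit"],
              var_name="QSTESTCD", value_name="QSORRES")
qs.insert(0, "DOMAIN", "QS")  # SDTM-style domain label
print(qs)
```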

CONCLUSIONS: BACPAC harmonization efforts and data standards serve as an innovative model for data integration that could be used as a framework for other consortia with multiple, decentralized research programs.}, } @article {pmid36720454, year = {2023}, author = {Firoz, A and Ravanan, P and Saha, P and Prashar, T and Talwar, P}, title = {Genome-wide screening and identification of potential kinases involved in endoplasmic reticulum stress responses.}, journal = {Life sciences}, volume = {}, number = {}, pages = {121452}, doi = {10.1016/j.lfs.2023.121452}, pmid = {36720454}, issn = {1879-0631}, abstract = {AIM: This study aims to identify endoplasmic reticulum stress response elements (ERSE) in the human genome to explore potentially regulated genes, including kinases and transcription factors, involved in the endoplasmic reticulum (ER) stress and its related diseases.

MATERIALS AND METHODS: Python-based whole genome screening of ERSE was performed using the Amazon Web Services elastic computing system. The Kinome database was used to filter out the kinases from the extracted list of ERSE-related genes. Additionally, network analysis and genome enrichment were achieved using NDEx, the Network and Data Exchange software, and web-based computational tools. To validate the gene expression, quantitative RT-PCR was performed for selected kinases from the list by exposing the HeLa cells to tunicamycin, an ER stress inducer, for various time points.
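A minimal sketch of the kind of Python motif scan this describes follows. The CCAAT-N9-CCACG pattern used here is the commonly cited ERSE consensus and should be treated as an assumption; the study's exact pattern and genome-scale plumbing may differ.

```python
# Sketch: scan a sequence (both strands) for an ERSE-like motif.
import re

COMP = str.maketrans("ACGT", "TGCA")
MOTIF = re.compile(r"CCAAT[ACGT]{9}CCACG")  # assumed ERSE consensus

def revcomp(seq: str) -> str:
    return seq.translate(COMP)[::-1]

def find_erse(seq: str):
    seq = seq.upper()
    hits = [(m.start(), "+") for m in MOTIF.finditer(seq)]
    n = len(seq)
    for m in MOTIF.finditer(revcomp(seq)):
        # Map the minus-strand match back to forward-strand coordinates.
        hits.append((n - m.end(), "-"))
    return sorted(hits)

print(find_erse("TTCCAAT" + "A" * 9 + "CCACGGG"))  # [(2, '+')]
```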

KEY FINDINGS: The overall number of ERSE-associated genes follows a similar pattern in humans, mice, and rats, demonstrating the ERSE's conservation in mammals. A total of 2705 ERSE sequences were discovered in the human genome (GRCh38.p14), from which we identified 36 kinases encoding genes. Gene expression analysis has shown a significant change in the expression of selected genes under ER stress conditions in HeLa cells, supporting our finding.

SIGNIFICANCE: In this study, we have introduced a rapid method using Amazon cloud-based services for genome-wide screening of ERSE sequences from both positive and negative strands, covering the entire genome reference sequence. Approximately 10% of the human protein-protein interactome was found to be associated with ERSE-related genes. Our study also provides a rich resource of human ER stress-response-based protein networks and transcription factor interactions and a reference point for future research aiming at targeted therapeutics.}, } @article {pmid36717471, year = {2023}, author = {Nandasena, WDKV and Brabyn, L and Serrao-Neumann, S}, title = {Monitoring invasive pines using remote sensing: a case study from Sri Lanka.}, journal = {Environmental monitoring and assessment}, volume = {195}, number = {2}, pages = {347}, pmid = {36717471}, issn = {1573-2959}, abstract = {Production plantation forestry has many economic benefits but can also have negative environmental impacts, such as the spread of invasive pines to native forest habitats. Monitoring forests for the presence of invasive pines helps with the management of this issue. However, detection of vegetation change over a long time period is difficult due to changes in image quality and sensor types, the spectral similarity of evergreen species, and frequent cloud cover in the study area. The costs of high-resolution images are also prohibitive for routine monitoring in resource-constrained countries. This research investigated the use of remote sensing to identify the spread of Pinus caribaea over a 21-year period (2000 to 2021) in Belihuloya, Sri Lanka, using Landsat images. It applied a range of techniques to produce cloud-free images, extract vegetation features, and improve vegetation classification accuracy, followed by the use of a Geographical Information System to spatially analyze the spread of invasive pines. The results showed most invading pines were found within 100 m of the pine plantations' borders, where broadleaved forests and grasslands are vulnerable to invasion. However, the extent of invasive pine showed an overall decline of 4 ha over the 21 years. The study confirmed that remote sensing combined with spatial analysis is an effective tool for monitoring invasive pines in countries with limited resources. This study also provides information to help conservationists and forest managers conduct strategic planning for sustainable forest management and conservation in Sri Lanka.}, } @article {pmid33822891, year = {2021}, author = {Arisdakessian, CG and Nigro, OD and Steward, GF and Poisson, G and Belcaid, M}, title = {CoCoNet: an efficient deep learning tool for viral metagenome binning.}, journal = {Bioinformatics (Oxford, England)}, volume = {37}, number = {18}, pages = {2803-2810}, doi = {10.1093/bioinformatics/btab213}, pmid = {33822891}, issn = {1367-4811}, abstract = {MOTIVATION: Metagenomic approaches hold the potential to characterize microbial communities and unravel the intricate link between the microbiome and biological processes. Assembly is one of the most critical steps in metagenomics experiments. It consists of transforming overlapping DNA sequencing reads into sufficiently accurate representations of the community's genomes. This process is computationally difficult and commonly results in genomes fragmented across many contigs.
Computational binning methods are used to mitigate fragmentation by partitioning contigs, based on their sequence composition, abundance or chromosome organization, into bins representing the community's genomes. Existing binning methods have been principally tuned for bacterial genomes and do not perform favorably on viral metagenomes.
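A minimal sketch of the composition signal that such binning methods exploit, shown here with generic tetranucleotide frequencies and k-means clustering rather than CoCoNet's deep learning model (the contig sequences and cluster count are toy assumptions):

```python
from itertools import product
import numpy as np
from sklearn.cluster import KMeans

# All 256 tetranucleotides define the composition feature space.
KMERS = ["".join(p) for p in product("ACGT", repeat=4)]
KMER_INDEX = {kmer: i for i, kmer in enumerate(KMERS)}

def tetranucleotide_profile(contig: str) -> np.ndarray:
    """Normalized 4-mer frequency vector for one contig."""
    counts = np.zeros(len(KMERS))
    for i in range(len(contig) - 3):
        idx = KMER_INDEX.get(contig[i:i + 4])
        if idx is not None:  # skip windows with ambiguous bases
            counts[idx] += 1
    total = counts.sum()
    return counts / total if total else counts

# Toy contigs; in practice these come from an assembly FASTA.
contigs = ["ATCG" * 300, "GGCC" * 300, "ATCG" * 280 + "TTTT" * 20]
features = np.stack([tetranucleotide_profile(c) for c in contigs])

# Cluster contigs into putative genome bins by composition alone.
bins = KMeans(n_clusters=2, n_init=10).fit_predict(features)
print(bins)
```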

RESULTS: We propose Composition and Coverage Network (CoCoNet), a new binning method for viral metagenomes that leverages the flexibility and the effectiveness of deep learning to model the co-occurrence of contigs belonging to the same viral genome and provide a rigorous framework for binning viral contigs. Our results show that CoCoNet substantially outperforms existing binning methods on viral datasets.

CoCoNet was implemented in Python and is available for download on PyPI (https://pypi.org/). The source code is hosted on GitHub at https://github.com/Puumanamana/CoCoNet and the documentation is available at https://coconet.readthedocs.io/en/latest/index.html. CoCoNet does not require extensive resources to run. For example, binning 100k contigs took about 4 h on 10 Intel CPU cores (2.4 GHz), with a memory peak at 27 GB (see Supplementary Fig. S9). To process a large dataset, CoCoNet may need to be run on a high-RAM-capacity server. Such servers are typically available in high-performance or cloud computing settings.

SUPPLEMENTARY INFORMATION: Supplementary data are available at Bioinformatics online.}, } @article {pmid36714386, year = {2023}, author = {Patel, YS and Bedi, J}, title = {MAG-D: A multivariate attention network based approach for cloud workload forecasting.}, journal = {Future generations computer systems : FGCS}, volume = {142}, number = {}, pages = {376-392}, pmid = {36714386}, issn = {0167-739X}, abstract = {The coronavirus pandemic and the shift to work-from-home have drastically changed working styles and forced a rapid move toward cloud-based platforms and services for seamless functioning. The pandemic has accelerated a permanent shift in cloud migration. It is estimated that over 95% of digital workloads will reside in cloud-native platforms. Real-time workload forecasting and efficient resource management are two critical challenges for cloud service providers. Cloud workloads are highly volatile and chaotic due to their time-varying nature; thus, classical machine learning-based prediction models fail to deliver accurate forecasts. Recent advances in deep learning have gained massive popularity in forecasting highly nonlinear cloud workloads; however, these models too fall short of excellent forecasting outcomes. Consequently, there is a demand for more accurate forecasting algorithms. Therefore, in this work, we propose 'MAG-D', a Multivariate Attention and Gated recurrent unit based Deep learning approach for Cloud workload forecasting in data centers. We performed an extensive set of experiments on the Google cluster traces, and we confirm that MAG-D exploits the long-range nonlinear dependencies of cloud workloads and improves the prediction accuracy on average compared to recent hybrid techniques based on Long Short-Term Memory (LSTM), Convolutional Neural Network (CNN), Gated Recurrent Unit (GRU), and Bidirectional Long Short-Term Memory (BiLSTM) networks.}, } @article {pmid36712619, year = {2023}, author = {He, R and Xie, W and Wu, B and Brandon, NP and Liu, X and Li, X and Yang, S}, title = {Towards interactional management for power batteries of electric vehicles.}, journal = {RSC advances}, volume = {13}, number = {3}, pages = {2036-2056}, pmid = {36712619}, issn = {2046-2069}, abstract = {With the ever-growing digitalization and mobility of electric transportation, lithium-ion batteries are facing performance and safety issues with the appearance of new materials and the advance of manufacturing techniques. This paper presents a systematic review of burgeoning multi-scale modelling and design for battery efficiency and safety management. The rise of cloud computing provides a tactical solution for efficiently achieving interactional management and control of power batteries based on battery system and traffic big data. The potential of selecting adaptive strategies in emerging digital management is covered systematically, from principles and modelling to machine learning. Specifically, multi-scale optimization is expounded in terms of materials, structures, manufacturing and grouping. The progress on modelling, state estimation and management methods is summarized and discussed in detail.
Moreover, this review demonstrates the innovative progress of machine learning-based data analysis in battery research so far, laying the foundation for future cloud and digital battery management to develop reliable onboard applications.}, } @article {pmid36711159, year = {2023}, author = {D'Souza, G and Reddy, NVS and Manjunath, KN}, title = {Localization of lung abnormalities on chest X-rays using self-supervised equivariant attention.}, journal = {Biomedical engineering letters}, volume = {13}, number = {1}, pages = {21-30}, pmid = {36711159}, issn = {2093-985X}, abstract = {UNLABELLED: Chest X-Ray (CXR) images provide most anatomical details and show abnormalities on a 2D plane. Therefore, a 2D view of the 3D anatomy is sometimes sufficient for the initial diagnosis. However, close to fourteen commonly occurring diseases are sometimes difficult to identify by visually inspecting the images. Therefore, there is a shift toward developing computer-aided assistive systems to help radiologists. This paper proposes a deep learning model for the classification and localization of chest diseases using image-level annotations. The model consists of a modified ResNet50 backbone for extracting a feature corpus from the images, a classifier, and a pixel correlation module (PCM). During PCM training, the network is a weight-shared siamese architecture in which the first branch applies an affine transform to the image before feeding it to the network, while the second applies the same transform to the network output. The method was evaluated on CXR images from the clinical center, split in the ratio of 70:20 for training and testing. The model was developed and tested using the cloud computing platform Google Colaboratory (NVIDIA Tesla P100 GPU, 16 GB of RAM). A radiologist subjectively validated the results. Our model, trained with the configurations mentioned in this paper, outperformed benchmark results.
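A schematic sketch of the siamese equivariance idea just described, using a simple stand-in network and an exact 90° rotation instead of the paper's affine transform and PCM (all layer shapes and the transform choice are toy assumptions):

```python
import torch
import torch.nn as nn

# A stand-in fully convolutional network producing per-pixel maps;
# the paper's modified ResNet50 + PCM is not reproduced here.
net = nn.Sequential(
    nn.Conv2d(1, 8, 3, padding=1), nn.ReLU(),
    nn.Conv2d(8, 1, 3, padding=1),
)

def rotate90(t: torch.Tensor) -> torch.Tensor:
    """Spatial 90-degree rotation over the H and W axes."""
    return torch.rot90(t, k=1, dims=(2, 3))

x = torch.randn(4, 1, 64, 64)  # a batch of toy single-channel images

# Siamese branches sharing weights: transform-then-predict
# versus predict-then-transform.
out_transformed_input = net(rotate90(x))
out_transformed_output = rotate90(net(x))

# Equivariance consistency loss: training penalizes disagreement
# between the two branches, pushing the network to be equivariant.
loss = nn.functional.mse_loss(out_transformed_input, out_transformed_output)
loss.backward()
print(float(loss))
```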

SUPPLEMENTARY INFORMATION: The online version contains supplementary material available at 10.1007/s13534-022-00249-5.}, } @article {pmid36713097, year = {2021}, author = {Roffi, M and Casadei, B and Gouillard, C and Nambatingué, N and Daval, G and Bardinet, I and Priori, SG}, title = {Digital transformation of major scientific meetings induced by the COVID-19 pandemic: insights from the ESC 2020 annual congress.}, journal = {European heart journal. Digital health}, volume = {2}, number = {4}, pages = {704-712}, doi = {10.1093/ehjdh/ztab076}, pmid = {36713097}, issn = {2634-3916}, abstract = {As a consequence of the COVID-19 pandemic, the European Society of Cardiology (ESC) was forced to pivot the scientific programme of the ESC Congress 2021 into a totally new format for online consumption, The Digital Experience. A variety of new suppliers were involved, including experts in TV studio production, cloud infrastructure, online platforms, video management, and online analytics. An information technology platform able to support hundreds of thousands of simultaneous connections was built, and cloud computing technologies were put in place to help scale up and down the resources needed for the high number of users at peak times. The video management system was characterized by multiple layers of security and redundancy and offered the same fluidity, albeit at a different resolution, to all users independently of the performance of their internet connection. The event, free for all users, was an undisputed success, from both a scientific/educational and a digital technology perspective. The number of registrations increased almost four-fold compared with the 2019 record-breaking edition in Paris, with a greater proportion of younger and female participants as well as of participants from low- and middle-income countries. No major technical failures were encountered. For the first time in history, attendees from all around the globe had the same real-time access to the world's most popular cardiovascular conference.
These platforms are enabling global collaboration and improving representation of underrepresented groups, since sequencing efforts have not prioritized diverse population representation until recently. Federated data platforms, when combined with advances in no-code technology, can be accessible to the diverse end-users that make up the genomics workforce, and we discuss potential strategies to develop sustainable business models so that the platforms can continue to enable research long term. Although these platforms must be carefully managed to ensure appropriate and ethical use, they are democratizing access and insights to clinical-genomic data that will progress research and enable impactful therapeutic findings.}, } @article {pmid36702751, year = {2023}, author = {Bang, I and Lee, SM and Park, S and Park, JY and Nong, LK and Gao, Y and Palsson, BO and Kim, D}, title = {Deep-learning optimized DEOCSU suite provides an iterable pipeline for accurate ChIP-exo peak calling.}, journal = {Briefings in bioinformatics}, volume = {}, number = {}, pages = {}, doi = {10.1093/bib/bbad024}, pmid = {36702751}, issn = {1477-4054}, abstract = {Recognizing binding sites of DNA-binding proteins is a key factor for elucidating transcriptional regulation in organisms. ChIP-exo enables researchers to delineate genome-wide binding landscapes of DNA-binding proteins with near single base-pair resolution. However, the peak calling step hinders ChIP-exo application since the published algorithms tend to generate false-positive and false-negative predictions. Here, we report the development of DEOCSU (DEep-learning Optimized ChIP-exo peak calling SUite), a novel machine learning-based ChIP-exo peak calling suite. DEOCSU entails the deep convolutional neural network model which was trained with curated ChIP-exo peak data to distinguish the visualized data of bona fide peaks from false ones. Performance validation of the trained deep-learning model indicated its high accuracy, high precision and high recall of over 95%. Applying the new suite to both in-house and publicly available ChIP-exo datasets obtained from bacteria, eukaryotes and archaea revealed an accurate prediction of peaks containing canonical motifs, highlighting the versatility and efficiency of DEOCSU. Furthermore, DEOCSU can be executed on a cloud computing platform or the local environment. With visualization software included in the suite, adjustable options such as the threshold of peak probability, and iterable updating of the pre-trained model, DEOCSU can be optimized for users' specific needs.}, } @article {pmid36700091, year = {2021}, author = {Blum, BC and Emili, A}, title = {Omics Notebook: robust, reproducible and flexible automated multiomics exploratory analysis and reporting.}, journal = {Bioinformatics advances}, volume = {1}, number = {1}, pages = {vbab024}, pmid = {36700091}, issn = {2635-0041}, abstract = {SUMMARY: Mass spectrometry is an increasingly important tool for the global interrogation of diverse biomolecules. Unfortunately, the complexity of downstream data analysis is a major challenge for the routine use of these data by investigators from broader training backgrounds. Omics Notebook is an open-source framework for exploratory analysis, reporting and integrating multiomic data that are automated, reproducible and customizable. 
Built-in functions allow the processing of proteomic data from MaxQuant and metabolomic data from XCMS, along with other omics data in standardized input formats as specified in the documentation. In addition, containerization manages R package installation requirements and tailors the framework for shared high-performance computing or cloud environments.

Omics Notebook is implemented in Python and R and is available for download from https://github.com/cnsb-boston/Omics_Notebook with additional documentation under a GNU GPLv3 license.

SUPPLEMENTARY INFORMATION: Supplementary data are available at Bioinformatics Advances online.}, } @article {pmid36696392, year = {2023}, author = {Kim, J and Karyadi, DM and Hartley, SW and Zhu, B and Wang, M and Wu, D and Song, L and Armstrong, GT and Bhatia, S and Robison, LL and Yasui, Y and Carter, B and Sampson, JN and Freedman, ND and Goldstein, AM and Mirabello, L and Chanock, SJ and Morton, LM and Savage, SA and Stewart, DR}, title = {Inflated expectations: Rare-variant association analysis using public controls.}, journal = {PloS one}, volume = {18}, number = {1}, pages = {e0280951}, doi = {10.1371/journal.pone.0280951}, pmid = {36696392}, issn = {1932-6203}, abstract = {The use of publicly available sequencing datasets as controls (hereafter, "public controls") in studies of rare-variant disease associations has great promise but can increase the risk of false-positive discovery. The specific factors that could contribute to an inflated distribution of test statistics have not been systematically examined. Here, we leveraged both public controls (gnomAD v2.1) and several datasets sequenced in our laboratory to systematically investigate factors that could contribute to false-positive discovery, as measured by λΔ95, a measure quantifying the degree of inflation in statistical significance. Analyses of datasets in this investigation found that 1) the significantly inflated distribution of test statistics decreased substantially when the same variant-calling and filtering pipelines were employed, 2) differences in library prep kits and sequencers did not affect the false-positive discovery rate, and 3) joint vs. separate variant-calling of cases and controls did not contribute to the inflation of test statistics. Currently available methods do not adequately adjust for the high false-positive discovery rate. These results, especially if replicated, emphasize the risks of using public controls for rare-variant association tests in which individual-level data and the computational pipeline are not readily accessible, which prevents the use of the same variant-calling and filtering pipelines on both cases and controls. A plausible solution exists with the emergence of cloud-based computing, which can make it possible to bring containerized analytical pipelines to the data (rather than the data to the pipeline) and could avert or minimize these issues. It is suggested that future reports account for this issue and note it as a limitation when reporting new findings based on studies that cannot practically analyze all data on a single pipeline.
Although this resource has enabled the identification of chromosome abnormalities leading to specific cancers and cancer mechanisms, a large-scale, systematic analysis of these aberrations and their downstream implications has been difficult due to the lack of a standard, automated mapping from aberrations to genomic coordinates. We previously introduced CytoConverter as a tool that automates such conversions. CytoConverter has now been updated with improved interpretation of karyotypes and has been integrated with the Mitelman DB, providing a comprehensive mapping of the 70,000+ cases to genomic coordinates, as well as visualization of the frequencies of chromosomal gains and losses. Importantly, all CytoConverter-generated genomic coordinates are publicly available in Google BigQuery, a cloud-based data warehouse, facilitating data exploration and integration with other datasets hosted by the Institute for Systems Biology Cancer Gateway in the Cloud (ISB-CGC) Resource. We demonstrate the use of BigQuery for integrative analysis of the Mitelman DB with other cancer datasets, including a comparison of the frequency of imbalances identified in Mitelman DB cases with those found in the Cancer Genome Atlas (TCGA) copy number datasets and an exploration of cancer pathways affected by chromosome aberrations. This solution provides opportunities to leverage the power of cloud computing for low-cost, scalable, and integrated analysis of chromosome aberrations and gene fusions in cancer (a BigQuery query sketch follows below).}, } @article {pmid36694127, year = {2023}, author = {Digby, B and Finn, SP and Ó Broin, P}, title = {nf-core/circrna: a portable workflow for the quantification, miRNA target prediction and differential expression analysis of circular RNAs.}, journal = {BMC bioinformatics}, volume = {24}, number = {1}, pages = {27}, pmid = {36694127}, issn = {1471-2105}, support = {18/CRT/6214/SFI_/Science Foundation Ireland/Ireland ; }, abstract = {BACKGROUND: Circular RNAs (circRNAs) are a class of covalently closed non-coding RNAs that have garnered increased attention from the research community due to their stability, tissue-specific expression and role as transcriptional modulators via sequestration of miRNAs. Currently, multiple quantification tools capable of detecting circRNAs exist, yet none delineate circRNA-miRNA interactions, and only one employs differential expression analysis. Efforts have been made to bridge this gap by way of circRNA workflows; however, these workflows are limited by both the types of analyses available and the computational skills required to run them.
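Referring to the Mitelman DB entry above, a minimal sketch of a BigQuery lookup with the google-cloud-bigquery Python client; the project, dataset, table, and column names are hypothetical placeholders, not the actual ISB-CGC paths (consult the ISB-CGC documentation for those):

```python
from google.cloud import bigquery

# Dataset and table names below are hypothetical placeholders; the
# real Mitelman DB / CytoConverter tables live under ISB-CGC projects.
client = bigquery.Client(project="my-gcp-project")  # assumed project ID

sql = """
    SELECT chromosome, COUNT(*) AS n_events
    FROM `isb-cgc.mitelman_example.cytoconverter_coordinates`
    WHERE event_type = 'loss'
    GROUP BY chromosome
    ORDER BY n_events DESC
    LIMIT 10
"""

# Run the query and print the most frequently lost chromosomes.
for row in client.query(sql).result():
    print(row.chromosome, row.n_events)
```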

RESULTS: We present nf-core/circrna, a multi-functional, automated high-throughput pipeline implemented in Nextflow that allows users to characterise the role of circRNAs in RNA sequencing datasets via three analysis modules: (1) circRNA quantification, robust filtering and annotation, (2) miRNA target prediction of the mature spliced sequence, and (3) differential expression analysis. nf-core/circrna has been developed within the nf-core framework, ensuring robust portability across computing environments via containerisation, parallel deployment on cluster/cloud-based infrastructures, comprehensive documentation and maintenance support.
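A minimal launch sketch for the pipeline just described, assuming Nextflow and Docker are installed; the sample sheet path and output directory are placeholders, and --input/--outdir follow common nf-core conventions rather than pipeline-specific documentation (check https://nf-co.re/circrna for the exact options):

```python
import subprocess

# Launch the nf-core/circrna pipeline via Nextflow. -profile docker
# requests containerised execution; file paths are placeholders.
cmd = [
    "nextflow", "run", "nf-core/circrna",
    "-profile", "docker",
    "--input", "samplesheet.csv",    # hypothetical sample sheet
    "--outdir", "results",
]
subprocess.run(cmd, check=True)
```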

CONCLUSION: nf-core/circrna reduces the barrier to entry for researchers by providing an easy-to-use, platform-independent and scalable workflow for circRNA analyses. Source code, documentation and installation instructions are freely available at https://nf-co.re/circrna and https://github.com/nf-core/circrna .}, } @article {pmid36691672, year = {2023}, author = {Ørka, HO and Gailis, J and Vege, M and Gobakken, T and Hauglund, K}, title = {Analysis-ready satellite data mosaics from Landsat and Sentinel-2 imagery.}, journal = {MethodsX}, volume = {10}, number = {}, pages = {101995}, pmid = {36691672}, issn = {2215-0161}, abstract = {Today's enormous amounts of freely available high-resolution satellite imagery create a demand for effective preprocessing methods. One such preprocessing method needed in many applications utilizing optical satellite imagery from the Landsat and Sentinel-2 archives is mosaicking. Merging hundreds of single scenes into a single satellite data mosaic before conducting analysis such as land cover classification, change detection, or modelling is often a prerequisite. Maintaining the original data structure and preserving metadata for further modelling or classification would be advantageous for many applications. Furthermore, in other applications, e.g., those connected to land cover classification, creating the mosaic for a specific period that matches the phenological state of the phenomena in nature would be beneficial. In addition, supporting in-house computing centers not directly connected to a specific cloud provider could be a requirement for some institutions or companies. In the current work, we present a method called Geomosaic that meets these criteria and produces analysis-ready satellite data mosaics from Landsat and Sentinel-2 imagery.•The method described produces analysis-ready satellite data mosaics.•The satellite data mosaics contain pixel metadata usable for further analysis.•The algorithm is available as an open-source tool coded in Python and can be used on multiple platforms.}, } @article {pmid36691530, year = {2023}, author = {Oñate, W and Sanz, R}, title = {Analysis of architectures implemented for IIoT.}, journal = {Heliyon}, volume = {9}, number = {1}, pages = {e12868}, pmid = {36691530}, issn = {2405-8440}, abstract = {Several technological blocks are being developed to provide solutions to the requirements necessary for the implementation of industrial IoT. However, this is feasible with the resources offered by the Cloud, such as processing, applications and services. Despite this, there are negative aspects such as bandwidth, Internet service variability, latency, lack of filtering of junk data transmitted to the cloud, and security. From another perspective, these situations emerge as challenges that are being studied to meet the needs of this new industrial era, which means that the important contributions of academia, companies and consortia are achieving a change of course by taking advantage of the potential of the Cloud, but now from the vicinity or perimeter of a production plant. To achieve this task, some pillars of IoT technology are being used as a basis, such as the designs of Fog Computing Platforms (FCPs) and Edge Computing (EC), together with the need for cooperation between information and operational technologies (IT and OT), with the intention of accelerating the paradigm shift that this situation has generated.
The objective of this study is to present a systematic literature review (SLR) of recent studies on hierarchical and flat peer-to-peer (P2P) architectures implemented for manufacturing IIoT, analyzing their successes and weaknesses in areas such as latency, security, computing methodologies, virtualization architectures, Fog Computing (FC) in Manufacturing Execution Systems (MES), Quality of Service (QoS) and connectivity, with the aim of motivating possible research points when implementing IIoT with these new technologies.}, } @article {pmid36690091, year = {2023}, author = {Li, Z and Demir, I}, title = {U-net-based semantic classification for flood extent extraction using SAR imagery and GEE platform: A case study for 2019 central US flooding.}, journal = {The Science of the total environment}, volume = {}, number = {}, pages = {161757}, doi = {10.1016/j.scitotenv.2023.161757}, pmid = {36690091}, issn = {1879-1026}, abstract = {Data-driven models for water body extraction have experienced accelerated growth in recent years, thanks to advances in processing techniques and computational resources, as well as improved data availability. In this study, we modified the standard U-Net, a convolutional neural network (CNN) method, to extract water bodies from scenes captured by Sentinel-1 satellites of selected areas during the 2019 Central US flooding. We compared the results to several benchmark models, including the standard U-Net and ResNet50, an advanced thresholding method, Bmax Otsu, and a recently introduced flood inundation map archive. Then, we examined how data input types, input resolution, and the use of pre-trained weights affect model performance. We adopted a three-category classification framework to test whether and how permanent water and flood pixels behave differently. Most of the data in this study were gathered and pre-processed utilizing the open-access Google Earth Engine (GEE) cloud platform. According to the results, the adjusted U-Net outperformed all other benchmark models and datasets. Adding a slope layer enhances model performance with the 30 m input data compared to training the model on only the VV and VH bands of SAR images. Adding DEM and Height Above Nearest Drainage (HAND) data layers improved performance for models trained on 10 m datasets. The results also suggested that CNN-based semantic segmentation may fail to correctly classify pixels around narrow river channels. Furthermore, our findings revealed that it is necessary to differentiate permanent water and flood pixels because they behave differently. Finally, the results indicated that using pre-trained weights from a coarse dataset can significantly minimize initial training loss on finer datasets and speed up convergence.}, } @article {pmid36687286, year = {2023}, author = {Ali, O and AlAhmad, A and Kahtan, H}, title = {A review of advanced technologies available to improve the healthcare performance during COVID-19 pandemic.}, journal = {Procedia computer science}, volume = {217}, number = {}, pages = {205-216}, pmid = {36687286}, issn = {1877-0509}, abstract = {Information technology (IT) has enabled the initiation of an innovative healthcare system. An innovative healthcare system integrates new technologies such as cloud computing, the internet of things, and artificial intelligence (AI) to transform healthcare to be more efficient, more convenient and more personalized. This review aims to identify the key technologies that will help to support an innovative healthcare system.
A case study approach was used to enable close analysis of the data in a particular context. It presents a case study of the coronavirus (COVID-19) as a means of exploring the use of advanced technologies in an innovative healthcare system to help address a worldwide health crisis. An innovative healthcare system can help to promote better patient self-management, reduce costs, relieve staff pressures, help with resource and knowledge management, and improve the patient experience. An innovative healthcare system can also reduce the expense and time of research and increase its overall efficacy. Overall, this research identifies how innovative technologies can improve the performance of the healthcare system. Advanced technologies can assist with pandemic control and can help in the recognition of the virus, clinical treatment, medical protection, intelligent diagnosis, and outbreak analysis. The review provides an analysis of the future prospects of an innovative healthcare system.}, } @article {pmid36686545, year = {2023}, author = {Wang, SH and Satapathy, SC and Xie, MX and Zhang, YD}, title = {ELUCNN for explainable COVID-19 diagnosis.}, journal = {Soft computing}, volume = {}, number = {}, pages = {1-17}, pmid = {36686545}, issn = {1432-7643}, abstract = {COVID-19 is caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), a positive-sense single-stranded RNA coronavirus. Several noteworthy variants of SARS-CoV-2 were declared by the WHO as Alpha, Beta, Gamma, Delta, and Omicron. As of 13 December 2022, it had caused 6.65 million deaths and over 649 million confirmed positive cases. Based on the convolutional neural network (CNN), this study first proposes a ten-layer CNN as the backbone model. Then, the exponential linear unit (ELU) is introduced to replace ReLU, and the traditional convolutional block is transformed into conv-ELU. Finally, an ELU-based CNN (ELUCNN) model is proposed for COVID-19 diagnosis. In addition, the MDA strategy is used to enhance the size of the training set. We develop a mobile app integrating ELUCNN, and this app runs on a client-server structure. Ten runs of the tenfold cross-validation experiment show our model yields a sensitivity of 94.41 ± 0.98, a specificity of 94.84 ± 1.21, an accuracy of 94.62 ± 0.96, and an F1 score of 94.61 ± 0.95. The ELUCNN model and mobile app are effective in COVID-19 diagnosis and give better results than 14 state-of-the-art COVID-19 diagnosis models concerning accuracy.}, } @article {pmid36685273, year = {2023}, author = {Yilmaz, OS and Acar, U and Sanli, FB and Gulgen, F and Ates, AM}, title = {Mapping burn severity and monitoring CO content in Türkiye's 2021 Wildfires, using Sentinel-2 and Sentinel-5P satellite data on the GEE platform.}, journal = {Earth science informatics}, volume = {}, number = {}, pages = {1-20}, pmid = {36685273}, issn = {1865-0473}, abstract = {This study investigated forest fires in the Mediterranean region of Türkiye between July 28, 2021, and August 11, 2021. Burn severity maps were produced with the differenced normalised burn ratio (dNBR) and differenced normalised difference vegetation index (dNDVI) using Sentinel-2 images on the Google Earth Engine (GEE) cloud platform. The burned areas were estimated based on the determined burn severity classes. Vegetation density losses in burned areas were analysed using the normalised difference vegetation index (NDVI) time series.
At the same time, the post-fire carbon monoxide (CO) column number densities were determined using Sentinel-5P satellite data. According to the burn severity maps obtained with dNBR, the sum of high- and moderate-severity areas constitutes 34.64%, 20.57%, 46.43%, 51.50% and 18.88% of the entire area in the Manavgat, Gündoğmuş, Marmaris, Bodrum and Köyceğiz districts, respectively. Likewise, according to the burn severity maps obtained with dNDVI, the sum of the areas of very high severity and high severity constitutes 41.17%, 30.16%, 30.50%, 42.35%, and 10.40% of the entire region, respectively. In post-fire NDVI time series analyses, sharp decreases in NDVI values from 0.8 to 0.1 were observed in all burned areas. While the tropospheric CO column number density was 0.03 mol/m² in all burned regions before the fire, this value increased to 0.14 mol/m² after the fire. Moreover, when the area was examined more broadly with Sentinel-5P data, the amount of CO was observed to increase up to a maximum value of 0.333 mol/m². The results of this study present significant information for determining the severity of forest fires in the Mediterranean region in 2021 and the CO column number density after the fire. In addition, monitoring polluting gases with RS techniques after forest fires is essential in understanding the extent of the damage they can cause to the environment.}, } @article {pmid36679810, year = {2023}, author = {Yang, H and Zhou, H and Liu, Z and Deng, X}, title = {Energy Optimization of Wireless Sensor Embedded Cloud Computing Data Monitoring System in 6G Environment.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {2}, pages = {}, doi = {10.3390/s23021013}, pmid = {36679810}, issn = {1424-8220}, abstract = {With the construction and development of modern and smart cities, people's lives are becoming more intelligent and diversified. Surveillance systems increasingly play an active role in target tracking, vehicle identification, traffic management, etc. In the 6G network environment, ordinary processing platforms struggle to meet the computing demand posed by the massive, large-scale data in surveillance systems. This paper provides a data governance solution based on a 6G environment. The shortcomings of critical technologies in wireless sensor networks are addressed through ZigBee energy optimization, which targets the shortage of energy supply and the high energy consumption found in the practical application of wireless sensor networks. At the same time, this improved routing algorithm is combined with embedded cloud computing to optimize the monitoring system and achieve efficient data processing. The ZigBee-optimized wireless sensor network consumes less energy in practice and also increases the service life of the network, as proven by research and experiments.
This optimized data monitoring system ensures data security and reliability.}, } @article {pmid36679800, year = {2023}, author = {Oztoprak, K and Tuncel, YK and Butun, I}, title = {Technological Transformation of Telco Operators towards Seamless IoT Edge-Cloud Continuum.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {2}, pages = {}, doi = {10.3390/s23021004}, pmid = {36679800}, issn = {1424-8220}, abstract = {This article investigates and discusses challenges in the telecommunication field from multiple perspectives; both the academic and industry sides are catered for, surveying the main points of technological transformation toward the edge-cloud continuum from the view of a telco operator to show the complete picture, including the evolution of cloud-native computing, Software-Defined Networking (SDN), and network automation platforms. The cultural shift in software development and management with DevOps enabled the development of significant technologies in the telecommunication world, including network equipment, application development, and system orchestration. The effect of the aforementioned cultural shift on the application area, especially from the IoT point of view, is investigated. The enormous change in service diversity and delivery capabilities to mass devices is also discussed. During the last two decades, desktop and server virtualization has played an active role in the Information Technology (IT) world. With the use of OpenFlow, SDN, and Network Functions Virtualization (NFV), the network revolution got underway. The shift from monolithic application development and deployment to micro-services changed the whole picture. On the other hand, data centers evolved over several generations in which the control plane cannot cope with all the networks without an intelligent decision-making process that benefits from AI/ML techniques. AI also enables operators to forecast demand more accurately, anticipate network load, and adjust capacity and throughput automatically. Going one step further, zero-touch networking and service management (ZSM) is proposed to take high-level human intents and generate low-level configurations for network elements with validated results, minimizing the ratio of faults caused by human intervention. Harmonizing all signs of progress in different communication technologies has enabled the successful use of edge computing. Low-powered (from both energy and processing perspectives) IoT networks have disrupted customer and end-point demands within the sector and, as such, paved the path towards devising the edge computing concept, which finalized the whole picture of the edge-cloud continuum.}, } @article {pmid36679795, year = {2023}, author = {Yin, HC and Lien, JJ}, title = {Cascaded Segmentation U-Net for Quality Evaluation of Scraping Workpiece.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {2}, pages = {}, doi = {10.3390/s23020998}, pmid = {36679795}, issn = {1424-8220}, abstract = {In industry, the hand-scraping method is a key technology for achieving high precision in machine tools, and the quality of scraping workpieces directly affects the accuracy and service life of the machine tool. However, most of the quality evaluation of scraping workpieces is carried out by the scraping worker's subjective judgment, which results in differences in the quality of the scraping workpieces and is time-consuming.
Hence, in this research, an edge-cloud computing system was developed to obtain the relevant parameters, which are the percentage of points (POP) and the peak points per square inch (PPI), for evaluating the quality of scraping workpieces. On the cloud computing server side, a novel network called cascaded segmentation U-Net is proposed to produce high-quality segmentations of the height of points (HOP) (around 40 μm in height), favoring training on small datasets, followed by a post-processing algorithm that automatically calculates POP and PPI. This research instead emphasizes the architecture of the network itself. The design of the components of our network is based on the basic idea of the identity function, which not only solves the problem of the misjudgment of the oil ditch and the residual pigment but also allows the network to be trained end-to-end effectively. At the head of the network, a cascaded multi-stage pixel-wise classification is designed for obtaining more accurate HOP borders. Furthermore, the "Cross-dimension Compression" stage is used to fuse high-dimensional semantic feature maps across the depth of the feature maps into low-dimensional feature maps, producing decipherable content for the final pixel-wise classification. Our system achieves error rates of 3.7% and 0.9 points for POP and PPI, respectively. The novel network achieves an Intersection over Union (IoU) of 90.2%.}, } @article {pmid36679792, year = {2023}, author = {Kopras, B and Idzikowski, F and Bossy, B and Kryszkiewicz, P and Bogucka, H}, title = {Communication and Computing Task Allocation for Energy-Efficient Fog Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {2}, pages = {}, doi = {10.3390/s23020997}, pmid = {36679792}, issn = {1424-8220}, abstract = {The well-known cloud computing paradigm is being extended by the idea of fog, with computing nodes placed closer to end users to allow for task processing with tighter latency requirements. However, the offloading of tasks (from end devices to either the cloud or to fog nodes) should be designed taking energy consumption for both transmission and computation into account. The task allocation procedure can be challenging considering the high number of arriving tasks with various computational, communication and delay requirements, and the high number of computing nodes with various communication and computing capabilities. In this paper, we propose an optimal task allocation procedure minimizing consumed energy for a set of users connected wirelessly to a network composed of fog nodes (FNs) located at access points (APs) and cloud nodes (CNs). We optimize the assignment of APs and computing nodes to offloaded tasks as well as the operating frequencies of FNs. The considered problem is formulated as a Mixed-Integer Nonlinear Programming problem. The utilized energy consumption and delay models, as well as their parameters, related to both the computation and communication costs, reflect the characteristics of real devices. The obtained results show that it is profitable to split the processing of tasks between multiple FNs and the cloud, often choosing different nodes for transmission and computation. The proposed algorithm manages to find the optimal allocations and outperforms all the considered alternative allocation strategies, resulting in the lowest energy consumption and task rejection rate. Moreover, a heuristic algorithm that decouples the optimization of wireless transmission from the implemented computations and wired transmission is proposed.
It finds the optimal or close-to-optimal solutions for all of the studied scenarios.}, } @article {pmid36679619, year = {2023}, author = {Mirza, IB and Georgakopoulos, D and Yavari, A}, title = {Cyber-Physical-Social Awareness Platform for Comprehensive Situation Awareness.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {2}, pages = {}, doi = {10.3390/s23020822}, pmid = {36679619}, issn = {1424-8220}, abstract = {A cyber-physical-social computing system integrates the interactions between cyber, physical, and social spaces by fusing information from these spaces. The result of this fusion can be used to drive many applications in areas such as intelligent transportation, smart cities, and healthcare. Situation Awareness was initially used in military services to provide knowledge of what is happening in a combat zone but has been used in many other areas such as disaster mitigation. Various applications have been developed to provide situation awareness using either IoT sensors or social media information spaces and, more recently, using both IoT sensors and social media information spaces. The information from these spaces is heterogeneous and, at their intersection, sparse. In this paper, we propose a highly scalable, novel Cyber-Physical-Social Awareness (CPSA) platform that provides situation awareness by using and intersecting information from both IoT sensors and social media. By combining and fusing information from both social media and IoT sensors, the CPSA platform provides more comprehensive and accurate situation awareness than any other existing solution that relies only on data from social media and IoT sensors. The CPSA platform achieves that by semantically describing and integrating the information extracted from sensors and social media spaces and intersecting this information for enriching situation awareness. The CPSA platform uses user-provided situation models to refine and intersect cyber, physical, and social information. The CPSA platform analyses social media and IoT data using pretrained machine learning models deployed in the cloud, and provides coordination between information sources and fault tolerance. The paper describes the implementation and evaluation of the CPSA platform. The evaluation of the CPSA platform is measured in terms of capabilities such as the ability to semantically describe and integrate heterogeneous information, fault tolerance, and time constraints such as processing time and throughput when performing real-world experiments. The evaluation shows that the CPSA platform can reliably process and intersect large volumes of IoT sensor and social media data to provide enhanced situation awareness.}, } @article {pmid36679524, year = {2023}, author = {Chen, J and Zhou, J and Liu, L and Shu, C and Shen, M and Yao, W}, title = {Sow Farrowing Early Warning and Supervision for Embedded Board Implementations.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {2}, pages = {}, doi = {10.3390/s23020727}, pmid = {36679524}, issn = {1424-8220}, abstract = {Sow farrowing is an important part of pig breeding. The accurate and effective early warning of sow behaviors in farrowing helps breeders determine whether it is necessary to intervene with the farrowing process in a timely manner and is thus essential for increasing the survival rate of piglets and the profits of pig farms. For large pig farms, human resources and costs are important considerations in farrowing supervision.
The existing method, which uses cloud computing-based deep learning to supervise sow farrowing, has a high equipment cost and requires uploading all data to a cloud data center, which demands large network bandwidth. Thus, this paper proposes an approach for the early warning and supervision of farrowing behaviors based on an embedded artificial-intelligence computing platform (NVIDIA Jetson Nano). This lightweight deep learning method allows the rapid processing of sow farrowing video data at edge nodes, reducing the bandwidth requirement and ensuring data security in network transmission. Experiments indicated that after the model was migrated to the Jetson Nano, its precision in detecting sow postures and newborn piglets was 93.5%, with a recall rate of 92.2%, and the detection speed was increased by a factor larger than 8. Early warning was tested on 18 sows approaching farrowing (within 5 h). The mean error of warning was 1.02 h.}, } @article {pmid36679463, year = {2023}, author = {Hussain, MM and Azar, AT and Ahmed, R and Umar Amin, S and Qureshi, B and Dinesh Reddy, V and Alam, I and Khan, ZI}, title = {SONG: A Multi-Objective Evolutionary Algorithm for Delay and Energy Aware Facility Location in Vehicular Fog Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {2}, pages = {}, doi = {10.3390/s23020667}, pmid = {36679463}, issn = {1424-8220}, abstract = {With the emergence of delay- and energy-critical vehicular applications, forwarding sense-actuate data from vehicles to the cloud became practically infeasible. Therefore, a new computational model called Vehicular Fog Computing (VFC) was proposed. It offloads the computation workload from passenger devices (PDs) to transportation infrastructures such as roadside units (RSUs) and base stations (BSs), called static fog nodes. It can also exploit the underutilized computation resources of nearby vehicles that can act as vehicular fog nodes (VFNs) and provide delay- and energy-aware computing services. However, the capacity planning and dimensioning of VFC, which come under a class of facility location problems (FLPs), is a challenging issue. The complexity arises from the spatio-temporal dynamics of vehicular traffic, varying resource demand from PD applications, and the mobility of VFNs. This paper proposes a multi-objective optimization model to investigate the facility location in VFC networks. The solutions to this model generate optimal VFC topologies pertaining to an optimized trade-off (Pareto front) between the service delay and energy consumption. Thus, to solve this model, we propose a hybrid Evolutionary Multi-Objective (EMO) algorithm called Swarm Optimized Non-dominated sorting Genetic algorithm (SONG). It combines the convergence and search efficiency of two popular EMO algorithms: the Non-dominated Sorting Genetic Algorithm (NSGA-II) and Speed-constrained Particle Swarm Optimization (SMPSO). First, we solve an example problem using the SONG algorithm to illustrate the delay-energy solution frontiers and plot the corresponding layout topology. Subsequently, we evaluate the evolutionary performance of the SONG algorithm on real-world vehicular traces against three quality indicators: Hyper-Volume (HV), Inverted Generational Distance (IGD) and CPU delay gap.
The empirical results show that SONG exhibits improved solution quality over the NSGA-II and SMPSO algorithms and hence can be utilized as a potential tool by the service providers for the planning and design of VFC networks.}, } @article {pmid36679436, year = {2023}, author = {Gec, S and Stankovski, V and Lavbič, D and Kochovski, P}, title = {A Recommender System for Robust Smart Contract Template Classification.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {2}, pages = {}, doi = {10.3390/s23020639}, pmid = {36679436}, issn = {1424-8220}, abstract = {IoT environments are becoming increasingly heterogeneous in terms of their distributions and included entities by collaboratively involving not only data centers known from Cloud computing but also the different types of third-party entities that can provide computing resources. To transparently provide such resources and facilitate trust between the involved entities, it is necessary to develop and implement smart contracts. However, when developing smart contracts, developers face many challenges and concerns, such as security, contracts' correctness, a lack of documentation and/or design patterns, and others. To address this problem, we propose a new recommender system to facilitate the development and implementation of low-cost EVM-enabled smart contracts. The recommender system's algorithm provides the smart contract developer with smart contract templates that match their requirements and that are relevant to the typology of the fog architecture. It mainly relies on OpenZeppelin, a modular, reusable, and secure smart contract library that we use when classifying the smart contracts. The evaluation results indicate that by using our solution, the smart contracts' development times are overall reduced. Moreover, such smart contracts are sustainable for fog-computing IoT environments and applications in low-cost EVM-based ledgers. The recommender system has been successfully implemented in the ONTOCHAIN ecosystem, thus presenting its applicability.}, } @article {pmid36679409, year = {2023}, author = {Sakaguchi, Y and Bakibillah, ASM and Kamal, MAS and Yamada, K}, title = {A Cyber-Physical Framework for Optimal Coordination of Connected and Automated Vehicles on Multi-Lane Freeways.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {2}, pages = {}, doi = {10.3390/s23020611}, pmid = {36679409}, issn = {1424-8220}, abstract = {Uncoordinated driving behavior is one of the main reasons for bottlenecks on freeways. This paper presents a novel cyber-physical framework for optimal coordination of connected and automated vehicles (CAVs) on multi-lane freeways. We consider that all vehicles are connected to a cloud-based computing framework, where a traffic coordination system optimizes the target trajectories of individual vehicles for smooth and safe lane changing or merging. In the proposed framework, the vehicles are coordinated into groups or platoons, and their trajectories are successively optimized in a receding horizon control (RHC) approach. Optimization of the traffic coordination system aims to provide sufficient gaps when a lane change is necessary while minimizing the speed deviation and acceleration of all vehicles. The coordination information is then provided to individual vehicles equipped with local controllers, and each vehicle decides its control acceleration to follow the target trajectories while ensuring a safe distance. 
Our proposed method guarantees fast optimization and can be used in real time. The proposed coordination system was evaluated using microscopic traffic simulations and benchmarked against the traditional (human-based) driving system. The results show significant improvements in fuel economy, average velocity, and travel time for various traffic volumes.}, } @article {pmid36679360, year = {2023}, author = {Khan, AQ and Nikolov, N and Matskin, M and Prodan, R and Roman, D and Sahin, B and Bussler, C and Soylu, A}, title = {Smart Data Placement Using Storage-as-a-Service Model for Big Data Pipelines.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {2}, pages = {}, doi = {10.3390/s23020564}, pmid = {36679360}, issn = {1424-8220}, abstract = {Big data pipelines are developed to process data characterized by one or more of the three big data features, commonly known as the three Vs (volume, velocity, and variety), through a series of steps (e.g., extract, transform, and move), laying the groundwork for the use of advanced analytics and ML/AI techniques. The computing continuum (i.e., cloud/fog/edge) allows access to a virtually infinite amount of resources, where data pipelines could be executed at scale; however, the implementation of data pipelines on the continuum is a complex task that needs to take computing resources, data transmission channels, triggers, data transfer methods, integration of message queues, etc., into account. The task becomes even more challenging when data storage is considered as part of the data pipelines. Local storage is expensive, hard to maintain, and comes with several challenges (e.g., data availability, data security, and backup). The use of cloud storage, i.e., storage-as-a-service (StaaS), instead of local storage has the potential of providing more flexibility in terms of scalability, fault tolerance, and availability. In this article, we propose a generic approach to integrate StaaS with data pipelines, i.e., computation on an on-premise server or on a specific cloud, but integration with StaaS, and develop a ranking method for available storage options based on five key parameters: cost, proximity, network performance, server-side encryption, and user weights/preferences. The evaluation carried out demonstrates the effectiveness of the proposed approach in terms of data transfer performance, utility of the individual parameters, and feasibility of dynamic selection of a storage option based on four primary user scenarios.}, } @article {pmid36673212, year = {2022}, author = {Gavreev, MA and Kiktenko, EO and Mastiukova, AS and Fedorov, AK}, title = {Suppressing Decoherence in Quantum State Transfer with Unitary Operations.}, journal = {Entropy (Basel, Switzerland)}, volume = {25}, number = {1}, pages = {}, doi = {10.3390/e25010067}, pmid = {36673212}, issn = {1099-4300}, abstract = {Decoherence is the fundamental obstacle limiting the performance of quantum information processing devices. The problem of transmitting a quantum state (known or unknown) from one place to another is of great interest in this context. In this work, by following the recent theoretical proposal, we study an application of quantum state-dependent pre- and post-processing unitary operations for protecting the given (multi-qubit) quantum state against the effect of decoherence acting on all qubits.
We observe the increase in the fidelity of the output quantum state both in a quantum emulation experiment, where all protecting unitaries are perfect, and in a real experiment with a cloud-accessible quantum processor, where the protecting unitaries themselves are affected by noise. We expect the considered approach to be useful for analyzing the capabilities of quantum information processing devices in transmitting known quantum states. We also demonstrate the applicability of the developed approach for suppressing decoherence in the process of distributing a two-qubit state over remote physical qubits of a quantum processor.}, } @article {pmid36670240, year = {2023}, author = {Yıldırım, E and Cicioğlu, M and Çalhan, A}, title = {Fog-cloud architecture-driven Internet of Medical Things framework for healthcare monitoring.}, journal = {Medical & biological engineering & computing}, volume = {}, number = {}, pages = {}, pmid = {36670240}, issn = {1741-0444}, abstract = {The new coronavirus disease (COVID-19) has increased the need for new technologies such as the Internet of Medical Things (IoMT), Wireless Body Area Networks (WBANs), and cloud computing in the health sector as well as in many other areas. These technologies have also made it possible for billions of devices to connect to the internet and communicate with each other. In this study, an Internet of Medical Things (IoMT) framework consisting of Wireless Body Area Networks (WBANs) has been designed, and the health big data from WBANs have been analyzed using fog and cloud computing technologies. Fog computing is used for fast and easy analysis, and cloud computing is used for time-consuming and complex analysis. The proposed IoMT framework is presented with a diabetes prediction scenario. The diabetes prediction process is carried out on the fog with fuzzy logic decision-making and on the cloud with support vector machine (SVM), random forest (RF), and artificial neural network (ANN) machine learning algorithms. The dataset produced in WBANs is used for big data analysis in the scenario for both the fuzzy logic and the machine learning algorithms. The fuzzy logic achieves 64% accuracy in the fog, while SVM, RF, and ANN achieve 89.5%, 88.4%, and 87.2% accuracy, respectively, in the cloud for diabetes prediction. In addition, the throughput and delay results of heterogeneous nodes with different priorities in the WBAN scenario created using the IEEE 802.15.6 standard and AODV routing protocol have also been analyzed. Fog-cloud architecture-driven IoMT networks: • An IoMT framework is designed with important components and functions such as fog and cloud node capabilities. • Real-time data has been obtained from WBANs in Riverbed Modeler for a more realistic performance analysis of IoMT. • Fuzzy logic and machine learning algorithms (RF, SVM, and ANN) are used for diabetes predictions.
• Intra- and inter-WBAN communications (IEEE 802.15.6 standard) are modeled as essential components of the IoMT framework with all functions.}, } @article {pmid36658912, year = {2018}, author = {Xin, T and Huang, S and Lu, S and Li, K and Luo, Z and Yin, Z and Li, J and Lu, D and Long, G and Zeng, B}, title = {NMRCloudQ: a quantum cloud experience on a nuclear magnetic resonance quantum computer.}, journal = {Science bulletin}, volume = {63}, number = {1}, pages = {17-23}, doi = {10.1016/j.scib.2017.12.022}, pmid = {36658912}, issn = {2095-9281}, abstract = {Cloud-based quantum computing is anticipated to be the most useful and reachable form for public users to experience the power of quantum computing. As an initial attempt, IBM Q launched influential cloud services on a superconducting quantum processor in 2016, but no other platform has followed up yet. Here, we report our new cloud quantum computing service - NMRCloudQ (http://nmrcloudq.com/zh-hans/), where nuclear magnetic resonance, one of the pioneer platforms with mature techniques in experimental quantum computing, plays the role of implementing computing tasks. Our service provides a comprehensive software environment preconfigured with a list of quantum information processing packages, and aims to be freely accessible either to amateurs that look forward to keeping pace with this quantum era or to professionals that are interested in carrying out real quantum computing experiments in person. In our current version, four qubits are already usable, with an average single-qubit gate fidelity of 99.10% and a two-qubit gate fidelity of 97.15% in randomized benchmarking tests. Improved control precision as well as a new seven-qubit processor are also in preparation and will be available later.}, } @article {pmid36658205, year = {2023}, author = {Kazemi Garajeh, M and Salmani, B and Zare Naghadehi, S and Valipoori Goodarzi, H and Khasraei, A}, title = {An integrated approach of remote sensing and geospatial analysis for modeling and predicting the impacts of climate change on food security.}, journal = {Scientific reports}, volume = {13}, number = {1}, pages = {1057}, pmid = {36658205}, issn = {2045-2322}, abstract = {The agriculture sector provides the majority of food supplies, ensures food security, and promotes sustainable development. Due to recent climate changes as well as trends in human population growth and environmental degradation, the need for timely agricultural information continues to rise. This study analyzes and predicts the impacts of climate change on food security (FS). For 2002-2021, Landsat and MODIS satellite images and predisposing variables (land surface temperature (LST), evapotranspiration, precipitation, sunny days, cloud ratio, soil salinity, soil moisture, groundwater quality, soil types, digital elevation model, slope, and aspect) were used. First, we used a deep learning convolutional neural network (DL-CNN) based on the Google Earth Engine (GEE) to detect agricultural land (AL). A remote sensing-based approach combined with the analytical network process (ANP) model was used to identify frost-affected areas. We then analyzed the relationship between climatic, geospatial, and topographical variables and AL and frost-affected areas. We found negative correlations of -0.80, -0.58, -0.43, and -0.45 between AL and LST, evapotranspiration, cloud ratio, and soil salinity, respectively.
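[Editor's note] The fog-side fuzzy decision-making in the Yıldırım et al. (pmid36670240) entry above can be illustrated with a minimal sketch. The membership functions, variable names, and thresholds below are hypothetical; the paper's actual rule base is not given in the abstract.

    # Minimal fuzzy-logic-style risk scoring sketch (hypothetical rules and
    # membership functions; not the paper's actual rule base).
    def trimf(x, a, b, c):
        """Triangular membership function over the points a <= b <= c."""
        if x <= a or x >= c:
            return 0.0
        return (x - a) / (b - a) if x <= b else (c - x) / (c - b)

    def diabetes_risk(glucose_mg_dl, bmi):
        # Fuzzify the crisp sensor readings.
        glucose_high = trimf(glucose_mg_dl, 110, 160, 210)
        bmi_high = trimf(bmi, 25, 35, 45)
        # One illustrative rule: IF glucose is high AND bmi is high THEN risk is high.
        return min(glucose_high, bmi_high)

    print(f"risk(150 mg/dL, BMI 32) = {diabetes_risk(150, 32):.2f}")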
There are positive correlations between AL and precipitation, sunny days, soil moisture, and groundwater quality of 0.39, 0.25, 0.21, and 0.77, respectively. The correlations between frost-affected areas and LST, evapotranspiration, cloud ratio, elevation, slope, and aspect are 0.55, 0.40, 0.52, 0.35, 0.45, and 0.39, respectively. Frost-affected areas have negative correlations with precipitation, sunny days, and soil moisture of -0.68, -0.23, and -0.38, respectively. Our findings show that the increase in LST, evapotranspiration, cloud ratio, and soil salinity is associated with the decrease in AL. Additionally, AL decreases with decreases in precipitation, sunny days, soil moisture, and groundwater quality. It was also found that as LST, evapotranspiration, cloud ratio, elevation, slope, and aspect increase, frost-affected areas increase as well. Furthermore, frost-affected areas increase when precipitation, sunny days, and soil moisture decrease. Finally, we predicted the FS threat for 2030, 2040, 2050, and 2060 using the CA-Markov method. According to the results, the AL will decrease by 0.36% from 2030 to 2060. Between 2030 and 2060, however, the area with very high frost impact will increase by about 10.64%. In sum, this study accentuates the critical impacts of climate change on the FS in the region. Our findings and proposed methods could be helpful for researchers to model and quantify the climate change impacts on the FS in different regions and periods.}, } @article {pmid36658166, year = {2023}, author = {Tsakanikas, V and Dagiuklas, T and Iqbal, M and Wang, X and Mumtaz, S}, title = {An intelligent model for supporting edge migration for virtual function chains in next generation internet of things.}, journal = {Scientific reports}, volume = {13}, number = {1}, pages = {1063}, pmid = {36658166}, issn = {2045-2322}, abstract = {Developments in next-generation IoT sensing devices, with advances in their low-power computational capabilities and high-speed networking, have led to the introduction of the edge computing paradigm. Within an edge cloud environment, services may generate and consume data locally, without involving cloud computing infrastructures. Aiming to tackle the low computational resources of the IoT nodes, Virtual-Function-Chain has been proposed as an intelligent distribution model for exploiting the maximum computational power at the edge, thus enabling the support of demanding services. An intelligent migration model with the capacity to support Virtual-Function-Chains is introduced in this work. According to this model, migration at the edge can support individual features of a Virtual-Function-Chain. First, auto-healing can be implemented with cold migrations, if a Virtual Function fails unexpectedly. Second, a Quality of Service monitoring model can trigger live migrations, aiming to avoid edge device overload. The evaluation studies of the proposed model revealed that it has the capacity to increase the robustness of an edge-based service on low-powered IoT devices.
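[Editor's note] Correlations like those reported in the Kazemi Garajeh et al. (pmid36658205) entry above are typically Pearson coefficients; a minimal numpy sketch with synthetic series (the variable names and values are illustrative only, not the study's data):

    import numpy as np

    # Synthetic example: correlate agricultural-land extent (AL) with land
    # surface temperature (LST) over 20 annual observations.
    rng = np.random.default_rng(0)
    lst = rng.normal(30.0, 2.0, size=20)             # degrees C
    al = 500.0 - 8.0 * lst + rng.normal(0, 4, 20)    # hectares; inversely related

    r = np.corrcoef(al, lst)[0, 1]  # Pearson correlation coefficient
    print(f"corr(AL, LST) = {r:.2f}")  # strongly negative, as in the study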
Finally, comparison with similar frameworks, like Kubernetes, showed that the migration model can effectively react to edge network fluctuations.}, } @article {pmid36654019, year = {2022}, author = {Yin, Y and Wang, Z and Zhou, W and Gan, Y and Zhang, Y}, title = {Group key agreement protocol for edge computing in industrial internet.}, journal = {Mathematical biosciences and engineering : MBE}, volume = {19}, number = {12}, pages = {12730-12743}, doi = {10.3934/mbe.2022594}, pmid = {36654019}, issn = {1551-0018}, abstract = {Industrial internet security is a critical component of cyberspace safety. Furthermore, the encryption protocol is a critical component of cyberspace security. Due to the rapid development of the industrial internet and edge computing, more and more devices are outsourcing their data to cloud servers to save costs. Edge devices should have a secure session key to reduce communication costs and share information. However, most key generation and storage are completed by a centralized third-party organization, which carries some security risks. In this context, this paper proposes a lightweight group key agreement protocol based on multi-dimensional virtual iteration. The group key agreement protocol allows for one-at-a-time encryption and timely key updates without the involvement of a trusted third party, and each device in the network can agree on a large number of keys. According to the analysis of this protocol, it offers high security and rapid computation speed while requiring little storage space.}, } @article {pmid36654109, year = {2021}, author = {Xu, X and Sun, J and Endo, S and Li, Y and Benjamin, SC and Yuan, X}, title = {Variational algorithms for linear algebra.}, journal = {Science bulletin}, volume = {66}, number = {21}, pages = {2181-2188}, doi = {10.1016/j.scib.2021.06.023}, pmid = {36654109}, issn = {2095-9281}, abstract = {Quantum algorithms have been developed for efficiently solving linear algebra tasks. However, they generally require deep circuits and hence universal fault-tolerant quantum computers. In this work, we propose variational algorithms for linear algebra tasks that are compatible with noisy intermediate-scale quantum devices. We show that the solutions of linear systems of equations and matrix-vector multiplications can be translated into the ground states of the constructed Hamiltonians. Based on the variational quantum algorithms, we introduce Hamiltonian morphing together with an adaptive ansatz for efficiently finding the ground state, and show the solution verification. Our algorithms are especially suitable for linear algebra problems with sparse matrices, and have wide applications in machine learning and optimisation problems. The algorithm for matrix multiplications can also be used for Hamiltonian simulation and open system simulation. We evaluate the cost and effectiveness of our algorithm through numerical simulations for solving linear systems of equations.
We implement the algorithm on the IBM quantum cloud device with a high solution fidelity of 99.95%.}, } @article {pmid36654308, year = {2021}, author = {Chen, X and Cheng, B and Li, Z and Nie, X and Yu, N and Yung, MH and Peng, X}, title = {Experimental cryptographic verification for near-term quantum cloud computing.}, journal = {Science bulletin}, volume = {66}, number = {1}, pages = {23-28}, doi = {10.1016/j.scib.2020.08.013}, pmid = {36654308}, issn = {2095-9281}, abstract = {An important task for quantum cloud computing is to make sure that there is a real quantum computer running, instead of classical simulation. Here we explore the applicability of a cryptographic verification scheme for verifying quantum cloud computing. We provided a theoretical extension and implemented the scheme on a 5-qubit NMR quantum processor in the laboratory and on 5-qubit and 16-qubit processors of the IBM quantum cloud. We found that the experimental results of the NMR processor can be verified by the scheme with about 1.4% error, after noise compensation by standard techniques. However, the fidelity of the IBM quantum cloud is currently too low to pass the test (about 42% error). This verification scheme shall become practical when servers claim to offer quantum-computing resources that can achieve quantum supremacy.}, } @article {pmid36648445, year = {2023}, author = {Deutsch, EW and Mendoza, L and Shteynberg, DD and Hoopmann, MR and Sun, Z and Eng, JK and Moritz, RL}, title = {Trans-Proteomic Pipeline: Robust Mass Spectrometry-Based Proteomics Data Analysis Suite.}, journal = {Journal of proteome research}, volume = {}, number = {}, pages = {}, doi = {10.1021/acs.jproteome.2c00624}, pmid = {36648445}, issn = {1535-3907}, abstract = {The Trans-Proteomic Pipeline (TPP) mass spectrometry data analysis suite has been in continual development and refinement since its first tools, PeptideProphet and ProteinProphet, were published 20 years ago. The current release provides a large complement of tools for spectrum processing, spectrum searching, search validation, abundance computation, protein inference, and more. Many of the tools include machine-learning modeling to extract the most information from data sets and build robust statistical models to compute the probabilities that derived information is correct. Here we present the latest information on the many TPP tools, and how TPP can be deployed on various platforms from personal Windows laptops to Linux clusters and expansive cloud computing environments. We describe tutorials on how to use TPP in a variety of ways and describe synergistic projects that leverage TPP. We conclude with plans for continued development of TPP.}, } @article {pmid36645733, year = {2023}, author = {Yazdani, A and Dashti, SF and Safdari, Y}, title = {A fog-assisted information model based on priority queue and clinical decision support systems.}, journal = {Health informatics journal}, volume = {29}, number = {1}, pages = {14604582231152792}, doi = {10.1177/14604582231152792}, pmid = {36645733}, issn = {1741-2811}, abstract = {OBJECTIVES: Telehealth monitoring applications are latency-sensitive. The current fog-based telehealth monitoring models are mainly focused on the role of fog computing in improving response time and latency.
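[Editor's note] The variational approach in the Xu et al. (pmid36654109) entry above recasts a linear system as a ground-state (cost-minimization) problem. A classical toy analogue under stated assumptions: a gradient-free optimizer minimizing ||Ax - b||^2 stands in for the quantum parameter loop; this is purely illustrative and not the paper's quantum circuit.

    import numpy as np
    from scipy.optimize import minimize

    # Toy analogue of variational linear-system solving: the solution of
    # A x = b is the minimizer of the cost C(x) = ||A x - b||^2, which plays
    # the role of the constructed Hamiltonian's ground-state energy.
    A = np.array([[3.0, 1.0], [1.0, 2.0]])
    b = np.array([9.0, 8.0])

    cost = lambda x: float(np.sum((A @ x - b) ** 2))
    res = minimize(cost, x0=np.zeros(2), method="Nelder-Mead")

    print("variational solution:", np.round(res.x, 4))
    print("direct solve:       ", np.round(np.linalg.solve(A, b), 4))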
In this paper, we have introduced a new service called "priority queue" in the fog layer, which is programmed to prioritize the events sent by different sources in different environments to assist the cloud layer with reducing response time and latency.

MATERIAL AND METHODS: We analyzed the performance of the proposed model in a fog-enabled cloud environment with the iFogSim toolkit. To provide a comparison of cloud and fog computing environments, three parameters, namely response time, latency, and network usage, were used. We used the Pima Indian diabetes dataset to evaluate the model.

RESULT: The fog layer proved to be very effective in improving the response time while handling emergencies using priority queues. The proposed model reduces response time by 25.8%, latency by 36.18%, bandwidth by 28.17%, and network usage time by 41.4% as compared to the cloud.

CONCLUSION: By combining priority queues and fog computing in this study, network usage, latency, bandwidth, and response time were significantly reduced as compared to cloud computing.}, } @article {pmid36642685, year = {2023}, author = {Akgün, FA and Fındık, Y and Solak, S and Uçar, MHB and Büyükçavuş, MH and Baykul, T}, title = {Face comparison analysis of patients with orthognathic surgery treatment using cloud computing-based face recognition application programming interfaces.}, journal = {American journal of orthodontics and dentofacial orthopedics : official publication of the American Association of Orthodontists, its constituent societies, and the American Board of Orthodontics}, volume = {}, number = {}, pages = {}, doi = {10.1016/j.ajodo.2022.05.023}, pmid = {36642685}, issn = {1097-6752}, abstract = {INTRODUCTION: This study aimed to investigate whether the postoperative change in patients whose facial aesthetics were affected by orthognathic surgery led to detectable differences using Microsoft Azure, Amazon Web Services Rekognition, and Face[++], which are commercially available face recognition systems.
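[Editor's note] The "priority queue" fog service in the Yazdani et al. (pmid36645733) entry above can be sketched with Python's standard heapq module; the event names and priority levels below are hypothetical, not the paper's.

    import heapq
    import itertools

    # Minimal fog-layer priority queue sketch: emergency events are served
    # before routine telemetry regardless of arrival order.
    counter = itertools.count()  # tie-breaker preserves FIFO order per priority
    queue = []

    def submit(priority: int, event: str):
        """Lower number = higher priority (0 = emergency)."""
        heapq.heappush(queue, (priority, next(counter), event))

    submit(2, "routine glucose reading")
    submit(0, "EMERGENCY: hypoglycemia alert")
    submit(1, "abnormal heart-rate sample")

    while queue:
        priority, _, event = heapq.heappop(queue)
        print(f"dispatching (p={priority}): {event}")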

METHODS: Photographs of 35 patients after orthognathic surgery were analyzed using 3 well-known cloud computing-based facial recognition application programming interfaces to compute similarity scores between preoperative and postoperative photographs. The preoperative, relaxed, smiling, profile, and semiprofile photographs of the patients were compared separately to validate the relevant application programming interfaces. Patient characteristics and type of surgery were recorded for statistical analysis. Kruskal-Wallis rank sum tests were performed to analyze the relationship between patient characteristics and similarity scores. Multiple-comparison Wilcoxon rank sum tests were performed on the statistically significant characteristics.

RESULTS: The similarity scores in the Face[++] program were lower than those in the Microsoft Azure and Amazon Web Services Rekognition. In addition, the similarity scores were higher in smiling photographs. A statistically significant difference was found in similarity scores between relaxed and smiling photographs according to different programs (P <0.05). For all 3 facial recognition programs, comparable similarity scores were found in all photographs taken before and after surgery across sex, type of surgery, and type of surgical approach. The type of surgery and surgical approach, sex, and amount of surgical movement did not significantly affect similarity scores in any facial recognition programs (P >0.05).

CONCLUSIONS: The similarity scores between the photographs before and after orthognathic surgery were high, suggesting that the software algorithms might value measurements on the basis of upper-face landmarks more than lower-face measurements.}, } @article {pmid36637558, year = {2022}, author = {Ye, W and Wang, J and Tian, H and Quan, H}, title = {Public auditing for real-time medical sensor data in cloud-assisted HealthIIoT system.}, journal = {Frontiers of optoelectronics}, volume = {15}, number = {1}, pages = {29}, pmid = {36637558}, issn = {2095-2767}, abstract = {With the advancement of the industrial internet of things (IIoT), wireless medical sensor networks (WMSNs) have been widely introduced in modern healthcare systems to collect real-time medical data from patients, which is known as HealthIIoT. Considering the limited computing and storage capabilities of lightweight HealthIIoT devices, it is necessary to upload these data to remote cloud servers for storage and maintenance. However, there are still some serious security issues in outsourcing medical sensor data to the cloud. One of the most significant challenges is how to ensure the integrity of these data, which is a prerequisite for providing precise medical diagnosis and treatment. To meet this challenge, we propose a novel and efficient public auditing scheme, which is suitable for the cloud-assisted HealthIIoT system. Specifically, to address the contradiction between the high real-time requirement of medical sensor data and the limited computing power of HealthIIoT devices, a new online/offline tag generation algorithm is designed to improve preprocessing efficiency; to protect medical data privacy, a secure hash function is employed to blind the data proof. We formally prove the security of the presented scheme, and evaluate the performance through detailed experimental comparisons with state-of-the-art schemes. The results show that the presented scheme can greatly improve the efficiency of tag generation, while achieving better auditing performance than previous schemes.}, } @article {pmid36636525, year = {2023}, author = {Wang, SH and Khan, MA and Zhu, Z and Zhang, YD}, title = {WACPN: A Neural Network for Pneumonia Diagnosis.}, journal = {International journal of computer systems science & engineering}, volume = {45}, number = {1}, pages = {21-34}, pmid = {36636525}, issn = {2766-483X}, abstract = {Community-acquired pneumonia (CAP) is considered a type of pneumonia acquired outside hospitals and clinics. To diagnose community-acquired pneumonia (CAP) more efficiently, we proposed a novel neural network model. We introduce the 2-dimensional wavelet entropy (2d-WE) layer and an adaptive chaotic particle swarm optimization (ACP) algorithm to train the feed-forward neural network. The ACP uses adaptive inertia weight factor (AIWF) and Rossler attractor (RA) to improve the performance of standard particle swarm optimization. The final combined model is named WE-layer ACP-based network (WACPN), which attains a sensitivity of 91.87±1.37%, a specificity of 90.70±1.19%, a precision of 91.01±1.12%, an accuracy of 91.29±1.09%, F1 score of 91.43±1.09%, an MCC of 82.59±2.19%, and an FMI of 91.44±1.09%. The AUC of this WACPN model is 0.9577. We find that choosing a maximum decomposition level of four obtains the best result. Experiments demonstrate the effectiveness of both AIWF and RA. Finally, this proposed WACPN is efficient in diagnosing CAP and superior to six state-of-the-art models.
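[Editor's note] The similarity scores in the Akgün et al. (pmid36642685) entry above come from cloud face-comparison APIs. A minimal sketch of one such call with AWS Rekognition via boto3; the file names are placeholders, and AWS credentials and region configuration are assumed to be set up in the environment.

    import boto3

    # Compare a preoperative and a postoperative photograph and print the
    # similarity score returned by Amazon Rekognition's CompareFaces API.
    client = boto3.client("rekognition")

    with open("preop.jpg", "rb") as src, open("postop.jpg", "rb") as tgt:
        response = client.compare_faces(
            SourceImage={"Bytes": src.read()},
            TargetImage={"Bytes": tgt.read()},
            SimilarityThreshold=0,  # return all matches, even low-similarity ones
        )

    for match in response["FaceMatches"]:
        print(f"similarity: {match['Similarity']:.1f}%")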
Our model will be deployed to a cloud computing environment.}, } @article {pmid36627353, year = {2023}, author = {Saxena, D and Singh, AK and Lee, CN and Buyya, R}, title = {A sustainable and secure load management model for green cloud data centres.}, journal = {Scientific reports}, volume = {13}, number = {1}, pages = {491}, pmid = {36627353}, issn = {2045-2322}, abstract = {The massive upsurge in cloud resource demand and inefficient load management undermine the sustainability of Cloud Data Centres (CDCs), resulting in high energy consumption, resource contention, excessive carbon emission, and security threats. In this context, a novel Sustainable and Secure Load Management (SaS-LM) Model is proposed to enhance security for users with sustainability for CDCs. The model estimates and reserves the required resources, viz., compute, network, and storage, and dynamically adjusts the load subject to maximum security and sustainability. An evolutionary optimization algorithm named Dual-Phase Black Hole Optimization (DPBHO) is proposed for optimizing a multi-layered feed-forward neural network and allowing the model to estimate resource usage and detect probable congestion. Further, DPBHO is extended to a Multi-objective DPBHO algorithm for secure and sustainable VM allocation and management to minimize the number of active server machines, carbon emission, and resource wastage for greener CDCs. SaS-LM is implemented and evaluated using benchmark real-world Google Cluster VM traces. The proposed model is compared with state-of-the-art models, which reveals its efficacy in terms of carbon emission and energy consumption reductions of up to 46.9% and 43.9%, respectively, with resource utilization improved by up to 16.5%.}, } @article {pmid36624887, year = {2023}, author = {Saba, T and Rehman, A and Haseeb, K and Alam, T and Jeon, G}, title = {Cloud-edge load balancing distributed protocol for IoE services using swarm intelligence.}, journal = {Cluster computing}, volume = {}, number = {}, pages = {1-11}, pmid = {36624887}, issn = {1386-7857}, abstract = {The rapid development of the Internet of Everything (IoE) and cloud services plays a vital role in the growth of smart applications. It provides scalability with the collaboration of cloud servers and copes with a big amount of collected data for network systems. Edge computing supports efficient utilization of communication bandwidth and meets latency requirements to facilitate smart embedded systems. However, it faces significant research issues regarding data aggregation among heterogeneous network services and objects. Moreover, distributed systems are more precise for data access and storage, thus machine-to-machine communication needs to be secured against unpredictable events. As a result, this research proposes a secure data management and distributed load balancing protocol using particle swarm optimization, which aims to decrease the response time for cloud users and effectively maintain the integrity of network communication. It combines distributed computing and shifts high-cost computations closer to the requesting node to reduce latency and transmission overhead. Moreover, the proposed work also protects the communicating machines from malicious devices by evaluating the trust in a controlled manner.
Simulation results revealed the significant performance of the proposed protocol in comparison to other solutions, with average improvements of 20% in energy consumption, 17% in success rate, 14% in end-to-end delay, and 19% in network cost across various performance metrics.}, } @article {pmid36624868, year = {2023}, author = {Liu, X and Gao, A and Chen, C and Moghimi, MM}, title = {Lightweight similarity checking for English literatures in mobile edge computing.}, journal = {Journal of cloud computing (Heidelberg, Germany)}, volume = {12}, number = {1}, pages = {3}, pmid = {36624868}, issn = {2192-113X}, abstract = {With the advent of the information age, mobile devices have become one of the major convenient tools that aid people's daily office activities such as academic research, one of whose major tasks is to check the repetition rate or similarity among different English literatures. Traditional literature similarity checking solutions in the cloud paradigm often call for intensive computational cost and long waiting time. To tackle this issue, in this paper, we modify the traditional literature similarity checking solution in the cloud paradigm to make it suitable for the lightweight mobile edge environment. Furthermore, we put forward a lightweight similarity checking approach, SCMEC, for English literatures in the mobile edge computing environment. To validate the advantages of SCMEC, we have designed extensive experiments on a dataset. The reported experimental results show that SCMEC can deliver satisfactory literature similarity checking results compared to other existing approaches.}, } @article {pmid36620727, year = {2022}, author = {Wegner, T and Lassnig, M and Ueberholz, P and Zeitnitz, C}, title = {Simulation and Evaluation of Cloud Storage Caching for Data Intensive Science.}, journal = {Computing and software for big science}, volume = {6}, number = {1}, pages = {5}, pmid = {36620727}, issn = {2510-2044}, abstract = {A common task in scientific computing is data reduction. This workflow extracts the most important information from large input data and stores it in smaller derived data objects. The derived data objects can then be used for further analysis. Typically, these workflows use distributed storage and computing resources. A straightforward setup of storage media would be low-cost tape storage and higher-cost disk storage. The large, infrequently accessed input data are stored on tape storage. The smaller, frequently accessed derived data is stored on disk storage. In a best-case scenario, the large input data is only accessed very infrequently and in a well-planned pattern. However, practice shows that often the data has to be processed continuously and unpredictably. This can significantly reduce tape storage performance. A common approach to counter this is storing copies of the large input data on disk storage. This contribution evaluates an approach that uses cloud storage resources to serve as a flexible cache or buffer, depending on the computational workflow. The proposed model is explored for the case of continuously processed data. For the evaluation, a simulation tool was developed, which can be used to analyse models related to storage and network resources. We show that using commercial cloud storage can reduce on-premises disk storage requirements, while maintaining an equal throughput of jobs.
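[Editor's note] Lightweight document similarity of the kind targeted by the Liu et al. (pmid36624868) entry above is often approximated with word shingles and Jaccard similarity; a minimal sketch follows. This illustrates the general idea only; the paper's actual SCMEC algorithm is not detailed in the abstract.

    # Minimal shingle-based similarity sketch: split each text into word
    # 3-grams and compare the sets with Jaccard similarity.
    def shingles(text: str, k: int = 3) -> set:
        words = text.lower().split()
        return {" ".join(words[i:i + k]) for i in range(len(words) - k + 1)}

    def jaccard(a: set, b: set) -> float:
        return len(a & b) / len(a | b) if a | b else 0.0

    doc1 = "cloud computing enables scalable literature similarity checking"
    doc2 = "mobile edge computing enables scalable literature similarity checking"
    print(f"similarity = {jaccard(shingles(doc1), shingles(doc2)):.2f}")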
Moreover, the key metrics of the model are discussed, and an approach is described that uses the simulation to assist with the decision process of using commercial cloud storage. The goal is to investigate approaches and propose new evaluation methods to overcome future data challenges.}, } @article {pmid36618951, year = {2021}, author = {Moosavi, J and Bakhshi, J and Martek, I}, title = {The application of industry 4.0 technologies in pandemic management: Literature review and case study.}, journal = {Healthcare analytics (New York, N.Y.)}, volume = {1}, number = {}, pages = {100008}, pmid = {36618951}, issn = {2772-4425}, abstract = {The Covid-19 pandemic's impact on people's lives has been devastating. Around the world, people have been forced to stay home, resorting to the use of digital technologies in an effort to continue their life and work as best they can. Covid-19 has thus accelerated society's digital transformation towards Industry 4.0 (the fourth industrial revolution). Using scientometric analysis, this study presents a systematic literature review of the themes within Industry 4.0. Thematic analysis reveals the Internet of Things (IoT), Artificial Intelligence (AI), Cloud computing, Machine learning, Security, Big Data, Blockchain, Deep learning, Digitalization, and Cyber-physical system (CPS) to be the key technologies associated with Industry 4.0. Subsequently, a case study using Industry 4.0 technologies to manage the Covid-19 pandemic is discussed. In conclusion, Covid-19 is clearly shown to be an accelerant in the progression towards Industry 4.0. Moreover, the technologies of this digital transformation can be expected to be invoked in the management of future pandemics.}, } @article {pmid36617078, year = {2023}, author = {Harach, T and Simonik, P and Vrtkova, A and Mrovec, T and Klein, T and Ligori, JJ and Koreny, M}, title = {Novel Method for Determining Internal Combustion Engine Dysfunctions on Platform as a Service.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {1}, pages = {}, doi = {10.3390/s23010477}, pmid = {36617078}, issn = {1424-8220}, abstract = {This article deals with a unique, new powertrain diagnostics platform at the level of a large number of EU25 inspection stations. The implemented method uses emission measurement data and additional data from a significant sample of vehicles. An original machine learning technique is applied, using 9 static testing points (defined by constant engine load and constant engine speed), the volume of the engine combustion chamber, the EURO emission standard category, an engine condition state coefficient, and actual mileage. An example of dysfunction detection using exhaust emission analyses is described in detail. The test setup is also described, along with the procedure for data collection using a Mindsphere cloud data processing platform. Mindsphere is the core of the new Platform as a Service (PaaS) for data processing from multiple testing facilities. A fleet-level evaluation using the quantile regression method is implemented. In this phase of the research, real data was used, as well as data defined on the basis of knowledge of the manifestation of internal combustion engine defects. As a result of the application of the platform and the evaluation method, it is possible to classify combustion engine dysfunctions.
These are defects that cannot be detected by self-diagnostic procedures for cars up to the EURO 6 level.}, } @article {pmid36616922, year = {2022}, author = {Martínez-Otzeta, JM and Rodríguez-Moreno, I and Mendialdua, I and Sierra, B}, title = {RANSAC for Robotic Applications: A Survey.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {1}, pages = {}, doi = {10.3390/s23010327}, pmid = {36616922}, issn = {1424-8220}, abstract = {Random Sample Consensus, most commonly abbreviated as RANSAC, is a robust estimation method for the parameters of a model from data contaminated by a sizable percentage of outliers. In its simplest form, the process starts with a sampling of the minimum data needed to perform an estimation, followed by an evaluation of its adequacy, and further repetitions of this process until some stopping criterion is met. Multiple variants have been proposed in which this workflow is modified, typically tweaking one or several of these steps for improvements in computing time or the quality of the estimation of the parameters. RANSAC is widely applied in the field of robotics, for example, for finding geometric shapes (planes, cylinders, spheres, etc.) in point clouds or for estimating the best transformation between different camera views. In this paper, we present a review of the current state of the art of RANSAC family methods with a special interest in applications in robotics.}, } @article {pmid36616830, year = {2022}, author = {Abolhassani Khajeh, S and Saberikamarposhti, M and Rahmani, AM}, title = {Real-Time Scheduling in IoT Applications: A Systematic Review.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {1}, pages = {}, doi = {10.3390/s23010232}, pmid = {36616830}, issn = {1424-8220}, abstract = {The Internet of Things (IoT) is a telecommunication network for the next generation of applications, enabled by the rapid progress of wireless sensor network techniques that have touched many spheres of life today. Hardware, telephony, communications, storage, secure platforms, software and services, and data processing platforms are all part of the IoT environment. IoT sensors collect data from their environment and share it by connecting to the Internet gateway. These sensors often perform tasks without human intervention. This article aims to review real-time scheduling in the IoT to fully understand the issues raised in this area in studies published from 2018 to 2022. A classification for IoT applications based on practical application is provided for selected studies. Selected studies include healthcare, infrastructure, industrial applications, smart city, commercial applications, environmental protection, and general IoT applications. Studies are sorted into groups based on related applications and compared based on indicators such as performance time, energy consumption, makespan, and assessment environments depending on the provided classification.
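[Editor's note] As a concrete illustration of the sample-evaluate-repeat workflow surveyed in the Martínez-Otzeta et al. (pmid36616922) entry above, a minimal RANSAC line-fitting sketch; the threshold, iteration count, and synthetic data are illustrative only.

    import numpy as np

    def ransac_line(points, n_iters=200, threshold=0.1, rng=np.random.default_rng(0)):
        """Fit y = m*x + c to 2D points, robust to outliers."""
        best_inliers, best_model = 0, None
        for _ in range(n_iters):
            # 1. Sample the minimum data needed: two points define a line.
            (x1, y1), (x2, y2) = points[rng.choice(len(points), 2, replace=False)]
            if x1 == x2:
                continue
            m = (y2 - y1) / (x2 - x1)
            c = y1 - m * x1
            # 2. Evaluate adequacy: count points within the residual threshold.
            inliers = np.sum(np.abs(points[:, 1] - (m * points[:, 0] + c)) < threshold)
            if inliers > best_inliers:
                best_inliers, best_model = inliers, (m, c)
        return best_model, best_inliers

    # Synthetic data: a noisy line plus gross outliers.
    rng = np.random.default_rng(1)
    x = rng.uniform(0, 10, 100)
    y = 2.0 * x + 1.0 + rng.normal(0, 0.05, 100)
    y[:20] += rng.uniform(-20, 20, 20)  # corrupt 20% of the points
    (m, c), n = ransac_line(np.column_stack([x, y]))
    print(f"estimated line: y = {m:.2f}x + {c:.2f} with {n} inliers")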
Finally, this paper discusses all reviewed studies' main concepts, disadvantages, advantages, and future work.}, } @article {pmid36616797, year = {2022}, author = {Bhatia, J and Italiya, K and Jadeja, K and Kumhar, M and Chauhan, U and Tanwar, S and Bhavsar, M and Sharma, R and Manea, DL and Verdes, M and Raboaca, MS}, title = {An Overview of Fog Data Analytics for IoT Applications.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {1}, pages = {}, doi = {10.3390/s23010199}, pmid = {36616797}, issn = {1424-8220}, abstract = {With the rapid growth of data and processing over the cloud, it has become easier to access those data. On the other hand, it poses many technical and security challenges to the users of those provisions. Fog computing makes these technical issues manageable to some extent. Fog computing is one of the promising solutions for handling the big data produced by the IoT, which are often security-critical and time-sensitive. Massive IoT data analytics by a fog computing structure is emerging and requires extensive research for more proficient knowledge and smart decisions. Though big data analytics is advancing, it does not yet consider fog data analytics. However, there are many challenges, including heterogeneity, security, accessibility, resource sharing, network communication overhead, the real-time data processing of complex data, etc. This paper explores various research challenges and their solutions using next-generation fog data analytics and IoT networks. We also performed an experimental analysis based on fog computing and cloud architecture. The result shows that fog computing outperforms the cloud in terms of network utilization and latency. Finally, the paper is concluded with future trends.}, } @article {pmid36616774, year = {2022}, author = {Condon, F and Martínez, JM and Eltamaly, AM and Kim, YC and Ahmed, MA}, title = {Design and Implementation of a Cloud-IoT-Based Home Energy Management System.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {1}, pages = {}, doi = {10.3390/s23010176}, pmid = {36616774}, issn = {1424-8220}, abstract = {The advances in the Internet of Things (IoT) and cloud computing have opened new opportunities for developing various smart grid applications and services. The rapidly increasing adoption of IoT devices has enabled the development of applications and solutions to manage energy consumption efficiently. This work presents the design and implementation of a home energy management system (HEMS), which allows collecting and storing energy consumption data from appliances and the main load of the home. Two scenarios are designed and implemented: a local HEMS that is isolated from the Internet and relies on an edge device for its processing and storage duties, and a Cloud HEMS that uses AWS IoT Core to manage incoming data messages and provide data-driven services and applications. A testbed was set up in a real house in the city of Valparaiso, Chile, over a one-year period, where four appliances were used to collect energy consumption using smart plugs, as well as collecting the main energy load of the house through a data logger acting as a smart meter. To the best of our knowledge, this is the first electrical energy dataset with a 10-second sampling rate from a real household in Valparaiso, Chile. Results show that both implementations perform the baseline tasks (collecting, storing, and controlling) for a HEMS.
This work contributes by providing a detailed technical implementation of a HEMS that enables researchers and engineers to develop and implement HEMS solutions to support different smart home applications.}, } @article {pmid36616737, year = {2022}, author = {Zheng, Y and Luo, J and Chen, W and Zhang, Y and Sun, H and Pan, Z}, title = {Unsupervised 3D Reconstruction with Multi-Measure and High-Resolution Loss.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {1}, pages = {}, doi = {10.3390/s23010136}, pmid = {36616737}, issn = {1424-8220}, abstract = {Multi-view 3D reconstruction technology based on deep learning is developing rapidly. Unsupervised learning has become a research hotspot because it does not need ground truth labels. The current unsupervised method mainly uses 3DCNN to regularize the cost volume and regress image depth. This approach results in high memory requirements and long computing time. In this paper, we propose an end-to-end unsupervised multi-view 3D reconstruction network framework based on PatchMatch, Unsup_patchmatchnet. It dramatically reduces memory requirements and computing time. We propose a feature point consistency loss function. We incorporate various self-supervised signals such as photometric consistency loss and semantic consistency loss into the loss function. At the same time, we propose a high-resolution loss method. This improves the reconstruction of high-resolution images. The experiment proves that the memory usage of the network is reduced by 80% and the running time is reduced by more than 50% compared with networks using the 3DCNN method. The overall error of the reconstructed 3D point cloud is only 0.501 mm. It is superior to most current unsupervised multi-view 3D reconstruction networks. We also test on different datasets and verify that the network generalizes well.}, } @article {pmid36616717, year = {2022}, author = {Passian, A and Buchs, G and Seck, CM and Marino, AM and Peters, NA}, title = {The Concept of a Quantum Edge Simulator: Edge Computing and Sensing in the Quantum Era.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {1}, pages = {}, doi = {10.3390/s23010115}, pmid = {36616717}, issn = {1424-8220}, abstract = {Sensors, enabling observations across vast spatial, spectral, and temporal scales, are major data generators for information technology (IT). Processing, storing, and communicating this ever-growing amount of data pose challenges for the current IT infrastructure. Edge computing-an emerging paradigm to overcome the shortcomings of cloud-based computing-could address these challenges. Furthermore, emerging technologies such as quantum computing, quantum sensing, and quantum communications have the potential to fill the performance gaps left by their classical counterparts. Here, we present the concept of an edge quantum computing (EQC) simulator-a platform for designing the next generation of edge computing applications. An EQC simulator is envisioned to integrate elements from both quantum technologies and edge computing to allow studies of quantum edge applications. The presented concept is motivated by the increasing demand for more sensitive and precise sensors that can operate faster at lower power consumption, generating both larger and denser datasets. These demands may be fulfilled with edge quantum sensor networks. Envisioning the EQC era, we present our view on how such a scenario may be amenable to quantification and design.
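[Editor's note] The collect-and-store loop of a HEMS like the one in the Condon et al. (pmid36616774) entry above typically publishes smart-plug readings to a cloud IoT broker over MQTT. A minimal sketch using the paho-mqtt client (1.x-style constructor); the broker address, topic, and payload fields are hypothetical placeholders, not the paper's actual configuration.

    import json
    import time
    import paho.mqtt.client as mqtt

    # Publish a smart-plug energy reading every 10 seconds, mirroring the
    # 10-second sampling rate described in the study.
    client = mqtt.Client()  # paho-mqtt 1.x style construction
    client.connect("broker.example.com", 1883)
    client.loop_start()

    for _ in range(3):
        reading = {"device": "plug-1", "watts": 42.5, "ts": time.time()}
        client.publish("home/energy/plug-1", json.dumps(reading), qos=1)
        time.sleep(10)

    client.loop_stop()
    client.disconnect()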
Given the cost and complexity of quantum systems, constructing physical prototypes to explore design and optimization spaces is not sustainable, necessitating EQC infrastructure and component simulators to aid in co-design. We discuss what such a simulator may entail and possible use cases that invoke quantum computing at the edge integrated with new sensor infrastructures.}, } @article {pmid36610429, year = {2023}, author = {Krumm, N}, title = {Organizational and Technical Security Considerations for Laboratory Cloud Computing.}, journal = {The journal of applied laboratory medicine}, volume = {8}, number = {1}, pages = {180-193}, doi = {10.1093/jalm/jfac118}, pmid = {36610429}, issn = {2576-9456}, abstract = {BACKGROUND: Clinical and anatomical pathology services are increasingly utilizing cloud information technology (IT) solutions to meet growing requirements for storage, computation, and other IT services. Cloud IT solutions are often considered on the promise of low cost of entry, durability and reliability, scalability, and features that are typically out of reach for small- or mid-sized IT organizations. However, use of cloud-based IT infrastructure also brings additional security and privacy risks to organizations, as unfamiliarity, public networks, and complex feature sets contribute to an increased surface area for attacks.

CONTENT: In this best-practices guide, we aim to help both managers and IT professionals in healthcare environments understand the requirements and risks when using cloud-based IT infrastructure within the laboratory environment. We describe technical, operational, and organizational best practices that can help mitigate security, privacy, and other risks associated with the use of cloud infrastructure; furthermore, we identify how these best practices fit into healthcare regulatory frameworks. Among organizational best practices, we identify the need for specific hiring requirements, relationships with parent IT groups, mechanisms for reviewing and auditing security practices, and sound practices for onboarding and offboarding employees. Then, we highlight selected specific operational security, account security, and auditing/logging best practices. Finally, we describe how individual cloud technologies have specific resource-level security features.

SUMMARY: We emphasize that laboratory directors, managers, and IT professionals must ensure that the fundamental organizational and process-based requirements are addressed first, to establish the groundwork for technical security solutions and successful implementation of cloud infrastructure.}, } @article {pmid36597385, year = {2022}, author = {Zhang, J and Liu, T and Yu, Y}, title = {[Research on Comprehensive Safety Monitoring System for Elderly Care Based on Artificial Intelligence and Information Fusion].}, journal = {Zhongguo yi liao qi xie za zhi = Chinese journal of medical instrumentation}, volume = {46}, number = {6}, pages = {611-614}, doi = {10.3969/j.issn.1671-7104.2022.06.005}, pmid = {36597385}, issn = {1671-7104}, abstract = {Nowadays, China has entered an aging society; how to ensure safety in elderly care has drawn social attention. Through artificial intelligence and multi-information fusion research, combined with the applications of machine learning algorithms, internet of things devices, and cloud computing, this paper presents a comprehensive, intelligent safety monitoring system for the elderly in the community and at home. The system collects the daily life data of the elderly through a series of sensors in an all-round, all-time, and non-intrusive manner, and realizes intelligent alarms for high-risk states such as falls, acute illness, abnormal personnel, and gas smoke for the elderly. Through the innovative research of human pose estimation and behavior recognition, and the application of multi-sensor information fusion, the system can greatly reduce the occurrence of, or injury caused by, safety incidents in senior care, bringing a safe and healthy living environment for the elderly at homes and communities.}, } @article {pmid36590844, year = {2022}, author = {Gudla, SPK and Bhoi, SK and Nayak, SR and Singh, KK and Verma, A and Izonin, I}, title = {A Deep Intelligent Attack Detection Framework for Fog-Based IoT Systems.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {6967938}, pmid = {36590844}, issn = {1687-5273}, abstract = {Fog computing provides a multitude of end-based IoT system services. End IoT devices exchange information with fog nodes and the cloud to handle client tasks. During data collection between the fog layer and the cloud, there are many chances for crucial attacks, such as DDoS and other security attacks, to compromise IoT end devices. These network (NW) threats must be spotted early. Deep learning (DL) plays a distinct role in predicting end-user behavior by extracting features and classifying the adversary in the network. Yet, because of IoT devices' constrained computation and storage capacities, DL cannot be run on them directly. Here, a framework for fog-based attack detection is proffered, and different attacks are predicted utilizing long short-term memory (LSTM). End IoT device behaviour can be predicted by installing a trained LSTMDL model at the fog node computation module. The simulations are performed using Python by comparing the LSTMDL model with deep neural multilayer perceptron (DNMLP), bidirectional LSTM (Bi-LSTM), gated recurrent units (GRU), hybrid ensemble model (HEM), and hybrid deep learning model (CNN + LSTM) comprising convolutional neural network (CNN) and LSTM on DDoS-SDN (Mendeley Dataset), NSLKDD, UNSW-NB15, and IoTID20 datasets.
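[Editor's note] As one concrete instance of the resource-level security features mentioned in the Krumm (pmid36610429) entry above, a minimal sketch that enforces default encryption and blocks public access on an S3 bucket with boto3; the bucket name is a placeholder, and AWS credentials are assumed to be configured.

    import boto3

    # Apply two common resource-level security controls to an S3 bucket:
    # default server-side encryption and a full public-access block.
    s3 = boto3.client("s3")
    bucket = "example-lab-genomics-bucket"  # placeholder name

    s3.put_bucket_encryption(
        Bucket=bucket,
        ServerSideEncryptionConfiguration={
            "Rules": [{"ApplyServerSideEncryptionByDefault": {"SSEAlgorithm": "AES256"}}]
        },
    )
    s3.put_public_access_block(
        Bucket=bucket,
        PublicAccessBlockConfiguration={
            "BlockPublicAcls": True,
            "IgnorePublicAcls": True,
            "BlockPublicPolicy": True,
            "RestrictPublicBuckets": True,
        },
    )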
To evaluate the performance of the binary classifier, metrics like accuracy, precision, recall, f1-score, and ROC-AUC curves are considered on these datasets. The LSTMDL model outperforms the others in binary classification, achieving accuracies of 99.70%, 99.12%, 94.11%, and 99.88% on the respective datasets. The network simulation further shows how different DL models compare on fog layer communication behaviour detection time (CBDT). DNMLP detects communication behaviour (CB) faster than other models, but LSTMDL predicts assaults better.}, } @article {pmid36590152, year = {2022}, author = {Farhadi, F and Barnes, MR and Sugito, HR and Sin, JM and Henderson, ER and Levy, JJ}, title = {Applications of artificial intelligence in orthopaedic surgery.}, journal = {Frontiers in medical technology}, volume = {4}, number = {}, pages = {995526}, pmid = {36590152}, issn = {2673-3129}, abstract = {The practice of medicine is rapidly transforming as a result of technological breakthroughs. Artificial intelligence (AI) systems are becoming more and more relevant in medicine and orthopaedic surgery as a result of the nearly exponential growth in computer processing power, cloud-based computing, and the development and refinement of medical-task-specific software algorithms. Because of the extensive role of technologies such as medical imaging that bring high sensitivity, specificity, and positive/negative prognostic value to management of orthopaedic disorders, the field is particularly ripe for the application of machine-based integration of imaging studies, among other applications. Through this review, we seek to promote awareness in the orthopaedics community of the current accomplishments and projected uses of AI and ML as described in the literature. We summarize the current state of the art in the use of ML and AI in five key orthopaedic disciplines: joint reconstruction, spine, orthopaedic oncology, trauma, and sports medicine.}, } @article {pmid36589280, year = {2022}, author = {Panja, S and Chattopadhyay, AK and Nag, A and Singh, JP}, title = {Fuzzy-logic-based IoMT framework for COVID19 patient monitoring.}, journal = {Computers & industrial engineering}, volume = {176}, number = {}, pages = {108941}, pmid = {36589280}, issn = {1879-0550}, abstract = {Smart healthcare is an integral part of a smart city, which provides real time and intelligent remote monitoring and tracking services to patients and elderly persons. In the era of an extraordinary public health crisis due to the spread of the novel coronavirus (2019-nCoV), which caused the deaths of millions and affected a multitude of people worldwide in different ways, the role of smart healthcare has become indispensable. Any modern method that allows for speedy and efficient monitoring of COVID19-affected patients could be highly beneficial to medical staff. Several smart-healthcare systems based on the Internet of Medical Things (IoMT) have attracted worldwide interest in their growing technical assistance in health services, notably in predicting, identifying and preventing, and their remote surveillance of most infectious diseases. In this paper, a real time health monitoring system for COVID19 patients based on edge computing and fuzzy logic technique is proposed.
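[Editor's note] A minimal sketch of the kind of LSTM binary classifier used for attack detection in the Gudla et al. (pmid36590844) entry above, trained here on random stand-in data; the layer sizes, window shape, and hyperparameters are illustrative, not the paper's.

    import numpy as np
    from tensorflow import keras

    # Toy LSTM binary classifier for network-flow windows: 20 time steps of
    # 10 features each, labeled attack (1) or benign (0). Random stand-in data.
    rng = np.random.default_rng(0)
    X = rng.normal(size=(512, 20, 10)).astype("float32")
    y = rng.integers(0, 2, size=(512, 1)).astype("float32")

    model = keras.Sequential([
        keras.layers.Input(shape=(20, 10)),
        keras.layers.LSTM(64),
        keras.layers.Dense(1, activation="sigmoid"),
    ])
    model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
    model.fit(X, y, epochs=2, batch_size=64, verbose=0)

    print("predicted attack probability:", float(model.predict(X[:1], verbose=0)[0, 0]))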
The proposed model makes use of the IoMT architecture to collect real time biological data (or health information) from the patients to monitor and analyze the health conditions of the infected patients, and generates alert messages that are transmitted to the concerned parties, such as relatives, medical staff and doctors, to provide appropriate treatment in a timely fashion. The health data are collected through sensors attached to the patients and transmitted to the edge devices and cloud storage for further processing. The collected data are analyzed through fuzzy logic in edge devices to efficiently identify the risk status (such as low risk, moderate risk and high risk) of the COVID19 patients in real time. The proposed system is also associated with a mobile app that enables the continuous monitoring of the health status of the patients. Moreover, once alerted by the system about the high risk status of a patient, a doctor can fetch all the health records of the patient for a specified period, which can be utilized for a detailed clinical diagnosis.}, } @article {pmid36588663, year = {2023}, author = {Gezimati, M and Singh, G}, title = {Advances in terahertz technology for cancer detection applications.}, journal = {Optical and quantum electronics}, volume = {55}, number = {2}, pages = {151}, pmid = {36588663}, issn = {0306-8919}, abstract = {Currently, there is an increasing demand for diagnostic techniques that provide functional and morphological information with early cancer detection capability. Novel modern medical imaging systems driven by the recent advancements in technology such as terahertz (THz) and infrared radiation-based imaging technologies which are complementary to conventional modalities are being developed, investigated, and validated. The THz cancer imaging techniques offer novel opportunities for label free, non-ionizing, non-invasive and early cancer detection. The observed image contrast in THz cancer imaging studies has been mostly attributed to higher refractive index, absorption coefficient and dielectric properties in cancer tissue than in normal tissue, due to the local increase of the water molecule content in tissue and increased blood supply to the cancer affected tissue. Additional image contrast parameters and cancer biomarkers that have been reported to contribute to THz image contrast include cell structural changes, molecular density, interactions between agents (e.g., contrast agents and embedding agents) and biological tissue, as well as tissue substances like proteins, fiber and fat etc. In this paper, we have presented a systematic and comprehensive review of the advancements in the technological development of THz technology for cancer imaging applications. Initially, the fundamental principles and techniques for THz radiation generation and detection, imaging and spectroscopy are introduced. Further, the applications of THz imaging for the detection of various cancer tissues are presented, with a particular focus on the in vivo imaging of skin cancer. The data processing techniques for THz data are briefly discussed. Also, we identify the advantages and existing challenges in THz-based cancer detection and report the performance improvement techniques. The recent advancements towards THz systems which are optimized and miniaturized are also reported. Finally, the integration of THz systems with artificial intelligence (AI), internet of things (IoT), cloud computing, big data analytics, robotics etc. for more sophisticated systems is proposed.
This will facilitate the large-scale clinical applications of THz for smart and connected next generation healthcare systems and provide a roadmap for future research.}, } @article {pmid36584089, year = {2022}, author = {Yang, D and Yu, J and Du, X and He, Z and Li, P}, title = {Energy saving strategy of cloud data computing based on convolutional neural network and policy gradient algorithm.}, journal = {PloS one}, volume = {17}, number = {12}, pages = {e0279649}, doi = {10.1371/journal.pone.0279649}, pmid = {36584089}, issn = {1932-6203}, abstract = {Cloud Data Computing (CDC) is conducive to precise energy-saving management of user data centers based on the real-time energy consumption monitoring of Information Technology equipment. This work aims to obtain the most suitable energy-saving strategies to achieve safe, intelligent, and visualized energy management. First, the theory of Convolutional Neural Network (CNN) is discussed. Besides, an intelligent energy-saving model based on CNN is designed to ameliorate the variable energy consumption, load, and power consumption of the CDC data center. Then, the core idea of the policy gradient (PG) algorithm is introduced. In addition, a CDC task scheduling model is designed based on the PG algorithm, aiming at the uncertainty and volatility of the CDC scheduling tasks. Finally, the performance of different neural network models in the training process is analyzed from the perspective of total energy consumption and load optimization of the CDC center. At the same time, simulation is performed on the CDC task scheduling model based on the PG algorithm to analyze the task scheduling demand. The results demonstrate that the energy consumption of the CNN algorithm in the CDC energy-saving model is better than that of the Elman algorithm and the ecoCloud algorithm. Besides, the CNN algorithm reduces the number of virtual machine migrations in the CDC energy-saving model by 9.30% compared with the Elman algorithm. The Deep Deterministic Policy Gradient (DDPG) algorithm performs the best in task scheduling of the cloud data center, and the average response time of the DDPG algorithm is 141. In contrast, the Deep Q Network algorithm performs poorly. This paper proves that Deep Reinforcement Learning (DRL) and neural networks can reduce the energy consumption of CDC and improve the completion time of CDC tasks, offering a research reference for CDC resource scheduling.}, } @article {pmid36575310, year = {2022}, author = {Wang, J and Li, X and Wang, X and Zhou, S and Luo, Y}, title = {Farmland quality assessment using deep fully convolutional neural networks.}, journal = {Environmental monitoring and assessment}, volume = {195}, number = {1}, pages = {239}, pmid = {36575310}, issn = {1573-2959}, abstract = {Farmland is the cornerstone of agriculture and is important for food security and social production. Farmland assessment is essential but traditional methods are usually expensive and slow. Deep learning methods have been developed and widely applied recently in image recognition, semantic understanding, and many other application domains. In this research, we used fully convolutional networks (FCN) as the deep learning model to evaluate farmland grades. Normalized difference vegetation index (NDVI) derived from Landsat images was used as the input data, and the China National Cultivated Land Grade Database within Jiangsu Province was used to train the model on cloud computing. 
We also applied an image segmentation method to improve the original results from the FCN and compared the results with classical machine learning (ML) methods. Our research found that the FCN can predict farmland grades with an overall F1 score (the harmonic mean of precision and recall) of 0.719 and F1 scores of 0.909, 0.590, 0.740, 0.642, and 0.023 for non-farmland, level I, II, III, and IV farmland, respectively. Combining the FCN and image segmentation method can further improve prediction accuracy, yielding fewer noise pixels and more realistic edges. Compared with conventional ML, at least in farmland evaluation, the FCN provides better results with higher precision, recall, and F1 score. Our research indicates that by using remote sensing NDVI data, the deep learning method can provide acceptable farmland assessment without fieldwork and can be used as a novel supplement to traditional methods. The method used in this research will save considerable time and cost compared with traditional means.}, } @article {pmid36575255, year = {2022}, author = {Niyazi, M and Behnamian, J}, title = {Application of cloud computing and big data in three-stage dynamic modeling of disaster relief logistics and wounded transportation: a case study.}, journal = {Environmental science and pollution research international}, volume = {}, number = {}, pages = {}, pmid = {36575255}, issn = {1614-7499}, abstract = {Collecting and sharing information about affected areas is an important activity for optimal decision-making in relief processes. Defects such as over-sending some items to affected areas and mistakes in transferring injured people to medical centers in accidents are due to improper management of this information. Because cloud computing, as a processing and storage platform for big data, is independent of device and location and can also perform high-speed processing, its use in disasters has attracted considerable research attention. In this environment, a three-stage dynamic procedure for evacuation operations and logistics issues is presented. The first stage of the proposed model is image processing and tweet mining in a cloud center in order to determine the disaster parameters. In stage II, a mixed-integer multi-commodity model is presented for relief commodity delivery, wounded people transportation with capacity constraints, and the locating of possible on-site clinics and local distribution centers near disaster areas. In stage III, by using a system of equations, detailed vehicle load/unload instructions are obtained. Finally, the effectiveness of the proposed model on the data of an earthquake disaster in Iran is investigated. The results of comparing the proposed approach with a two-stage algorithm show that the total unsatisfied demand across all commodity types was lower in the proposed approach. Also, the number of survivors in the three-stage model is significantly higher than in the two-stage one.
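To make the flavor of such a stage-II formulation concrete, below is a deliberately tiny single-commodity allocation sketch in Python with PuLP; the depots, demands, capacities, and costs are hypothetical, and the authors' mixed-integer multi-commodity model (with wounded transportation and facility location) is far richer.

    # A toy capacitated relief-commodity allocation model, loosely inspired
    # by the stage-II formulation described above. All data are hypothetical.
    from pulp import LpProblem, LpMinimize, LpVariable, lpSum, LpStatus

    depots = ["D1", "D2"]
    areas = ["A1", "A2", "A3"]
    supply = {"D1": 80, "D2": 60}             # units of a single commodity
    demand = {"A1": 50, "A2": 40, "A3": 45}
    cost = {("D1", "A1"): 4, ("D1", "A2"): 6, ("D1", "A3"): 9,
            ("D2", "A1"): 5, ("D2", "A2"): 3, ("D2", "A3"): 7}

    prob = LpProblem("relief_allocation", LpMinimize)
    x = {(d, a): LpVariable(f"x_{d}_{a}", lowBound=0) for d in depots for a in areas}
    u = {a: LpVariable(f"unmet_{a}", lowBound=0) for a in areas}   # unmet demand

    # Penalize unmet demand heavily, then transport cost.
    prob += lpSum(1000 * u[a] for a in areas) + \
            lpSum(cost[d, a] * x[d, a] for d in depots for a in areas)
    for d in depots:                                   # depot capacity
        prob += lpSum(x[d, a] for a in areas) <= supply[d]
    for a in areas:                                    # demand balance
        prob += lpSum(x[d, a] for d in depots) + u[a] == demand[a]

    prob.solve()
    print(LpStatus[prob.status], {a: u[a].value() for a in areas})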
The better performance of the proposed algorithm is due to the fact that online data is continuously available and that decisions such as sending relief items and dispatching are made more effectively.}, } @article {pmid36572709, year = {2022}, author = {Khan, S and Khan, HU and Nazir, S}, title = {Systematic analysis of healthcare big data analytics for efficient care and disease diagnosing.}, journal = {Scientific reports}, volume = {12}, number = {1}, pages = {22377}, pmid = {36572709}, issn = {2045-2322}, abstract = {Big data has revolutionized the world by providing tremendous opportunities for a variety of applications. It contains a gigantic amount of data, especially a plethora of data types that has been significantly useful in diverse research domains. In the healthcare domain, researchers use computational devices to extract enriched relevant information from this data and develop smart applications to solve real-life problems in a timely fashion. Electronic health (eHealth) and mobile health (mHealth) facilities, along with the availability of new computational models, have enabled doctors and researchers to extract relevant information and visualize healthcare big data in a new spectrum. Digital transformation of healthcare systems through the use of information systems, medical technology, and handheld and smart wearable devices has posed many challenges to researchers and caretakers in the form of storage, minimizing treatment cost, and processing time (to extract enriched information, and minimize error rates to make optimum decisions). In this research work, the existing literature is analysed and assessed to identify gaps that affect the overall performance of the available healthcare applications. Also, it aims to suggest enhanced solutions to address these gaps. In this comprehensive systematic research work, the existing literature reported during 2011 to 2021 is thoroughly analysed for identifying the efforts made to facilitate doctors and practitioners in diagnosing diseases using healthcare big data analytics. A set of research questions is formulated to analyse the relevant articles for identifying the key features and optimum management solutions, and later use these analyses to achieve effective outcomes. The results of this systematic mapping conclude that despite hard efforts made in the domains of healthcare big data analytics, newer hybrid machine learning-based systems and cloud computing-based models should be adopted to reduce treatment cost and simulation time and achieve improved quality of care. This systematic mapping will also result in enhancing the capabilities of doctors, practitioners, researchers, and policymakers to use this study as evidence for future research.}, } @article {pmid36570052, year = {2022}, author = {Zahid, MA and Akhtar, A and Shafiq, B and Shamail, S and Afzal, A and Vaidya, J}, title = {An Integrated Framework for Fault Resolution in Business Processes.}, journal = {IEEE International Conference on Web Services : proceedings. IEEE International Conference on Web Services}, volume = {2022}, number = {}, pages = {266-275}, doi = {10.1109/icws55610.2022.00048}, pmid = {36570052}, issn = {2770-8144}, abstract = {Cloud and edge-computing based platforms have enabled rapid development of distributed business process (BP) applications in a plug-and-play manner. However, these platforms do not provide the needed capabilities for identifying or repairing faults in BPs.
Faults in BPs may occur due to errors made by BP designers because of their lack of understanding of the underlying component services, misconfiguration of these services, or incorrect/incomplete BP workflow specifications. Such faults may not be discovered at the design or development stage and may occur at runtime. In this paper, we present a unified framework for automated fault resolution in BPs. The proposed framework employs a novel and efficient fault resolution approach that extends the generate-and-validate program repair approach. In addition, we propose a hybrid approach that performs fault resolution by analyzing a faulty BP in isolation as well as by comparing it with other BPs using similar services. This hybrid approach results in improved accuracy and broader coverage of fault types. We also perform an extensive experimental evaluation to compare the effectiveness of the proposed approach using a dataset of 208 faulty BPs.}, } @article {pmid36569183, year = {2022}, author = {Mawgoud, AA and Taha, MHN and Abu-Talleb, A and Kotb, A}, title = {A deep learning based steganography integration framework for ad-hoc cloud computing data security augmentation using the V-BOINC system.}, journal = {Journal of cloud computing (Heidelberg, Germany)}, volume = {11}, number = {1}, pages = {97}, pmid = {36569183}, issn = {2192-113X}, abstract = {In the early days of digital transformation, the automation, scalability, and availability of cloud computing made a big difference for business. Nonetheless, significant concerns have been raised regarding the security and privacy levels that cloud systems can provide, as enterprises have accelerated their cloud migration journeys in an effort to provide a remote working environment for their employees, primarily in light of the COVID-19 outbreak. The goal of this study is to develop a way to improve steganography in ad hoc cloud systems by using deep learning. This research implementation is separated into two sections. In Phase 1, the "Ad-hoc Cloud System" idea and deployment plan were set up with the help of V-BOINC. In Phase 2, a modified form of steganography and deep learning were used to study the security of data transmission in ad-hoc cloud networks. In the majority of prior studies, attempts to employ deep learning models to augment or replace data-hiding systems did not achieve a high success rate. The implemented model embeds data images within colored cover images in the developed ad hoc cloud system. The systematic steganography model conceals messages such that statistical analysis yields lower detection rates. Additionally, it may be necessary to incorporate small images beneath huge cover images.
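For contrast with the deep approach studied here, the classical least-significant-bit (LSB) baseline for hiding one image inside another takes only a few lines of NumPy; this is the traditional technique that deep steganography aims to improve upon, not the authors' model.

    # Classical LSB image-in-image hiding (a simple baseline, not the deep
    # steganography model proposed above). Both arrays are uint8 RGB images
    # of the same shape; the secret's 4 most significant bits are stored in
    # the cover's 4 least significant bits.
    import numpy as np

    def hide(cover: np.ndarray, secret: np.ndarray) -> np.ndarray:
        return (cover & 0xF0) | (secret >> 4)

    def reveal(stego: np.ndarray) -> np.ndarray:
        return (stego & 0x0F) << 4    # recovers the secret's top 4 bits

    rng = np.random.default_rng(0)
    cover = rng.integers(0, 256, (64, 64, 3), dtype=np.uint8)
    secret = rng.integers(0, 256, (64, 64, 3), dtype=np.uint8)
    stego = hide(cover, secret)
    assert np.all((reveal(stego) >> 4) == (secret >> 4))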
The implemented ad-hoc system outperformed Amazon EC2 in terms of performance, while the execution of the proposed deep steganography approach scored highly for concealing both data and images when evaluated against several attacks in an ad-hoc cloud system environment.}, } @article {pmid36567676, year = {2022}, author = {Barot, V and Patel, DR}, title = {A physiological signal compression approach using optimized Spindle Convolutional Auto-encoder in mHealth applications.}, journal = {Biomedical signal processing and control}, volume = {73}, number = {}, pages = {103436}, pmid = {36567676}, issn = {1746-8094}, abstract = {BACKGROUND AND OBJECTIVES: The COVID-19 pandemic manifested the need for developing robust digital platforms for facilitating healthcare services such as consultancy, clinical therapies, real-time remote monitoring, early diagnosis and future predictions. Innovations made using technologies such as the Internet of Things (IoT), edge computing, cloud computing and artificial intelligence are helping address this crisis. The urge for remote monitoring, symptom analysis and early detection of diseases led to a tremendous increase in the deployment of wearable sensor devices. They facilitate seamless gathering of physiological data such as electrocardiogram (ECG) signals, respiration traces (RESP), galvanic skin response (GSR), pulse rate, body temperature, photoplethysmograms (PPG), oxygen saturation (SpO2) etc. For diagnosis and analysis purposes, the gathered data needs to be stored. Wearable devices operate on batteries and have a memory constraint. In mHealth application architectures, this gathered data is hence stored on cloud-based servers. While transmitting data from wearable devices to cloud servers via edge devices, a lot of energy is consumed. This paper proposes a deep learning-based compression model, SCAElite, that reduces the data volume, enabling energy-efficient transmission.
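The compression idea can be sketched as a small 1D convolutional autoencoder trained to reconstruct signal windows; the layer sizes and hyperparameters below are hypothetical, and this is an illustrative stand-in rather than the SCAElite architecture.

    # Minimal 1D convolutional autoencoder for signal compression -- an
    # illustrative sketch only; layer sizes are hypothetical and this is
    # not the SCAElite architecture itself.
    import torch
    import torch.nn as nn

    class ConvAE(nn.Module):
        def __init__(self):
            super().__init__()
            self.encoder = nn.Sequential(               # 1 x 256 -> 8 x 16
                nn.Conv1d(1, 16, 9, stride=4, padding=4), nn.ReLU(),
                nn.Conv1d(16, 8, 9, stride=4, padding=4), nn.ReLU(),
            )
            self.decoder = nn.Sequential(               # 8 x 16 -> 1 x 256
                nn.ConvTranspose1d(8, 16, 8, stride=4, padding=2), nn.ReLU(),
                nn.ConvTranspose1d(16, 1, 8, stride=4, padding=2),
            )

        def forward(self, x):
            return self.decoder(self.encoder(x))

    model = ConvAE()
    x = torch.randn(32, 1, 256)                         # batch of ECG windows
    opt = torch.optim.Adam(model.parameters(), lr=1e-3)
    loss = nn.functional.mse_loss(model(x), x)          # reconstruction error
    loss.backward(); opt.step()
    print(loss.item())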

RESULTS: The Stress Recognition in Automobile Drivers dataset and the MIT-BIH dataset from PhysioNet are used for validation of algorithm performance. The model achieves a compression ratio of up to 300-fold with reconstruction errors within 8% over the stress recognition dataset and 106.34-fold with reconstruction errors within 8% over the MIT-BIH dataset. The computational complexity of SCAElite is 51.65% less than that of the state-of-the-art deep compressive model.

CONCLUSION: It is experimentally validated that SCAElite guarantees a high compression ratio with good quality restoration capabilities for physiological signal compression in mHealth applications. It has a compact architecture and is computationally more efficient compared to the state-of-the-art deep compressive model.}, } @article {pmid36567694, year = {2021}, author = {Singh, M and Singh, BB and Singh, R and Upendra, B and Kaur, R and Gill, SS and Biswas, MS}, title = {Quantifying COVID-19 enforced global changes in atmospheric pollutants using cloud computing based remote sensing.}, journal = {Remote sensing applications : society and environment}, volume = {22}, number = {}, pages = {100489}, pmid = {36567694}, issn = {2352-9385}, abstract = {Global lockdowns in response to the COVID-19 pandemic have led to changes in anthropogenic activities, resulting in perceivable air quality improvements. Although several recent studies have analyzed these changes over different regions of the globe, these analyses have been constrained due to the usage of station-based data, which is mostly limited to metropolitan cities. Also, quantifiable changes have been reported only for developed and developing regions, leaving out poorer economies (e.g., Africa) due to the shortage of in-situ data. Using a comprehensive set of high spatiotemporal resolution satellites and merged products of air pollutants, we analyze the air quality across the globe and quantify the improvement resulting from the suppressed anthropogenic activity during the lockdowns. In particular, we focus on megacities, capitals and cities with high standards of living to make the quantitative assessment. Our results offer valuable insights into the spatial distribution of changes in the air pollutants due to COVID-19 enforced lockdowns. Statistically significant reductions are observed over megacities, with mean reductions of 19.74%, 7.38%, and 49.9% in nitrogen dioxide (NO2), aerosol optical depth (AOD) and PM2.5 concentrations. Google Earth Engine-empowered, cloud computing-based remote sensing is used, and the results provide a testbed for climate sensitivity experiments and validation of chemistry-climate models. Additionally, Google Earth Engine-based apps have been developed to visualize the changes in a real-time fashion.}, } @article {pmid36563043, year = {2022}, author = {Tuler de Oliveira, M and Amorim Reis, LH and Marquering, H and Zwinderman, AH and Delgado Olabarriaga, S}, title = {Perceptions of a Secure Cloud-Based Solution for Data Sharing During Acute Stroke Care: Qualitative Interview Study.}, journal = {JMIR formative research}, volume = {6}, number = {12}, pages = {e40061}, doi = {10.2196/40061}, pmid = {36563043}, issn = {2561-326X}, abstract = {BACKGROUND: Acute stroke care demands fast procedures performed through the collaboration of multiple professionals across multiple organizations. Cloud computing and the wide adoption of electronic medical records (EMRs) enable health care systems to improve data availability and facilitate sharing among professionals. However, designing a secure and privacy-preserving EMR cloud-based application is challenging because it must dynamically control the access to the patient's EMR according to the needs for data during treatment.

OBJECTIVE: We developed a prototype of a secure EMR cloud-based application. The application explores the security features offered by the eHealth cloud-based framework created by the Advanced Secure Cloud Encrypted Platform for Internationally Orchestrated Solutions in Health Care Horizon 2020 project. This study aimed to collect impressions, challenges, and improvements for the prototype when applied to the use case of secure data sharing among acute care teams during emergency treatment in the Netherlands.

METHODS: We conducted 14 semistructured interviews with medical professionals in 4 prominent roles in acute care: emergency call centers, ambulance services, emergency hospitals, and general practitioner clinics. We used in-depth interviews to capture their perspectives about the application's design and functions and its use in a simulated acute care event. We applied thematic analysis to the interview transcripts. Participants were recruited until the collected data reached thematic saturation.

RESULTS: The participants' perceptions and feedback are presented as 5 themes identified from the interviews: current challenges (theme 1), quality of the shared EMR data (theme 2), integrity and auditability of the EMR data (theme 3), usefulness and functionality of the application (theme 4), and trust and acceptance of the technology (theme 5). The results reinforced the current challenges in patient data sharing during acute stroke care. Moreover, from the user's point of view, we articulated the challenges of adopting the Advanced Secure Cloud Encrypted Platform for Internationally Orchestrated Solutions in Health Care Acute Stroke Care application in a real scenario and provided suggestions for improving the proposed technology's acceptability.

CONCLUSIONS: This study has endorsed a system that supports data sharing among acute care professionals efficiently, without compromising patient security and privacy. This explorative study identified several significant barriers to and improvement opportunities for the future acceptance and adoption of the proposed system. Moreover, the study results highlight that the desired digital transformation should consider integrating the already existing systems instead of requesting migration to a new centralized system.}, } @article {pmid36561335, year = {2022}, author = {Sethuraman, A}, title = {Teaching computational genomics and bioinformatics on a high performance computing cluster-a primer.}, journal = {Biology methods & protocols}, volume = {7}, number = {1}, pages = {bpac032}, pmid = {36561335}, issn = {2396-8923}, abstract = {The burgeoning field of genomics as applied to personalized medicine, epidemiology, conservation, agriculture, forensics, drug development, and other fields comes with large computational and bioinformatics costs, which are often inaccessible to student trainees in classroom settings at universities. However, with increased availability of resources such as NSF XSEDE, Google Cloud, Amazon AWS, and other high-performance computing (HPC) clouds and clusters for educational purposes, a growing community of academicians is working on teaching the utility of HPC resources in genomics and big data analyses. Here, I describe the successful implementation of a semester-long (16-week) upper-division undergraduate/graduate-level course in Computational Genomics and Bioinformatics taught at San Diego State University in Spring 2022. Students were trained in the theory, algorithms and hands-on applications of genomic data quality control, assembly, annotation, multiple sequence alignment, variant calling, phylogenomic analyses, population genomics, genome-wide association studies, and differential gene expression analyses using RNAseq data on their own dedicated 6-CPU NSF XSEDE Jetstream virtual machines. All lesson plans, activities, examinations, tutorials, code, lectures, and notes are publicly available at https://github.com/arunsethuraman/biomi609spring2022.}, } @article {pmid36560272, year = {2022}, author = {Uslu, S and Kaur, D and Durresi, M and Durresi, A}, title = {Trustability for Resilient Internet of Things Services on 5G Multiple Access Edge Cloud Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {24}, pages = {}, doi = {10.3390/s22249905}, pmid = {36560272}, issn = {1424-8220}, abstract = {Billions of Internet of Things (IoT) devices and sensors are expected to be supported by fifth-generation (5G) wireless cellular networks. This highly connected structure is predicted to attract different and unseen types of attacks on devices, sensors, and networks that require advanced mitigation strategies and the active monitoring of the system components. Therefore, a paradigm shift is needed, from traditional prevention and detection approaches toward resilience. This study proposes a trust-based defense framework to ensure resilient IoT services on 5G multi-access edge computing (MEC) systems. This defense framework is based on the trustability metric, which is an extension of the concept of reliability and measures how much a system can be trusted to keep a given level of performance under a specific successful attack vector.
Furthermore, trustability is used as a trade-off with system cost to measure the net utility of the system. Systems using multiple sensors with different levels of redundancy were tested, and the framework was shown to measure the trustability of the entire system. Furthermore, different types of attacks were simulated on an edge cloud with multiple nodes, and the trustability was compared to the capabilities of dynamic node addition for the redundancy and removal of untrusted nodes. Finally, the defense framework measured the net utility of the service, comparing the two types of edge clouds with and without the node deactivation capability. Overall, the proposed defense framework based on trustability ensures a satisfactory level of resilience for IoT on 5G MEC systems, which serves as a trade-off with an accepted cost of redundant resources under various attacks.}, } @article {pmid36560073, year = {2022}, author = {El-Nahal, F and Xu, T and AlQahtani, D and Leeson, M}, title = {A Bidirectional Wavelength Division Multiplexed (WDM) Free Space Optical Communication (FSO) System for Deployment in Data Center Networks (DCNs).}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {24}, pages = {}, doi = {10.3390/s22249703}, pmid = {36560073}, issn = {1424-8220}, abstract = {Data centers are crucial to the growth of cloud computing. Next-generation data center networks (DCNs) will rely heavily on optical technology. Here, we have investigated a bidirectional wavelength-division-multiplexed (WDM) free space optical communication (FSO) system for deployment in optical wireless DCNs. The system was evaluated for symmetric 10 Gbps 16-quadrature amplitude modulation (16-QAM) intensity-modulated orthogonal frequency-division multiplexing (OFDM) downstream signals and 10 Gbps on-off keying (OOK) upstream signals. The transmission of optical signals over an FSO link is demonstrated using a gamma-gamma channel model. According to the bit error rate (BER) results obtained for each WDM signal, the bidirectional WDM-FSO transmission could achieve 320 Gbps over a 1000 m free-space transmission length. The results show that the proposed FSO topology offers an excellent alternative to fiber-based optical interconnects in DCNs, allowing for high-data-rate bidirectional transmission.}, } @article {pmid36555731, year = {2022}, author = {Puch-Giner, I and Molina, A and Municoy, M and Pérez, C and Guallar, V}, title = {Recent PELE Developments and Applications in Drug Discovery Campaigns.}, journal = {International journal of molecular sciences}, volume = {23}, number = {24}, pages = {}, doi = {10.3390/ijms232416090}, pmid = {36555731}, issn = {1422-0067}, abstract = {Computer simulation techniques are gaining a central role in molecular pharmacology. Due to several factors, including the significant improvements of traditional molecular modelling, the irruption of machine learning methods, the massive data generation, or the unlimited computational resources through cloud computing, the future of pharmacology seems to go hand in hand with in silico predictions. In this review, we summarize our recent efforts in such a direction, centered on the unconventional Monte Carlo PELE software and on its coupling with machine learning techniques.
We also provide new data on combining two recent techniques: aquaPELE, capable of exhaustive water sampling, and fragPELE, for fragment growing.}, } @article {pmid36555493, year = {2022}, author = {Nelson, TM and Ghosh, S and Postler, TS}, title = {L-RAPiT: A Cloud-Based Computing Pipeline for the Analysis of Long-Read RNA Sequencing Data.}, journal = {International journal of molecular sciences}, volume = {23}, number = {24}, pages = {}, doi = {10.3390/ijms232415851}, pmid = {36555493}, issn = {1422-0067}, abstract = {Long-read sequencing (LRS) has been adopted to meet a wide variety of research needs, ranging from the construction of novel transcriptome annotations to the rapid identification of emerging virus variants. Amongst other advantages, LRS preserves more information about RNA at the transcript level than conventional high-throughput sequencing, including far more accurate and quantitative records of splicing patterns. New studies with LRS datasets are being published at an exponential rate, generating a vast reservoir of information that can be leveraged to address a host of different research questions. However, mining such publicly available data in a tailored fashion is currently not easy, as the available software tools typically require familiarity with the command-line interface, which constitutes a significant obstacle to many researchers. Additionally, different research groups utilize different software packages to perform LRS analysis, which often prevents a direct comparison of published results across different studies. To address these challenges, we have developed the Long-Read Analysis Pipeline for Transcriptomics (L-RAPiT), a user-friendly, free pipeline requiring no dedicated computational resources or bioinformatics expertise. L-RAPiT can be implemented directly through Google Colaboratory, a system based on the open-source Jupyter notebook environment, and allows for the direct analysis of transcriptomic reads from Oxford Nanopore and PacBio LRS machines. This new pipeline enables the rapid, convenient, and standardized analysis of publicly available or newly generated LRS datasets.}, } @article {pmid36554175, year = {2022}, author = {Liu, C and Jiao, J and Li, W and Wang, J and Zhang, J}, title = {Tr-Predictior: An Ensemble Transfer Learning Model for Small-Sample Cloud Workload Prediction.}, journal = {Entropy (Basel, Switzerland)}, volume = {24}, number = {12}, pages = {}, doi = {10.3390/e24121770}, pmid = {36554175}, issn = {1099-4300}, abstract = {Accurate workload prediction plays a key role in intelligent scheduling decisions on cloud platforms. There are massive amounts of short-workload sequences in the cloud platform, and the small amount of data and the presence of outliers make accurate workload sequence prediction a challenge. To address these issues, this paper proposes an ensemble learning method based on sample weight transfer and long short-term memory (LSTM), termed Tr-Predictor. Specifically, a selection method for similar sequences combining time warp edit distance (TWED) and transfer entropy (TE) is proposed to select a source-domain dataset with higher similarity to the target workload sequence. Then, we upgrade the base learner of the two-stage TrAdaBoost.R2 ensemble model to an LSTM and enhance the ability of the ensemble model to extract sequence features.
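For readers unfamiliar with the time warp edit distance (TWED) used above for source-domain selection, the following is a minimal plain-NumPy sketch of the standard TWED recurrence; the stiffness (nu) and deletion-penalty (lam) values are illustrative placeholders, not the paper's settings.

    # Time warp edit distance (TWED) in plain NumPy -- the similarity
    # measure named above for picking similar source-domain sequences.
    import numpy as np

    def twed(a, ta, b, tb, nu=0.001, lam=1.0):
        # Pad with a zero sample at time 0, per the standard formulation.
        a = np.concatenate(([0.0], a)); ta = np.concatenate(([0.0], ta))
        b = np.concatenate(([0.0], b)); tb = np.concatenate(([0.0], tb))
        n, m = len(a), len(b)
        D = np.full((n, m), np.inf)
        D[0, 0] = 0.0
        for i in range(1, n):
            for j in range(1, m):
                D[i, j] = min(
                    # delete in sequence A
                    D[i-1, j] + abs(a[i] - a[i-1]) + nu*(ta[i] - ta[i-1]) + lam,
                    # delete in sequence B
                    D[i, j-1] + abs(b[j] - b[j-1]) + nu*(tb[j] - tb[j-1]) + lam,
                    # match both samples
                    D[i-1, j-1] + abs(a[i] - b[j]) + abs(a[i-1] - b[j-1])
                    + nu*(abs(ta[i] - tb[j]) + abs(ta[i-1] - tb[j-1])),
                )
        return D[n-1, m-1]

    x = np.sin(np.linspace(0, 6, 50)); t = np.arange(50, dtype=float)
    y = np.sin(np.linspace(0, 6, 50) + 0.3)
    print(twed(x, t, y, t))   # smaller value = more similar workload shapes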
For weight adjustment, we adopt a two-stage strategy and select the best weight for the learner according to the sample error and model error. Finally, the above process determines the parameters of the target model and uses the target model to predict the short-task sequences. In the experimental validation, we arbitrarily select nine sets of short-workload data from the Google dataset and three sets of short-workload data from the Alibaba cluster to verify the prediction effectiveness of the proposed algorithm. The experimental results show that compared with the commonly used cloud workload prediction methods, Tr-Predictor has higher prediction accuracy on the small-sample workload. The prediction indicators of the ablation experiments show the performance gain of each part in the proposed method.}, } @article {pmid36550311, year = {2022}, author = {Pietris, J and Bacchi, S and Tan, Y and Kovoor, J and Gupta, A and Chan, W}, title = {Safety always: the challenges of cloud computing in medical practice and ophthalmology.}, journal = {Eye (London, England)}, volume = {}, number = {}, pages = {}, pmid = {36550311}, issn = {1476-5454}, } @article {pmid36547491, year = {2022}, author = {Martin, J and Cantero, D and González, M and Cabrera, A and Larrañaga, M and Maltezos, E and Lioupis, P and Kosyvas, D and Karagiannidis, L and Ouzounoglou, E and Amditis, A}, title = {Embedded Vision Intelligence for the Safety of Smart Cities.}, journal = {Journal of imaging}, volume = {8}, number = {12}, pages = {}, doi = {10.3390/jimaging8120326}, pmid = {36547491}, issn = {2313-433X}, abstract = {Advances in artificial intelligence (AI) and embedded systems have resulted in a recent increase in the use of image processing applications for smart cities' safety. This enables a cost-adequate scale of automated video surveillance, increasing the data available and reducing the need for human intervention. At the same time, although deep learning is a very intensive task in terms of computing resources, hardware and software improvements have emerged, allowing embedded systems to implement sophisticated machine learning algorithms at the edge. Additionally, new lightweight open-source middleware for constrained resource devices, such as EdgeX Foundry, have appeared to facilitate the collection and processing of data at sensor level, with communication capabilities to exchange data with a cloud enterprise application. The objective of this work is to show and describe the development of two Edge Smart Camera Systems for the safety of smart cities within the S4AllCities H2020 project. Hence, the work presents hardware and software modules developed within the project, including a custom hardware platform specifically developed for the deployment of deep learning models based on the I.MX8 Plus from NXP, which considerably reduces processing and inference times; a custom Video Analytics Edge Computing (VAEC) system deployed on a commercial NVIDIA Jetson TX2 platform, which provides high-level results on person detection processes; and an edge computing framework for the management of those two edge devices, namely the Distributed Edge Computing framework, DECIoT. To verify the utility and functionality of the systems, extended experiments were performed.
The results highlight their potential to provide enhanced situational awareness and demonstrate their suitability for edge machine vision applications for safety in smart cities.}, } @article {pmid36547481, year = {2022}, author = {Saad El Imanni, H and El Harti, A and Hssaisoune, M and Velastegui-Montoya, A and Elbouzidi, A and Addi, M and El Iysaouy, L and El Hachimi, J}, title = {Rapid and Automated Approach for Early Crop Mapping Using Sentinel-1 and Sentinel-2 on Google Earth Engine; A Case of a Highly Heterogeneous and Fragmented Agricultural Region.}, journal = {Journal of imaging}, volume = {8}, number = {12}, pages = {}, doi = {10.3390/jimaging8120316}, pmid = {36547481}, issn = {2313-433X}, abstract = {Accurate and rapid crop type mapping is critical for agricultural sustainability. The growing trend of cloud-based geospatial platforms provides rapid processing tools and cloud storage for remote sensing data. In particular, a variety of remote sensing applications have made use of publicly accessible data from the Sentinel missions of the European Space Agency (ESA). However, few studies have employed these data to evaluate the effectiveness of Sentinel-1 and Sentinel-2 spectral bands and Machine Learning (ML) techniques in challenging highly heterogeneous and fragmented agricultural landscapes using the Google Earth Engine (GEE) cloud computing platform. This work aims to map, accurately and early, the crop types in a highly heterogeneous and fragmented agricultural region of the Tadla Irrigated Perimeter (TIP) as a case study using the high spatiotemporal resolution of Sentinel-1, Sentinel-2, and a Random Forest (RF) classifier implemented on GEE. More specifically, five experiments were performed to assess the effects of optical band reflectance values, vegetation indices, and SAR backscattering coefficients on the accuracy of crop classification. Besides, two scenarios were used to assess the effect of monthly temporal windows on classification accuracy. The findings of this study show that the fusion of Sentinel-1 and Sentinel-2 data can accurately produce the early crop mapping of the studied area with an Overall Accuracy (OA) reaching 95.02%. The scenarios prove that the monthly time series perform better in terms of classification accuracy than single monthly window images. Red-edge and shortwave infrared bands can improve the accuracy of crop classification by 1.72% when compared to only using traditional bands (i.e., visible and near-infrared bands). The inclusion of two common vegetation indices (the Normalized Difference Vegetation Index (NDVI) and the Enhanced Vegetation Index (EVI)) and Sentinel-1 backscattering coefficients in the crop classification enhanced the overall classification accuracy by 0.02% and 2.94%, respectively, compared to using the Sentinel-2 reflectance bands alone.
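As a rough illustration of this kind of GEE workflow, the sketch below trains a Random Forest on a Sentinel-2 composite plus NDVI using the Earth Engine Python API; the region geometry, date range, band choices, and the labelled 'training_points' asset are hypothetical placeholders, not the study's actual configuration.

    # Sketch of a Random Forest crop classification with the GEE Python
    # API, in the spirit of the workflow above. The AOI and the labelled
    # 'training_points' FeatureCollection are placeholders.
    import ee
    ee.Initialize()

    region = ee.Geometry.Rectangle([-6.6, 32.2, -6.2, 32.6])   # hypothetical AOI
    s2 = (ee.ImageCollection('COPERNICUS/S2_SR')
          .filterBounds(region)
          .filterDate('2021-03-01', '2021-04-01')
          .median())

    ndvi = s2.normalizedDifference(['B8', 'B4']).rename('NDVI')
    stack = s2.select(['B2', 'B3', 'B4', 'B8', 'B11', 'B12']).addBands(ndvi)

    training_points = ee.FeatureCollection('users/example/tip_crop_labels')  # placeholder
    samples = stack.sampleRegions(collection=training_points,
                                  properties=['crop'], scale=10)

    clf = ee.Classifier.smileRandomForest(100).train(
        features=samples, classProperty='crop',
        inputProperties=stack.bandNames())
    classified = stack.classify(clf)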
The monthly windows analysis indicated that the improvement in the accuracy of crop classification is greatest when the March images are accessible, with an OA higher than 80%.}, } @article {pmid36544470, year = {2023}, author = {Bang, I and Khanh Nong, L and Young Park, J and Thi Le, H and Mok Lee, S and Kim, D}, title = {ChEAP: ChIP-exo analysis pipeline and the investigation of Escherichia coli RpoN protein-DNA interactions.}, journal = {Computational and structural biotechnology journal}, volume = {21}, number = {}, pages = {99-104}, pmid = {36544470}, issn = {2001-0370}, abstract = {Genome-scale studies of the bacterial regulatory network have been leveraged by declining sequencing cost and advances in ChIP (chromatin immunoprecipitation) methods. Among these, ChIP-exo has proven competent with its near-single base-pair resolution. While several algorithms and programs have been developed for different analytical steps in ChIP-exo data processing, there is a lack of effort in incorporating them into a convenient bioinformatics pipeline that is intuitive and publicly available. In this paper, we developed the ChIP-exo Analysis Pipeline (ChEAP), which executes the one-step process, starting from trimming and aligning raw sequencing reads to visualization of ChIP-exo results. The pipeline was implemented on the interactive web-based Python development environment, Jupyter Notebook, which is compatible with the Google Colab cloud platform to facilitate the sharing of code and collaboration among researchers. Additionally, users could exploit the free GPU and CPU resources allocated by Colab to carry out computing tasks regardless of the performance of their local machines. The utility of ChEAP was demonstrated with the ChIP-exo datasets of the RpoN sigma factor in E. coli K-12 MG1655. For the analysis of two raw data files, the ChEAP runtime was 2 min and 25 s. Subsequent analyses identified 113 RpoN binding sites showing a conserved RpoN binding pattern in the motif search. ChEAP application in ChIP-exo data analysis is extensive and flexible for the parallel processing of data from various organisms.}, } @article {pmid36541007, year = {2023}, author = {Holko, M and Weber, N and Lunt, C and Brenner, SE}, title = {Biomedical research in the Cloud: considerations for researchers and organizations moving to (or adding) cloud computing resources.}, journal = {Pacific Symposium on Biocomputing. Pacific Symposium on Biocomputing}, volume = {28}, number = {}, pages = {536-540}, pmid = {36541007}, issn = {2335-6936}, abstract = {As biomedical research data grow, researchers need reliable and scalable solutions for storage and compute. There is also a need to build systems that encourage and support collaboration and data sharing, to result in greater reproducibility. This has led many researchers and organizations to use cloud computing [1]. The cloud not only enables scalable, on-demand resources for storage and compute, but also collaboration and continuity during virtual work, and can provide superior security and compliance features. Moving to or adding cloud resources, however, is not trivial or without cost, and may not be the best choice in every scenario.
The goal of this workshop is to explore the benefits of using the cloud in biomedical and computational research, and considerations (pros and cons) for a range of scenarios including individual researchers, collaborative research teams, consortia research programs, and large biomedical research agencies / organizations.}, } @article {pmid36537483, year = {2022}, author = {Woods, DL and Hall, K and Williams, G and Baldo, J and Johnson, DK and Chok, JM and Sucich, G and Pebler, P and Blank, M and Geraci, K and Herron, T}, title = {Consensus automatic speech recognition (CASR) in the California Cognitive Assessment Battery (CCAB).}, journal = {Alzheimer's & dementia : the journal of the Alzheimer's Association}, volume = {18 Suppl 2}, number = {}, pages = {e067887}, doi = {10.1002/alz.067887}, pmid = {36537483}, issn = {1552-5279}, abstract = {BACKGROUND: Recent reports have investigated the use of automatic speech recognition (ASR) to analyze and score verbal responses in cognitive tests. ASR scoring is objective, permits the efficient computerized administration of verbal tests, and generates timestamps that enable the detailed temporal analysis of responses. However, ASR transcription accuracy varies by engine, task, and participant, and ASR can incorrectly score responses from participants with atypical speech patterns. Here we describe the speech-transcription pipeline of the California Cognitive Assessment Battery (CCAB), which incorporates consensus ASR (CASR) to produce more accurate transcripts than possible with any single ASR engine. We also developed a Transcript Review Tool (TRT) which facilitates the manual correction of mis-transcribed words in problem subjects.

METHOD: Figure 1 shows the CCAB speech transcription pipeline. Real-time ASR transcriptions are obtained along with the transcriptions of the digital recordings of responses using six cloud-based ASR engines (e.g., Google). Individual transcripts are then combined to produce a "consensus" transcript, and a transcription confidence measure based primarily on the agreement between ASR engines (Figure 2). If needed, "consensus" transcripts can be manually corrected using the Transcript Review Tool, which enables the review of all words or just those words below a predefined CASR confidence threshold (Figure 3).
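The consensus idea can be illustrated with a toy word-level majority vote across engine outputs, with per-word confidence equal to the agreement fraction; real CASR alignment between transcripts of differing lengths is considerably more involved.

    # Toy word-level consensus across ASR engines: majority voting with a
    # per-word confidence equal to the agreement fraction. This only
    # illustrates the idea, not the CCAB implementation.
    from collections import Counter
    from itertools import zip_longest

    def consensus(transcripts):
        rows = zip_longest(*(t.lower().split() for t in transcripts), fillvalue="")
        words, confs = [], []
        for row in rows:
            word, votes = Counter(row).most_common(1)[0]
            words.append(word)
            confs.append(votes / len(transcripts))
        return " ".join(words), confs

    engines = ["the quick brown fox", "the quick brown box", "a quick brown fox"]
    text, conf = consensus(engines)
    print(text)   # 'the quick brown fox'
    print(conf)   # approximately [0.67, 1.0, 1.0, 0.67]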

RESULT: ASR transcriptions were obtained from 442 healthy adults (mean age = 65.1 ±14.4) who each underwent three days of cognitive testing that included 25 verbal tests. In all, approximately 276 hours of speech were transcribed. Preliminary analyses show that CASR transcription accuracy surpassed 99% for tests with limited response sets (e.g., digit span, verbal list learning, face-name binding, etc.) and exceeded 95% for discursive speech tests (e.g., picture description and logical memory).

CONCLUSION: CASR transcription is more accurate than that of any single ASR engine. When combined with the TRT, "consensus" ASR can produce error-free, timestamped transcripts that enable the detailed analysis of verbal responses from older individuals at risk of cognitive decline.}, } @article {pmid36537002, year = {2022}, author = {Crowley, MA and Stockdale, CA and Johnston, JM and Wulder, MA and Liu, T and McCarty, JL and Rieb, JT and Cardille, JA and White, JC}, title = {Towards a whole-system framework for wildfire monitoring using Earth observations.}, journal = {Global change biology}, volume = {}, number = {}, pages = {}, doi = {10.1111/gcb.16567}, pmid = {36537002}, issn = {1365-2486}, abstract = {Fire seasons have become increasingly variable and extreme due to changing climatological, ecological, and social conditions. Earth observation data are critical for monitoring fires and their impacts. Herein, we present a whole-systems framework for identifying and synthesizing fire monitoring objectives and data needs throughout the life cycle of a fire event. The four stages of fire monitoring informed using Earth observation data include: 1) pre-fire vegetation inventories, 2) active-fire monitoring, 3) post-fire assessment, and 4) multi-scale synthesis. We identify the challenges and opportunities associated with current approaches to fire monitoring, highlighting four case studies from North American boreal, montane, and grassland ecosystems. While the case studies are localized to these ecosystems and regional contexts, they provide insights for others experiencing similar monitoring challenges worldwide. The field of remote sensing is experiencing a rapid proliferation of new data sources, providing observations that can inform all aspects of our fire monitoring framework; however, significant challenges for meeting fire monitoring objectives remain. We identify future opportunities for data sharing and rapid co-development of information products using cloud computing that benefit from open-access Earth observation and other geospatial data layers.}, } @article {pmid36536803, year = {2022}, author = {Bao, G and Guo, P}, title = {Federated learning in cloud-edge collaborative architecture: key technologies, applications and challenges.}, journal = {Journal of cloud computing (Heidelberg, Germany)}, volume = {11}, number = {1}, pages = {94}, pmid = {36536803}, issn = {2192-113X}, abstract = {In recent years, with the rapid growth of edge data, the novel cloud-edge collaborative architecture has been proposed to compensate for the lack of data processing power of traditional cloud computing. On the other hand, on account of the public's increasing demand for data privacy, federated learning has been proposed to compensate for the lack of security of traditional centralized machine learning. Deploying federated learning in cloud-edge collaborative architecture is widely considered to be a promising cyber infrastructure in the future. Although cloud-edge collaboration and federated learning are each hot research topics at present, the discussion of deploying federated learning in cloud-edge collaborative architecture is still in its infancy and little research has been conducted.
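As background for the aggregation step that federated learning contributes to such architectures, here is a minimal NumPy sketch of federated averaging (FedAvg), the canonical algorithm in which edge clients train locally and a cloud aggregator computes a sample-size-weighted parameter average; the linear model and data below are toy assumptions.

    # Minimal federated averaging (FedAvg) round in NumPy: edge clients
    # take a local gradient step on private data, the cloud aggregates a
    # sample-size-weighted average of the resulting parameters.
    import numpy as np

    def local_step(w, X, y, lr=0.1):
        grad = 2 * X.T @ (X @ w - y) / len(y)   # MSE gradient
        return w - lr * grad

    def fedavg(w, clients):
        n_total = sum(len(y) for _, y in clients)
        updates = [local_step(w.copy(), X, y) * (len(y) / n_total)
                   for X, y in clients]
        return np.sum(updates, axis=0)          # weighted parameter average

    rng = np.random.default_rng(1)
    w_true = np.array([2.0, -1.0])
    clients = []
    for _ in range(3):                          # three edge nodes, private data
        X = rng.normal(size=(40, 2))
        clients.append((X, X @ w_true + 0.01 * rng.normal(size=40)))

    w = np.zeros(2)
    for _ in range(50):                         # communication rounds
        w = fedavg(w, clients)
    print(w)                                    # approaches w_true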
This article aims to fill the gap by providing a detailed description of the critical technologies, challenges, and applications of deploying federated learning in cloud-edge collaborative architecture, and providing guidance on future research directions.}, } @article {pmid36534206, year = {2022}, author = {Ruifeng, L and Kai, Y and Xing, L and Xiaoli, L and Xitao, Z and Xiaocheng, G and Juan, F and Shixin, C}, title = {Extraction and spatiotemporal changes of open-pit mines during 1985-2020 using Google Earth Engine: A case study of Qingzhou City, Shandong Province, China.}, journal = {Environmental monitoring and assessment}, volume = {195}, number = {1}, pages = {209}, pmid = {36534206}, issn = {1573-2959}, abstract = {The global use of mineral resources has increased exponentially for decades and will continue to grow for the foreseeable future, resulting in increasingly negative impacts on the surrounding environment. However, to date, there is a lack of historical and current spatial extent datasets with high accuracy for mining areas in many parts of the world, which has hindered a more comprehensive understanding of the environmental impacts of mining. Using the Google Earth Engine cloud platform and the Landsat normalized difference vegetation index (NDVI) datasets, the spatial extent data of open-pit mining areas for eight years (1985, 1990, 1995, 2000, 2005, 2010, 2015, and 2020) were extracted by the Otsu algorithm. The limestone mining areas in Qingzhou, Shandong Province, China, were selected as a case study. The annual maximum NDVI was first derived from the Landsat NDVI datasets, and then the Otsu algorithm was used to segment the annual maximum NDVI images to obtain the extent of the mining areas. Finally, the spatiotemporal characteristics of the mining areas in the study region were analyzed with reference to previous survey data. The results showed that the mining areas were primarily located in Shaozhuang Town, Wangfu Street and the northern part of Miaozi Town, and the proportion of mining areas within these three administrative areas has increased annually from 88% in 1985 to more than 98% in 2010. Moreover, the open-pit mining areas in Qingzhou gradually expanded from a scattered, point-like distribution to a large, contiguous distribution. From 1985 to 2020, the open-pit mining area expanded to more than 10 times its original size at a rate of 0.5 km²/year. In 2015, this area reached its maximum size of 19.7 km² and slightly decreased in 2020. Furthermore, the expansion of the mining areas in Qingzhou went through three stages: a slow growth period before 1995, a rapid expansion period from 1995 to 2005, and a shutdown and remediation period after 2005. A quantitative accuracy assessment was performed by calculating the Intersection over Union (IoU) of the extraction results and the visual interpretation results from Gaofen-2 images with 1-m spatial resolution. The IoU reached 72%. The results showed that it was feasible to threshold the Landsat annual maximum NDVI data by the Otsu algorithm to extract the annual spatial extent of the open-pit mining areas.
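The Otsu step itself is compact enough to sketch: the function below computes the between-class-variance-maximizing threshold over a synthetic annual-maximum NDVI distribution and flags low-NDVI pixels as candidate mining areas; the NDVI values are simulated stand-ins, not Landsat data.

    # Plain-NumPy Otsu thresholding of an annual-maximum NDVI image, in
    # the spirit of the extraction step above: pixels below the threshold
    # (sparse vegetation) become candidate open-pit mining areas.
    import numpy as np

    def otsu_threshold(values, bins=256):
        hist, edges = np.histogram(values, bins=bins)
        p = hist / hist.sum()
        centers = (edges[:-1] + edges[1:]) / 2
        w0 = np.cumsum(p)                        # class-0 probability
        w1 = 1 - w0
        mu0 = np.cumsum(p * centers) / np.where(w0 > 0, w0, 1)
        mu1 = (np.sum(p * centers) - np.cumsum(p * centers)) / np.where(w1 > 0, w1, 1)
        between = w0 * w1 * (mu0 - mu1) ** 2     # between-class variance
        return centers[np.argmax(between)]

    rng = np.random.default_rng(0)
    ndvi = np.concatenate([rng.normal(0.1, 0.05, 2000),    # bare mine surface
                           rng.normal(0.6, 0.10, 8000)])   # vegetated land
    t = otsu_threshold(ndvi)
    mine_mask = ndvi < t
    print(round(t, 3), mine_mask.mean())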
Our method will be easily transferable to other regions worldwide, enabling the monitoring of mine environments.}, } @article {pmid36530862, year = {2022}, author = {Tsai, CW and Lee, LY and Cheng, YP and Lin, CH and Hung, ML and Lin, JW}, title = {Integrating online meta-cognitive learning strategy and team regulation to develop students' programming skills, academic motivation, and refusal self-efficacy of Internet use in a cloud classroom.}, journal = {Universal access in the information society}, volume = {}, number = {}, pages = {1-16}, pmid = {36530862}, issn = {1615-5297}, abstract = {With the development of technology and demand for online courses, a considerable number of online, blended, and flipped courses have been designed and offered. However, in technology-enhanced learning environments, which are also full of social networking websites, shopping websites, and free online games, it is challenging to focus students' attention and help them achieve satisfactory learning performance. In addition, the instruction of programming courses constantly challenges both teachers and students, particularly in online learning environments. To overcome and solve these problems and to facilitate students' learning, the researchers in this study integrated two teaching approaches, using meta-cognitive learning strategy (MCLS) and team regulation (TR), to develop students' regular learning habits and further contribute to their programming skills, academic motivation, and refusal self-efficacy of Internet use, in a cloud classroom. In this research, a quasi-experiment was conducted to investigate the effects of MCLS and TR adopting the experimental design of a 2 (MCLS vs. non-MCLS) × 2 (TR vs. non-TR) factorial pre-test/post-test. In this research, the participants consisted of four classes of university students from non-information or computer departments enrolled in programming design, a required course. The experimental groups comprised three of the classes, labelled as G1, G2, and G3. G1 concurrently received both the online MCLS and TR intervention, while G2 only received the online MCLS intervention, and G3 only received the online TR intervention. Serving as the control group, the fourth class (G4) received traditional teaching. This study investigated the effects of MCLS, TR, and their combination, on improving students' programming skills, academic motivation, and refusal self-efficacy of Internet use in an online computing course. According to the results, students who received online TR significantly enhanced their programming design skills and their refusal self-efficacy of Internet use in a cloud classroom. However, the expected effects of MCLS on developing students' programming skills, academic motivation, and refusal self-efficacy of Internet use were not found in this study.
The teaching strategy of integrating MCLS and TR in an online programming course in this study can serve as a reference for educators when conducting online, blended, or flipped courses during the COVID-19 pandemic.}, } @article {pmid36523099, year = {2022}, author = {Wang, S and Chen, B and Liang, R and Liu, L and Chen, H and Gao, M and Wu, J and Ju, W and Ho, PH}, title = {Energy-efficient workload allocation in edge-cloud fiber-wireless networks.}, journal = {Optics express}, volume = {30}, number = {24}, pages = {44186-44200}, doi = {10.1364/OE.472978}, pmid = {36523099}, issn = {1094-4087}, abstract = {To realize green computing in edge-cloud fiber-wireless networks, the cooperation between the edge servers and the cloud servers is particularly important to reduce the network energy consumption. Therefore, this paper proposes an energy-efficient workload allocation (EEWA) scheme to improve the energy efficiency by using the architecture of edge-cloud fiber-wireless networks. The feasibility of the proposed EEWA scheme was verified on our SDN testbed. We also ran simulations to obtain the optimal results for a given set of task requests. Simulation results show that our proposed EEWA scheme greatly reduces the blocking probability and the average energy consumption of task requests in edge-cloud fiber-wireless networks.}, } @article {pmid36517473, year = {2022}, author = {Ogasawara, O}, title = {Building cloud computing environments for genome analysis in Japan.}, journal = {Human genome variation}, volume = {9}, number = {1}, pages = {46}, pmid = {36517473}, issn = {2054-345X}, abstract = {This review article describes the current status of data archiving and computational infrastructure in the field of genomic medicine, focusing primarily on the situation in Japan. I begin by introducing the status of supercomputer operations in Japan, where a high-performance computing infrastructure (HPCI) is operated to meet the diverse computational needs of science in general. Since this HPCI consists of supercomputers of various architectures located across the nation connected via a high-speed network, including supercomputers specialized in genome science, the status of its response to the explosive increase in genomic data, including the International Nucleotide Sequence Database Collaboration (INSDC) data archive, is explored. Separately, since it is clear that the use of commercial cloud computing environments needs to be promoted, both in light of the rapid increase in computing demands and to support international data sharing and international data analysis projects, I explain how the Japanese government has established a series of guidelines for the use of cloud computing based on its cybersecurity strategy and has begun to build a government cloud for government agencies. I will also carefully consider several other issues of user concern.
Finally, I will show how Japan's major cloud computing infrastructure is currently evolving toward a multicloud and hybrid cloud configuration.}, } @article {pmid36516515, year = {2022}, author = {Zhou, Y and Luo, B and Sang, J and Li, C and Zhu, M and Zhu, Z and Dai, J and Wang, J and Chen, H and Zhai, S and Lu, L and Liu, H and Yu, G and Ye, J and Zhang, Z and Huan, J}, title = {A cloud-based consultation and collaboration system for radiotherapy: Remote decision support services for community radiotherapy centers.}, journal = {Computer methods and programs in biomedicine}, volume = {229}, number = {}, pages = {107270}, doi = {10.1016/j.cmpb.2022.107270}, pmid = {36516515}, issn = {1872-7565}, abstract = {PURPOSE: This study aimed to establish a cloud-based radiotherapy consultation and collaboration system and then investigate the practicability of remote decision support for community radiotherapy centers using the system.

METHODS AND MATERIALS: A cloud-based consultation and collaboration system for radiotherapy, OncoEvidance®, was developed to provide remote services of LINAC modeling, simulation CT data import/export, target volume and organ-at-risk delineation, prescription, and treatment planning. The system was deployed on a hybrid cloud. A federation of public nodes, each corresponding to a medical institution, is managed by a central node where a group of consultants have registered. Users can access the system over the network using computing devices. The system has been tested at three community radiotherapy centers. One accelerator was modeled. Twelve consultants participated in the remote radiotherapy decision support, and 77 radiation treatment plans were evaluated remotely.

RESULTS: All the passing rates of per-beam dose verification are > 94% and all the passing rates of composite beam dose verification are > 99%. The average downloading time for one set of simulation CT data for one patient from the Internet was within 1 min under the cloud download bandwidth of 8 Mbps and local network bandwidth of 100 Mbps. The average response time for one consultant to contour target volumes and make a prescription was about 24 h, and that to design and optimize an IMRT treatment plan was about 36 h. 100% of the remote plans passed the dosimetric criteria and could be imported into the local TPS for further verification.

CONCLUSION: The cloud-based consultation and collaboration system saved travel time for consultants and provided high-quality radiotherapy to patients in community centers. Under-staffed community radiotherapy centers could benefit from the remote system through lower costs and better treatment quality control.}, } @article {pmid36515465, year = {2022}, author = {Wiewiórka, M and Szmurło, A and Stankiewicz, P and Gambin, T}, title = {Cloud-native distributed genomic pileup operations.}, journal = {Bioinformatics (Oxford, England)}, volume = {}, number = {}, pages = {}, doi = {10.1093/bioinformatics/btac804}, pmid = {36515465}, issn = {1367-4811}, abstract = {MOTIVATION: Pileup analysis is a building block of many bioinformatics pipelines, including variant calling and genotyping. This step tends to become a bottleneck of the entire assay since the straightforward pileup implementations involve processing of all base calls from all alignments sequentially. On the other hand, a distributed version of the algorithm faces the intrinsic challenge of splitting reads-oriented file formats into self-contained partitions to avoid costly data exchange between computational nodes.
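To fix ideas, a single-node pileup reduces to counting base calls per reference position; the toy sketch below (indel-free reads with hypothetical coordinates) shows the core counting step that distributed implementations such as the one described here parallelize across self-contained partitions.

    # A toy single-node pileup: count base calls per reference position
    # from simple (start, sequence) reads. Real pipelines must also handle
    # CIGAR operations, indels, and base qualities.
    from collections import defaultdict, Counter

    reads = [
        (100, "ACGTAC"),   # (0-based start on reference, read bases)
        (102, "GTACGT"),
        (104, "ACTTGA"),
    ]

    pileup = defaultdict(Counter)
    for start, seq in reads:
        for offset, base in enumerate(seq):
            pileup[start + offset][base] += 1

    for pos in sorted(pileup):
        counts = pileup[pos]
        depth = sum(counts.values())
        print(pos, depth, dict(counts))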

RESULTS: Here, we present a scalable, distributed, and efficient implementation of a pileup algorithm that is suitable for deployment in cloud computing environments. In particular, we implemented: (i) our custom data-partitioning algorithm optimized to work with the alignment reads, (ii) a novel and unique approach to process alignment events from sequencing reads using the MD tags, (iii) the source code micro-optimizations for recurrent operations, and (iv) a modular structure of the algorithm. We have proven that our novel approach consistently and significantly outperforms other state-of-the-art distributed tools in terms of execution time (up to 6.5x faster) and memory usage (up to 2x less), resulting in a substantial cloud cost reduction. SeQuiLa is a cloud-native solution that can be easily deployed using any managed Kubernetes and Hadoop services available in public clouds, such as Microsoft Azure, Google Cloud Platform, or Amazon Web Services. Together with the already implemented distributed range joins and coverage calculations, our package provides end users with a unified SQL interface for conveniently analyzing population-scale genomic data in an interactive way.

AVAILABILITY: https://biodatageeks.github.io/sequila/.

SUPPLEMENTARY INFORMATION: Supplementary data are available at Bioinformatics online.}, } @article {pmid36512073, year = {2022}, author = {Paul, A and K S, V and Sood, A and Bhaumik, S and Singh, KA and Sethupathi, S and Chanda, A}, title = {Suspended Particulate Matter Analysis of Pre and During Covid Lockdown Using Google Earth Engine Cloud Computing: A Case Study of Ukai Reservoir.}, journal = {Bulletin of environmental contamination and toxicology}, volume = {110}, number = {1}, pages = {7}, pmid = {36512073}, issn = {1432-0800}, mesh = {Humans ; *Particulate Matter/analysis ; Cloud Computing ; Search Engine ; *COVID-19 ; Communicable Disease Control ; }, abstract = {The presence of suspended particulate matter (SPM) in a waterbody or river can be caused by multiple factors, such as pollutant discharge from poorly maintained sewage systems, siltation, sedimentation, floods, and even bacteria. In this study, remote sensing techniques were used to understand the effects of the pandemic-induced lockdown on the SPM concentration in the lower Tapi reservoir, or Ukai reservoir. The estimation was done using Landsat-8 OLI (Operational Land Imager) data, with a 12-bit radiometric resolution and a spatial resolution of 30 m. The Google Earth Engine (GEE) cloud computing platform was used in this study to generate the products. GEE is a semi-automated workflow system using a robust approach designed for scientific analysis and visualization of geospatial datasets. An algorithm was deployed, and a time-series (2013-2020) analysis was done for the study area. It was found that the mean SPM value in the Tapi River during 2020 was the lowest of the previous seven years for the same period.}, } @article {pmid36508783, year = {2022}, author = {Xu, X and Li, L and Zhou, H and Fan, M and Wang, H and Wang, L and Hu, Q and Cai, Q and Zhu, Y and Ji, S}, title = {MRTCM: A comprehensive dataset for probabilistic risk assessment of metals and metalloids in traditional Chinese medicine.}, journal = {Ecotoxicology and environmental safety}, volume = {249}, number = {}, pages = {114395}, doi = {10.1016/j.ecoenv.2022.114395}, pmid = {36508783}, issn = {1090-2414}, abstract = {Traditional Chinese medicine (TCM) is still considered a global complementary or alternative medical system, but exogenous hazardous contaminants remain in TCM even after decocting. Besides, it is time-consuming to conduct a risk assessment of trace elements in TCMs with a non-automatic approach due to the wide variety of TCMs. Here, we present MRTCM, a cloud-computing infrastructure for automating the probabilistic risk assessment of metals and metalloids in TCM. MRTCM includes a consumption database and a pollutant database involving forty million rows of consumption data and concentrations of fourteen types of potentially toxic elements in TCMs. The algorithm of probabilistic risk assessment was also packaged in MRTCM to assess the risks of eight elements with Monte Carlo simulation. The results demonstrated that 96.64% of animal medicine consumers and 99.46% of herbal medicine consumers had no non-carcinogenic risk (hazard indices (HI) < 1.0). After twenty years of exposure, less than 1% of the total carcinogenic risk (CRt) was > 10⁻⁴ for TCM consumers, indicating that they are at potential risk for carcinogenicity. Sensitivity analysis revealed that annual consumption and concentration were the main variables affecting the assessment results.
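The probabilistic machinery behind such hazard indices can be sketched in a few lines of NumPy: a Monte Carlo hazard index sums, over elements, the ratio of simulated dose to a reference dose; all distributions, the body weight, and the reference doses below are illustrative assumptions, not MRTCM's values.

    # Monte Carlo sketch of a non-carcinogenic hazard index (HI) of the
    # kind computed above: HI = sum over elements of dose / reference dose.
    # All parameter values here are illustrative placeholders.
    import numpy as np

    rng = np.random.default_rng(42)
    n = 100_000
    bw = 60.0                                      # body weight, kg (assumed)
    rfd = {"As": 3e-4, "Hg": 3e-4, "Mn": 1.4e-1}   # mg/kg/day, illustrative

    # Lognormal element concentrations (mg/g) and daily intake (g/day).
    conc = {el: rng.lognormal(mean=-9.0, sigma=0.8, size=n) for el in rfd}
    intake_g_day = rng.lognormal(mean=1.0, sigma=0.5, size=n)

    hi = sum(conc[el] * intake_g_day / bw / rfd[el] for el in rfd)
    print("P(HI > 1) =", (hi > 1).mean())
    print("95th percentile HI =", np.quantile(hi, 0.95))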
Ultimately, a priority management list of TCMs was also generated, indicating that more attention should be paid to the non-carcinogenic risks of As, Mn, and Hg and the carcinogenic risks of As and Cr in Pheretima and Cr in Arcae Conch. In general, MRTCM could significantly enhance the efficiency of risk assessment in TCM and provide reasonable guidance for policymakers to optimize risk management.}, } @article {pmid36506615, year = {2022}, author = {Zahid, MA and Shafiq, B and Shamail, S and Afzal, A and Vaidya, J}, title = {BP-DEBUG: A Fault Debugging and Resolution Tool for Business Processes.}, journal = {Proceedings. International Conference on Distributed Computing Systems}, volume = {2022}, number = {}, pages = {1306-1309}, doi = {10.1109/icdcs54860.2022.00143}, pmid = {36506615}, issn = {2575-8411}, abstract = {Cloud computing and the Internet-ware software paradigm have enabled the rapid development of distributed business process (BP) applications. Several tools are available to facilitate automated/semi-automated development and deployment of such distributed BPs by orchestrating relevant service components in a plug-and-play fashion. However, the BPs developed using such tools are not guaranteed to be fault-free. In this demonstration, we present a tool called BP-DEBUG for debugging and automated repair of faulty BPs. BP-DEBUG implements our Collaborative Fault Resolution (CFR) approach, which utilizes the knowledge of existing BPs with a similar set of web services for fault detection and resolution in a given user BP. Essentially, CFR attempts to determine any semantic and structural differences between a faulty BP and related BPs and computes a minimum set of transformations which can be used to repair the faulty BP. Demo url: https://youtu.be/mf49oSekLOA.}, } @article {pmid36506593, year = {2022}, author = {Silversmith, W and Zlateski, A and Bae, JA and Tartavull, I and Kemnitz, N and Wu, J and Seung, HS}, title = {Igneous: Distributed dense 3D segmentation meshing, neuron skeletonization, and hierarchical downsampling.}, journal = {Frontiers in neural circuits}, volume = {16}, number = {}, pages = {977700}, pmid = {36506593}, issn = {1662-5110}, abstract = {Three-dimensional electron microscopy images of brain tissue and their dense segmentations are now petascale and growing. These volumes require the mass production of dense segmentation-derived neuron skeletons, multi-resolution meshes, image hierarchies (for both modalities) for visualization and analysis, and tools to manage the large amount of data. However, open tools for large-scale meshing, skeletonization, and data management have been missing. Igneous is a Python-based distributed computing framework that enables economical meshing, skeletonization, image hierarchy creation, and data management using cloud or cluster computing, and it has been proven to scale horizontally. We sketch Igneous's computing framework, show how to use it, and characterize its performance and data storage.}, } @article {pmid36504549, year = {2021}, author = {Hanke, M and Pestilli, F and Wagner, AS and Markiewicz, CJ and Poline, JB and Halchenko, YO}, title = {In defense of decentralized research data management.}, journal = {Neuroforum}, volume = {27}, number = {1}, pages = {17-25}, pmid = {36504549}, issn = {2363-7013}, abstract = {Decentralized research data management (dRDM) systems handle digital research objects across participating nodes without critically relying on central services.
We present four perspectives in defense of dRDM, illustrating that, in contrast to centralized or federated research data management solutions, a dRDM system based on heterogeneous but interoperable components can offer a sustainable, resilient, inclusive, and adaptive infrastructure for scientific stakeholders: an individual scientist or laboratory, a research institute, a domain data archive or cloud computing platform, and a collaborative multisite consortium. All perspectives share the use of a common, self-contained, portable data structure as an abstraction from current technology and service choices. In conjunction, the four perspectives review how varying requirements of independent scientific stakeholders can be addressed by a scalable, uniform dRDM solution and present a working system as an exemplary implementation.}, } @article {pmid36502208, year = {2022}, author = {Buriboev, A and Muminov, A}, title = {Computer State Evaluation Using Adaptive Neuro-Fuzzy Inference Systems.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {23}, pages = {}, doi = {10.3390/s22239502}, pmid = {36502208}, issn = {1424-8220}, abstract = {Several crucial system design and deployment decisions, including workload management, sizing, capacity planning, and dynamic rule generation in dynamic systems such as computers, depend on predictive analysis of resource consumption. An analysis of the computer components' utilizations and their workloads is the best way to assess the performance of the computer's state. In particular, analyzing the individual or combined influence of components on another component gives more reliable information about the state of computer systems. Many evaluation techniques have been proposed by researchers, but the bulk of them rely on complicated metrics and parameters, such as utilization, time, throughput, latency, delay, speed, frequency, and percentages, which are difficult to understand and use in the assessment process. Accordingly, we propose a simplified evaluation method using components' utilization on a percentage scale and its linguistic values. The use of the adaptive neuro-fuzzy inference system (ANFIS) model and fuzzy set theory offers excellent prospects for realizing usage impact analyses. The purpose of the study is to examine the usage impact of memory, cache, storage, and bus on CPU performance using Sugeno-type and Mamdani-type ANFIS models to determine the state of the computer system. The suggested method is founded on monitoring the behavior of computer components. The developed method can be applied to all kinds of computing systems, such as personal computers, mainframes, and supercomputers, since the inference engine of the proposed ANFIS model requires only the behavior data of the computer's own components, and the number of inputs can be enriched according to the type of computer; for instance, in the case of cloud computers, the number of clients and the network quality can be used as additional input parameters.
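A minimal sketch may make the linguistic-value idea concrete: mapping a component's utilization percentage onto linguistic terms with triangular membership functions. This is a generic fuzzy-set illustration, not the authors' ANFIS models, and the breakpoints below are assumptions.

    # Generic fuzzy linguistic mapping for component utilization (assumed
    # breakpoints; not the paper's Sugeno/Mamdani ANFIS models).
    def tri(x, a, b, c):
        """Triangular membership: rises from a to b, falls from b to c."""
        if x <= a or x >= c:
            return 0.0
        return (x - a) / (b - a) if x <= b else (c - x) / (c - b)

    def linguistic(utilization):
        """Membership degrees of a utilization (0-100%) in each term."""
        return {
            "low":    tri(utilization, -1, 0, 50),
            "medium": tri(utilization, 25, 50, 75),
            "high":   tri(utilization, 50, 100, 101),
        }

    for component, u in {"cpu": 82, "memory": 55, "bus": 20}.items():
        terms = linguistic(u)
        label = max(terms, key=terms.get)   # strongest linguistic value
        print(f"{component}: {u}% -> {label} ({terms})")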
The models present linguistic and quantitative results, which make it convenient to understand performance issues, identify specific bottlenecks, and determine the relationships between components.}, } @article {pmid36502177, year = {2022}, author = {Mei, P and Karimi, HR and Chen, F and Yang, S and Huang, C and Qiu, S}, title = {A Learning-Based Vehicle-Cloud Collaboration Approach for Joint Estimation of State-of-Energy and State-of-Health.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {23}, pages = {}, doi = {10.3390/s22239474}, pmid = {36502177}, issn = {1424-8220}, abstract = {The state-of-energy (SOE) and state-of-health (SOH) are two crucial indicators in battery management systems, whose accurate estimation is challenged by the complexity of electric vehicles (EVs) and the changeable external environment. Although machine learning algorithms can significantly improve the accuracy of battery estimation, they cannot be performed on the vehicle control unit, as they require a large amount of data and computing power. This paper proposes a joint SOE and SOH prediction algorithm, which combines long short-term memory (LSTM), Bi-directional LSTM (Bi-LSTM), and convolutional neural networks (CNNs) for EVs based on vehicle-cloud collaboration. Firstly, the indicator of battery performance degradation is extracted for SOH prediction according to the historical data, and the Bayesian optimization approach is applied to the SOH prediction combined with Bi-LSTM. Then, the CNN-LSTM is implemented to provide direct and nonlinear mapping models for SOE. These direct mapping models avoid parameter identification and updating and are applicable in cases with complex operating conditions. Finally, the SOH correction in SOE estimation achieves the joint estimation with different time scales. With the validation of the National Aeronautics and Space Administration battery data set, as well as the established battery platform, the error of the proposed method is kept within 3%. The proposed vehicle-cloud approach performs high-precision joint estimation of battery SOE and SOH. It can not only use the battery historical data of the cloud platform to predict the SOH but also correct the SOE according to the predicted value of the SOH. The feasibility of vehicle-cloud collaboration is promising in future battery management systems.}, } @article {pmid36502107, year = {2022}, author = {Jing, X and Tian, X and Du, C}, title = {LPAI-A Complete AIoT Framework Based on LPWAN Applicable to Acoustic Scene Classification Scenarios.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {23}, pages = {}, doi = {10.3390/s22239404}, pmid = {36502107}, issn = {1424-8220}, abstract = {Deploying artificial intelligence on edge nodes of Low-Power Wide Area Networks (LPWAN) can significantly reduce network transmission volumes, event response latency, and overall network power consumption. However, the edge nodes in LPWAN have limited computing power and storage space, and researchers have found it challenging to improve the recognition capability of the nodes using sensor data from the environment. In particular, the domain-shift problem in LPWAN is challenging to overcome. In this paper, a complete AIoT system framework referred to as LPAI is presented. It is the first generic framework for implementing AIoT technology based on LPWAN applicable to acoustic scene classification scenarios.
LPAI overcomes the domain-shift problem, which enables resource-constrained edge nodes to continuously improve their performance using real data and become more adaptive to the environment. For efficient use of limited resources, the edge nodes independently select representative data and transmit it back to the cloud. Moreover, the model is iteratively retrained on the cloud using the few-shot uploaded data. Finally, the feasibility of LPAI is analyzed, and simulation experiments on the public ASC dataset validate that the proposed framework can improve recognition accuracy by 5% using only 85 actual sensor data points.}, } @article {pmid36501960, year = {2022}, author = {Wan, S and Zhao, K and Lu, Z and Li, J and Lu, T and Wang, H}, title = {A Modularized IoT Monitoring System with Edge-Computing for Aquaponics.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {23}, pages = {}, doi = {10.3390/s22239260}, pmid = {36501960}, issn = {1424-8220}, abstract = {Aquaponics is a green and efficient agricultural production model that combines aquaculture and vegetable cultivation. It is worth looking into optimizing the proportion of fish and plants to improve quality and yield. However, current aquaponics monitoring systems offer little non-destructive monitoring of plant growth. In this paper, based on Internet of Things technologies, a monitoring system is designed with miniaturization, modularization, and low-cost features for cultivation-breeding ratio research. The system can realize remote monitoring and intelligent control of the parameters needed to keep fish and plants under optimal conditions. First, a 32-bit chip is used as the Microcontroller Unit to develop the intelligent sensing unit, which can realize 16 different data acquisitions as stand-alone extensible modules. Second, to achieve plant data acquisition and upload, a Raspberry Pi embedded with image processing algorithms is introduced to realize edge computing. Finally, all the collected data are stored in the Ali-cloud through Wi-Fi, and a WeChat Mini Program is designed to display data and control devices. The results show that there is no packet loss within 90 m for wireless transmission, and the error rate of environment parameters is limited to 5%. It was proven that the system is intelligent, flexible, low-cost, and stable, making it well suited to small-scale aquaponics.}, } @article {pmid36501875, year = {2022}, author = {Wu, TY and Kong, F and Wang, L and Chen, YC and Kumari, S and Pan, JS}, title = {Toward Smart Home Authentication Using PUF and Edge-Computing Paradigm.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {23}, pages = {}, doi = {10.3390/s22239174}, pmid = {36501875}, issn = {1424-8220}, abstract = {The smart home is a crucial embodiment of the internet of things (IoT), which enables users to access smart home services anytime and anywhere. Due to the limited resources of cloud computing, it cannot meet users' real-time needs. Therefore, edge computing has emerged as the times require, providing users with better real-time access and storage. The application of edge computing in the smart home environment enables users to enjoy smart home services. However, users and smart devices communicate through public channels, and malicious attackers may intercept information transmitted through public channels, resulting in user privacy disclosure.
Therefore, protecting secure communication between users and smart devices in the smart home environment is a critical issue. Furthermore, authentication protocols in smart home environments also face security challenges. In this paper, we propose an anonymous authentication protocol that applies edge computing to the smart home environment to protect communication security between entities. To protect the security of smart devices, we embed physical unclonable functions (PUF) into each smart device. The real-or-random model, informal security analysis, and ProVerif are adopted to verify the security of our protocol. Finally, we compare our protocol with existing protocols regarding security and performance. The comparison results demonstrate that our protocol has higher security and slightly better performance.}, } @article {pmid36501855, year = {2022}, author = {Li, P and Cao, J}, title = {A Virtual Machine Consolidation Algorithm Based on Dynamic Load Mean and Multi-Objective Optimization in Cloud Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {23}, pages = {}, doi = {10.3390/s22239154}, pmid = {36501855}, issn = {1424-8220}, abstract = {High energy consumption and low resource utilization have become increasingly prominent problems in cloud data centers. Virtual machine (VM) consolidation is the key technology to solve these problems. However, excessive VM consolidation may lead to service level agreement violations (SLAv). Most studies have focused on optimizing energy consumption and ignored other factors. An effective VM consolidation should comprehensively consider multiple factors, including the quality of service (QoS), energy consumption, resource utilization, migration overhead, and network communication overhead, which is a multi-objective optimization problem. To solve the problems above, we propose a VM consolidation approach based on dynamic load mean and multi-objective optimization (DLMM-VMC), which aims to minimize power consumption, resource waste, migration overhead, and network communication overhead while ensuring QoS. First, based on multi-dimensional resource considerations, the host load status is objectively evaluated by using the proposed host load detection algorithm based on the dynamic load mean to avoid excessive VM consolidation. Then, the best solution is obtained based on the proposed multi-objective optimization model and an optimized ant colony algorithm, so as to ensure the common interests of cloud service providers and users. Finally, the experimental results show that, compared with the existing VM consolidation methods, our proposed algorithm achieves a significant improvement in energy consumption, QoS, resource waste, SLAv, and migration and network overhead.}, } @article {pmid36501828, year = {2022}, author = {Marcillo, P and Tamayo-Urgilés, D and Valdivieso Caraguay, ÁL and Hernández-Álvarez, M}, title = {Security in V2I Communications: A Systematic Literature Review.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {23}, pages = {}, doi = {10.3390/s22239123}, pmid = {36501828}, issn = {1424-8220}, abstract = {Recently, the number of vehicles equipped with wireless connections has increased considerably. The impact of that growth in areas such as telecommunications, infotainment, and automatic driving is enormous.
More and more drivers want to be part of a vehicular network, despite the implications or risks that, for instance, the openness of wireless communications, its dynamic topology, and its considerable size may bring. Undoubtedly, this trend is because of the benefits the vehicular network can offer. Generally, a vehicular network has two modes of communication (V2I and V2V). The advantage of V2I over V2V is roadside units' high computational and transmission power, which assures the functioning of early warning and driving guidance services. This paper aims to discover the principal vulnerabilities and challenges in V2I communications, the tools and methods to mitigate those vulnerabilities, the evaluation metrics to measure the effectiveness of those tools and methods, and, based on those metrics, the methods or tools that provide the best results. Researchers have identified non-resistance to attacks, the regular updating and exposure of keys, and the high dependence on certification authorities as the main vulnerabilities. Thus, the authors found schemes resistant to attacks, authentication schemes, privacy protection models, and intrusion detection and prevention systems. Of the solutions for providing security analyzed in this review, the authors determined that most of them use metrics such as computational cost and communication overhead to measure their performance. Additionally, they determined that the solutions that use emerging technologies such as fog/edge/cloud computing present better results than the rest. Finally, they established that the principal challenge in V2I communication is to protect and provide a safe and reliable communication channel to avoid adversaries taking control of the medium.}, } @article {pmid36501767, year = {2022}, author = {Hung, YH}, title = {Developing an Improved Ensemble Learning Approach for Predictive Maintenance in the Textile Manufacturing Process.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {23}, pages = {}, doi = {10.3390/s22239065}, pmid = {36501767}, issn = {1424-8220}, abstract = {With the rapid development of digital transformation, paper forms are digitalized as electronic forms (e-Forms). Existing data can be applied in predictive maintenance (PdM) to enable intelligent and automated manufacturing. This study aims to enhance the utilization of collected e-Form data through machine learning approaches and cloud computing to predict and provide maintenance actions. The ensemble learning approach (ELA) requires less computation time and has a simple hardware requirement; it is suitable for processing e-Form data with specific attributes. This study proposed an improved ELA to predict the defective class of product data from a manufacturing site's work order form. This study also proposed a resource dispatching approach to arrange data with the corresponding emailing resource for automatic notification. This study's novelty is the integration of cloud computing and an improved ELA for PdM to assist the textile product manufacturing process. The data analytics results show that the improved ensemble learning algorithm has over 98% accuracy and precision for defective product prediction.
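As a hedged sketch of what an ELA of this kind looks like in code, the example below trains a soft-voting ensemble on a synthetic, imbalanced binary task standing in for defective/non-defective work-order records; the estimator mix and data are assumptions, not the study's exact model.

    # Generic ensemble learning sketch on synthetic "defect" data
    # (assumed stand-in for e-Form work-order attributes).
    from sklearn.datasets import make_classification
    from sklearn.ensemble import RandomForestClassifier, VotingClassifier
    from sklearn.linear_model import LogisticRegression
    from sklearn.model_selection import train_test_split
    from sklearn.tree import DecisionTreeClassifier

    X, y = make_classification(n_samples=2000, n_features=12, weights=[0.9],
                               random_state=42)   # few "defective" samples
    X_tr, X_te, y_tr, y_te = train_test_split(X, y, stratify=y,
                                              random_state=42)

    ensemble = VotingClassifier(
        estimators=[("lr", LogisticRegression(max_iter=1000)),
                    ("dt", DecisionTreeClassifier(max_depth=6)),
                    ("rf", RandomForestClassifier(n_estimators=100))],
        voting="soft")                 # average predicted probabilities
    ensemble.fit(X_tr, y_tr)
    print(f"holdout accuracy: {ensemble.score(X_te, y_te):.3f}")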
The validation results of the dispatching approach show that data can be correctly transmitted in a timely manner to the corresponding resource, along with a notification being sent to users.}, } @article {pmid36501737, year = {2022}, author = {Gul, OM}, title = {Heuristic Resource Reservation Policies for Public Clouds in the IoT Era.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {23}, pages = {}, doi = {10.3390/s22239034}, pmid = {36501737}, issn = {1424-8220}, abstract = {With the advances of the IoT era, the number of wireless sensor devices has been growing rapidly. This increasing number gives rise to more complex networks where more complex tasks can be executed by utilizing more computational resources from public clouds. Cloud service providers use various pricing models for their offered services. Some models are appropriate for the cloud service user's short-term requirements, whereas others are appropriate for long-term requirements. Reservation-based pricing models are suitable for the long-term requirements of cloud service users. We used pricing schemes with spot and reserved instances. Reserved instances support a hybrid cost model with fixed reservation costs that vary with contract duration and an hourly usage charge which is lower than that of spot instances. Optimizing the resources to be reserved requires sufficient research effort. Recent algorithms proposed for this problem are generally based on integer programming, so they do not have polynomial time complexity. In this work, heuristic-based polynomial time policies are proposed for this problem. It is shown that the cost for a cloud service user that uses our approach is comparable to optimal solutions, i.e., it is near-optimal.}, } @article {pmid36500810, year = {2022}, author = {Malik, S and Dhasmana, A and Preetam, S and Mishra, YK and Chaudhary, V and Bera, SP and Ranjan, A and Bora, J and Kaushik, A and Minkina, T and Jatav, HS and Singh, RK and Rajput, VD}, title = {Exploring Microbial-Based Green Nanobiotechnology for Wastewater Remediation: A Sustainable Strategy.}, journal = {Nanomaterials (Basel, Switzerland)}, volume = {12}, number = {23}, pages = {}, doi = {10.3390/nano12234187}, pmid = {36500810}, issn = {2079-4991}, abstract = {Water scarcity due to contamination of water resources with different inorganic and organic contaminants is one of the foremost global concerns. It is due to rapid industrialization, fast urbanization, and the low efficiency of traditional wastewater treatment strategies. Conventional water treatment strategies, including chemical precipitation, membrane filtration, coagulation, ion exchange, solvent extraction, adsorption, and photolysis, are based on adopting various nanomaterials (NMs) with a high surface area, including carbon NMs, polymers, metal-based NMs, and metal oxides. However, significant bottlenecks are toxicity, cost, secondary contamination, size and space constraints, energy efficiency, prolonged time consumption, output efficiency, and scalability. On the contrary, green NMs fabricated using microorganisms emerge as cost-effective, eco-friendly, sustainable, safe, and efficient substitutes for these traditional strategies.
This review summarizes state-of-the-art microbial-assisted green NMs and strategies, including microbial cells, magnetotactic bacteria (MTB), bio-augmentation, and integrated bioreactors, for removing an extensive range of water contaminants, addressing the challenges associated with traditional strategies. Furthermore, a comparative analysis of the efficacies of microbe-assisted green NM-based water remediation strategies with traditional practices, in light of crucial factors like reusability, regeneration, removal efficiency, and adsorption capacity, has been presented. The associated challenges, their alternate solutions, and the cutting-edge prospects of microbial-assisted green nanobiotechnology with the integration of advanced tools, including the internet-of-nano-things, cloud computing, and artificial intelligence, have been discussed. This review opens a new window to assist future research dedicated to sustainable and green nanobiotechnology-based strategies for environmental remediation applications.}, } @article {pmid36497649, year = {2022}, author = {Vărzaru, AA}, title = {Assessing Digital Transformation of Cost Accounting Tools in Healthcare.}, journal = {International journal of environmental research and public health}, volume = {19}, number = {23}, pages = {}, doi = {10.3390/ijerph192315572}, pmid = {36497649}, issn = {1660-4601}, abstract = {The expansion of digital technologies has significantly changed most economic activities and professions. Digital technologies have penetrated managerial accounting and have a vast potential to transform this profession. Implementing emerging digital technologies, such as artificial intelligence, blockchain, the Internet of Things, big data, and cloud computing, can trigger a crucial leap forward, leading to a paradigm shift in healthcare organizations' accounting management. The paper's main objective is to investigate the perception of Romanian accountants on implementing digital technologies in healthcare organizations' accounting management. The paper presents a study based on a questionnaire among Romanian accountants who use various digital technologies implemented in traditional and innovative cost accounting tools. Based on structural equation modeling, the results emphasize the prevalence of innovative tools over traditional cost accounting tools improved through digital transformation, with digital technologies assuming the most complex and time-consuming tasks. Moreover, the influence of cost accounting tools improved through digital transformation on healthcare organizations' performance is much more robust in the case of innovative tools than in the case of traditional cost accounting tools. The proposed model provides managers in healthcare organizations with information on the most effective methods in the context of digital transformation.}, } @article {pmid36495459, year = {2023}, author = {Contaldo, SG and Alessandri, L and Colonnelli, I and Beccuti, M and Aldinucci, M}, title = {Bringing Cell Subpopulation Discovery on a Cloud-HPC Using rCASC and StreamFlow.}, journal = {Methods in molecular biology (Clifton, N.J.)}, volume = {2584}, number = {}, pages = {337-345}, pmid = {36495459}, issn = {1940-6029}, abstract = {The idea behind novel single-cell RNA sequencing (scRNA-seq) pipelines is to isolate single cells through microfluidic approaches and generate sequencing libraries in which the transcripts are tagged to track their cell of origin.
Modern scRNA-seq platforms are capable of analyzing many thousands of cells in each run. Then, combined with massive high-throughput sequencing producing billions of reads, scRNA-seq allows the assessment of fundamental biological properties of cell populations and biological systems at unprecedented resolution. In this chapter, we describe how cell subpopulation discovery algorithms, integrated into rCASC, could be efficiently executed on cloud-HPC infrastructure. To achieve this task, we focus on the StreamFlow framework, which provides container-native runtime support for scientific workflows in cloud/HPC environments.}, } @article {pmid36472895, year = {2022}, author = {Barbaric, A and Munteanu, C and Ross, H and Cafazzo, JA}, title = {Design of a Patient Voice App Experience for Heart Failure Management: Usability Study.}, journal = {JMIR formative research}, volume = {6}, number = {12}, pages = {e41628}, doi = {10.2196/41628}, pmid = {36472895}, issn = {2561-326X}, abstract = {BACKGROUND: The use of digital therapeutics (DTx) in the prevention and management of medical conditions has increased through the years, with an estimated 44 million people using one as part of their treatment plan in 2021, nearly double the number from the previous year. DTx are commonly accessed through smartphone apps, but offering these treatments through additional platforms can improve the accessibility of these interventions. Voice apps are an emerging technology in the digital health field; not only do they have the potential to improve DTx adherence, but they can also create a better user experience for some user groups.

OBJECTIVE: This research aimed to identify the acceptability and feasibility of offering a voice app for a chronic disease self-management program. The objective of this project was to design, develop, and evaluate a voice app of an already-existing smartphone-based heart failure self-management program, Medly, to be used as a case study.

METHODS: A voice app version of Medly was designed and developed through a user-centered design process. We conducted a usability study and semistructured interviews with patients with heart failure (N=8) at the Peter Munk Cardiac Clinic in Toronto General Hospital to better understand the user experience. A Medly voice app prototype was built using a software development kit in tandem with a cloud computing platform and was verified and validated before the usability study. Data collection and analysis were guided by a mixed methods triangulation convergence design.
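To give a sense of the kind of backend logic such a prototype runs on a cloud platform, here is a framework-agnostic, hypothetical intent handler: the voice service posts a parsed intent plus slot values, and the handler returns the text to be spoken. Intent names, slots, and replies are invented for illustration and are not Medly's actual logic.

    # Hypothetical voice-app intent handler (not Medly's implementation).
    def handle_intent(intent: str, slots: dict) -> str:
        if intent == "ReportWeight":
            weight = float(slots["weight_kg"])
            if weight <= 0:
                return "That weight doesn't look right. Please try again."
            # A real DTx backend would persist the reading and run its
            # clinical algorithm here; this sketch only confirms receipt.
            return f"Thanks, I recorded {weight} kilograms for today."
        if intent == "ReportSymptom":
            return f"Noted: {slots.get('symptom', 'a symptom')}. Anything else?"
        return "Sorry, I didn't catch that. You can report a weight or a symptom."

    print(handle_intent("ReportWeight", {"weight_kg": "71.2"}))
    print(handle_intent("ReportSymptom", {"symptom": "swollen ankles"}))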

RESULTS: Common themes were identified in the results of the usability study, which involved 8 participants with heart failure. Almost all participants (7/8, 88%) were satisfied with the voice app and felt confident using it, although half of the participants (4/8, 50%) were unsure about using it in the future. Six main themes were identified: changes in physical behavior, preference between voice app and smartphone, importance of music during voice app interaction, lack of privacy concerns, desired reassurances during voice app interaction, and helpful aids during voice app interaction. These findings were triangulated with the quantitative data, and we concluded that the main area for improvement was ease of use; design changes were then implemented to improve the user experience.

CONCLUSIONS: This work offered preliminary insight into the acceptability and feasibility of a Medly voice app. Given the recent emergence of voice apps in health care, we believe that this research offered invaluable insight into successfully deploying DTx for chronic disease self-management using this technology.}, } @article {pmid36470948, year = {2022}, author = {Zhao, S and Guo, X and Qu, Z and Zhang, Z and Yu, T}, title = {Intelligent retrieval method for power grid operation data based on improved SimHash and multi-attribute decision making.}, journal = {Scientific reports}, volume = {12}, number = {1}, pages = {20994}, pmid = {36470948}, issn = {2045-2322}, abstract = {In the trend of the energy revolution, power data has become one of the key elements of the power grid, and an advanced power system with "electric power + computing power" as the core has become an inevitable choice. However, the traditional search approach based on directory query is commonly used for power grid operation data both domestically and internationally. This approach fails to effectively meet users' needs for fast, accurate, and personalized retrieval of useful information from the vast amount of power grid data. It seriously affects the real-time availability of data and the efficiency of business-critical analytical decisions. For this reason, an intelligent retrieval approach for power grid operation data based on improved SimHash and multi-attribute decision making is proposed in this paper. The method elaborates the properties of the SimHash and multi-attribute decision making algorithms, and an intelligent parallel retrieval algorithm, MR-ST, based on the MapReduce model is designed. Finally, as an example, real-time grid operation data from multiple sources are analyzed on the cloud platform. The experimental results show the effectiveness and precision of the method. Compared with traditional methods, the search accuracy rate, search completion rate, and search time are significantly improved. Experiments show that the method can be applied to the intelligent retrieval of power grid operation data.}, } @article {pmid36470698, year = {2022}, author = {Lee, P and Tahmasebi, A and Dave, JK and Parekh, MR and Kumaran, M and Wang, S and Eisenbrey, JR and Donuru, A}, title = {Comparison of Gray-scale Inversion to Improve Detection of Pulmonary Nodules on Chest X-rays Between Radiologists and a Deep Convolutional Neural Network.}, journal = {Current problems in diagnostic radiology}, volume = {}, number = {}, pages = {}, doi = {10.1067/j.cpradiol.2022.11.004}, pmid = {36470698}, issn = {1535-6302}, abstract = {Detection of pulmonary nodules on chest x-rays is an important task for radiologists. Previous studies have shown improved detection rates using gray-scale inversion. The purpose of our study was to compare the efficacy of gray-scale inversion in improving the detection of pulmonary nodules on chest x-rays for radiologists and machine learning models (ML). We created a mixed dataset consisting of 60 2-view (posteroanterior view - PA and lateral view) chest x-rays with computed tomography-confirmed nodule(s) and 62 normal chest x-rays. Twenty percent of the cases were separated for a testing dataset (24 total images). Data augmentation through mirroring and transfer learning was used for the remaining cases (784 total images) for supervised training of 4 ML models (grayscale PA, grayscale lateral, gray-scale inversion PA, and gray-scale inversion lateral) on Google's cloud-based AutoML platform.
Three cardiothoracic radiologists analyzed the complete 2-view dataset (n=120) and, for comparison to the ML, the single-view testing subsets (12 images each). Gray-scale inversion (area under the curve (AUC) 0.80, 95% confidence interval (CI) 0.75-0.85) did not improve diagnostic performance for radiologists compared to grayscale (AUC 0.84, 95% CI 0.79-0.88). Gray-scale inversion also did not improve diagnostic performance for the ML. In the limited testing dataset, the ML did demonstrate higher sensitivity and negative predictive value for grayscale PA (72.7% and 75.0%), grayscale lateral (63.6% and 66.6%), and gray-scale inversion lateral views (72.7% and 76.9%), comparing favorably to the radiologists (63.9% and 72.3%, 27.8% and 58.3%, 19.5% and 50.5%, respectively). Further investigation of other post-processing algorithms to improve the diagnostic performance of ML is warranted.}, } @article {pmid36467434, year = {2022}, author = {Lanjewar, MG and Shaikh, AY and Parab, J}, title = {Cloud-based COVID-19 disease prediction system from X-Ray images using convolutional neural network on smartphone.}, journal = {Multimedia tools and applications}, volume = {}, number = {}, pages = {1-30}, pmid = {36467434}, issn = {1380-7501}, abstract = {COVID-19 has engulfed over 200 nations through human-to-human transmission, either directly or indirectly. Reverse Transcription-polymerase Chain Reaction (RT-PCR) has been endorsed as a standard COVID-19 diagnostic procedure but has caveats such as low sensitivity, the need for a skilled workforce, and long turnaround times. Coronaviruses show significant manifestation in Chest X-Ray (CX-Ray) images and, thus, CX-Ray imaging can be a viable option for an alternate COVID-19 diagnostic strategy. An automatic COVID-19 detection system can be developed to detect the disease, thus reducing strain on the healthcare system. This paper discusses a real-time Convolutional Neural Network (CNN) based system for COVID-19 illness prediction from CX-Ray images on the cloud. The implemented CNN model displays exemplary results, with training accuracy being 99.94% and validation accuracy reaching 98.81%. The confusion matrix was utilized to assess the models' outcome, achieving 99% precision, 98% recall, 99% F1 score, 100% training area under the curve (AUC), and 98.3% validation AUC. The same CX-Ray dataset was also employed to predict COVID-19 disease with deep Convolutional Neural Networks (DCNN), such as ResNet50, VGG19, InceptionV3, and Xception. The prediction outcome demonstrated that the present CNN was more capable than the DCNN models. The efficient CNN model was deployed to the Platform as a Service (PaaS) cloud.}, } @article {pmid36465713, year = {2022}, author = {Magotra, B and Malhotra, D and Dogra, AK}, title = {Adaptive Computational Solutions to Energy Efficiency in Cloud Computing Environment Using VM Consolidation.}, journal = {Archives of computational methods in engineering : state of the art reviews}, volume = {}, number = {}, pages = {1-30}, pmid = {36465713}, issn = {1886-1784}, abstract = {Cloud computing has emerged in recent years as a computing paradigm where services are provided through the internet.
Offering on-demand services has transformed IT companies' working environment, leading to a linearly increasing trend in usage. The provisioning of the computing infrastructure is achieved with the help of virtual machines. A great number of physical devices is required to satisfy users' resource requirements. To meet the requirements of submitted workloads, which are usually dynamic, cloud data centers resort to over-provisioning of cloud resources. The result of this over-provisioning is resource wastage and increased energy consumption, which raise operational costs. High CO2 emissions result from this huge energy consumption by data centers, posing a threat to environmental stability. This environmental concern demands controlled energy consumption, which can be attained by optimal usage of resources to balance the server load, by minimizing the number of active nodes, and by minimizing the frequency of switching between active and inactive server modes in the data center. Motivated by these actualities, we discuss numerous statistical, deterministic, probabilistic, machine learning and optimization based computational solutions for the cloud computing environment. A comparative analysis of the computational methods, on the basis of architecture, consolidation step involved, objectives achieved, simulators involved and resources utilized, has also been presented. A taxonomy for virtual machine (VM) consolidation has also been derived in this research article, followed by emerging challenges and research gaps in the field of VM consolidation in the cloud computing environment.}, } @article {pmid36465318, year = {2022}, author = {Ilyas, A and Alatawi, MN and Hamid, Y and Mahfooz, S and Zada, I and Gohar, N and Shah, MA}, title = {Software architecture for pervasive critical health monitoring system using fog computing.}, journal = {Journal of cloud computing (Heidelberg, Germany)}, volume = {11}, number = {1}, pages = {84}, pmid = {36465318}, issn = {2192-113X}, abstract = {Because of the existence of Covid-19 and its variants, health monitoring systems have become mandatory, particularly for critical patients such as neonates. However, the massive volume of real-time data generated by monitoring devices necessitates the use of efficient methods and approaches to respond promptly. A fog-based architecture for IoT healthcare systems tends to provide better services, but it also produces some issues that must be addressed. In this paper, we present a bidirectional approach to improving real-time data transmission for health monitors by minimizing network latency and usage. To that end, a simplified approach for large-scale IoT health monitoring systems is devised, which enables IoT devices to select optimal fog nodes to reduce both communication and processing delays. Additionally, an improved dynamic approach for load balancing and task assignment is also suggested. Embedding the best practices from the IoT, fog, and cloud planes, our aim in this work is to offer a software architecture for IoT-based healthcare systems that fulfills non-functional needs.
The 4+1 architectural view model is used to illustrate the proposed architecture.}, } @article {pmid36462891, year = {2022}, author = {Motwani, A and Shukla, PK and Pawar, M}, title = {Ubiquitous and smart healthcare monitoring frameworks based on machine learning: A comprehensive review.}, journal = {Artificial intelligence in medicine}, volume = {134}, number = {}, pages = {102431}, doi = {10.1016/j.artmed.2022.102431}, pmid = {36462891}, issn = {1873-2860}, mesh = {Humans ; Aged ; *COVID-19/epidemiology ; Pandemics ; Machine Learning ; Delivery of Health Care ; }, abstract = {During the COVID-19 pandemic, the patient care delivery paradigm rapidly shifted to remote technological solutions. Rising rates of life expectancy of older people, and deaths due to chronic diseases (CDs) such as cancer, diabetes and respiratory disease pose many challenges to healthcare. While the feasibility of Remote Patient Monitoring (RPM) with a Smart Healthcare Monitoring (SHM) framework was somewhat questionable before the COVID-19 pandemic, it is now a proven commodity and is on its way to becoming ubiquitous. More health organizations are adopting RPM to enable CD management in the absence of individual monitoring. The current studies on SHM have reviewed the applications of IoT and/or Machine Learning (ML) in the domain, their architecture, security, privacy and other network-related issues. However, no study has analyzed the AI and ubiquitous computing advances in SHM frameworks. The objective of this research is to identify and map key technical concepts in the SHM framework. In this context, an interesting and meaningful classification of the research articles surveyed for this work is presented. The comprehensive and systematic review is based on the "Preferred Reporting Items for Systematic Review and Meta-Analysis" (PRISMA) approach. A total of 2540 papers were screened from leading research archives from 2016 to March 2021, and finally, 50 articles were selected for review. The major advantages, developments, distinctive architectural structure, components, technical challenges and possibilities in SHM are briefly discussed. A review of various recent cloud and fog computing based architectures, major ML implementation challenges, prospects and future trends is also presented. The survey primarily encourages the data-driven predictive analytics aspects of healthcare and the development of ML models for health empowerment.}, } @article {pmid36459531, year = {2022}, author = {Truong, L and Ayora, F and D'Orsogna, L and Martinez, P and De Santis, D}, title = {Nanopore sequencing data analysis using Microsoft Azure cloud computing service.}, journal = {PloS one}, volume = {17}, number = {12}, pages = {e0278609}, doi = {10.1371/journal.pone.0278609}, pmid = {36459531}, issn = {1932-6203}, mesh = {Animals ; Cloud Computing ; *Nanopore Sequencing ; Data Analysis ; Data Accuracy ; *Mammoths ; }, abstract = {Genetic information provides insights into the exome, genome, epigenetics and structural organisation of the organism. Given the enormous amount of genetic information, scientists are able to perform mammoth tasks to improve the standard of health care, such as determining genetic influences on the outcome of allogeneic transplantation. Cloud-based computing has increasingly become a key choice for many scientists, engineers and institutions as it offers on-demand network access and users can conveniently rent rather than buy all required computing resources.
With the positive advancements in cloud computing and nanopore sequencing data output, we were motivated to develop an automated and scalable analysis pipeline utilizing cloud infrastructure in Microsoft Azure to accelerate the HLA genotyping service and improve the efficiency of the workflow at lower cost. In this study, we describe (i) the selection process for suitable virtual machine sizes for computing resources to balance performance against cost effectiveness; (ii) the building of Docker containers to include all tools in the cloud computational environment; and (iii) the comparison of HLA genotype concordance between the in-house manual method and the automated cloud-based pipeline to assess data accuracy. In conclusion, the Microsoft Azure cloud-based data analysis pipeline was shown to meet all the key imperatives for performance, cost, usability, simplicity and accuracy. Importantly, the pipeline allows for the on-going maintenance and testing of version changes before implementation. This pipeline is suitable for data analysis from the MinION sequencing platform and could be adopted for other data analysis application processes.}, } @article {pmid36443470, year = {2022}, author = {Jang, H and Koh, H and Gu, W and Kang, B}, title = {Integrative web cloud computing and analytics using MiPair for design-based comparative analysis with paired microbiome data.}, journal = {Scientific reports}, volume = {12}, number = {1}, pages = {20465}, pmid = {36443470}, issn = {2045-2322}, mesh = {Humans ; Cloud Computing ; *Microbiota ; *Gastrointestinal Microbiome ; Mouth ; Skin ; }, abstract = {Pairing (or blocking) is a design technique that is widely used in comparative microbiome studies to efficiently control for the effects of potential confounders (e.g., genetic, environmental, or behavioral factors). Some typical paired (block) designs for human microbiome studies are repeated measures designs that profile each subject's microbiome twice (or more than twice) (1) for pre and post treatments to see the effects of a treatment on the microbiome, or (2) for different organs of the body (e.g., gut, mouth, skin) to see the disparity in the microbiome between (or across) body sites. Researchers have developed a large number of web-based tools for user-friendly microbiome data processing and analytics, though there is no web-based tool currently available for such paired microbiome studies. In this paper, we thus introduce an integrative web-based tool, named MiPair, for design-based comparative analysis with paired microbiome data. MiPair is a user-friendly web cloud service that is built with step-by-step data processing and analytic procedures for comparative analysis between (or across) groups or between baseline and other groups. MiPair employs parametric and non-parametric tests for complete or incomplete block designs to perform comparative analyses with respect to microbial ecology (alpha- and beta-diversity) and taxonomy (e.g., phylum, class, order, family, genus, species). We demonstrate its usage through an example clinical trial on the effects of antibiotics on the gut microbiome.
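One of the paired analyses a tool like this automates can be sketched in a few lines: a non-parametric paired test on alpha diversity before and after a treatment. The Shannon-diversity values below are synthetic stand-ins, not data from the cited trial.

    # Paired, non-parametric comparison of alpha diversity (synthetic data).
    import numpy as np
    from scipy.stats import wilcoxon

    rng = np.random.default_rng(7)
    pre = rng.normal(3.2, 0.3, 12)             # Shannon index, baseline
    post = pre - rng.normal(0.4, 0.2, 12)      # assumed post-treatment drop

    stat, p = wilcoxon(pre, post)              # Wilcoxon signed-rank test
    print(f"W = {stat:.1f}, p = {p:.4f}")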
MiPair is open-source software that can be run on our web server (http://mipair.micloud.kr) or on a user's computer (https://github.com/yj7599/mipairgit).}, } @article {pmid36439763, year = {2022}, author = {Fouotsa Manfouo, NC and Von Fintel, D}, title = {Investigating the effects of drought and lockdowns on smallholder and commercial agricultural production in KwaZulu-Natal using remotely sensed data.}, journal = {Heliyon}, volume = {8}, number = {11}, pages = {e11637}, pmid = {36439763}, issn = {2405-8440}, abstract = {Few efforts have been made so far to understand the effects of both the 2015-2016 drought and the 2020 lockdown measures on the agricultural production of smallholder vis-a-vis commercial farmers in KwaZulu-Natal. Google Earth Engine and a random forest algorithm are used to generate a dataset that helps to investigate this question. A regression is performed on double-differenced data to investigate the effects of interest. A k-means cluster analysis is also used to determine whether the distribution patterns of crop production changed with drought and the disruption of agricultural production inputs. Results show that: (1) droughts affected the agricultural production of both areas similarly. Crop cover declined in both areas for one season after droughts were broken. Then recovery was driven by greener, more productive crops rather than the expansion of crop area. (2) The response of both areas to the COVID-19 lockdown was also similar. Both smallholder and commercial areas' Normalised Difference Vegetation Index - a proxy for crop vitality - improved in response to regulations favourable to the sector and improved rainfall. No significant adjustments in crop cover were observed. Production therefore changed primarily at the intensive margin (improved productivity of existing croplands) rather than the extensive margin (changing the extent of land under cultivation). (3) Cluster analysis allows for a more granular view, showing that the positive impact of lockdowns on agriculture was concentrated in areas with high rainfall and close proximity to metropolitan markets. Both smallholder and commercial farmers therefore are reliant on market access together with favourable environmental conditions for improved production.}, } @article {pmid36438442, year = {2022}, author = {Alzoubi, YI and Gill, A and Mishra, A}, title = {A systematic review of the purposes of Blockchain and fog computing integration: classification and open issues.}, journal = {Journal of cloud computing (Heidelberg, Germany)}, volume = {11}, number = {1}, pages = {80}, pmid = {36438442}, issn = {2192-113X}, abstract = {The fog computing concept was proposed to help cloud computing with the data processing of Internet of Things (IoT) applications. However, fog computing faces several challenges, such as security, privacy, and storage. One way to address these challenges is to integrate blockchain with fog computing. Several applications of blockchain-fog computing integration have been proposed recently due to their lucrative benefits, such as enhancing security and privacy. There is a need to systematically review and synthesize the literature on this topic of blockchain-fog computing integration. The purposes of integrating blockchain and fog computing were determined using a systematic literature review approach and tailored search criteria established from the research questions. In this research, 181 relevant papers were found and reviewed.
The results showed that the authors proposed the combination of blockchain and fog computing for several purposes such as security, privacy, access control, and trust management. A lack of standards and laws may make it difficult for blockchain and fog computing to be integrated in the future, particularly in light of newly developed technologies like quantum computing and artificial intelligence. The findings of this paper serve as a resource for researchers and practitioners of blockchain-fog computing integration for future research and designs.}, } @article {pmid36433599, year = {2022}, author = {Trakadas, P and Masip-Bruin, X and Facca, FM and Spantideas, ST and Giannopoulos, AE and Kapsalis, NC and Martins, R and Bosani, E and Ramon, J and Prats, RG and Ntroulias, G and Lyridis, DV}, title = {A Reference Architecture for Cloud-Edge Meta-Operating Systems Enabling Cross-Domain, Data-Intensive, ML-Assisted Applications: Architectural Overview and Key Concepts.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {22}, pages = {}, doi = {10.3390/s22229003}, pmid = {36433599}, issn = {1424-8220}, mesh = {*Ecosystem ; *Software ; }, abstract = {Future data-intensive intelligent applications are required to traverse across the cloud-to-edge-to-IoT continuum, where cloud and edge resources elegantly coordinate, alongside sensor networks and data. However, current technical solutions can only partially handle the data outburst associated with the IoT proliferation experienced in recent years, mainly due to their hierarchical architectures. In this context, this paper presents a reference architecture of a meta-operating system (RAMOS), targeted to enable a dynamic, distributed and trusted continuum which will be capable of facilitating the next-generation smart applications at the edge. RAMOS is domain-agnostic, capable of supporting heterogeneous devices in various network environments. Furthermore, the proposed architecture possesses the ability to place the data at the origin in a secure and trusted manner. Based on a layered structure, the building blocks of RAMOS are thoroughly described, and the interconnection and coordination between them is fully presented. Furthermore, illustration of how the proposed reference architecture and its characteristics could fit in potential key industrial and societal applications, which in the future will require more power at the edge, is provided in five practical scenarios, focusing on the distributed intelligence and privacy preservation principles promoted by RAMOS, as well as the concept of environmental footprint minimization. 
Finally, the business potential of an open edge ecosystem and the societal impacts of climate net neutrality are also illustrated.}, } @article {pmid36433575, year = {2022}, author = {Bin Mofidul, R and Alam, MM and Rahman, MH and Jang, YM}, title = {Real-Time Energy Data Acquisition, Anomaly Detection, and Monitoring System: Implementation of a Secured, Robust, and Integrated Global IIoT Infrastructure with Edge and Cloud AI.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {22}, pages = {}, pmid = {36433575}, issn = {1424-8220}, mesh = {*Internet of Things ; Artificial Intelligence ; Reproducibility of Results ; Computers ; Electrocardiography ; }, abstract = {The industrial internet of things (IIoT), a leading technology to digitize industrial sectors and applications, requires the integration of edge and cloud computing, cyber security, and artificial intelligence to enhance its efficiency, reliability, and sustainability. However, the collection of heterogeneous data from individual sensors, as well as monitoring and managing large databases with sufficient security, has become a concerning issue for the IIoT framework. The development of a smart and integrated IIoT infrastructure can be a possible solution that can efficiently handle the aforementioned issues. This paper proposes an AI-integrated, secured IIoT infrastructure incorporating heterogeneous data collection and storing capability, global inter-communication, and a real-time anomaly detection model. To this end, smart data acquisition devices are designed and developed through which energy data are transferred to the edge IIoT servers. Hash-encoded credentials and the transport layer security protocol are applied to the servers. Furthermore, these servers can exchange data through a secured message queuing telemetry transport protocol. Edge and cloud databases are exploited to handle big data. For detecting the anomalies of individual electrical appliances in real time, an algorithm based on a group of isolation forest models is developed and implemented on edge and cloud servers as well. In addition, remote-accessible online dashboards are implemented, enabling users to monitor the system. Overall, this study covers hardware design; the development of open-source IIoT servers and databases; the implementation of an interconnected global networking system; the deployment of edge and cloud artificial intelligence; and the development of real-time monitoring dashboards. Necessary performance results are measured, and they elaborately demonstrate the feasibility of the proposed IIoT framework.}, } @article {pmid36433564, year = {2022}, author = {Umoren, O and Singh, R and Awan, S and Pervez, Z and Dahal, K}, title = {Blockchain-Based Secure Authentication with Improved Performance for Fog Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {22}, pages = {}, doi = {10.3390/s22228969}, pmid = {36433564}, issn = {1424-8220}, mesh = {*Blockchain ; Computer Security ; Cloud Computing ; *Internet of Things ; Algorithms ; }, abstract = {Advancement in the Internet of Things (IoT) and cloud computing has escalated the number of connected edge devices in a smart city environment. Having billions more devices has contributed to security concerns, and an attack-proof authentication mechanism is the need of the hour to sustain the IoT environment.
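The per-appliance anomaly detection described in the energy-monitoring study above can be sketched with a group of isolation forest models, one per appliance; the synthetic power readings and contamination rate are assumptions for illustration, not the study's data or parameters.

    # One isolation forest per appliance, echoing the "group of isolation
    # forest models" idea (synthetic wattage data; assumed parameters).
    import numpy as np
    from sklearn.ensemble import IsolationForest

    rng = np.random.default_rng(1)
    models = {}
    for name in ["fridge", "hvac"]:
        normal_power = rng.normal(150, 20, (500, 1))   # watts, training data
        models[name] = IsolationForest(contamination=0.01,
                                       random_state=0).fit(normal_power)

    readings = {"fridge": 155.0, "hvac": 900.0}        # hvac looks anomalous
    for name, watts in readings.items():
        flag = models[name].predict([[watts]])[0]      # +1 normal, -1 anomaly
        print(f"{name}: {watts} W -> {'anomaly' if flag == -1 else 'ok'}")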
Securing all devices is a huge task that requires a lot of computational power and can be a bottleneck for devices with limited computational resources. To improve the authentication mechanism, many researchers have proposed decentralized applications such as blockchain technology for securing fog and IoT environments. Ethereum is considered a popular blockchain platform and is used by researchers to implement authentication mechanisms due to its programmable smart contracts. In this research, we propose a secure authentication mechanism with improved performance. Neo blockchain is a platform whose properties can provide improved security and faster execution. The research utilizes the intrinsic properties of the Neo blockchain to develop a secure authentication mechanism. The proposed authentication mechanism is compared with existing algorithms, and the results show that the proposed mechanism is 20 to 90 per cent faster in execution time and achieves a 30 to 70 per cent decrease in registration and authentication time compared to existing methods.}, } @article {pmid36433381, year = {2022}, author = {Yang, J and Lee, TY and Lee, WT and Xu, L}, title = {A Design and Application of Municipal Service Platform Based on Cloud-Edge Collaboration for Smart Cities.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {22}, pages = {}, doi = {10.3390/s22228784}, pmid = {36433381}, issn = {1424-8220}, mesh = {Cities ; *Artificial Intelligence ; *Cloud Computing ; Computers ; Game Theory ; }, abstract = {Information and Communication Technology (ICT) makes cities "smart", capable of providing advanced municipal services to citizens more efficiently. In the literature, many applications of municipal service platforms based on cloud computing and edge computing have been proposed, but reference models and application instances based on cloud-edge collaboration specifically for municipal service platforms are rarely studied. In this context, this paper first develops a reference model, including resource collaboration, application collaboration, service collaboration, and security collaboration, and discusses the main contents and challenges of each part. Then, aiming at the problem of computing and communication resource allocation in cloud-edge collaboration, a game-theory-based dynamic resource allocation model is introduced. Finally, an e-government self-service system based on cloud-edge collaboration is designed and implemented. The cloud side is a cloud computing server, and the edge side consists of self-service terminals integrating various edge computing devices with embedded Artificial Intelligence (AI). The experimental results show that the designed system combines the advantages of cloud computing and edge computing, and provides a better user experience with lower processing latency, larger bandwidth, and more concurrent tasks.
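The game-theoretic flavor of such a dynamic allocation model can be sketched with a toy best-response iteration: two edge terminals repeatedly pick the offloading level that maximizes their own utility given the other's choice, converging toward an equilibrium. The utility function and constants below are invented for illustration, not the paper's model.

    # Toy best-response dynamics for two terminals sharing cloud resources.
    import math

    def utility(x_own, x_other, price=0.08):
        # offloading benefit minus a congestion-dependent cost (assumed form)
        return math.log1p(10 * x_own) - price * 10 * x_own * (x_own + x_other)

    def best_response(x_other):
        grid = [i / 100 for i in range(101)]    # offloading fraction 0..1
        return max(grid, key=lambda x: utility(x, x_other))

    x1, x2 = 1.0, 0.0
    for _ in range(50):                         # iterate to a fixed point
        x1, x2 = best_response(x2), best_response(x1)
    print(f"approximate Nash equilibrium: x1 = {x1:.2f}, x2 = {x2:.2f}")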
Meanwhile, the findings show that the evolutionary equilibrium and the Nash equilibrium are the optimal solutions of their respective game models.}, } @article {pmid36433374, year = {2022}, author = {Mir, TS and Liaqat, HB and Kiren, T and Sana, MU and Alvarez, RM and Miró, Y and Pascual Barrera, AE and Ashraf, I}, title = {Antifragile and Resilient Geographical Information System Service Delivery in Fog Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {22}, pages = {}, doi = {10.3390/s22228778}, pmid = {36433374}, issn = {1424-8220}, mesh = {*Geographic Information Systems ; *Cloud Computing ; }, abstract = {The demand for cloud computing has drastically increased recently, but this paradigm has several issues due to its inherent complications, such as unreliability, latency, and limited support for mobility and location-aware services. Fog computing can resolve these issues to some extent, yet it is still in its infancy. Several works exist, but they lack fault tolerance in fog computing, which necessitates further research. Fault tolerance enables services to be performed and provisioned despite failures, maintaining antifragility and resiliency. Fog computing is highly diverse in terms of failures as compared to cloud computing and requires wide research and investigation. From this perspective, this study primarily focuses on the provision of uninterrupted services through fog computing. A framework has been designed to provide uninterrupted services while maintaining resiliency. Geographical information system (GIS) services, which require high computation, intensive CPU and memory resources, and low latency, have been deployed as a test bed. Keeping in mind the different types of failures at different levels, and their impacts on service failure and response time, the framework was made antifragile and resilient at multiple levels. Experimental results indicate that during service interruption, the user state remains unaffected.}, } @article {pmid36433242, year = {2022}, author = {Daraghmi, YA and Daraghmi, EY and Daraghma, R and Fouchal, H and Ayaida, M}, title = {Edge-Fog-Cloud Computing Hierarchy for Improving Performance and Security of NB-IoT-Based Health Monitoring Systems.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {22}, pages = {}, doi = {10.3390/s22228646}, pmid = {36433242}, issn = {1424-8220}, mesh = {*Cloud Computing ; *Electrocardiography ; Algorithms ; Support Vector Machine ; }, abstract = {This paper proposes a three-computing-layer architecture consisting of Edge, Fog, and Cloud for remote health vital signs monitoring. The novelty of this architecture is in using the Narrow-Band IoT (NB-IoT) for communicating with a large number of devices and covering large areas with minimum power consumption. Additionally, the architecture reduces the communication delay as the edge layer serves the health terminal devices with initial decisions and prioritizes data transmission for minimizing congestion on base stations. The paper also investigates different authentication protocols for improving security while maintaining low computation and transmission time. For data analysis, different machine learning algorithms, such as decision tree, support vector machines, and logistic regression, are used on the three layers. The proposed architecture is evaluated using CloudSim, iFogSim, and ns3-NB-IoT on real data consisting of medical vital signs.
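To make the per-layer data analysis in the Daraghmi et al. entry concrete, the following Python sketch compares the three classifiers the abstract names (decision tree, support vector machine, logistic regression) on synthetic vital-sign vectors; the features and data are invented for illustration and are not the paper's dataset.

    # Comparing the three classifiers named in the abstract on toy
    # vital-sign vectors (heart rate, SpO2); data is synthetic.
    import numpy as np
    from sklearn.linear_model import LogisticRegression
    from sklearn.model_selection import cross_val_score
    from sklearn.svm import SVC
    from sklearn.tree import DecisionTreeClassifier

    rng = np.random.default_rng(1)
    normal = np.column_stack([rng.normal(75, 8, 500), rng.normal(97, 1, 500)])
    urgent = np.column_stack([rng.normal(120, 15, 500), rng.normal(88, 3, 500)])
    X = np.vstack([normal, urgent])
    y = np.array([0] * 500 + [1] * 500)  # 1 = needs priority transmission

    for model in (DecisionTreeClassifier(max_depth=3),
                  SVC(kernel="rbf"),
                  LogisticRegression(max_iter=1000)):
        acc = cross_val_score(model, X, y, cv=5).mean()
        print(type(model).__name__, f"accuracy ~ {acc:.3f}")

In such an architecture, a compact model of this kind can run at the edge for initial triage while heavier analysis runs in the fog and cloud layers.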
The results show that the proposed architecture reduces the NB-IoT delay by 59.9%, the execution time by an average of 38.5%, and the authentication time by 35.1% for a large number of devices. This paper concludes that the NB-IoT combined with edge, fog, and cloud computing can support efficient remote health monitoring for large numbers of devices over large areas.}, } @article {pmid36430048, year = {2022}, author = {Zhao, Z and Wang, Z and Garcia-Campayo, J and Perez, HM}, title = {The Dissemination Strategy of an Urban Smart Medical Tourism Image by Big Data Analysis Technology.}, journal = {International journal of environmental research and public health}, volume = {19}, number = {22}, pages = {}, doi = {10.3390/ijerph192215330}, pmid = {36430048}, issn = {1660-4601}, mesh = {Humans ; *Medical Tourism ; Tourism ; Data Analysis ; Big Data ; Reproducibility of Results ; Technology ; }, abstract = {The advanced level of medical care is closely related to the development and popularity of a city, and it will also drive the development of tourism. The smart urban medical system based on big data analysis technology can greatly facilitate people's lives and increase the flow of people in the city, which is of great significance to the city's tourism image dissemination and branding. The medical system, with eight layers of architecture including access, medical cloud service governance, the medical cloud service resource, the platform's public service, the platform's runtime service, infrastructure, and the overall security and monitoring system of the platform, is designed based on big data analysis technology. Chengdu is taken as an example, using big data analysis technology to position the dissemination of its urban tourism image. Quantitative analysis and questionnaire methods are used to study the effect of urban smart medical system measurement and tourism image communication positioning based on big data analysis technology. The results show that the smart medical cloud service platform of the urban smart medical system, as a public information service system, supports users in obtaining medical services through various terminal devices without geographical restrictions. The smart medical cloud realizes service aggregation and data sharing compared to the traditional isolated medical service system. With cloud computing as the technical basis, the scalability and reliability of the system are improved to an unprecedented degree. This paper discusses how to effectively absorb, understand, and use tools in the big data environment, extract information from data, find effective information, make image communication activities accurate, reduce the cost, and improve the efficiency of city image communication. The research shows that big data analysis technology improves patients' medical experience, improves medical efficiency, and eases the strain on urban medical resource allocation to a certain extent. This technology improves people's satisfaction with the dissemination of urban tourism images, makes urban tourism image dissemination activities accurate, reduces the cost of urban tourism image dissemination, and improves the efficiency of urban tourism image dissemination.
The combination of the two can provide a reference for developing urban smart medical care and disseminating a tourism image.}, } @article {pmid36429833, year = {2022}, author = {Li, H and Ou, D and Ji, Y}, title = {An Environmentally Sustainable Software-Defined Networking Data Dissemination Method for Mixed Traffic Flows in RSU Clouds with Energy Restriction.}, journal = {International journal of environmental research and public health}, volume = {19}, number = {22}, pages = {}, doi = {10.3390/ijerph192215112}, pmid = {36429833}, issn = {1660-4601}, mesh = {*Computer Communication Networks ; *Software ; Programming, Linear ; Algorithms ; Physical Phenomena ; }, abstract = {The connected multi road side unit (RSU) environment can be envisioned as the RSU cloud. In this paper, the Software-Defined Networking (SDN) framework is utilized to dynamically reconfigure the RSU clouds for the mixed traffic flows with energy restrictions, which are composed of five categories of vehicles with distinctive communication demands. An environmentally sustainable SDN data dissemination method for safer and greener transportation solutions is thus proposed, aiming to achieve the lowest overall SDN cloud delay with the fewest working hosts and minimum energy consumption, which is a mixed integer linear programming problem (MILP). To solve the problem, Joint optimization algorithms with Finite resources (JF) in three hyperparameter versions, JF (DW = 0.3, HW = 0.7), JF (DW = 0.5, HW = 0.5) and JF (DW = 0.7, HW = 0.3), were proposed and contrasted with two single-objective optimization algorithms: the Host Optimization (H) algorithm and the Delay Optimization (D) algorithm. Results show that JF (DW = 0.3, HW = 0.7) and JF (DW = 0.5, HW = 0.5), when compared with the D algorithm, usually had slightly larger cloud delays but fewer working hosts and lower energy consumption, which is vital for enhancing energy efficiency and environmental protection and shows the superiority of the JF algorithms over the D algorithm. Meanwhile, the H algorithm had the fewest working hosts and the lowest energy consumption under the same conditions, but completely ignored the explosive surge in delay, which is undesirable in most cases of the SDN RSU cloud. Further analysis showed that the larger the network topology of the SDN cloud, the harder it was to find a feasible network configuration. Therefore, when designing an environmentally sustainable SDN RSU cloud for the greener future mobility of intelligent transportation systems, its size should be limited, or it should be partitioned into relatively small topologies.}, } @article {pmid36417024, year = {2022}, author = {Cohen, RY and Sodickson, AD}, title = {An Orchestration Platform that Puts Radiologists in the Driver's Seat of AI Innovation: a Methodological Approach.}, journal = {Journal of digital imaging}, volume = {}, number = {}, pages = {}, pmid = {36417024}, issn = {1618-727X}, abstract = {Current AI-driven research in radiology requires resources and expertise that are often inaccessible to small and resource-limited labs. The clinicians who are able to participate in AI research are frequently well-funded, well-staffed, and either have significant experience with AI and computing, or have access to colleagues or facilities that do.
Current imaging data is clinician-oriented and is not easily amenable to machine learning initiatives, resulting in inefficient, time-consuming, and costly efforts that rely upon a crew of data engineers and machine learning scientists, and all too often preclude radiologists from driving AI research and innovation. We present the system and methodology we have developed to address infrastructure and platform needs, while reducing the staffing and resource barriers to entry. We emphasize a data-first and modular approach that streamlines the AI development and deployment process while providing efficient and familiar interfaces for radiologists, such that they can be the drivers of new AI innovations.}, } @article {pmid36415683, year = {2022}, author = {Xie, Y and Li, P and Nedjah, N and Gupta, BB and Taniar, D and Zhang, J}, title = {Privacy protection framework for face recognition in edge-based Internet of Things.}, journal = {Cluster computing}, volume = {}, number = {}, pages = {1-19}, pmid = {36415683}, issn = {1386-7857}, abstract = {Edge computing (EC) relieves Internet of Things (IoT)-based face recognition systems of the constraints imposed by the limited storage and computing resources of local or mobile terminals. However, data privacy leakage remains a pressing concern. Previous studies only focused on some stages of face data processing, while this study focuses on the privacy protection of face data throughout its entire life cycle. Therefore, we propose a general privacy protection framework for edge-based face recognition (EFR) systems. To protect the privacy of face images and training models transmitted between edges and the remote cloud, we design a local differential privacy (LDP) algorithm based on the proportion difference of feature information. In addition, we also introduce identity authentication and hash technology to ensure the legitimacy of the terminal device and the integrity of the face image in the data acquisition phase. Theoretical analysis proves the rationality and feasibility of the scheme. Compared with the non-privacy protection situation and the equal privacy budget allocation method, our method achieves the best balance between availability and privacy protection in the numerical experiment.}, } @article {pmid36410105, year = {2022}, author = {Aguilar, B and Abdilleh, K and Acquaah-Mensah, GK}, title = {Multi-omics inference of differential breast cancer-related transcriptional regulatory network gene hubs between young Black and White patients.}, journal = {Cancer genetics}, volume = {270-271}, number = {}, pages = {1-11}, doi = {10.1016/j.cancergen.2022.11.001}, pmid = {36410105}, issn = {2210-7762}, abstract = {OBJECTIVE: Breast cancers (BrCA) are a leading cause of illness and mortality worldwide. Black women have a higher incidence rate relative to white women prior to age 40 years, and a lower incidence rate after 50 years. The objective of this study is to identify -omics differences between the two breast cancer cohorts to better understand the disparities observed in patient outcomes.

MATERIALS AND METHODS: Using Standard SQL, we queried ISB-CGC hosted Google BigQuery tables storing TCGA BrCA gene expression, methylation, and somatic mutation data and analyzed the combined multi-omics results using a variety of methods.
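As an illustration of the ISB-CGC/BigQuery workflow just described, a minimal Python sketch of a Standard SQL query follows. The table and column names are assumptions for illustration (ISB-CGC table identifiers vary by release), not necessarily those queried by the authors.

    # Querying an ISB-CGC-hosted TCGA somatic-mutation table with Standard SQL.
    # The table and column names below are illustrative; consult the current
    # ISB-CGC release for the exact identifiers.
    from google.cloud import bigquery

    client = bigquery.Client()  # uses your Google Cloud credentials
    query = """
        SELECT Hugo_Symbol, COUNT(DISTINCT case_barcode) AS n_cases
        FROM `isb-cgc-bq.TCGA.masked_somatic_mutation_hg38_gdc_current`
        WHERE project_short_name = 'TCGA-BRCA'
        GROUP BY Hugo_Symbol
        ORDER BY n_cases DESC
        LIMIT 10
    """
    for row in client.query(query).result():
        print(row.Hugo_Symbol, row.n_cases)

Running the query server-side in BigQuery means only the aggregated result, not the multi-omics tables themselves, is downloaded for local analysis.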

RESULTS: Among Stage II patients 50 years or younger, genes PIK3CA and CDH1 are more frequently mutated in White (W50) than in Black or African American patients (BAA50), while HUWE1, HYDIN, and FBXW7 mutations are more frequent in BAA50. Over-representation analysis (ORA) and Gene Set Enrichment Analysis (GSEA) results indicate that, among others, the Reactome Signaling by ROBO Receptors gene set is enriched in BAA50. Using the Virtual Inference of Protein-activity by Enriched Regulon analysis (VIPER) algorithm, we identified putative top 20 master regulators, including NUPR1, NFKBIL1, ZBTB17, TEAD1, EP300, TRAF6, CACTIN, and MID2. CACTIN and MID2 are of prognostic value. We identified driver genes, such as OTUB1, with suppressed expression whose DNA methylation status was inversely correlated with gene expression. Networks capturing microRNA and gene expression correlations identified notable microRNA hubs, such as miR-93 and miR-92a-2, expressed at higher levels in BAA50 than in W50.

DISCUSSION/CONCLUSION: The results point to several driver genes as being involved in the observed differences between the cohorts. The findings here form the basis for further mechanistic exploration.}, } @article {pmid36408731, year = {2022}, author = {Kucewicz, MT and Worrell, GA and Axmacher, N}, title = {Direct electrical brain stimulation of human memory: lessons learnt and future perspectives.}, journal = {Brain : a journal of neurology}, volume = {}, number = {}, pages = {}, doi = {10.1093/brain/awac435}, pmid = {36408731}, issn = {1460-2156}, abstract = {Modulation of cognitive functions supporting human declarative memory is one of the grand challenges of neuroscience, and of vast importance for a variety of neuropsychiatric, neurodegenerative and neurodevelopmental diseases. Despite a recent surge of successful attempts at improving performance in a range of memory tasks, the optimal approaches and parameters for memory enhancement have yet to be determined. On a more fundamental level, it remains elusive how delivering electrical current in a given brain area leads to enhanced memory processing. Starting from the local and distal physiological effects on neural populations, the mechanisms of enhanced memory encoding, maintenance, consolidation, or recall in response to direct electrical stimulation are only now being unraveled. With the advent of innovative neurotechnologies for concurrent recording and stimulation intracranially in the human brain, it becomes possible to study both acute and chronic effects of stimulation on memory performance and the underlying neural activities. In this review, we summarize the effects of various invasive stimulation approaches for modulating memory functions. We first outline the challenges that were faced in the initial studies of memory enhancement and the lessons learned. Electrophysiological biomarkers are then reviewed as more objective measures of the stimulation effects than behavioral outcomes. Finally, we classify the various stimulation approaches into continuous and phasic modulation with open or closed loop for responsive stimulation based on analysis of the recorded neural activities. Although the potential advantage of closed-loop responsive stimulation over the classic open-loop approaches is inconclusive, we foresee the emerging results from ongoing longitudinal studies and clinical trials to shed light on both the mechanisms and optimal strategies for improving declarative memory. Adaptive stimulation based on the biomarker analysis over extended periods of time is proposed as a future direction for obtaining lasting effects on memory functions. Chronic tracking and modulation of neural activities intracranially through adaptive stimulation opens tantalizing new avenues to continually monitor and treat memory and cognitive deficits in a range of brain disorders. Brain co-processors created with machine-learning tools and wireless bi-directional connectivity to seamlessly integrate implanted devices with smartphones and cloud computing are poised to enable real-time automated analysis of large data volumes and adaptively tune electrical stimulation based on electrophysiological biomarkers of behavioral states. 
Next generation implantable devices for high-density recording and stimulation of electrophysiological activities, and technologies for distributed brain-computer interfaces are presented as selected future perspectives for modulating human memory and associated mental processes.}, } @article {pmid36408485, year = {2022}, author = {Al-Khafaji, HMR and Jaleel, RA}, title = {Adopting effective hierarchal IoMTs computing with K-efficient clustering to control and forecast COVID-19 cases.}, journal = {Computers & electrical engineering : an international journal}, volume = {104}, number = {}, pages = {108472}, pmid = {36408485}, issn = {0045-7906}, abstract = {The Internet of Medical Things (IoMTs) based on fog/cloud computing has been shown to effectively improve the control, monitoring, and care quality of Coronavirus disease 2019 (COVID-19) patients. One of the convenient approaches to assess symptomatic patients is to group patients with comparable symptoms and provide an overview of the required level of care to patients with similar conditions. Therefore, this study adopts an effective hierarchical IoMT computing scheme with K-Efficient clustering to control and forecast COVID-19 cases. The proposed system integrates K-Means and K-Medoids clustering to monitor the health status of patients, detect COVID-19 cases early, and process data in real time with ultra-low latency. In addition, the data analysis takes into account the primary requirements of the network to assist in understanding the nature of COVID-19. Based on the findings, the K-Efficient clustering with fog computing is a more effective approach to analyse the status of patients than K-Means and K-Medoids in terms of intra-class distance, inter-class distance, running time, network latency, and RAM consumption. In summary, the outcome of this study provides a novel approach for remote monitoring and handling of infected COVID-19 patients through real-time personalised treatment services.}, } @article {pmid36404909, year = {2022}, author = {Narasimha Raju, AS and Jayavel, K and Rajalakshmi, T}, title = {ColoRectalCADx: Expeditious Recognition of Colorectal Cancer with Integrated Convolutional Neural Networks and Visual Explanations Using Mixed Dataset Evidence.}, journal = {Computational and mathematical methods in medicine}, volume = {2022}, number = {}, pages = {8723957}, pmid = {36404909}, issn = {1748-6718}, mesh = {Humans ; Neural Networks, Computer ; Support Vector Machine ; Diagnosis, Computer-Assisted/methods ; Colonoscopy ; *Polyps ; *Colorectal Neoplasms/diagnostic imaging ; }, abstract = {Colorectal cancer typically affects the gastrointestinal tract within the human body. Colonoscopy is one of the most accurate methods of detecting cancer. Current computer-assisted diagnosis (CADx) systems facilitate the identification of cancer using only a limited number of deep learning methods, and they do not operate on mixed datasets. The proposed system, called ColoRectalCADx, is supported by deep learning (DL) models suitable for cancer research. The CADx system comprises five stages: convolutional neural networks (CNN), support vector machine (SVM), long short-term memory (LSTM), visual explanation such as gradient-weighted class activation mapping (Grad-CAM), and semantic segmentation phases.
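As a rough picture of the K-Means/K-Medoids combination underlying the K-Efficient clustering in the Al-Khafaji and Jaleel entry above, the Python sketch below contrasts scikit-learn's K-Means with a naive PAM-style k-medoids loop on toy symptom vectors; the k-medoids code is a simplified stand-in, not the authors' algorithm.

    # K-Means (scikit-learn) vs. a naive k-medoids pass on toy symptom data.
    import numpy as np
    from scipy.spatial.distance import cdist
    from sklearn.cluster import KMeans

    rng = np.random.default_rng(2)
    X = np.vstack([rng.normal(0, 1, (100, 3)), rng.normal(4, 1, (100, 3))])

    km = KMeans(n_clusters=2, n_init=10, random_state=0).fit(X)
    print("k-means intra-class:", km.inertia_)

    # Naive k-medoids: alternate assignment and medoid update (PAM-style).
    medoids = X[rng.choice(len(X), 2, replace=False)]
    for _ in range(10):
        labels = cdist(X, medoids).argmin(axis=1)
        for k in range(2):
            pts = X[labels == k]
            costs = cdist(pts, pts).sum(axis=1)   # total distance to peers
            medoids[k] = pts[costs.argmin()]      # most central actual point
    intra = sum(cdist(X[labels == k], medoids[k:k + 1]).sum() for k in range(2))
    print("k-medoids intra-class:", intra)

The practical difference is that k-medoids pins each cluster centre to a real observation, which makes it more robust to outlier readings than the k-means mean.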
Here, the key components of the CADx system comprise 9 individual and 12 integrated CNNs, so the investigational experiments cover a total of 21 CNNs. In the subsequent phase, the CADx system combines the CNNs' concatenated transfer-learning features with SVM classification. Additional classification is applied to ensure effective transfer of results from the CNN to the LSTM. The system takes as input a mixed dataset combining CVC Clinic DB, Kvasir2, and Hyper Kvasir. After the CNN and LSTM stages, malignancies are detected using improved polyp recognition with Grad-CAM and semantic segmentation with U-Net. CADx results have been stored on Google Cloud for record retention. In these experiments, among all the CNNs, the individual CNN DenseNet-201 (87.1% training and 84.7% testing accuracies) and the integrated CNN ADaDR-22 (84.61% training and 82.17% testing accuracies) were the most efficient for cancer detection with the CNN+LSTM model. ColoRectalCADx accurately identifies cancer through the individual CNN DenseNet-201 and the integrated CNN ADaDR-22. In Grad-CAM's visual explanations, CNN DenseNet-201 displays precise visualization of polyps, and U-Net provides precise segmentation of malignant polyps.}, } @article {pmid36395912, year = {2022}, author = {Xu, H and Yang, X and Wang, D and Hu, Y and Cheng, Z and Shi, Y and Zheng, P and Shi, L}, title = {Multivariate and spatio-temporal groundwater pollution risk assessment: A new long-time serial groundwater environmental impact assessment system.}, journal = {Environmental pollution (Barking, Essex : 1987)}, volume = {317}, number = {}, pages = {120621}, doi = {10.1016/j.envpol.2022.120621}, pmid = {36395912}, issn = {1873-6424}, abstract = {Groundwater pollution risk assessment is an important part of environmental assessment. Although it has been developed for many years, there has not yet been a multi-dimensional method that takes into account long time series and spatial factors. We propose a new method that combines the advantages of remote sensing cloud computing, long-term groundwater modeling and simulation, and GIS technology to solve this problem efficiently. A coastal industrial park in Hainan was used as the study area. More than 10 parameters, including the depth of the groundwater level, rainfall, topography and geomorphology, soil moisture, pollution source, and pollution toxicity, were used as the indexes. A comprehensive model combining remote sensing cloud computing, the DRASTIC model, and Modflow + MT3DMS was established to assess the pollution risk from 2014 to 2021. The multi-year results indicated that the risk of groundwater pollution was usually distributed perpendicular to the coast, increasing from inland toward the coast. With the discharge of pollutants in the industrial park, the pollution risk in the area 5 km away from the centre increased year by year until it became stable in 2019, and the risk in the centre of the park reached level 1, covering an area of up to 145,400 square metres and accounting for 0.012% of the whole study area. The assessment results in 2020 and 2021 fluctuate slightly compared with those in 2019. Therefore, in terms of groundwater resource protection and resource management, it is necessary to focus on the detection of pollution in the coastal zone and the pollution within 5 km of the centre to strictly control pollution discharge.
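For orientation on the Xu et al. entry above: the classic DRASTIC vulnerability score is a weighted sum of seven rated hydrogeological parameters. The sketch below uses the standard textbook weights, an assumption here since the paper augments the classic model with remote sensing and flow modelling; the cell ratings are made up.

    # DRASTIC index = sum(weight_i * rating_i) over seven parameters.
    # Weights follow the classic DRASTIC method; ratings run 1-10 and the
    # sample cell below is invented for illustration.
    WEIGHTS = {
        "Depth_to_water": 5, "net_Recharge": 4, "Aquifer_media": 3,
        "Soil_media": 2, "Topography": 1, "Impact_of_vadose_zone": 5,
        "hydraulic_Conductivity": 3,
    }

    def drastic_index(ratings: dict) -> int:
        return sum(WEIGHTS[p] * r for p, r in ratings.items())

    cell = {"Depth_to_water": 9, "net_Recharge": 6, "Aquifer_media": 8,
            "Soil_media": 7, "Topography": 10, "Impact_of_vadose_zone": 8,
            "hydraulic_Conductivity": 6}
    print(drastic_index(cell))  # higher score = higher pollution vulnerability

Evaluating this index per grid cell, per year, is what makes the cloud-computed, multi-year risk maps in the entry feasible.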
In this study, the comprehensive assessment includes surface indicators, subsurface indicators, and pollutant indicators. Finally, we achieve a multivariate, spatial and long time series groundwater pollution risk assessment system, which is a new groundwater environmental impact assessment (GEIA) system.}, } @article {pmid36395210, year = {2022}, author = {Datta, S and Chakraborty, W and Radosavljevic, M}, title = {Toward attojoule switching energy in logic transistors.}, journal = {Science (New York, N.Y.)}, volume = {378}, number = {6621}, pages = {733-740}, doi = {10.1126/science.ade7656}, pmid = {36395210}, issn = {1095-9203}, abstract = {Advances in the theory of semiconductors in the 1930s in addition to the purification of germanium and silicon crystals in the 1940s enabled the point-contact junction transistor in 1947 and initiated the era of semiconductor electronics. Gordon Moore postulated 18 years later that the number of components in an integrated circuit would double every 1 to 2 years with associated reductions in cost per transistor. Transistor density doubling through scaling-the decrease of component sizes-with each new process node continues today, albeit at a slower pace compared with historical rates of scaling. Transistor scaling has resulted in exponential gain in performance and energy efficiency of integrated circuits, which transformed computing from mainframes to personal computers and from mobile computing to cloud computing. Innovations in new materials, transistor structures, and lithographic technologies will enable further scaling. Monolithic 3D integration, design technology co-optimization, alternative switching mechanisms, and cryogenic operation could enable further transistor scaling and improved energy efficiency in the foreseeable future.}, } @article {pmid36388591, year = {2022}, author = {Pei, J and Wang, L and Huang, H and Wang, L and Li, W and Wang, X and Yang, H and Cao, J and Fang, H and Niu, Z}, title = {Characterization and attribution of vegetation dynamics in the ecologically fragile South China Karst: Evidence from three decadal Landsat observations.}, journal = {Frontiers in plant science}, volume = {13}, number = {}, pages = {1043389}, pmid = {36388591}, issn = {1664-462X}, abstract = {Plant growth and its changes over space and time are effective indicators for signifying ecosystem health. However, large uncertainties remain in characterizing and attributing vegetation changes in the ecologically fragile South China Karst region, since most existing studies were conducted at a coarse spatial resolution or covered limited time spans. Considering the highly fragmented landscapes in the region, this hinders their capability in detecting fine information of vegetation dynamics taking place at local scales and comprehending the influence of climate change usually over relatively long temporal ranges. Here, we explored the spatiotemporal variations in vegetation greenness for the entire South China Karst region (1.9 million km[2]) at a resolution of 30m for the notably increased time span (1987-2018) using three decadal Landsat images and the cloud-based Google Earth Engine. Moreover, we spatially attributed the vegetation changes and quantified the relative contribution of driving factors. Our results revealed a widespread vegetation recovery in the South China Karst (74.80%) during the past three decades. 
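A minimal Google Earth Engine (Python API) sketch of the kind of Landsat greenness-trend analysis described above (Pei et al.) follows; the collection, bands, dates, and region are illustrative assumptions, since the study spans several Landsat sensors, three decades, and a far larger area.

    # Per-pixel linear NDVI trend from Landsat 8 surface reflectance in
    # Google Earth Engine. Region and dates are illustrative; the Collection 2
    # scale/offset of the SR bands is ignored here for brevity.
    import ee
    ee.Initialize()

    region = ee.Geometry.Rectangle([104.0, 24.0, 105.0, 25.0])  # toy karst AOI

    def add_ndvi_time(img):
        ndvi = img.normalizedDifference(["SR_B5", "SR_B4"]).rename("NDVI")
        years = img.date().difference(ee.Date("2013-01-01"), "year")
        return ndvi.addBands(ee.Image.constant(years).float().rename("t"))

    col = (ee.ImageCollection("LANDSAT/LC08/C02/T1_L2")
           .filterBounds(region)
           .filterDate("2013-01-01", "2019-01-01")
           .filter(ee.Filter.lt("CLOUD_COVER", 20))
           .map(add_ndvi_time))

    # linearFit expects bands ordered (x, y): time first, NDVI second.
    trend = col.select(["t", "NDVI"]).reduce(ee.Reducer.linearFit())
    print(trend.bandNames().getInfo())  # ['scale', 'offset']; 'scale' = NDVI/yr

Because the reduction runs server-side, the same few lines scale to the 1.9 million km2 extent the entry describes without downloading any imagery.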
Notably, the area of vegetation recovery tripled following the implementation of ecological engineering compared with the reference period (1987-1999). Meanwhile, the vegetation restoration trend was strongly sustainable beyond 2018 as demonstrated by the Hurst exponent. Furthermore, climate change contributed only one-fifth to vegetation restoration, whereas major vegetation recovery was highly attributable to afforestation projects, implying that anthropogenic influences accelerated vegetation greenness gains in karst areas since the start of the new millennium during which ecological engineering was continually established. Our study provides additional insights into ecological restoration and conservation in the highly heterogeneous karst landscapes and other similar ecologically fragile areas worldwide.}, } @article {pmid36387768, year = {2022}, author = {Noh, SK}, title = {Deep Learning System for Recycled Clothing Classification Linked to Cloud and Edge Computing.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {6854626}, pmid = {36387768}, issn = {1687-5273}, mesh = {Humans ; *Artificial Intelligence ; *Deep Learning ; Cloud Computing ; Automation ; Clothing ; }, abstract = {Recently, IT technologies related to the Fourth Industrial Revolution (4IR), such as artificial intelligence (AI), Internet of things (IoT), cloud computing, and edge computing, have been studied. Although a great deal of used clothing is generated in Korea, where clothing consumption amounts to 61 trillion won per year, it is not properly collected due to the inefficiency of the used clothing collection system, and the collected used clothing is not properly recycled due to an insufficient recycling system, a lack of skilled labor, and the health problems of workers. To solve this problem, this study proposes a deep learning clothing classification system (DLCCS) using cloud and edge computing. The proposed system classifies clothing image data input from camera terminals installed at clothing classification sites in various regions into two classes, as well as nine classes, by deep learning using convolutional neural networks (CNN). The classification results are then stored in the cloud through edge computing. Edge computing enables the analysis of the data of the Internet of Things (IoT) device on the edge of the network before transmitting it to the cloud. The performance evaluation parameters that are considered for the proposed research study are transmission velocity and latency. The proposed system can efficiently improve the process and automation of the classification and processing of recycled clothing in various places. It is also expected that the waste of clothing resources and the health problems of clothing classification workers will be reduced.}, } @article {pmid36374893, year = {2022}, author = {Nguyen, AD and Choi, S and Kim, W and Kim, J and Oh, H and Kang, J and Lee, S}, title = {Single-Image 3-D Reconstruction: Rethinking Point Cloud Deformation.}, journal = {IEEE transactions on neural networks and learning systems}, volume = {PP}, number = {}, pages = {}, doi = {10.1109/TNNLS.2022.3211929}, pmid = {36374893}, issn = {2162-2388}, abstract = {Single-image 3-D reconstruction has long been a challenging problem.
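The two-class CNN in the Noh entry above can be sketched in a few lines of Keras; the input size, depth, and layer choices below are illustrative assumptions rather than the paper's exact architecture.

    # Small CNN for 2-class recycled-clothing images (illustrative shape/depth).
    import tensorflow as tf
    from tensorflow.keras import layers, models

    model = models.Sequential([
        layers.Input(shape=(128, 128, 3)),
        layers.Conv2D(32, 3, activation="relu"), layers.MaxPooling2D(),
        layers.Conv2D(64, 3, activation="relu"), layers.MaxPooling2D(),
        layers.Flatten(),
        layers.Dense(64, activation="relu"),
        layers.Dense(2, activation="softmax"),  # swap 2 -> 9 for nine classes
    ])
    model.compile(optimizer="adam",
                  loss="sparse_categorical_crossentropy",
                  metrics=["accuracy"])
    model.summary()
    # In the described system, inference runs near the camera terminals and
    # only the class labels are forwarded to cloud storage.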
Recent deep learning approaches have been introduced to this 3-D area, but the ability to generate point clouds still remains limited due to inefficient and expensive 3-D representations, the dependency between the output and the number of model parameters, or the lack of a suitable computing operation. In this article, we present a novel deep-learning-based method to reconstruct a point cloud of an object from a single still image. The proposed method can be decomposed into two steps: feature fusion and deformation. The first step extracts both global and point-specific shape features from a 2-D object image, and then injects them into a randomly generated point cloud. In the second step, which is deformation, we introduce a new layer termed as GraphX that considers the interrelationship between points like common graph convolutions but operates on unordered sets. The framework can be applicable to realistic image data with background as we optionally learn a mask branch to segment objects from input images. To complement the quality of point clouds, we further propose an objective function to control the point uniformity. In addition, we introduce different variants of GraphX that cover from best performance to best memory budget. Moreover, the proposed model can generate an arbitrary-sized point cloud, which is the first deep method to do so. Extensive experiments demonstrate that we outperform the existing models and set a new height for different performance metrics in single-image 3-D reconstruction.}, } @article {pmid36366266, year = {2022}, author = {Kawa, J and Pyciński, B and Smoliński, M and Bożek, P and Kwasecki, M and Pietrzyk, B and Szymański, D}, title = {Design and Implementation of a Cloud PACS Architecture.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {21}, pages = {}, pmid = {36366266}, issn = {1424-8220}, mesh = {*Radiology Information Systems ; Cloud Computing ; Computers ; Software ; Tomography, X-Ray Computed ; }, abstract = {The limitations of the classic PACS (picture archiving and communication system), such as the backward-compatible DICOM network architecture and poor security and maintenance, are well-known. They are challenged by various existing solutions employing cloud-related patterns and services. However, a full-scale cloud-native PACS has not yet been demonstrated. The paper introduces a vendor-neutral cloud PACS architecture. It is divided into two main components: a cloud platform and an access device. The cloud platform is responsible for nearline (long-term) image archive, data flow, and backend management. It operates in multi-tenant mode. The access device is responsible for the local DICOM (Digital Imaging and Communications in Medicine) interface and serves as a gateway to cloud services. The cloud PACS was first implemented in an Amazon Web Services environment. It employs a number of general-purpose services designed or adapted for a cloud environment, including Kafka, OpenSearch, and Memcached. Custom services, such as a central PACS node, queue manager, or flow worker, also developed as cloud microservices, bring DICOM support, external integration, and a management layer. The PACS was verified using image traffic from, among others, computed tomography (CT), magnetic resonance (MR), and computed radiography (CR) modalities. During the test, the system was reliably storing and accessing image data. 
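As context for the point-cloud results in the Nguyen et al. entry above, reconstruction quality in this area is commonly scored with the symmetric Chamfer distance between predicted and ground-truth point sets; the NumPy/SciPy sketch below shows that metric (a field convention, not necessarily the paper's exact objective function).

    # Symmetric Chamfer distance between two point clouds (N x 3 arrays).
    import numpy as np
    from scipy.spatial.distance import cdist

    def chamfer(a: np.ndarray, b: np.ndarray) -> float:
        d = cdist(a, b)                      # pairwise Euclidean distances
        return d.min(axis=1).mean() + d.min(axis=0).mean()

    rng = np.random.default_rng(3)
    pred = rng.uniform(-1, 1, (2048, 3))     # e.g., a generated point cloud
    truth = pred + rng.normal(0, 0.01, pred.shape)
    print(chamfer(pred, truth))              # near 0 for close reconstructions

Because the metric is defined over unordered sets, it suits models like the entry's GraphX layer, which likewise operates on unordered point sets.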
In the following tests, differences in scaling behavior between the monolithic Dcm4chee server and the proposed solution are shown. The growing number of parallel connections did not influence the monolithic server's overall throughput, whereas the performance of the cloud PACS noticeably increased. In the final test, different retrieval patterns were evaluated to assess performance under different scenarios. The current production environment stores over 450 TB of image data and handles over 4000 DICOM nodes.}, } @article {pmid36366264, year = {2022}, author = {Kim, JK and Park, BS and Kim, W and Park, JT and Lee, S and Seo, YH}, title = {Robust Estimation and Optimized Transmission of 3D Feature Points for Computer Vision on Mobile Communication Network.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {21}, pages = {}, pmid = {36366264}, issn = {1424-8220}, mesh = {*Algorithms ; *Vision, Ocular ; Computers ; }, abstract = {Due to the amount of transmitted data and the security of personal or private information in wireless communication, there are cases where the information for a multimedia service should be directly transferred from the user's device to the cloud server without the captured original images. This paper proposes a new method to generate 3D (three-dimensional) keypoints based on a user's mobile device with a commercial RGB camera in a distributed computing environment such as a cloud server. The images are captured with a moving camera and 2D keypoints are extracted from them. After executing feature extraction between continuous frames, disparities are calculated between frames using the relationships between matched keypoints. The physical distance of the baseline is estimated by using the motion information of the camera, and the actual distance is calculated by using the calculated disparity and the estimated baseline. Finally, 3D keypoints are generated by combining the extracted 2D keypoints with the calculated distances. A keypoint-based scene change method is proposed as well. Due to the existing similarity between continuous frames captured from a camera, not all 3D keypoints are transferred and stored, only the new ones. Compared with the ground truth of the TUM dataset, the average error of the estimated 3D keypoints was measured as 5.98 mm, which shows that the proposed method has relatively good performance considering that it uses a commercial RGB camera on a mobile device. Furthermore, the number of transferred 3D keypoints decreased to about 73.6%.}, } @article {pmid36366095, year = {2022}, author = {Gonzalez-Compean, JL and Sosa-Sosa, VJ and Garcia-Hernandez, JJ and Galeana-Zapien, H and Reyes-Anastacio, HG}, title = {A Blockchain and Fingerprinting Traceability Method for Digital Product Lifecycle Management.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {21}, pages = {}, pmid = {36366095}, issn = {1424-8220}, mesh = {*Blockchain ; Computer Security ; *Internet of Things ; Cloud Computing ; Technology ; }, abstract = {The rise of digitalization, sensory devices, cloud computing and internet of things (IoT) technologies enables the design of novel digital product lifecycle management (DPLM) applications for use cases such as manufacturing and delivery of digital products. The verification of the accomplishment/violations of agreements defined in digital contracts is a key task in digital business transactions.
However, this verification represents a challenge when validating both the integrity of digital product content and the transactions performed during multiple stages of the DPLM. This paper presents a traceability method for DPLM based on the integration of online and offline verification mechanisms based on blockchain and fingerprinting, respectively. A blockchain lifecycle registration model is used for organizations to register the exchange of digital products in the cloud with partners and/or consumers throughout the DPLM stages as well as to verify the accomplishment of agreements at each DPLM stage. The fingerprinting scheme is used for offline verification of digital product integrity and to register the DPLM logs within digital products, which is useful in dispute or agreement-violation scenarios. We built a DPLM service prototype based on this method, which was implemented as a cloud computing service. A case study based on the DPLM of audio files was conducted to evaluate this prototype. The experimental evaluation revealed the ability of this method to be applied to DPLM in real scenarios in an efficient manner.}, } @article {pmid36366082, year = {2022}, author = {Hijji, M and Ahmad, B and Alam, G and Alwakeel, A and Alwakeel, M and Abdulaziz Alharbi, L and Aljarf, A and Khan, MU}, title = {Cloud Servers: Resource Optimization Using Different Energy Saving Techniques.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {21}, pages = {}, pmid = {36366082}, issn = {1424-8220}, mesh = {*Cloud Computing ; Physical Phenomena ; *Workload ; }, abstract = {Currently, researchers are working to contribute to the emerging fields of cloud computing, edge computing, and distributed systems. The major area of interest is to examine and understand their performance. Major global companies, such as Google, Amazon, ONLIVE, Giaki, and eBay, are deeply concerned about the impact of energy consumption. These cloud computing companies use huge data centers, consisting of virtual computers that are positioned worldwide and require exceptionally high power costs to maintain. The increased energy consumption of IT firms has posed many challenges for cloud computing companies with respect to power expenses. Energy utilization depends on numerous factors, for example, the service level agreement, the technique for choosing the virtual machine, the applied optimization strategies and policies, and the kind of workload. The present paper addresses energy-saving challenges in gaming data centers using dynamic voltage and frequency scaling (DVFS) techniques, and evaluates these techniques against non-power-aware and static threshold detection techniques. The findings will help service providers meet quality-of-service and quality-of-experience requirements while fulfilling service level agreements. For this purpose, the CloudSim platform is used to simulate a scenario in which game traces are employed as the workload for analyzing the procedure. The findings show that well-chosen techniques can help gaming servers conserve energy while sustaining the best quality of service for consumers located worldwide. The originality of this research lies in examining which procedure performs well (for example, dynamic, static, or non-power-aware).
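The energy lever behind the DVFS comparison in the Hijji et al. entry above is the classic dynamic-power relation P ≈ C·V²·f. The toy numbers in the sketch below are assumptions chosen only to show why lowering voltage and frequency together saves energy even though the task runs longer.

    # Idealized dynamic power: P = C * V^2 * f (capacitance, volts, hertz).
    # Values are illustrative, not measurements from any real server.
    def dynamic_power(c: float, v: float, f: float) -> float:
        return c * v**2 * f

    C = 1e-9                               # effective switched capacitance (F)
    high = dynamic_power(C, 1.2, 3.0e9)    # full-speed operating point
    low = dynamic_power(C, 0.9, 1.5e9)     # DVFS-scaled operating point

    # A task needing 3e9 cycles: time stretches, but energy = P * t drops,
    # because power falls with the square of the voltage.
    e_high = high * (3e9 / 3.0e9)          # 4.32 J
    e_low = low * (3e9 / 1.5e9)            # 2.43 J
    print(f"energy high={e_high:.2f} J, low={e_low:.2f} J")

The quadratic voltage term is why DVFS can cut energy substantially while costing only a linear slowdown, which simulators like CloudSim then weigh against service level agreement violations.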
The findings validate that the dynamic voltage and frequency scaling method uses less energy, incurs fewer service level agreement violations, and delivers better quality of service and experience than static threshold consolidation or non-power-aware techniques.}, } @article {pmid36366068, year = {2022}, author = {Baca, A and Dabnichki, P and Hu, CW and Kornfeind, P and Exel, J}, title = {Ubiquitous Computing in Sports and Physical Activity-Recent Trends and Developments.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {21}, pages = {}, pmid = {36366068}, issn = {1424-8220}, mesh = {Humans ; Artificial Intelligence ; *Sports ; *Wearable Electronic Devices ; Exercise ; Athletes ; }, abstract = {The use of small, interconnected and intelligent tools within the broad framework of pervasive computing for analysis and assessments in sport and physical activity is not a trend in itself but defines a way for information to be handled, processed and utilised: everywhere, at any time. The demand for objective data to support decision making prompted the adoption of wearables that evolve to fulfil the aims of assessing athletes and practitioners as closely as possible with their performance environments. In the present paper, we mention and discuss the advancements in ubiquitous computing in sports and physical activity in the past 5 years. Thus, recent developments in wearable sensors, cloud computing and artificial intelligence tools have been the pillars for a major change in the ways sport-related analyses are performed. The focus of our analysis is wearable technology, computer vision solutions for markerless tracking and their major contribution to the process of acquiring more representative data from uninhibited actions in realistic ecological conditions. We selected relevant literature on the applications of such approaches in various areas of sports and physical activity while outlining some limitations of the present-day data acquisition and data processing practices and the resulting sensors' functionalities, as well as the limitations to the data-driven informed decision making in the current technological and scientific framework. Finally, we hypothesise that a continuous merger of measurement, processing and analysis will lead to the development of more reliable models utilising the advantages of open computing and unrestricted data access and allow for the development of personalised-medicine-type approaches to sport training and performance.}, } @article {pmid36366060, year = {2022}, author = {Niebla-Montero, Á and Froiz-Míguez, I and Fraga-Lamas, P and Fernández-Caramés, TM}, title = {Practical Latency Analysis of a Bluetooth 5 Decentralized IoT Opportunistic Edge Computing System for Low-Cost SBCs.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {21}, pages = {}, pmid = {36366060}, issn = {1424-8220}, abstract = {IoT devices can be deployed almost anywhere, but they usually need to be connected to other IoT devices, either through the Internet or local area networks. For such communications, many IoT devices make use of wireless communications, whose coverage is key: if no coverage is available, an IoT device becomes isolated. This can happen both indoors (e.g., large buildings, industrial warehouses) and outdoors (e.g., rural areas, cities).
To tackle such an issue, opportunistic networks can be useful, since they use gateways to provide services to IoT devices when they are in range (i.e., IoT devices take the opportunity of having a nearby gateway to exchange data or to use a computing service). Moreover, opportunistic networks can provide Edge Computing capabilities, thus creating Opportunistic Edge Computing (OEC) systems, which deploy smart gateways able to perform certain tasks faster than a remote Cloud. This article presents a novel decentralized OEC system based on Bluetooth 5 IoT nodes whose latency is evaluated to determine the feasibility of using it in practical applications. The obtained results indicate that, for the selected scenario, the average end-to-end latency is relatively low (736 ms), but it is impacted by factors such as the location of the bootstrap node, the smart gateway hardware or the use of high-security mechanisms.}, } @article {pmid36366028, year = {2022}, author = {Lo, SC and Tsai, HH}, title = {Design of 3D Virtual Reality in the Metaverse for Environmental Conservation Education Based on Cognitive Theory.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {21}, pages = {}, pmid = {36366028}, issn = {1424-8220}, mesh = {Humans ; *Virtual Reality ; *Computer-Assisted Instruction/methods ; Learning ; Cognition ; }, abstract = {BACKGROUND: Climate change causes devastating impacts with extreme weather conditions, such as flooding, polar ice caps melting, sea level rise, and droughts. Environmental conservation education is an important and ongoing project nowadays for all governments in the world. In this paper, a novel 3D virtual reality architecture in the metaverse (VRAM) is proposed to foster water resources education using modern information technology.

METHODS: A quasi-experimental study was performed to observe a comparison between learning involving VRAM and learning without VRAM. The 3D VRAM multimedia content comes from a picture book for learning environmental conservation concepts, based on the cognitive theory of multimedia learning to enhance human cognition. Learners wear VRAM helmets to run VRAM Android apps by entering the immersive environment for playing and/or interacting with 3D VRAM multimedia content in the metaverse. They shake their head to move the interaction sign to initiate interactive actions, such as replaying, going to consecutive video clips, displaying text annotations, and replying to questions when learning soil-and-water conservation course materials. Interactive portfolios of triggering actions are transferred to the cloud computing database immediately by the app.

RESULTS: Experimental results showed that participants who received instruction involving VRAM had significant improvement in their flow experience, learning motivation, learning interaction, self-efficacy, and presence in learning environmental conservation concepts.

CONCLUSIONS: The novel VRAM is highly suitable for multimedia educational systems. Moreover, learners' interactive VRAM portfolios can be analyzed with big-data analytics to understand VRAM usage behaviors and, in the future, improve the quality of environmental conservation education.}, } @article {pmid36365971, year = {2022}, author = {Na, D and Park, S}, title = {IoT-Chain and Monitoring-Chain Using Multilevel Blockchain for IoT Security.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {21}, pages = {}, pmid = {36365971}, issn = {1424-8220}, abstract = {In general, the Internet of Things (IoT) relies on centralized servers due to limited computing power and storage capacity. These server-based architectures have vulnerabilities such as susceptibility to DDoS attacks, single points of failure, and data forgery, and cannot guarantee stability and reliability. Blockchain technology can guarantee reliability and stability with a P2P network-based consensus algorithm and distributed ledger technology. However, it requires the high storage capacity of existing blockchains and the computational power of their consensus algorithms. Therefore, blockchain nodes for IoT data management are maintained through an external cloud or an edge node. As a result, the vulnerabilities of the existing centralized structure remain, and reliability cannot be guaranteed in the process of storing IoT data on the blockchain. In this paper, we propose a multi-level blockchain structure and consensus algorithm to address these vulnerabilities. The multi-level blockchain operates on IoT devices, with an IoT-chain layer that stores sensor data to ensure reliability. In addition, there is a Hyperledger Fabric-based monitoring-chain layer that manages access control for the metadata and data of the IoT chain to keep the system lightweight. We propose an export consensus method between the two blockchains, the Schnorr signature method, and a random-based lightweight consensus algorithm within the IoT-Chain. Experiments to measure the blockchain size, propagation time, consensus delay time, and transactions per second (TPS) were conducted using IoT devices. The blockchain did not exceed a certain size, and the delay time was reduced by 96% to 99% on average compared to the existing consensus algorithm. In the throughput tests, the maximum was 1701 TPS and the minimum was 1024 TPS.}, } @article {pmid36365871, year = {2022}, author = {Kaur, A and Singh, G and Kukreja, V and Sharma, S and Singh, S and Yoon, B}, title = {Adaptation of IoT with Blockchain in Food Supply Chain Management: An Analysis-Based Review in Development, Benefits and Potential Applications.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {21}, pages = {}, pmid = {36365871}, issn = {1424-8220}, mesh = {*Blockchain ; *Internet of Things ; Food Supply ; Monitoring, Physiologic ; Technology ; }, abstract = {In today's scenario, blockchain is an emerging and promising technology in the food supply chain industry (FSCI). A literature survey comprising an analytical review of blockchain technology with the Internet of things (IoT) for food supply chain management (FSCM) is presented to better understand the associated research benefits, issues, and challenges. At present, with the concept of farm-to-fork gaining increasing popularity, food safety and quality certification are of critical concern.
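Since the Na and Park entry above relies on Schnorr signatures for its export consensus between the two chains, a textbook Schnorr signature is sketched below in Python; the group parameters are deliberately tiny and insecure, for exposition only, and this is not the paper's implementation.

    # Textbook Schnorr signature over a toy prime-order subgroup (insecure
    # parameters, for illustration only): p = 2q + 1, g generates order q.
    import hashlib, secrets

    p, q, g = 2039, 1019, 4   # toy group; real systems use ~256-bit q

    def H(*parts) -> int:
        data = b"|".join(str(x).encode() for x in parts)
        return int.from_bytes(hashlib.sha256(data).digest(), "big") % q

    x = secrets.randbelow(q - 1) + 1   # private key
    y = pow(g, x, p)                   # public key

    def sign(msg: str):
        k = secrets.randbelow(q - 1) + 1
        r = pow(g, k, p)               # commitment
        e = H(r, msg)                  # challenge
        return e, (k + x * e) % q      # (challenge, response)

    def verify(msg: str, e: int, s: int) -> bool:
        r = (pow(g, s, p) * pow(y, q - e, p)) % p   # g^s * y^(-e) == g^k
        return H(r, msg) == e

    e, s = sign("export block #42")
    print(verify("export block #42", e, s))   # True

Schnorr's appeal for a cross-chain export step is that verification needs only two modular exponentiations and the signatures support aggregation-friendly constructions.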
Blockchain technology provides the traceability of food supply from the source, i.e., the seeding factories, to the customer's table. The main idea of this paper is to combine blockchain technology with Internet of things (IoT) devices to investigate food conditions and the various issues faced by transporters while supplying fresh food. Blockchain provides applications such as smart contracts to monitor, observe, and manage all transactions and communications among stakeholders. IoT technology provides approaches for verifying all transactions; these transactions are recorded and then stored in a centralized database system. Thus, IoT enables a safe and cost-effective FSCM system for stakeholders. In this paper, we contribute to the awareness of blockchain applications that are relevant to the food supply chain (FSC), and we present an analysis of the literature on relevant blockchain applications conducted with respect to various parameters. The observations in the present survey are also relevant to the application of blockchain technology with IoT in other areas.}, } @article {pmid36365848, year = {2022}, author = {Shamshad, S and Riaz, F and Riaz, R and Rizvi, SS and Abdulla, S}, title = {An Enhanced Architecture to Resolve Public-Key Cryptographic Issues in the Internet of Things (IoT), Employing Quantum Computing Supremacy.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {21}, pages = {}, pmid = {36365848}, issn = {1424-8220}, abstract = {The Internet of Things (IoT) strongly influences the world economy; this emphasizes the importance of securing all four aspects of the IoT model: sensors, networks, cloud, and applications. Considering the significant threat that attacks on public-key cryptography pose to IoT system confidentiality, securing it is vital. One of the potential candidates to assist in securing public key cryptography in IoT is quantum computing. Although the notion of IoT and quantum computing convergence is not new, it has been referenced in various works of literature and covered by many scholars. Quantum computing eliminates most of the challenges in IoT. This research provides a comprehensive introduction to the Internet of Things and quantum computing before moving on to public-key cryptography difficulties that may be encountered across the convergence of quantum computing and IoT. An enhanced architecture is then proposed for resolving these public-key cryptography challenges using SimuloQron to implement the BB84 protocol for quantum key distribution (QKD) and one-time pad (OTP). The proposed model prevents eavesdroppers from performing destructive operations in the communication channel and cyber side by preserving its state and protecting the public key using quantum cryptography and the BB84 protocol. A modified version is introduced for this IoT situation. A traditional cryptographic mechanism called "one-time pad" (OTP) is employed in hybrid management.}, } @article {pmid36357557, year = {2022}, author = {Tuli, S and Casale, G and Jennings, NR}, title = {SimTune: bridging the simulator reality gap for resource management in edge-cloud computing.}, journal = {Scientific reports}, volume = {12}, number = {1}, pages = {19158}, pmid = {36357557}, issn = {2045-2322}, mesh = {Humans ; *Cloud Computing ; Computer Simulation ; }, abstract = {Industries and services are undergoing an Internet of Things centric transformation globally, giving rise to an explosion of multi-modal data generated each second.
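The one-time pad component in the Shamshad et al. entry above is simple to state exactly: with a truly random key as long as the message and never reused, encryption is a byte-wise XOR. In the sketch below the key comes from the operating system, whereas in the paper's scheme it would be derived via BB84 quantum key distribution.

    # One-time pad: ciphertext = message XOR key, with key length equal to
    # message length and the key used exactly once. The key here is OS
    # randomness; in the described architecture it would come from BB84 QKD.
    import secrets

    def xor_bytes(data: bytes, key: bytes) -> bytes:
        return bytes(d ^ k for d, k in zip(data, key))

    message = b"sensor reading: 23.7C"
    key = secrets.token_bytes(len(message))   # must never be reused

    ciphertext = xor_bytes(message, key)
    recovered = xor_bytes(ciphertext, key)    # XOR with same key decrypts
    assert recovered == message
    print(ciphertext.hex())

The pad's information-theoretic security is exactly why pairing it with QKD is attractive: the quantum channel solves the otherwise impractical key-distribution problem.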
This, with the requirement of low-latency result delivery, has led to the ubiquitous adoption of edge and cloud computing paradigms. Edge computing follows the data gravity principle, wherein the computational devices move closer to the end-users to minimize data transfer and communication times. However, large-scale computation has exacerbated the problem of efficient resource management in hybrid edge-cloud platforms. In this regard, data-driven models such as deep neural networks (DNNs) have gained popularity to give rise to the notion of edge intelligence. However, DNNs face significant problems of data saturation when fed volatile data. Data saturation is when providing more data does not translate to improvements in performance. To address this issue, prior work has leveraged coupled simulators that, akin to digital twins, generate out-of-distribution training data alleviating the data-saturation problem. However, simulators face the reality-gap problem, which is the inaccuracy in the emulation of real computational infrastructure due to the abstractions in such simulators. To combat this, we develop a framework, SimTune, that tackles this challenge by leveraging a low-fidelity surrogate model of the high-fidelity simulator to update the parameters of the latter, so as to increase the simulation accuracy. This further helps co-simulated methods to generalize to edge-cloud configurations for which human-encoded parameters are not known a priori. Experiments comparing SimTune against state-of-the-art data-driven resource management solutions on a real edge-cloud platform demonstrate that simulator tuning can improve quality of service metrics such as energy consumption and response time by up to 14.7% and 7.6%, respectively.}, } @article {pmid36351936, year = {2022}, author = {Benhammou, Y and Alcaraz-Segura, D and Guirado, E and Khaldi, R and Achchab, B and Herrera, F and Tabik, S}, title = {Sentinel2GlobalLULC: A Sentinel-2 RGB image tile dataset for global land use/cover mapping with deep learning.}, journal = {Scientific data}, volume = {9}, number = {1}, pages = {681}, pmid = {36351936}, issn = {2052-4463}, abstract = {Land-Use and Land-Cover (LULC) mapping is relevant for many applications, from Earth system and climate modelling to territorial and urban planning. Global LULC products are continuously developing as remote sensing data and methods grow. However, there still exists low consistency among LULC products due to low accuracy in some regions and LULC types. Here, we introduce Sentinel2GlobalLULC, a Sentinel-2 RGB image dataset, built from the spatial-temporal consensus of up to 15 global LULC maps available in Google Earth Engine. Sentinel2GlobalLULC v2.1 contains 194877 single-class RGB image tiles organized into 29 LULC classes. Each image is a 224 × 224 pixels tile at 10 × 10 m resolution built as a cloud-free composite from Sentinel-2 images acquired between June 2015 and October 2020. Metadata includes a unique LULC annotation per image, together with level of consensus, reverse geo-referencing, global human modification index, and number of dates used in the composite.
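The surrogate-driven tuning loop in the Tuli et al. entry above can be pictured as fitting a cheap model to the discrepancy between simulator output and real-platform traces, then keeping the simulator parameters predicted to minimize that discrepancy. The Python fragment below is a schematic of that idea with invented stand-ins (run_simulator, REAL_TRACE); it is not the SimTune implementation.

    # Schematic surrogate-based simulator tuning: the surrogate learns a
    # parameter -> discrepancy mapping, and we keep the best predicted setting.
    # run_simulator() and REAL_TRACE stand in for a co-simulator and for
    # measurements from a real edge-cloud platform (both invented here).
    import numpy as np
    from sklearn.ensemble import RandomForestRegressor

    rng = np.random.default_rng(4)
    REAL_TRACE = 0.7  # e.g., observed mean response time (normalized)

    def run_simulator(params: np.ndarray) -> float:
        # High-fidelity simulator stub: some unknown response surface.
        return float(np.sin(params).sum() ** 2 % 1.0)

    # 1. Probe the (expensive) simulator at a few parameter settings.
    probes = rng.uniform(0, 1, (30, 3))
    errors = np.array([abs(run_simulator(p) - REAL_TRACE) for p in probes])

    # 2. Fit a low-fidelity surrogate to the discrepancy landscape.
    surrogate = RandomForestRegressor(n_estimators=50).fit(probes, errors)

    # 3. Keep the candidate parameters the surrogate scores lowest.
    candidates = rng.uniform(0, 1, (2000, 3))
    best = candidates[surrogate.predict(candidates).argmin()]
    print("tuned parameters:", best,
          "true error:", abs(run_simulator(best) - REAL_TRACE))

The point of the surrogate is economy: thousands of candidate configurations are scored by the cheap model, while the expensive simulator is run only a handful of times.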
Sentinel2GlobalLULC is designed for training deep learning models aiming to build precise and robust global or regional LULC maps.}, } @article {pmid36350854, year = {2022}, author = {Zhang, X and Han, L and Sobeih, T and Han, L and Dempsey, N and Lechareas, S and Tridente, A and Chen, H and White, S and Zhang, D}, title = {CXR-Net: A Multitask Deep Learning Network for Explainable and Accurate Diagnosis of COVID-19 Pneumonia from Chest X-ray Images.}, journal = {IEEE journal of biomedical and health informatics}, volume = {PP}, number = {}, pages = {}, doi = {10.1109/JBHI.2022.3220813}, pmid = {36350854}, issn = {2168-2208}, abstract = {Accurate and rapid detection of COVID-19 pneumonia is crucial for optimal patient treatment. Chest X-Ray (CXR) is the first-line imaging technique for COVID-19 pneumonia diagnosis as it is fast, cheap and easily accessible. Currently, many deep learning (DL) models have been proposed to detect COVID-19 pneumonia from CXR images. Unfortunately, these deep classifiers lack the transparency in interpreting findings, which may limit their applications in clinical practice. The existing explanation methods produce either too noisy or imprecise results, and hence are unsuitable for diagnostic purposes. In this work, we propose a novel explainable CXR deep neural Network (CXR-Net) for accurate COVID-19 pneumonia detection with an enhanced pixel-level visual explanation using CXR images. An Encoder-Decoder-Encoder architecture is proposed, in which an extra encoder is added after the encoder-decoder structure to ensure the model can be trained on category samples. The method has been evaluated on real world CXR datasets from both public and private sources, including healthy, bacterial pneumonia, viral pneumonia and COVID-19 pneumonia cases. The results demonstrate that the proposed method can achieve a satisfactory accuracy and provide fine-resolution activation maps for visual explanation in the lung disease detection. The Average Accuracy, Sensitivity, Specificity, PPV and F1-score of models in the COVID-19 pneumonia detection reach 0.992, 0.998, 0.985 and 0.989, respectively. Compared to current state-of-the-art visual explanation methods, the proposed method can provide more detailed, high-resolution, visual explanation for the classification results. It can be deployed in various computing environments, including cloud, CPU and GPU environments. It has a great potential to be used in clinical practice for COVID-19 pneumonia diagnosis.}, } @article {pmid36335750, year = {2022}, author = {Tomassini, S and Sbrollini, A and Covella, G and Sernani, P and Falcionelli, N and Müller, H and Morettini, M and Burattini, L and Dragoni, AF}, title = {Brain-on-Cloud for automatic diagnosis of Alzheimer's disease from 3D structural magnetic resonance whole-brain scans.}, journal = {Computer methods and programs in biomedicine}, volume = {227}, number = {}, pages = {107191}, doi = {10.1016/j.cmpb.2022.107191}, pmid = {36335750}, issn = {1872-7565}, mesh = {Humans ; *Alzheimer Disease/diagnostic imaging/pathology ; Quality of Life ; Neuroimaging/methods ; Magnetic Resonance Imaging/methods ; Brain/diagnostic imaging/pathology ; Magnetic Resonance Spectroscopy ; *Cognitive Dysfunction ; }, abstract = {BACKGROUND AND OBJECTIVE: Alzheimer's disease accounts for approximately 70% of all dementia cases. Cortical and hippocampal atrophy caused by Alzheimer's disease can be appreciated easily from a T1-weighted structural magnetic resonance scan. 
Since a timely therapeutic intervention during the initial stages of the syndrome has a positive impact on both disease progression and quality of life of affected subjects, Alzheimer's disease diagnosis is crucial. Thus, this study presents Brain-on-Cloud, a robust yet lightweight 3D framework dedicated to efficient learning of Alzheimer's disease-related features from 3D structural magnetic resonance whole-brain scans. It improves our recent convolutional long short-term memory-based framework by integrating a set of data handling techniques, tuning the model hyper-parameters, and evaluating the diagnostic performance on independent test data.

METHODS: For this objective, four serial experiments were conducted on a scalable GPU cloud service. They were compared, and the hyper-parameters of the best experiment were tuned until reaching the best-performing configuration. In parallel, two branches were designed. In the first branch of Brain-on-Cloud, training, validation and testing were performed on OASIS-3. In the second branch, unenhanced data from ADNI-2 were employed as an independent test set, and the diagnostic performance of Brain-on-Cloud was evaluated to prove its robustness and generalization capability. The prediction scores were computed for each subject and stratified according to age, sex and Mini-Mental State Examination score.
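To make the convolutional long short-term memory idea in the METHODS above concrete, here is a minimal Keras sketch that treats a structural MRI volume as a sequence of 2D slices. All shapes and hyper-parameters are illustrative assumptions, not the Brain-on-Cloud configuration.

```python
import tensorflow as tf

# Minimal sketch of a convolutional LSTM classifier over an MRI volume viewed
# as a sequence of 2D slices. Shapes and layer sizes are illustrative.
model = tf.keras.Sequential([
    tf.keras.layers.ConvLSTM2D(16, kernel_size=3, activation="tanh",
                               input_shape=(None, 96, 96, 1)),  # (slices, H, W, channels)
    tf.keras.layers.GlobalAveragePooling2D(),
    tf.keras.layers.Dropout(0.3),
    tf.keras.layers.Dense(1, activation="sigmoid"),  # AD vs. cognitively normal
])
model.compile(optimizer="adam", loss="binary_crossentropy",
              metrics=["accuracy", tf.keras.metrics.AUC()])
model.summary()
```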

RESULTS: In its best guise, Brain-on-Cloud is able to discriminate Alzheimer's disease with an accuracy of 92% and 76%, sensitivity of 94% and 82%, and area under the curve of 96% and 92% on OASIS-3 and independent ADNI-2 test data, respectively.

CONCLUSIONS: Brain-on-Cloud proves to be a reliable, lightweight and easily-reproducible framework for automatic diagnosis of Alzheimer's disease from 3D structural magnetic resonance whole-brain scans, performing well without segmenting the brain into its portions. Because it preserves the brain anatomy, its application and diagnostic ability can be extended to other cognitive disorders. Due to its cloud nature, computational lightness and fast execution, it can also be applied in real-time diagnostic scenarios, providing prompt clinical decision support.}, } @article {pmid36321417, year = {2022}, author = {Golkar, A and Malekhosseini, R and RahimiZadeh, K and Yazdani, A and Beheshti, A}, title = {A priority queue-based telemonitoring system for automatic diagnosis of heart diseases in integrated fog computing environments.}, journal = {Health informatics journal}, volume = {28}, number = {4}, pages = {14604582221137453}, doi = {10.1177/14604582221137453}, pmid = {36321417}, issn = {1741-2811}, mesh = {Humans ; *Cloud Computing ; Delivery of Health Care ; *Heart Diseases ; }, abstract = {Various studies have shown the benefits of using distributed fog computing for healthcare systems. The new pattern of fog and edge computing reduces latency for data processing compared to cloud computing. Nevertheless, the proposed fog models still have many limitations in improving system performance and patients' response time. This paper proposes a new performance model by integrating fog computing, priority queues and certainty theory into edge computing devices, and validates it by analyzing heart disease patients' conditions in clinical decision support systems (CDSS). In this model, a Certainty Factor (CF) value is assigned to each symptom of heart disease. When one or more symptoms show an abnormal value, the patient's condition will be evaluated using CF values in the fog layer. In the fog layer, requests are categorized into different priority queues before entering the system. The results demonstrate that network usage, latency, and response time of patients' requests are respectively improved by 25.55%, 42.92%, and 34.28% compared to the cloud model. Prioritizing patient requests with respect to CF values in the CDSS improves system Quality of Service (QoS) and patients' response times.}, } @article {pmid36318260, year = {2022}, author = {Ament, SA and Adkins, RS and Carter, R and Chrysostomou, E and Colantuoni, C and Crabtree, J and Creasy, HH and Degatano, K and Felix, V and Gandt, P and Garden, GA and Giglio, M and Herb, BR and Khajouei, F and Kiernan, E and McCracken, C and McDaniel, K and Nadendla, S and Nickel, L and Olley, D and Orvis, J and Receveur, JP and Schor, M and Sonthalia, S and Tickle, TL and Way, J and Hertzano, R and Mahurkar, AA and White, OR}, title = {The Neuroscience Multi-Omic Archive: a BRAIN Initiative resource for single-cell transcriptomic and epigenomic data from the mammalian brain.}, journal = {Nucleic acids research}, volume = {}, number = {}, pages = {}, doi = {10.1093/nar/gkac962}, pmid = {36318260}, issn = {1362-4962}, support = {R24 MH114788/NH/NIH HHS/United States ; }, abstract = {Scalable technologies to sequence the transcriptomes and epigenomes of single cells are transforming our understanding of cell types and cell states. The Brain Research through Advancing Innovative Neurotechnologies (BRAIN) Initiative Cell Census Network (BICCN) is applying these technologies at unprecedented scale to map the cell types in the mammalian brain.
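A hedged sketch of the fog-layer triage idea in the priority-queue telemonitoring entry above (pmid36321417): combine per-symptom Certainty Factor (CF) values and serve requests with the highest combined CF first. The symptom names and CF values are illustrative, not the paper's table.

```python
import heapq
import itertools

# Illustrative CF table; real deployments would use clinically derived values.
CF_TABLE = {"chest_pain": 0.8, "dyspnea": 0.5, "palpitations": 0.3}

def combined_cf(symptoms):
    # Classic CF combination rule for positive evidence: cf <- cf + cf_i * (1 - cf).
    cf = 0.0
    for s in symptoms:
        cf += CF_TABLE.get(s, 0.0) * (1.0 - cf)
    return cf

queue, tie = [], itertools.count()  # counter keeps equal-priority requests FIFO

def enqueue(patient_id, symptoms):
    heapq.heappush(queue, (-combined_cf(symptoms), next(tie), patient_id))

enqueue("p1", ["palpitations"])
enqueue("p2", ["chest_pain", "dyspnea"])
print(heapq.heappop(queue)[2])  # -> 'p2', the more critical request is served first
```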
In an effort to increase data FAIRness (Findable, Accessible, Interoperable, Reusable), the NIH has established repositories to make data generated by the BICCN and related BRAIN Initiative projects accessible to the broader research community. Here, we describe the Neuroscience Multi-Omic Archive (NeMO Archive; nemoarchive.org), which serves as the primary repository for genomics data from the BRAIN Initiative. Working closely with other BRAIN Initiative researchers, we have organized these data into a continually expanding, curated repository, which contains transcriptomic and epigenomic data from over 50 million brain cells, including single-cell genomic data from all of the major regions of the adult and prenatal human and mouse brains, as well as substantial single-cell genomic data from non-human primates. We make available several tools for accessing these data, including a searchable web portal, a cloud-computing interface for large-scale data processing (implemented on Terra, terra.bio), and a visualization and analysis platform, NeMO Analytics (nemoanalytics.org).}, } @article {pmid36316488, year = {2022}, author = {Prakash, AJ and Kumar, S and Behera, MD and Das, P and Kumar, A and Srivastava, PK}, title = {Impact of extreme weather events on cropland inundation over Indian subcontinent.}, journal = {Environmental monitoring and assessment}, volume = {195}, number = {1}, pages = {50}, pmid = {36316488}, issn = {1573-2959}, mesh = {*Extreme Weather ; Environmental Monitoring/methods ; Floods ; Crops, Agricultural ; Water ; Weather ; }, abstract = {Cyclonic storms and extreme precipitation lead to loss of lives and significant damage to land and property, crop productivity, etc. The "Gulab" cyclonic storm formed on the 24[th] of September 2021 in the Bay of Bengal (BoB), hit the eastern Indian coasts on the 26[th] of September and caused massive damage and water inundation. This study used Integrated Multi-satellite Retrievals for GPM (IMERG) satellite precipitation data for daily to monthly scale assessments focusing on the "Gulab" cyclonic event. The Otsu's thresholding approach was applied to Sentinel-1 data to map water inundation. Standardized Precipitation Index (SPI) was employed to analyze the precipitation deviation compared to the 20 years mean climatology across India from June to November 2021 on a monthly scale. The water-inundated areas were overlaid on a recent publicly available high-resolution land use land cover (LULC) map to demarcate crop area damage in four eastern Indian states such as Andhra Pradesh, Chhattisgarh, Odisha, and Telangana. The maximum water inundation and crop area damages were observed in Andhra Pradesh (~2700 km[2]), followed by Telangana (~2040 km[2]) and Odisha (~1132 km[2]), and the least in Chhattisgarh (~93.75 km[2]). This study has potential implications for an emergency response to extreme weather events, such as cyclones, extreme precipitation, and flood. The spatio-temporal data layers and rapid assessment methodology can be helpful to various users such as disaster management authorities, mitigation and response teams, and crop insurance scheme development. 
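The inundation mapping step in the cyclone entry above (pmid36316488) applies Otsu's thresholding to Sentinel-1 data; below is a minimal sketch of that idea on a synthetic backscatter array, with two-mode random data standing in for a real scene read from a GeoTIFF. Water appears dark in SAR imagery, so below-threshold pixels are classified as water.

```python
import numpy as np
from skimage.filters import threshold_otsu

# Synthetic SAR-like backscatter (e.g. Sentinel-1 VV in dB); the two modes
# stand in for water and land pixel populations.
rng = np.random.default_rng(0)
backscatter = np.concatenate([rng.normal(-20.0, 1.5, 5_000),    # water-like pixels
                              rng.normal(-10.0, 2.0, 15_000)])  # land-like pixels
t = threshold_otsu(backscatter)
water_mask = backscatter < t  # below-threshold pixels classified as water
print(f"threshold = {t:.1f} dB, water fraction = {water_mask.mean():.2%}")
```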
The relevant satellite data, products, and cloud-computing facility could operationalize systematic disaster monitoring under the rising threats of extreme weather events in the coming years.}, } @article {pmid36316226, year = {2022}, author = {Khosla, A and Sonu, and Awan, HTA and Singh, K and Gaurav, and Walvekar, R and Zhao, Z and Kaushik, A and Khalid, M and Chaudhary, V}, title = {Emergence of MXene and MXene-Polymer Hybrid Membranes as Future- Environmental Remediation Strategies.}, journal = {Advanced science (Weinheim, Baden-Wurttemberg, Germany)}, volume = {}, number = {}, pages = {e2203527}, doi = {10.1002/advs.202203527}, pmid = {36316226}, issn = {2198-3844}, abstract = {The continuous deterioration of the environment due to extensive industrialization and urbanization has raised the requirement to devise high-performance environmental remediation technologies. Membrane technologies, primarily based on conventional polymers, are the most commercialized air, water, solid, and radiation-based environmental remediation strategies. Low stability at high temperatures, swelling in organic contaminants, and poor selectivity are the fundamental issues associated with polymeric membranes restricting their scalable viability. Polymer-metal-carbides and nitrides (MXenes) hybrid membranes possess remarkable physicochemical attributes, including strong mechanical endurance, high mechanical flexibility, superior adsorptive behavior, and selective permeability, due to multi-interactions between polymers and MXene's surface functionalities. This review articulates the state-of-the-art MXene-polymer hybrid membranes, emphasizing its fabrication routes, enhanced physicochemical properties, and improved adsorptive behavior. It comprehensively summarizes the utilization of MXene-polymer hybrid membranes for environmental remediation applications, including water purification, desalination, ion-separation, gas separation and detection, containment adsorption, and electromagnetic and nuclear radiation shielding. Furthermore, the review highlights the associated bottlenecks of MXene-Polymer hybrid-membranes and its possible alternate solutions to meet industrial requirements. Discussed are opportunities and prospects related to MXene-polymer membrane to devise intelligent and next-generation environmental remediation strategies with the integration of modern age technologies of internet-of-things, artificial intelligence, machine-learning, 5G-communication and cloud-computing are elucidated.}, } @article {pmid36304269, year = {2022}, author = {Raveendran, K and Freese, NH and Kintali, C and Tiwari, S and Bole, P and Dias, C and Loraine, AE}, title = {BioViz Connect: Web Application Linking CyVerse Cloud Resources to Genomic Visualization in the Integrated Genome Browser.}, journal = {Frontiers in bioinformatics}, volume = {2}, number = {}, pages = {764619}, pmid = {36304269}, issn = {2673-7647}, abstract = {Genomics researchers do better work when they can interactively explore and visualize data. Due to the vast size of experimental datasets, researchers are increasingly using powerful, cloud-based systems to process and analyze data. These remote systems, called science gateways, offer user-friendly, Web-based access to high performance computing and storage resources, but typically lack interactive visualization capability. 
In this paper, we present BioViz Connect, a middleware Web application that links CyVerse science gateway resources to the Integrated Genome Browser (IGB), a highly interactive native application implemented in Java that runs on the user's personal computer. Using BioViz Connect, users can 1) stream data from the CyVerse data store into IGB for visualization, 2) improve the IGB user experience for themselves and others by adding IGB-specific metadata to CyVerse data files, including genome version and track appearance, and 3) run compute-intensive visual analytics functions on CyVerse infrastructure to create new datasets for visualization in IGB or other applications. To demonstrate how BioViz Connect facilitates interactive data visualization, we describe an example RNA-Seq data analysis investigating how heat and desiccation stresses affect gene expression in the model plant Arabidopsis thaliana. The RNA-Seq use case illustrates how interactive visualization with IGB can help a user identify problematic experimental samples, sanity-check results using a positive control, and create new data files for interactive visualization in IGB (or other tools) using a Docker image deployed to CyVerse via the Terrain API. Lastly, we discuss limitations of the technologies used and suggest opportunities for future work. BioViz Connect is available from https://bioviz.org.}, } @article {pmid36303792, year = {2021}, author = {Guérinot, C and Marcon, V and Godard, C and Blanc, T and Verdier, H and Planchon, G and Raimondi, F and Boddaert, N and Alonso, M and Sailor, K and Lledo, PM and Hajj, B and El Beheiry, M and Masson, JB}, title = {New Approach to Accelerated Image Annotation by Leveraging Virtual Reality and Cloud Computing.}, journal = {Frontiers in bioinformatics}, volume = {1}, number = {}, pages = {777101}, pmid = {36303792}, issn = {2673-7647}, abstract = {Three-dimensional imaging is at the core of medical imaging and is becoming a standard in biological research. As a result, there is an increasing need to visualize, analyze and interact with data in a natural three-dimensional context. By combining stereoscopy and motion tracking, commercial virtual reality (VR) headsets provide a solution to this critical visualization challenge by allowing users to view volumetric image stacks in a highly intuitive fashion. While optimizing the visualization and interaction process in VR remains an active topic, one of the most pressing issues is how to utilize VR for annotation and analysis of data. Annotating data is often a required step for training machine learning algorithms; for example, in biological research, the ability to annotate complex three-dimensional data is critical, as newly acquired data may come in limited quantities. Similarly, medical data annotation is often time-consuming and requires expert knowledge to identify structures of interest correctly. Moreover, simultaneous data analysis and visualization in VR is computationally demanding. Here, we introduce a new procedure to visualize, interact, annotate and analyze data by combining VR with cloud computing. VR is leveraged to provide natural interactions with volumetric representations of experimental imaging data. In parallel, cloud computing performs costly computations to accelerate the data annotation with minimal input required from the user.
We demonstrate multiple proof-of-concept applications of our approach on volumetric fluorescent microscopy images of mouse neurons and tumor or organ annotations in medical images.}, } @article {pmid36301785, year = {2022}, author = {Reani, Y and Bobrowski, O}, title = {Cycle Registration in Persistent Homology with Applications in Topological Bootstrap.}, journal = {IEEE transactions on pattern analysis and machine intelligence}, volume = {PP}, number = {}, pages = {}, doi = {10.1109/TPAMI.2022.3217443}, pmid = {36301785}, issn = {1939-3539}, abstract = {We propose a novel approach for comparing the persistent homology representations of two spaces (or filtrations). Commonly used methods are based on numerical summaries such as persistence diagrams and persistence landscapes, along with suitable metrics (e.g. Wasserstein). These summaries are useful for computational purposes, but they are merely a marginal of the actual topological information that persistent homology can provide. Instead, our approach compares two topological representations directly in the data space. We do so by defining a correspondence relation between individual persistent cycles of two different spaces, and devising a method for computing this correspondence. Our matching of cycles is based on both the persistence intervals and the spatial placement of each feature. We demonstrate our new framework in the context of topological inference, where we use statistical bootstrap methods in order to differentiate between real features and noise in point cloud data.}, } @article {pmid36299750, year = {2022}, author = {Li, X and You, K}, title = {Real-time tracking and detection of patient conditions in the intelligent m-Health monitoring system.}, journal = {Frontiers in public health}, volume = {10}, number = {}, pages = {922718}, pmid = {36299750}, issn = {2296-2565}, mesh = {Humans ; *Telemedicine ; }, abstract = {In order to help patients monitor their personal health in real time, this paper proposes an intelligent mobile health monitoring system and establishes a corresponding health network to track and process patients' physical activity and other health-related factors in real time. The system's performance was then analyzed. The experimental results show that, after comparing the accuracy, delay time, error range, efficiency, and energy utilization of Im-HMS and existing UCD systems, the accuracy of Im-HMS is mostly between 98 and 100%, while the accuracy of UCD systems is mostly between 91 and 97%; in terms of delay comparison, the delay of the Im-HMS system is between 18 and 39 ms, which is far lower than the lowest value of the UCD system of 84 ms, and the Im-HMS is significantly better than the existing UCD system; the error range of Im-HMS is mainly between 0.2 and 1.4, while the error range of the UCD system is mainly between -2 and 14; and in terms of efficiency and energy utilization, Im-HMS values are higher than those of the UCD system.
In general, the Im-HMS system proposed in this study is more accurate than the UCD system, with lower delay, smaller error, higher efficiency, and more efficient energy utilization, which is of great significance for mobile health monitoring in practical applications.}, } @article {pmid36299577, year = {2022}, author = {Yu, L and Yu, PS and Duan, Y and Qiao, H}, title = {A resource scheduling method for reliable and trusted distributed composite services in cloud environment based on deep reinforcement learning.}, journal = {Frontiers in genetics}, volume = {13}, number = {}, pages = {964784}, pmid = {36299577}, issn = {1664-8021}, abstract = {With the vigorous development of Internet technology, applications are increasingly migrating to the cloud. Cloud, a distributed network environment, has been widely extended to many fields such as digital finance, supply chain management, and biomedicine. In order to meet the needs of the rapid development of the modern biomedical industry, the biological cloud platform is an inevitable choice for the integration and analysis of medical information. It improves the work efficiency of the biological information system and also realizes reliable and credible intelligent processing of biological resources. Cloud services in bioinformatics are mainly for the processing of biological data, such as the analysis and processing of genes, the testing and detection of human tissues and organs, and the storage and transportation of vaccines. Biomedical companies form a data chain on the cloud, and they provide services and transfer data to each other to create composite services. Therefore, our motivation is to improve the process efficiency of biological cloud services. Users' business requirements have become complicated and diversified, which puts forward higher requirements for service scheduling strategies in cloud computing platforms. In addition, deep reinforcement learning shows strong perception and continuous decision-making capabilities in automatic control problems, which provides a new idea and method for solving the service scheduling and resource allocation problems in the cloud computing field. Therefore, this paper designs a composite service scheduling model under a container instance mode that hybridizes reservation and on-demand. The containers in the cluster are divided into two instance modes: reservation and on-demand. A composite service is described as a three-level structure: a composite service consists of multiple services, and a service consists of multiple service instances, where the service instance is the minimum scheduling unit. In addition, an improved Deep Q-Network (DQN) algorithm is proposed and applied to the scheduling algorithm of composite services. The experimental results show that applying our improved DQN algorithm to the composite services scheduling problem in the container cloud environment can effectively reduce the completion time of the composite services.
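For readers unfamiliar with Deep Q-Networks, the sketch below shows the textbook temporal-difference target at the heart of DQN-style schedulers such as the one in the entry above (pmid36299577). It is a generic illustration, not the paper's improved algorithm, and the network and batch layout are placeholders.

```python
import torch
import torch.nn.functional as F

# Textbook DQN loss: regress Q(s, a) toward r + gamma * max_a' Q_target(s', a').
def dqn_loss(q_net, target_net, batch, gamma=0.99):
    states, actions, rewards, next_states, dones = batch
    # Q-values for the actions actually taken
    q_sa = q_net(states).gather(1, actions.unsqueeze(1)).squeeze(1)
    with torch.no_grad():
        # Bootstrap target from the slowly-updated target network,
        # zeroed at episode termination.
        target = rewards + gamma * target_net(next_states).max(dim=1).values * (1 - dones)
    return F.mse_loss(q_sa, target)
```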
Meanwhile, the method improves Quality of Service (QoS) and resource utilization in the container cloud environment.}, } @article {pmid36298902, year = {2022}, author = {Zhang, Y and Wu, Z and Lin, P and Pan, Y and Wu, Y and Zhang, L and Huangfu, J}, title = {Hand gestures recognition in videos taken with a lensless camera.}, journal = {Optics express}, volume = {30}, number = {22}, pages = {39520-39533}, doi = {10.1364/OE.470324}, pmid = {36298902}, issn = {1094-4087}, mesh = {*Gestures ; *Pattern Recognition, Automated/methods ; Algorithms ; Neural Networks, Computer ; }, abstract = {A lensless camera is an imaging system that uses a mask in place of a lens, making it thinner, lighter, and less expensive than a lensed camera. However, additional complex computation and time are required for image reconstruction. This work proposes a deep learning model named Raw3dNet that recognizes hand gestures directly on raw videos captured by a lensless camera without the need for image restoration. In addition to conserving computational resources, the reconstruction-free method provides privacy protection. Raw3dNet is a novel end-to-end deep neural network model for the recognition of hand gestures in lensless imaging systems. It is created specifically for raw video captured by a lensless camera and has the ability to properly extract and combine temporal and spatial features. The network is composed of two stages: 1. spatial feature extractor (SFE), which enhances the spatial features of each frame prior to temporal convolution; 2. 3D-ResNet, which implements spatial and temporal convolution of video streams. The proposed model achieves 98.59% accuracy on the Cambridge Hand Gesture dataset in the lensless optical experiment, which is comparable to the lensed-camera result. Additionally, the feasibility of physical object recognition is assessed. Further, we show that the recognition can be achieved with respectable accuracy using only a tiny portion of the original raw data, indicating the potential for reducing data traffic in cloud computing scenarios.}, } @article {pmid36298422, year = {2022}, author = {Amin, F and Abbasi, R and Mateen, A and Ali Abid, M and Khan, S}, title = {A Step toward Next-Generation Advancements in the Internet of Things Technologies.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {20}, pages = {}, pmid = {36298422}, issn = {1424-8220}, abstract = {The Internet of Things (IoT) devices generate a large amount of data over networks; therefore, the efficiency, complexity, interfaces, dynamics, robustness, and interaction need to be re-examined on a large scale. This phenomenon will lead to seamless network connectivity and the capability to provide support for the IoT. The traditional IoT is not enough to provide support. Therefore, we designed this study to provide a systematic analysis of next-generation advancements in the IoT. We propose a systematic catalog that covers the most recent advances in the traditional IoT. An overview of the IoT from the perspectives of big data, data science, and network science disciplines and also connecting technologies is given. We highlight the conceptual view of the IoT, key concepts, growth, and most recent trends. We discuss and highlight the importance and the integration of big data, data science, and network science along with key applications such as artificial intelligence, machine learning, blockchain, federated learning, etc. 
Finally, we discuss various challenges and issues of the IoT, such as architecture, integration, and data provenance, as well as important applications such as cloud and edge computing. This article will aid readers and other researchers in understanding the IoT's next-generation developments and show how they apply to the real world.}, } @article {pmid36298408, year = {2022}, author = {Farag, MM}, title = {Matched Filter Interpretation of CNN Classifiers with Application to HAR.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {20}, pages = {}, pmid = {36298408}, issn = {1424-8220}, mesh = {Humans ; *Neural Networks, Computer ; *Human Activities ; Machine Learning ; Smartphone ; }, abstract = {Time series classification is an active research topic due to its wide range of applications and the proliferation of sensory data. Convolutional neural networks (CNNs) are ubiquitous in modern machine learning (ML) models. In this work, we present a matched filter (MF) interpretation of CNN classifiers accompanied by an experimental proof of concept using a carefully developed synthetic dataset. We exploit this interpretation to develop an MF CNN model for time series classification comprising a stack of a Conv1D layer followed by a GlobalMaxPooling layer acting as a typical MF for automated feature extraction and a fully connected layer with softmax activation for computing class probabilities. The presented interpretation enables developing superlight, highly accurate classifier models that meet the tight requirements of edge inference. Edge inference is emerging research that addresses the latency, availability, privacy, and connectivity concerns of the commonly deployed cloud inference. The MF-based CNN model has been applied to the sensor-based human activity recognition (HAR) problem due to its significant importance in a broad range of applications. The UCI-HAR, WISDM-AR, and MotionSense datasets are used for model training and testing. The proposed classifier is tested and benchmarked on an Android smartphone with average accuracy and F1 scores of 98% and 97%, respectively, which outperforms state-of-the-art HAR methods in terms of classification accuracy and run-time performance. The proposed model size is less than 150 KB, and the average inference time is less than 1 ms. The presented interpretation helps develop a better understanding of CNN operation and decision mechanisms. The proposed model is distinguished from related work by jointly featuring interpretability, high accuracy, and low computational cost, enabling its ready deployment on a wide set of mobile devices for a broad range of applications.}, } @article {pmid36298402, year = {2022}, author = {Munir, T and Akbar, MS and Ahmed, S and Sarfraz, A and Sarfraz, Z and Sarfraz, M and Felix, M and Cherrez-Ojeda, I}, title = {A Systematic Review of Internet of Things in Clinical Laboratories: Opportunities, Advantages, and Challenges.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {20}, pages = {}, pmid = {36298402}, issn = {1424-8220}, mesh = {*Internet of Things ; Computer Security ; Laboratories, Clinical ; Privacy ; Software ; }, abstract = {The Internet of Things (IoT) is the network of physical objects embedded with sensors, software, electronics, and online connectivity systems. This study explores the role of IoT in clinical laboratory processes; this systematic review was conducted adhering to the PRISMA Statement 2020 guidelines.
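The matched-filter entry above (pmid36298408) names its stack explicitly (Conv1D, GlobalMaxPooling, fully connected softmax), so it can be sketched directly in Keras; the window length, filter count, and class count below are illustrative assumptions for a HAR-style input, not the paper's exact settings.

```python
import tensorflow as tf

# Conv1D filters act as matched filters, GlobalMaxPooling1D keeps each
# filter's peak response, and a softmax head outputs class probabilities.
model = tf.keras.Sequential([
    tf.keras.layers.Conv1D(32, kernel_size=16, activation="relu",
                           input_shape=(128, 3)),  # 128 samples x 3 accelerometer axes
    tf.keras.layers.GlobalMaxPooling1D(),
    tf.keras.layers.Dense(6, activation="softmax"),  # e.g. 6 activity classes
])
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy",
              metrics=["accuracy"])
model.summary()
```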
We included IoT models and applications across preanalytical, analytical, and postanalytical laboratory processes. PubMed, Cochrane Central, CINAHL Plus, Scopus, IEEE, and A.C.M. Digital library were searched from August 2015 to August 2022; the data were tabulated. Cohen's coefficient of agreement was calculated to quantify inter-reviewer agreements; a total of 18 studies were included, with Cohen's coefficient computed to be 0.91. The included studies were divided into three classifications based on availability, including preanalytical, analytical, and postanalytical. The majority (77.8%) of the studies were real-tested. Communication-based approaches were the most common (83.3%), followed by application-based approaches (44.4%) and sensor-based approaches (33.3%) among the included studies. Open issues and challenges across the included studies included scalability, costs and energy consumption, interoperability, privacy and security, and performance issues. In this study, we identified, classified, and evaluated IoT applicability in clinical laboratory systems. This study presents pertinent findings for IoT development across clinical laboratory systems, for which it is essential that more rigorous and efficient testing and studies be conducted in the future.}, } @article {pmid36298235, year = {2022}, author = {Velichko, A and Huyut, MT and Belyaev, M and Izotov, Y and Korzun, D}, title = {Machine Learning Sensors for Diagnosis of COVID-19 Disease Using Routine Blood Values for Internet of Things Application.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {20}, pages = {}, pmid = {36298235}, issn = {1424-8220}, mesh = {Humans ; *Internet of Things ; *COVID-19/diagnosis ; Cholesterol, HDL ; Machine Learning ; Amylases ; Triglycerides ; }, abstract = {Healthcare digitalization requires effective applications of human sensors, when various parameters of the human body are instantly monitored in everyday life due to the Internet of Things (IoT). In particular, machine learning (ML) sensors for the prompt diagnosis of COVID-19 are an important option for IoT application in healthcare and ambient assisted living (AAL). Determining a COVID-19 infected status with various diagnostic tests and imaging results is costly and time-consuming. This study provides a fast, reliable and cost-effective alternative tool for the diagnosis of COVID-19 based on the routine blood values (RBVs) measured at admission. The dataset of the study consists of a total of 5296 patients with the same number of negative and positive COVID-19 test results and 51 routine blood values. In this study, 13 popular classifier machine learning models and the LogNNet neural network model were examined. The most successful classifier model in terms of time and accuracy in the detection of the disease was the histogram-based gradient boosting (HGB) (accuracy: 100%, time: 6.39 sec). The HGB classifier identified the 11 most important features (LDL, cholesterol, HDL-C, MCHC, triglyceride, amylase, UA, LDH, CK-MB, ALP and MCH) to detect the disease with 100% accuracy. In addition, the importance of single, double and triple combinations of these features in the diagnosis of the disease was discussed.
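The winning model in the routine-blood-values entry above (pmid36298235) is histogram-based gradient boosting, which is available in scikit-learn; the sketch below wires it up on stand-in data, with a random matrix in place of the 11 named features.

```python
import numpy as np
from sklearn.ensemble import HistGradientBoostingClassifier
from sklearn.model_selection import cross_val_score

# Random stand-in for the 11 features named above (LDL, cholesterol, HDL-C,
# MCHC, triglyceride, amylase, UA, LDH, CK-MB, ALP, MCH).
rng = np.random.default_rng(42)
X = rng.normal(size=(500, 11))
y = rng.integers(0, 2, size=500)  # 0 = COVID-19 negative, 1 = positive
clf = HistGradientBoostingClassifier(max_iter=200, random_state=0)
print(cross_val_score(clf, X, y, cv=5).mean())  # ~chance level on random data
```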
We propose to use these 11 features and their binary combinations as important biomarkers for ML sensors in the diagnosis of the disease, supporting edge computing on Arduino and cloud IoT service.}, } @article {pmid36298158, year = {2022}, author = {Merone, M and Graziosi, A and Lapadula, V and Petrosino, L and d'Angelis, O and Vollero, L}, title = {A Practical Approach to the Analysis and Optimization of Neural Networks on Embedded Systems.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {20}, pages = {}, pmid = {36298158}, issn = {1424-8220}, mesh = {*Artificial Intelligence ; *Neural Networks, Computer ; Cloud Computing ; Algorithms ; Computers ; }, abstract = {The exponential increase in internet data poses several challenges to cloud systems and data centers, such as scalability, power overheads, network load, and data security. To overcome these limitations, research is focusing on the development of edge computing systems, i.e., based on a distributed computing model in which data processing occurs as close as possible to where the data are collected. Edge computing, indeed, mitigates the limitations of cloud computing, implementing artificial intelligence algorithms directly on the embedded devices enabling low latency responses without network overhead or high costs, and improving solution scalability. Today, the hardware improvements of the edge devices make them capable of performing, even if with some constraints, complex computations, such as those required by Deep Neural Networks. Nevertheless, to efficiently implement deep learning algorithms on devices with limited computing power, it is necessary to minimize the production time and to quickly identify, deploy, and, if necessary, optimize the best Neural Network solution. This study focuses on developing a universal method to identify and port the best Neural Network on an edge system, valid regardless of the device, Neural Network, and task typology. The method is based on three steps: a trade-off step to obtain the best Neural Network within different solutions under investigation; an optimization step to find the best configurations of parameters under different acceleration techniques; finally, an explainability step using local interpretable model-agnostic explanations (LIME), which provides a global approach to quantify the goodness of the classifier decision criteria. We evaluated several MobileNets on the Fudan Shanghai-Tech dataset to test the proposed approach.}, } @article {pmid36298065, year = {2022}, author = {Torrisi, F and Amato, E and Corradino, C and Mangiagli, S and Del Negro, C}, title = {Characterization of Volcanic Cloud Components Using Machine Learning Techniques and SEVIRI Infrared Images.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {20}, pages = {}, pmid = {36298065}, issn = {1424-8220}, mesh = {Humans ; *Sodium Glutamate ; *Volcanic Eruptions ; Atmosphere ; Gases ; Machine Learning ; }, abstract = {Volcanic explosive eruptions inject several different types of particles and gases into the atmosphere, giving rise to the formation and propagation of volcanic clouds. These can pose a serious threat to the health of people living near an active volcano and cause damage to air traffic. Many efforts have been devoted to monitoring and characterizing volcanic clouds. Satellite infrared (IR) sensors have been shown to be well suited for volcanic cloud monitoring tasks.
Here, a machine learning (ML) approach was developed in Google Earth Engine (GEE) to detect a volcanic cloud and to classify its main components using satellite infrared images. We implemented a supervised support vector machine (SVM) algorithm to segment a combination of thermal infrared (TIR) bands acquired by the geostationary MSG-SEVIRI (Meteosat Second Generation-Spinning Enhanced Visible and Infrared Imager). This ML algorithm was applied to some of the paroxysmal explosive events that occurred at Mt. Etna between 2020 and 2022. We found that the ML approach using a combination of TIR bands from the geostationary satellite is very efficient, achieving an accuracy of 0.86 and properly detecting, tracking, and mapping volcanic ash clouds automatically in near real-time.}, } @article {pmid36294134, year = {2022}, author = {Li, Z}, title = {Forecasting Weekly Dengue Cases by Integrating Google Earth Engine-Based Risk Predictor Generation and Google Colab-Based Deep Learning Modeling in Fortaleza and the Federal District, Brazil.}, journal = {International journal of environmental research and public health}, volume = {19}, number = {20}, pages = {}, pmid = {36294134}, issn = {1660-4601}, mesh = {Humans ; Brazil/epidemiology ; *Dengue/epidemiology ; *Deep Learning ; Artificial Intelligence ; Search Engine ; Forecasting ; }, abstract = {Efficient and accurate dengue risk prediction is an important basis for dengue prevention and control, which faces challenges, such as downloading and processing multi-source data to generate risk predictors and consuming significant time and computational resources to train and validate models locally. In this context, this study proposed a framework for dengue risk prediction by integrating big geospatial data cloud computing based on the Google Earth Engine (GEE) platform and artificial intelligence modeling on the Google Colab platform. It enables defining the epidemiological calendar, delineating the predominant area of dengue transmission in cities, generating the data of risk predictors, and defining multi-date ahead prediction scenarios. We implemented the experiments based on weekly dengue cases during 2013-2020 in the Federal District and Fortaleza, Brazil, to evaluate the performance of the proposed framework. Four predictors were considered, including total rainfall (Rsum), mean temperature (Tmean), mean relative humidity (RHmean), and mean normalized difference vegetation index (NDVImean). Three models (i.e., random forest (RF), long-short term memory (LSTM), and LSTM with attention mechanism (LSTM-ATT)), and two modeling scenarios (i.e., modeling with or without dengue cases) were set to implement 1- to 4-week ahead predictions. A total of 24 models were built, and the results showed in general that LSTM and LSTM-ATT models outperformed RF models; modeling could benefit from using historical dengue cases as one of the predictors, and it makes the predicted curve fluctuation more stable compared with that only using climate and environmental factors; attention mechanism could further improve the performance of LSTM models.
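A minimal sketch of the LSTM forecasting setup in the dengue entry above (pmid36294134): sequences of weekly predictors map to multi-week-ahead case counts. The window length and layer sizes are illustrative assumptions, and the attention variant (LSTM-ATT) is omitted for brevity.

```python
import tensorflow as tf

# Sequences of weekly predictors (Rsum, Tmean, RHmean, NDVImean, lagged cases)
# feed an LSTM that emits 1- to 4-week-ahead case counts.
model = tf.keras.Sequential([
    tf.keras.layers.LSTM(64, input_shape=(12, 5)),  # 12 past weeks x 5 predictors
    tf.keras.layers.Dense(4),                       # 1- to 4-week-ahead predictions
])
model.compile(optimizer="adam", loss="mae")
model.summary()
```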
This study provides implications for future dengue risk prediction in terms of the effectiveness of GEE-based big geospatial data processing for risk predictor generation and Google Colab-based risk modeling and presents the benefits of using historical dengue data as one of the input features and the attention mechanism for LSTM modeling.}, } @article {pmid36293656, year = {2022}, author = {Alenoghena, CO and Onumanyi, AJ and Ohize, HO and Adejo, AO and Oligbi, M and Ali, SI and Okoh, SA}, title = {eHealth: A Survey of Architectures, Developments in mHealth, Security Concerns and Solutions.}, journal = {International journal of environmental research and public health}, volume = {19}, number = {20}, pages = {}, pmid = {36293656}, issn = {1660-4601}, mesh = {Humans ; Pandemics ; *COVID-19/epidemiology ; *Telemedicine ; Technology ; }, abstract = {The ramifications of the COVID-19 pandemic have contributed in part to a recent upsurge in the study and development of eHealth systems. Although it is almost impossible to cover all aspects of eHealth in a single discussion, three critical areas have gained traction. These include the need for acceptable eHealth architectures, the development of mobile health (mHealth) technologies, and the need to address eHealth system security concerns. Existing survey articles lack a synthesis of the most recent advancements in the development of architectures, mHealth solutions, and innovative security measures, which are essential components of effective eHealth systems. Consequently, the present article aims at providing an encompassing survey of these three aspects towards the development of successful and efficient eHealth systems. Firstly, we discuss the most recent innovations in eHealth architectures, such as blockchain-, Internet of Things (IoT)-, and cloud-based architectures, focusing on their respective benefits and drawbacks while also providing an overview of how they might be implemented and used. Concerning mHealth and security, we focus on key developments in both areas while discussing other critical topics of importance for eHealth systems. We close with a discussion of the important research challenges and potential future directions as they pertain to architecture, mHealth, and security concerns. This survey gives a comprehensive overview, including the merits and limitations of several possible technologies for the development of eHealth systems. This endeavor offers researchers and developers a quick snapshot of the information necessary during the design and decision-making phases of the eHealth system development lifecycle. Furthermore, we conclude that building a unified architecture for eHealth systems would require combining several existing designs. 
It also points out that there are still a number of problems to be solved, so more research and investment are needed to develop and deploy functional eHealth systems.}, } @article {pmid36280715, year = {2022}, author = {Schubert, PJ and Dorkenwald, S and Januszewski, M and Klimesch, J and Svara, F and Mancu, A and Ahmad, H and Fee, MS and Jain, V and Kornfeld, J}, title = {SyConn2: dense synaptic connectivity inference for volume electron microscopy.}, journal = {Nature methods}, volume = {19}, number = {11}, pages = {1367-1370}, pmid = {36280715}, issn = {1548-7105}, support = {RF1 MH117809/MH/NIMH NIH HHS/United States ; }, mesh = {Microscopy, Electron ; *Connectome ; Synapses ; Neurons ; Brain ; }, abstract = {The ability to acquire ever larger datasets of brain tissue using volume electron microscopy leads to an increasing demand for the automated extraction of connectomic information. We introduce SyConn2, an open-source connectome analysis toolkit, which works with both on-site high-performance compute environments and rentable cloud computing clusters. SyConn2 was tested on connectomic datasets with more than 10 million synapses, provides a web-based visualization interface and makes these data amenable to complex anatomical and neuronal connectivity queries.}, } @article {pmid36275963, year = {2022}, author = {Zhang, Y and Geng, P}, title = {Multi-Task Assignment Method of the Cloud Computing Platform Based on Artificial Intelligence.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {1789490}, pmid = {36275963}, issn = {1687-5273}, mesh = {*Cloud Computing ; *Artificial Intelligence ; Bayes Theorem ; Algorithms ; Big Data ; }, abstract = {To realize load balancing of cloud computing platforms in big data processing, the method of finding the optimal load balancing physical host in the algorithm cycle is adopted at present. This optimal load balancing strategy that overly focuses on the current deployment problem has certain limitations. It will make the system less efficient and the user's waiting time unnecessarily prolonged. This paper proposes a task assignment method for long-term resource load balancing of cloud platforms based on artificial intelligence and big data (TABAI). The maximum posterior probability for each physical host is calculated using Bayesian theory. Euler's formula is used to calculate the similarity between the host with the largest posterior probability and other hosts as a threshold. The hosts are classified according to the threshold to determine the optimal cluster and then form the final set of candidate physical hosts. It improves the resource utilization and external service capability of the cloud platform by combining cluster analysis with Bayes' theorem to achieve global load balancing in the time dimension. The experimental results show that: TABAI has a smaller processing time than the traditional load balancing multi-task assignment method. 
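A loose sketch of the host-selection idea in the TABAI entry above (pmid36275963): rank hosts by a Bayesian posterior, then keep hosts similar to the best one as the candidate set. The priors, likelihoods, and distance-based similarity below are illustrative assumptions; in particular, we read the abstract's "Euler's formula" as a Euclidean distance, which may not match the authors' exact computation.

```python
import numpy as np

features = np.array([[0.20, 0.30],   # per-host load metrics (e.g. CPU, memory)
                     [0.25, 0.35],
                     [0.80, 0.90]])
prior = np.full(3, 1 / 3)                 # uniform prior over hosts
likelihood = np.array([0.7, 0.6, 0.1])    # P(observed load | host is suitable)
posterior = prior * likelihood / np.sum(prior * likelihood)
best = int(np.argmax(posterior))          # host with maximum posterior probability
dist = np.linalg.norm(features - features[best], axis=1)
candidates = np.where(dist <= dist.mean())[0]  # cluster of near-optimal hosts
print(best, candidates)
```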
When the time is >600 s, the standard deviation of TABAI decreases to a greater extent, and it has stronger external service capabilities.}, } @article {pmid36274993, year = {2022}, author = {Yentes, JM and Liu, WY and Zhang, K and Markvicka, E and Rennard, SI}, title = {Updated Perspectives on the Role of Biomechanics in COPD: Considerations for the Clinician.}, journal = {International journal of chronic obstructive pulmonary disease}, volume = {17}, number = {}, pages = {2653-2675}, pmid = {36274993}, issn = {1178-2005}, mesh = {Humans ; Biomechanical Phenomena ; *Pulmonary Disease, Chronic Obstructive/diagnosis ; Gait/physiology ; Walking ; Walking Speed ; }, abstract = {Patients with chronic obstructive pulmonary disease (COPD) demonstrate extra-pulmonary functional decline such as an increased prevalence of falls. Biomechanics offers insight into functional decline by examining mechanics of abnormal movement patterns. This review discusses biomechanics of functional outcomes, muscle mechanics, and breathing mechanics in patients with COPD as well as future directions and clinical perspectives. Patients with COPD demonstrate changes in their postural sway during quiet standing compared to controls, and these deficits are exacerbated when sensory information (eg, eyes closed) is manipulated. If standing balance is disrupted with a perturbation, patients with COPD are slower to return to baseline and their muscle activity is differential from controls. When walking, patients with COPD appear to adopt a gait pattern that may increase stability (eg, shorter and wider steps, decreased gait speed) in addition to altered gait variability. Biomechanical muscle mechanics (ie, tension, extensibility, elasticity, and irritability) alterations with COPD are not well documented, with relatively few articles investigating these properties. On the other hand, dyssynchronous motion of the abdomen and rib cage while breathing is well documented in patients with COPD. Newer biomechanical technologies have allowed for estimation of regional, compartmental, lung volumes during activity such as exercise, as well as respiratory muscle activation during breathing. Future directions of biomechanical analyses in COPD are trending toward wearable sensors, big data, and cloud computing. Each of these offers unique opportunities as well as challenges. Advanced analytics of sensor data can offer insight into the health of a system by quantifying complexity or fluctuations in patterns of movement, as healthy systems demonstrate flexibility and are thus adaptable to changing conditions. Biomechanics may offer clinical utility in prediction of 30-day readmissions, identifying disease severity, and patient monitoring. Biomechanics is complementary to other assessments, capturing what patients do, as well as their capability.}, } @article {pmid36274815, year = {2023}, author = {Bonino da Silva Santos, LO and Ferreira Pires, L and Graciano Martinez, V and Rebelo Moreira, JL and Silva Souza Guizzardi, R}, title = {Personal Health Train Architecture with Dynamic Cloud Staging.}, journal = {SN computer science}, volume = {4}, number = {1}, pages = {14}, pmid = {36274815}, issn = {2661-8907}, abstract = {Scientific advances, especially in the healthcare domain, can be accelerated by making data available for analysis. However, in traditional data analysis systems, data need to be moved to a central processing unit that performs analyses, which may be undesirable, e.g. 
due to privacy regulations in case these data contain personal information. This paper discusses the Personal Health Train (PHT) approach in which data processing is brought to the (personal health) data rather than the other way around, allowing access to (private) data to be controlled and ethical and legal concerns to be observed. This paper introduces the PHT architecture and discusses the data staging solution that allows processing to be delegated to components spawned in a private cloud environment in case the (health) organisation hosting the data has limited resources to execute the required processing. This paper shows the feasibility and suitability of the solution with a relatively simple, yet representative, case study of data analysis of Covid-19 infections, which is performed by components that are created on demand and run in the Amazon Web Services platform. This paper also shows that the performance of our solution is acceptable, and that our solution is scalable. This paper demonstrates that the PHT approach enables data analysis with controlled access, preserving privacy and complying with regulations such as GDPR, while the solution is deployed in a private cloud environment.}, } @article {pmid36269974, year = {2022}, author = {Proctor, T and Seritan, S and Rudinger, K and Nielsen, E and Blume-Kohout, R and Young, K}, title = {Scalable Randomized Benchmarking of Quantum Computers Using Mirror Circuits.}, journal = {Physical review letters}, volume = {129}, number = {15}, pages = {150502}, doi = {10.1103/PhysRevLett.129.150502}, pmid = {36269974}, issn = {1079-7114}, abstract = {The performance of quantum gates is often assessed using some form of randomized benchmarking. However, the existing methods become infeasible for more than approximately five qubits. Here we show how to use a simple and customizable class of circuits-randomized mirror circuits-to perform scalable, robust, and flexible randomized benchmarking of Clifford gates. We show that this technique approximately estimates the infidelity of an average many-qubit logic layer, and we use simulations of up to 225 qubits with physically realistic error rates in the range 0.1%-1% to demonstrate its scalability. We then use up to 16 physical qubits of a cloud quantum computing platform to demonstrate that our technique can reveal and quantify crosstalk errors in many-qubit circuits.}, } @article {pmid36269885, year = {2022}, author = {Matar, A and Hansson, M and Slokenberga, S and Panagiotopoulos, A and Chassang, G and Tzortzatou, O and Pormeister, K and Uhlin, E and Cardone, A and Beauvais, M}, title = {A proposal for an international Code of Conduct for data sharing in genomics.}, journal = {Developing world bioethics}, volume = {}, number = {}, pages = {}, doi = {10.1111/dewb.12381}, pmid = {36269885}, issn = {1471-8847}, abstract = {As genomic research becomes commonplace across the world, there is an increased need to coordinate practices among researchers, especially with regard to data sharing. One such way is an international code of conduct. In September 2020, an expert panel consisting of representatives from various fields convened to discuss a draft proposal formed via a synthesis of existing professional codes and other recommendations. This article presents an overview and analysis of the main issues related to international genomic research that were discussed by the expert panel, and the results of the discussion and follow-up responses by the experts.
As a result, the article presents as an annex a proposal for an international code of conduct for data sharing in genomics that is meant to establish best practices.}, } @article {pmid36268157, year = {2022}, author = {Asif, RN and Abbas, S and Khan, MA and Atta-Ur-Rahman, and Sultan, K and Mahmud, M and Mosavi, A}, title = {Development and Validation of Embedded Device for Electrocardiogram Arrhythmia Empowered with Transfer Learning.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {5054641}, pmid = {36268157}, issn = {1687-5273}, mesh = {Humans ; *Electrocardiography/methods ; *Arrhythmias, Cardiac/diagnosis ; Cloud Computing ; Machine Learning ; Software ; }, abstract = {With the emergence of the Internet of Things (IoT), investigation of different diseases in healthcare improved, and cloud computing helped to centralize the data and to access patient records throughout the world. In this way, the electrocardiogram (ECG) is used to diagnose heart diseases or abnormalities. Machine learning techniques have been used previously, but they are feature-based and not as accurate as transfer learning; this work therefore proposes the development and validation of an embedded device for ECG arrhythmia detection empowered with transfer learning (the DVEEA-TL model). This model combines hardware, software, and two datasets that are augmented and fused, and it achieves markedly higher accuracy than previous work and research. In the proposed model, a new dataset is made by the combination of the Kaggle dataset and another, which is made by taking the real-time healthy and unhealthy datasets, and later, the AlexNet transfer learning approach is applied to get a more accurate reading in terms of ECG signals. In this proposed research, the DVEEA-TL model diagnoses heart abnormalities with accuracies of 99.9% and 99.8% in the training and validation stages, respectively, a better and more reliable approach compared to previous research in this field.}, } @article {pmid36268145, year = {2022}, author = {Han, Z and Li, F and Wang, G}, title = {Financial Data Mining Model Based on K-Truss Community Query Model and Artificial Intelligence.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {9467623}, pmid = {36268145}, issn = {1687-5273}, mesh = {Humans ; *Artificial Intelligence ; *Data Mining ; Big Data ; Cloud Computing ; Algorithms ; }, abstract = {With the continuous development of Internet technology and related industries, emerging technologies such as big data and cloud computing have gradually integrated into and influenced social life. Emerging technologies have, to a large extent, revolutionized people's ways of production and life and provided a lot of convenience for people's lives. With the popularity of these technologies, information and data have also begun to explode. When we usually use an image storage system to process this information, we all know that an image contains countless pixels, and these pixels are interconnected to form the entire image. In real life, communities are like these pixels. On the Internet, communities are composed of interconnected parts.
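The ECG entry above (pmid36268157) builds on AlexNet transfer learning; here is a hedged torchvision sketch for a two-class head, assuming ECG recordings rendered as RGB images. The freezing policy and head size are illustrative choices, not the paper's exact recipe.

```python
import torch
import torch.nn as nn
from torchvision import models

# Load ImageNet-pretrained AlexNet and adapt it to a binary ECG task.
model = models.alexnet(weights=models.AlexNet_Weights.IMAGENET1K_V1)
for p in model.features.parameters():
    p.requires_grad = False               # keep the pretrained convolutional filters
model.classifier[6] = nn.Linear(4096, 2)  # swap the 1000-class ImageNet head
x = torch.randn(1, 3, 224, 224)           # placeholder batch of ECG images
print(model(x).shape)                     # -> torch.Size([1, 2])
```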
Nowadays, in various fields such as image modeling, we still have some problems, such as the problem of recognition rate, and we also found many problems when studying the community structure, which attracts more and more researchers, but the research on community query problems started late and the development is still relatively slow, so designing an excellent community query algorithm is a problem we urgently need to solve. With this goal, and based on previous research results, we have conducted in-depth discussions on community query algorithms, and hope that our research results can be applied to real life.}, } @article {pmid36267554, year = {2022}, author = {Jia, Z}, title = {Garden Landscape Design Method in Public Health Urban Planning Based on Big Data Analysis Technology.}, journal = {Journal of environmental and public health}, volume = {2022}, number = {}, pages = {2721247}, pmid = {36267554}, issn = {1687-9813}, mesh = {*Big Data ; *City Planning ; Gardens ; Public Health ; Data Analysis ; Technology ; }, abstract = {Aiming at the goal of high-quality development of the landscape architecture industry, we should actively promote the development and integration of digital, networked, and intelligent technologies and promote the intelligent and diversified development of the landscape architecture industry. Due to the limitation of drawing design technology and construction method, the traditional landscape architecture construction cannot really understand the public demands, and the construction scheme also relies on the experience and subjective aesthetics of professionals, resulting in improper connection between design and construction. At present, under the guidance of the national strategy, under the background of the rapid development of digital technologies such as 5G, big data, cloud computing, Internet of Things, and digital twins, the high integration of landscape architecture construction and digital technology has led to the transformation of the production mode of landscape architecture construction. Abundant professional data and convenient information processing platform enable landscape planners, designers, and builders to evaluate the whole life cycle of the project more scientifically and objectively and realize the digitalization of the whole process of investigation, analysis, design, construction, operation, and maintenance. For the landscape architecture industry, the significance of digital technology is not only to change the production tools but also to update the environmental awareness, design response, and construction methods, which makes the landscape architecture planning and design achieve the organic combination of qualitative and quantitative and also makes the landscape architecture discipline more scientific and rational. In this paper, the new method of combining grey relational degree with machine learning is used to provide new guidance for traditional landscape planning by using big data information in landscape design and has achieved very good results. 
The article analyzes the guidance of landscape architecture design under the big data in China and provides valuable reference for promoting the construction of landscape architecture in China.}, } @article {pmid36264891, year = {2022}, author = {Su, J and Su, K and Wang, S}, title = {Evaluation of digital economy development level based on multi-attribute decision theory.}, journal = {PloS one}, volume = {17}, number = {10}, pages = {e0270859}, pmid = {36264891}, issn = {1932-6203}, mesh = {Pregnancy ; Humans ; Female ; *Economic Development ; Artificial Intelligence ; Pandemics ; *COVID-19/epidemiology ; Decision Theory ; China ; }, abstract = {The maturity and commercialization of emerging digital technologies represented by artificial intelligence, cloud computing, block chain and virtual reality are giving birth to a new and higher economic form, that is, digital economy. Digital economy is different from the traditional industrial economy. It is clean, efficient, green and recyclable. It represents and promotes the future direction of global economic development, especially in the context of the sudden COVID-19 pandemic as a continuing disaster. Therefore, it is essential to establish the comprehensive evaluation model of digital economy development scientifically and reasonably. In this paper, first on the basis of literature analysis, the relevant indicators of digital economy development are collected manually and then screened by the grey dynamic clustering and rough set reduction theory. The evaluation index system of digital economy development is constructed from four dimensions: digital innovation impetus support, digital infrastructure construction support, national economic environment and digital policy guarantee, digital integration and application. Next the subjective weight and objective weight are calculated by the group FAHP method, entropy method and improved CRITIC method, and the combined weight is integrated with the thought of maximum variance. The grey correlation analysis and improved VIKOR model are combined to systematically evaluate the digital economy development level of 31 provinces and cities in China from 2013 to 2019. The results of empirical analysis show that the overall development of China's digital economy shows a trend of superposition and rise, and the development of digital economy in the four major economic zones is unbalanced. Finally, we put forward targeted opinions on the construction of China's provincial digital economy.}, } @article {pmid36264608, year = {2022}, author = {Moya-Galé, G and Walsh, SJ and Goudarzi, A}, title = {Automatic Assessment of Intelligibility in Noise in Parkinson Disease: Validation Study.}, journal = {Journal of medical Internet research}, volume = {24}, number = {10}, pages = {e40567}, pmid = {36264608}, issn = {1438-8871}, mesh = {Humans ; Dysarthria/etiology/complications ; *Parkinson Disease/complications ; Artificial Intelligence ; Speech Intelligibility ; *Speech Perception ; }, abstract = {BACKGROUND: Most individuals with Parkinson disease (PD) experience a degradation in their speech intelligibility. Research on the use of automatic speech recognition (ASR) to assess intelligibility is still sparse, especially when trying to replicate communication challenges in real-life conditions (ie, noisy backgrounds). Developing technologies to automatically measure intelligibility in noise can ultimately assist patients in self-managing their voice changes due to the disease.

OBJECTIVE: The goal of this study was to pilot-test and validate the use of a customized web-based app to assess speech intelligibility in noise in individuals with dysarthria associated with PD.

METHODS: In total, 20 individuals with dysarthria associated with PD and 20 healthy controls (HCs) recorded a set of sentences using their phones. The Google Cloud ASR API was used to automatically transcribe the speakers' sentences. An algorithm was created to embed speakers' sentences in +6-dB signal-to-noise multitalker babble. Results from ASR performance were compared to those from 30 listeners who orthographically transcribed the same set of sentences. Data were reduced into a single event, defined as a success if the artificial intelligence (AI) system transcribed a random speaker or sentence as well or better than the average of 3 randomly chosen human listeners. These data were further analyzed by logistic regression to assess whether AI success differed by speaker group (HCs or speakers with dysarthria) or was affected by sentence length. A discriminant analysis was conducted on the human listener data and AI transcriber data independently to compare the ability of each data set to discriminate between HCs and speakers with dysarthria.
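A minimal sketch of the noise-embedding step described in the METHODS above, assuming the speech and babble signals are 1-D NumPy arrays sampled at the same rate; the function name and arrays are illustrative, not taken from the study's code:

```python
import numpy as np

def mix_at_snr(speech: np.ndarray, babble: np.ndarray, snr_db: float = 6.0) -> np.ndarray:
    """Embed speech in multitalker babble at a target signal-to-noise ratio (dB)."""
    # Tile or trim the babble so it covers the whole utterance.
    reps = int(np.ceil(len(speech) / len(babble)))
    noise = np.tile(babble, reps)[: len(speech)]
    # Scale the babble so that speech power / noise power equals the target SNR.
    p_speech = np.mean(speech ** 2)
    p_noise = np.mean(noise ** 2)
    gain = np.sqrt(p_speech / (p_noise * 10 ** (snr_db / 10)))
    return speech + gain * noise
```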

RESULTS: The data analysis indicated a 0.8 probability (95% CI 0.65-0.91) that AI performance would be as good or better than the average human listener. AI transcriber success probability was not found to be dependent on speaker group. AI transcriber success was found to decrease with sentence length, losing an estimated 0.03 probability of transcribing as well as the average human listener for each word increase in sentence length. The AI transcriber data were found to offer the same discrimination of speakers into categories (HCs and speakers with dysarthria) as the human listener data.
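A hedged sketch of the success-event reduction and the sentence-length logistic regression reported above, assuming per-sentence word error rates (WERs) are already computed for the AI transcriber and each human listener; scikit-learn is assumed available, and all names and toy values are illustrative:

```python
import numpy as np
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(0)

def success_event(ai_wer: float, listener_wers: np.ndarray) -> int:
    """1 if the AI transcribed as well as or better than the average
    of 3 randomly chosen human listeners, else 0."""
    sample = rng.choice(listener_wers, size=3, replace=False)
    return int(ai_wer <= sample.mean())

# Toy design matrix: one row per (speaker, sentence) event; predictor = sentence length.
lengths = np.array([[5], [7], [9], [11], [13], [15]])
successes = np.array([1, 1, 1, 1, 0, 0])  # outputs of success_event(...)

model = LogisticRegression().fit(lengths, successes)
print(model.coef_[0, 0])  # negative: success probability falls as sentences lengthen
```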

CONCLUSIONS: ASR has the potential to assess intelligibility in noise in speakers with dysarthria associated with PD. Our results hold promise for the use of AI with this clinical population, although a full range of speech severity needs to be evaluated in future work, as well as the effect of different speaking tasks on ASR.}, } @article {pmid36259975, year = {2022}, author = {}, title = {Understanding enterprise data warehouses to support clinical and translational research: enterprise information technology relationships, data governance, workforce, and cloud computing.}, journal = {Journal of the American Medical Informatics Association : JAMIA}, volume = {}, number = {}, pages = {}, doi = {10.1093/jamia/ocac206}, pmid = {36259975}, issn = {1527-974X}, } @article {pmid36259009, year = {2022}, author = {Gendia, A}, title = {Cloud Based AI-Driven Video Analytics (CAVs) in Laparoscopic Surgery: A Step Closer to a Virtual Portfolio.}, journal = {Cureus}, volume = {14}, number = {9}, pages = {e29087}, pmid = {36259009}, issn = {2168-8184}, abstract = {AIMS: To outline the use of cloud-based artificial intelligence (AI)-driven video analytics (CAVs) in minimally invasive surgery and to propose their potential as a virtual portfolio for trainee and established surgeons. METHODS: An independent online demonstration was requested from three platforms, namely Theator (Palo Alto, California, USA), Touch Surgery™ (Medtronic, London, England, UK), and C-SATS® (Seattle, Washington, USA). The assessed domains were online and app-based accessibility, the ability for timely trainee feedback, and AI integration for operation-specific steps and critical views.

RESULTS: The CAVs enable users to record surgeries with the advantage of limitless video storage in the cloud and smart integration into theatre settings. Surgeons can view operations and review trainee videos through a shared medium of communication, with the ability to provide feedback. Theator and C-SATS® provide their users with surgical skills scoring systems with customizable options that can be used to provide structured feedback to trainees. Additionally, AI plays an important role in all three platforms by providing time-based analysis of steps and highlighting critical milestones. CONCLUSION: Cloud-based AI-driven video analytics is an emerging technology that enables users to store, analyze, and review videos. This technology has the potential to improve training, governance, and standardization procedures. Moreover, with future adoption of the technology, CAVs can be integrated into trainees' portfolios as part of their virtual curriculum. This can enable a structured assessment of a surgeon's progression and degree of experience throughout their surgical career.}, } @article {pmid36258393, year = {2022}, author = {Yamamoto, Y and Shimobaba, T and Ito, T}, title = {HORN-9: Special-purpose computer for electroholography with the Hilbert transform.}, journal = {Optics express}, volume = {30}, number = {21}, pages = {38115-38127}, doi = {10.1364/OE.471720}, pmid = {36258393}, issn = {1094-4087}, abstract = {Holography is a technology that uses light interference and diffraction to record and reproduce three-dimensional (3D) information. Using computers, holographic 3D scenes (electroholography) have been widely studied. Nevertheless, its practical application requires enormous computing power, and current computers have limitations in real-time processing. In this study, we show that holographic reconstruction (HORN)-9, a special-purpose computer for electroholography with the Hilbert transform, can compute a 1,920 × 1,080-pixel computer-generated hologram from a point cloud of 65,000 points in 0.030 s (33 fps) on a single card. This performance is 8, 7, and 170 times more efficient than a previously developed HORN-8, a graphics processing unit, and a central processing unit (CPU), respectively. We also demonstrated the real-time processing and display of 400,000 points on multiple HORN-9s, achieving an acceleration of 600 times with four HORN-9 units compared with a single CPU.}, } @article {pmid36255917, year = {2022}, author = {Houskeeper, HF and Hooker, SB and Cavanaugh, KC}, title = {Spectrally simplified approach for leveraging legacy geostationary oceanic observations.}, journal = {Applied optics}, volume = {61}, number = {27}, pages = {7966-7977}, doi = {10.1364/AO.465491}, pmid = {36255917}, issn = {1539-4522}, mesh = {*Environmental Monitoring/methods ; *Ecosystem ; Satellite Imagery ; Oceans and Seas ; Water ; }, abstract = {The use of multispectral geostationary satellites to study aquatic ecosystems improves the temporal frequency of observations and mitigates cloud obstruction, but no operational capability presently exists for the coastal and inland waters of the United States. The Advanced Baseline Imager (ABI) on the current iteration of the Geostationary Operational Environmental Satellites, termed the R Series (GOES-R), however, provides sub-hourly imagery and the opportunity to overcome this deficit and to leverage a large repository of existing GOES-R aquatic observations.
The fulfillment of this opportunity is assessed herein using a spectrally simplified, two-channel aquatic algorithm consistent with ABI wave bands to estimate the diffuse attenuation coefficient for photosynthetically available radiation, Kd(PAR). First, an in situ ABI dataset was synthesized using a globally representative dataset of above- and in-water radiometric data products. Values of Kd(PAR) were estimated by fitting the ratio of the shortest and longest visible wave bands from the in situ ABI dataset to coincident, in situ Kd(PAR) data products. The algorithm was evaluated based on an iterative cross-validation analysis in which 80% of the dataset was randomly partitioned for fitting and the remaining 20% was used for validation. The iteration producing the median coefficient of determination (R[2]) value (0.88) resulted in a root mean square difference of 0.319 m[-1], or 8.5% of the range in the validation dataset. Second, coincident mid-day images of central and southern California from ABI and from the Moderate Resolution Imaging Spectroradiometer (MODIS) were compared using Google Earth Engine (GEE). GEE default ABI reflectance values were adjusted based on a near infrared signal. Matchups between the ABI and MODIS imagery indicated similar spatial variability (R[2] = 0.60) between ABI adjusted blue-to-red reflectance ratio values and MODIS default diffuse attenuation coefficient for spectral downward irradiance at 490 nm, Kd(490), values. This work demonstrates that if an operational capability to provide ABI aquatic data products were realized, the spectral configuration of ABI would potentially support a sub-hourly, visible aquatic data product that is applicable to water-mass tracing and physical oceanography research.}, } @article {pmid36254227, year = {2022}, author = {Song, L and Wang, H and Shi, Z}, title = {A Literature Review Research on Monitoring Conditions of Mechanical Equipment Based on Edge Computing.}, journal = {Applied bionics and biomechanics}, volume = {2022}, number = {}, pages = {9489306}, pmid = {36254227}, issn = {1176-2322}, abstract = {The motivation of this research is to review the methods used to compress the data collected when monitoring the condition of equipment within an edge computing framework. A large amount of signal data is collected when monitoring the condition of mechanical equipment: signals from running machines are transmitted continuously and must be compressed, and the compressed data must then be handled effectively. This process occupies resources, since data transmission requires the allocation of a large capacity. To resolve this problem, this article examines the monitoring of equipment condition based on edge computing. First, the signal is pre-processed by edge computing, so that fault characteristics can be identified quickly. Second, signals with difficult-to-identify fault characteristics need to be compressed to save transmission resources. Then, the different types of signal data collected from mechanical equipment are compressed by various compression methods and uploaded to the cloud. Finally, the compressed data are processed on the cloud platform, which has powerful processing capability, to improve the effective volume of data transmission. By examining and analyzing the monitoring of equipment condition and signal compression methods for mechanical equipment, the future development trend is elaborated to provide references and ideas for contemporary research on data monitoring and data compression algorithms.
Consequently, the manuscript presents the different compression methods in detail and clarifies which data compression methods are used for the signal compression of equipment based on edge computing.}, } @article {pmid36253343, year = {2022}, author = {Kobayashi, K and Yoshida, H and Tanjo, T and Aida, K}, title = {Cloud service checklist for academic communities and customization for genome medical research.}, journal = {Human genome variation}, volume = {9}, number = {1}, pages = {36}, pmid = {36253343}, issn = {2054-345X}, abstract = {In this paper, we present a cloud service checklist designed to help IT administrators or researchers in academic organizations select the most suitable cloud services. This checklist, which comprises items that we believe IT administrators or researchers in academic organizations should consider when they adopt cloud services, comprehensively covers the issues related to a variety of cloud services, including security, functionality, performance, and law. In response to the increasing demands for storage and computing resources in genome medical science communities, various guidelines for using resources operated by external organizations, such as cloud services, have been published by different academic funding agencies and the Japanese government. However, it is sometimes difficult to identify the checklist items that satisfy the genome medical science community's guidelines, and some of these requirements are not included in the existing checklists. This issue provided our motivation for creating a cloud service checklist customized for genome medical research communities. The resulting customized checklist is designed to help researchers easily find information about the cloud services that satisfy the guidelines in genome medical science communities. Additionally, we explore whether many cloud service providers satisfy the requirements or checklist items in the cloud service checklist for genome medical research by evaluating their survey responses.}, } @article {pmid36248925, year = {2022}, author = {Bu, H and Xia, J and Wu, Q and Chen, L}, title = {Relationship Discovery and Hierarchical Embedding for Web Service Quality Prediction.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {9240843}, pmid = {36248925}, issn = {1687-5273}, mesh = {*Cloud Computing ; *Internet ; Research Design ; }, abstract = {Web Services Quality Prediction has become a popular research theme in Cloud Computing and the Internet of Things. Graph Convolutional Network (GCN)-based methods are more efficient because they aggregate feature information from the local graph neighborhood. Although these prior works have demonstrated better prediction performance, they still face the following challenges: (1) the user-service bipartite graph is essentially a heterogeneous graph that contains four kinds of relationships, and previous GCN-based models have focused on using only some of them, so fully mining and using all of these relationships is critical to improving prediction accuracy; (2) after the embedding is obtained from the GCNs, the commonly used similarity calculation methods for downstream prediction need to traverse the data one by one, which is time-consuming.
To address these challenges, this work proposes a novel relationship discovery and hierarchical embedding method based on GCNs (named RDHE), which designs a dual mechanism to represent services and users, a new community discovery method, and a fast similarity calculation process, fully mining and utilizing the relationships in the graph. Experimental results on a real data set show that this method greatly improves the accuracy of web service quality prediction.}, } @article {pmid36248269, year = {2022}, author = {Mondal, P and Dutta, T and Qadir, A and Sharma, S}, title = {Radar and optical remote sensing for near real-time assessments of cyclone impacts on coastal ecosystems.}, journal = {Remote sensing in ecology and conservation}, volume = {8}, number = {4}, pages = {506-520}, pmid = {36248269}, issn = {2056-3485}, abstract = {Rapid impact assessment of cyclones on coastal ecosystems is critical for timely rescue and rehabilitation operations in highly human-dominated landscapes. Such assessments should also include damage assessments of vegetation for restoration planning in impacted natural landscapes. Our objective is to develop a remote sensing-based approach combining satellite data derived from optical (Sentinel-2), radar (Sentinel-1), and LiDAR (Global Ecosystem Dynamics Investigation) platforms for rapid assessment of post-cyclone inundation in non-forested areas and vegetation damage in a primarily forested ecosystem. We apply this multi-scalar approach for assessing damages caused by cyclone Amphan, which hit coastal India and Bangladesh in May 2020, severely flooding several districts in the two countries and causing destruction to the Sundarban mangrove forests. Our analysis shows that at least 6821 sq. km. of land across the 39 study districts was inundated even 10 days after the cyclone. We further calculated the change in forest greenness as the difference in normalized difference vegetation index (NDVI) pre- and post-cyclone. Our findings indicate a <0.2 unit decline in NDVI in 3.45 sq. km. of the forest. Rapid assessment of post-cyclone damage in mangroves is challenging due to limited navigability of waterways, but critical for planning of mitigation and recovery measures. We demonstrate the utility of the Otsu method, an automated statistical approach on the Google Earth Engine platform, to identify inundated areas within days after a cyclone. Our radar-based inundation analysis advances current practices because it requires minimal user inputs, and is effective in the presence of high cloud cover. Such rapid assessment, when complemented with detailed information on species and vegetation composition, can inform appropriate restoration efforts in severely impacted regions and help decision makers efficiently manage resources for recovery and aid relief.
We provide the datasets from this study on an open platform to aid in future research and planning endeavors.}, } @article {pmid36247859, year = {2022}, author = {Saba Raoof, S and Durai, MAS}, title = {A Comprehensive Review on Smart Health Care: Applications, Paradigms, and Challenges with Case Studies.}, journal = {Contrast media & molecular imaging}, volume = {2022}, number = {}, pages = {4822235}, pmid = {36247859}, issn = {1555-4317}, mesh = {Delivery of Health Care ; Humans ; *Internet of Things ; Quality of Life ; *Telemedicine/methods ; }, abstract = {The growth and advancement of Deep Learning (DL) and the Internet of Things (IoT) are making their way through the modern world by integrating various technologies in distinct fields, viz., agriculture, manufacturing, energy, transportation, supply chains, cities, healthcare, and so on. Researchers have identified the feasibility of integrating deep learning, the cloud, and IoT to enhance overall automation: IoT can extend its application area by utilizing cloud services, and the cloud can extend its applications through data acquired by IoT devices such as sensors, with deep learning used for disease detection and diagnosis. This study summarizes various techniques utilized in smart healthcare, i.e., deep learning, cloud-based IoT applications in smart healthcare, fog computing in smart healthcare, and the challenges and issues faced by smart healthcare; it takes a wider scope, as it is not intended for a particular application such as patient monitoring, disease detection, or diagnosis, and the technologies used for developing these smart systems are outlined. Smart health improves quality of life. Convenient and comfortable living is made possible by the services provided by smart healthcare systems (SHSs). Since healthcare is a massive area with enormous data and a broad spectrum of diseases associated with different organs, immense research can be done to overcome the drawbacks of traditional healthcare methods. Deep learning with IoT can effectively be applied in the healthcare sector to automate the diagnosing and treatment process even in rural areas remotely. Applications may include disease prevention and diagnosis, fitness and patient monitoring, food monitoring, mobile health, telemedicine, emergency systems, assisted living, self-management of chronic diseases, and so on.}, } @article {pmid36246518, year = {2022}, author = {Coelho, R and Braga, R and David, JMN and Stroele, V and Campos, F and Dantas, M}, title = {A Blockchain-Based Architecture for Trust in Collaborative Scientific Experimentation.}, journal = {Journal of grid computing}, volume = {20}, number = {4}, pages = {35}, pmid = {36246518}, issn = {1572-9184}, abstract = {In scientific collaboration, data sharing and the exchange of ideas and results are essential to knowledge construction and the development of science. Hence, we must guarantee interoperability, privacy, traceability (reinforcing transparency), and trust. Provenance has been widely recognized for providing a history of the steps taken in scientific experiments. Consequently, we must support traceability, assisting in scientific results' reproducibility. One of the technologies that can enhance trust in collaborative scientific experimentation is blockchain. This work proposes an architecture, named BlockFlow, based on blockchain, provenance, and cloud infrastructure to bring trust and traceability to the execution of collaborative scientific experiments.
The proposed architecture is implemented on Hyperledger, and a scenario involving the genomic sequencing of the SARS-CoV-2 coronavirus is used to evaluate the architecture, discussing the benefits of providing traceability and trust in collaborative scientific experimentation. Furthermore, the architecture addresses the heterogeneity of shared data, facilitating its interpretation and analysis by geographically distributed researchers. Through a blockchain-based architecture with provenance support, we can enhance data sharing, traceability, and trust in collaborative scientific experiments.}, } @article {pmid36240003, year = {2022}, author = {Kang, G and Kim, YG}, title = {Secure Collaborative Platform for Health Care Research in an Open Environment: Perspective on Accountability in Access Control.}, journal = {Journal of medical Internet research}, volume = {24}, number = {10}, pages = {e37978}, pmid = {36240003}, issn = {1438-8871}, mesh = {*Blockchain ; *Computer Security ; Health Services Research ; Humans ; Privacy ; Social Responsibility ; }, abstract = {BACKGROUND: With the recent use of IT in health care, a variety of eHealth data are increasingly being collected and stored by national health agencies. As these eHealth data can advance the modern health care system and make it smarter, many researchers want to use these data in their studies. However, using eHealth data brings about privacy and security concerns. The analytical environment that supports health care research must also consider many requirements. For these reasons, countries generally provide research platforms for health care, but some data providers (eg, patients) are still concerned about the security and privacy of their eHealth data. Thus, a more secure platform for health care research that guarantees the utility of eHealth data while focusing on its security and privacy is needed.

OBJECTIVE: This study aims to implement a research platform for health care called the health care big data platform (HBDP), which is more secure than previous health care research platforms. The HBDP uses attribute-based encryption to achieve fine-grained access control and encryption of stored eHealth data in an open environment. Moreover, in the HBDP, platform administrators can perform the appropriate follow-up (eg, block illegal users) and monitoring through a private blockchain. In other words, the HBDP supports accountability in access control.

METHODS: We first identified potential security threats in the health care domain. We then defined the security requirements to minimize the identified threats. In particular, the requirements were defined based on the security solutions used in existing health care research platforms. We then proposed the HBDP, which meets defined security requirements (ie, access control, encryption of stored eHealth data, and accountability). Finally, we implemented the HBDP to prove its feasibility.

RESULTS: This study carried out case studies for illegal user detection via the implemented HBDP based on specific scenarios related to the threats. As a result, the platform detected illegal users appropriately via the security agent. Furthermore, in the empirical evaluation of massive data encryption (eg, 100,000 rows with 3 sensitive columns within 46 columns), column-level encryption, full encryption after column-level encryption, and full decryption including column-level decryption took approximately 3 minutes, 1 minute, and 9 minutes, respectively. In the blockchain, average latencies and throughputs in 1Org with 2Peers reached approximately 18 seconds and 49 transactions per second (TPS) in read mode and approximately 4 seconds and 120 TPS in write mode at a send rate of 300 TPS.
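A minimal sketch of column-level encryption over tabular eHealth records, using the symmetric Fernet scheme from the Python cryptography package as a stand-in for the attribute-based encryption the platform actually employs; the column names and the three-column selection are illustrative only:

```python
from cryptography.fernet import Fernet

key = Fernet.generate_key()   # in the HBDP this role is played by ABE key material
cipher = Fernet(key)

SENSITIVE = {"name", "diagnosis", "genome_id"}  # e.g., 3 sensitive columns out of 46

def encrypt_row(row: dict) -> dict:
    """Encrypt only the sensitive columns of one record, leaving the rest usable."""
    return {
        col: cipher.encrypt(str(val).encode()).decode() if col in SENSITIVE else val
        for col, val in row.items()
    }

row = {"name": "Jane Doe", "age": 52, "diagnosis": "I48.0", "genome_id": "G-0001"}
print(encrypt_row(row))
```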

CONCLUSIONS: The HBDP enables fine-grained access control and secure storage of eHealth data via attribute-based encryption cryptography. It also provides nonrepudiation and accountability through the blockchain. Therefore, we consider that our proposal provides a sufficiently secure environment for the use of eHealth data in health care research.}, } @article {pmid36237741, year = {2022}, author = {Konstantinou, C and Xanthopoulos, A and Tsaras, K and Skoularigis, J and Triposkiadis, F and Papagiannis, D}, title = {Vaccination Coverage Against Human Papillomavirus in Female Students in Cyprus.}, journal = {Cureus}, volume = {14}, number = {9}, pages = {e28936}, pmid = {36237741}, issn = {2168-8184}, abstract = {Background Human papillomavirus (HPV) has been associated with the development of several cancers and cardiovascular diseases in females. Nevertheless, there is still poor data on vaccination coverage against HPV in several countries, including Cyprus. The main target of the present research was to assess the vaccination status of female students in Cyprus. Methodology An online survey was conducted via a cloud-based short questionnaire on Google Forms. Students with a known email address were initially invited via email to complete the survey. The questionnaire was distributed to 340 students, aged 18-49 years old, who lived in Cyprus (60% response rate). Results The total vaccination coverage was 38.1%. The mean age of participants was 23.5 (±6.5) years. The major reason for non-vaccination was the belief that participants were not at risk of serious illness from HPV infection (22%), followed by the reported lack of time to get vaccinated (16%) and inertia (13%). The students who had information about the safety of HPV vaccines from electronic sources of information (television, websites, and blogs) had lower vaccination coverage compared to those who had received information from alternative sources (primary health centers, family doctors, or obstetricians) (relative risk (RR) = 1.923, 95% confidence interval (CI) = 0.9669-3.825; p = 0.033). No significant differences in vaccination rates between participants who were coming from schools of health sciences versus those from financial schools (RR = 1.082, 95% CI = 0.7574-1.544; p = 0.3348) were observed. Conclusions Public health policy interventions and education on HPV vaccines are effective ways to improve the awareness and acceptance rate of HPV vaccination among female students and improve the HPV vaccination coverage level in Cyprus.}, } @article {pmid36236773, year = {2022}, author = {Shumba, AT and Montanaro, T and Sergi, I and Fachechi, L and De Vittorio, M and Patrono, L}, title = {Leveraging IoT-Aware Technologies and AI Techniques for Real-Time Critical Healthcare Applications.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {19}, pages = {}, pmid = {36236773}, issn = {1424-8220}, mesh = {Aged ; *Artificial Intelligence ; Biocompatible Materials ; *Blood Glucose ; Delivery of Health Care ; Humans ; Technology ; }, abstract = {Personalised healthcare has seen significant improvements due to the introduction of health monitoring technologies that allow wearable devices to unintrusively monitor physiological parameters such as heart health, blood pressure, sleep patterns, and blood glucose levels, among others. 
Additionally, utilising advanced sensing technologies based on flexible and innovative biocompatible materials in wearable devices allows high accuracy and precision measurement of biological signals. Furthermore, applying real-time Machine Learning algorithms to highly accurate physiological parameters allows precise identification of unusual patterns in the data to provide health event predictions and warnings for timely intervention. However, in the predominantly adopted architectures, health event predictions based on Machine Learning are typically obtained by leveraging Cloud infrastructures characterised by shortcomings such as delayed response times and privacy issues. Fortunately, recent works highlight that a new paradigm based on Edge Computing technologies and on-device Artificial Intelligence significantly improve the latency and privacy issues. Applying this new paradigm to personalised healthcare architectures can significantly improve their efficiency and efficacy. Therefore, this paper reviews existing IoT healthcare architectures that utilise wearable devices and subsequently presents a scalable and modular system architecture to leverage emerging technologies to solve identified shortcomings. The defined architecture includes ultrathin, skin-compatible, flexible, high precision piezoelectric sensors, low-cost communication technologies, on-device intelligence, Edge Intelligence, and Edge Computing technologies. To provide development guidelines and define a consistent reference architecture for improved scalable wearable IoT-based critical healthcare architectures, this manuscript outlines the essential functional and non-functional requirements based on deductions from existing architectures and emerging technology trends. The presented system architecture can be applied to many scenarios, including ambient assisted living, where continuous surveillance and issuance of timely warnings can afford independence to the elderly and chronically ill. We conclude that the distribution and modularity of architecture layers, local AI-based elaboration, and data packaging consistency are the more essential functional requirements for critical healthcare application use cases. We also identify fast response time, utility, comfort, and low cost as the essential non-functional requirements for the defined system architecture.}, } @article {pmid36236664, year = {2022}, author = {Shahzad, K and Zia, T and Qazi, EU}, title = {A Review of Functional Encryption in IoT Applications.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {19}, pages = {}, pmid = {36236664}, issn = {1424-8220}, abstract = {The Internet of Things (IoT) represents a growing aspect of how entities, including humans and organizations, are likely to connect with others in their public and private interactions. The exponential rise in the number of IoT devices, resulting from ever-growing IoT applications, also gives rise to new opportunities for exploiting potential security vulnerabilities. In contrast to conventional cryptosystems, frameworks that incorporate fine-grained access control offer better opportunities for protecting valuable assets, especially when the connectivity level is dense. Functional encryption is an exciting new paradigm of public-key encryption that supports fine-grained access control, generalizing a range of existing fine-grained access control mechanisms. 
This survey reviews the recent applications of functional encryption and the major cryptographic primitives that it covers, identifying areas where the adoption of these primitives has had the greatest impact. We first provide an overview of different application areas where these access control schemes have been applied. Then, an in-depth survey of how the schemes are used in a multitude of applications related to IoT is given, rendering a potential vision of security and integrity that this growing field promises. Towards the end, we identify some research trends and state the open challenges that current developments face for a secure IoT realization.}, } @article {pmid36236587, year = {2022}, author = {Qin, M and Liu, T and Hou, B and Gao, Y and Yao, Y and Sun, H}, title = {A Low-Latency RDP-CORDIC Algorithm for Real-Time Signal Processing of Edge Computing Devices in Smart Grid Cyber-Physical Systems.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {19}, pages = {}, pmid = {36236587}, issn = {1424-8220}, abstract = {Smart grids are being expanded in scale with the increasing complexity of the equipment. Edge computing is gradually replacing conventional cloud computing due to its low latency, low power consumption, and high reliability. The CORDIC algorithm has the characteristics of high-speed real-time processing and is very suitable for hardware accelerators in edge computing devices. However, the iterative calculation method of the CORDIC algorithm leads to problems such as complex structure and high consumption of hardware resources. In this paper, we propose an RDP-CORDIC algorithm which pre-computes all micro-rotation directions and transforms the conventional single-stage iterative structure into a three-stage and multi-stage combined iterative structure, thereby solving the conventional CORDIC algorithm's problems of many iterations and high consumption. An accuracy compensation algorithm for the direction prediction constant is also proposed to solve the problem of high ROM consumption in the high precision implementation of the RDP-CORDIC algorithm. The experimental results showed that the RDP-CORDIC algorithm had faster computation speed and lower resource consumption with higher guaranteed accuracy than other CORDIC algorithms. Therefore, the RDP-CORDIC algorithm proposed in this paper may effectively increase computation performance while reducing the power and resource consumption of edge computing devices in smart grid systems.}, } @article {pmid36236546, year = {2022}, author = {Busaeed, S and Katib, I and Albeshri, A and Corchado, JM and Yigitcanlar, T and Mehmood, R}, title = {LidSonic V2.0: A LiDAR and Deep-Learning-Based Green Assistive Edge Device to Enhance Mobility for the Visually Impaired.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {19}, pages = {}, pmid = {36236546}, issn = {1424-8220}, mesh = {*Deep Learning ; *Disabled Persons ; Humans ; *Self-Help Devices ; *Visually Impaired Persons ; *Wheelchairs ; }, abstract = {Over a billion people around the world are disabled, among whom 253 million are visually impaired or blind, and this number is greatly increasing due to ageing, chronic diseases, and poor environments and health. Despite many proposals, the current devices and systems lack maturity and do not completely fulfill user requirements and satisfaction.
Increased research activity in this field is required in order to encourage the development, commercialization, and widespread acceptance of low-cost and affordable assistive technologies for visual impairment and other disabilities. This paper proposes a novel approach using a LiDAR with a servo motor and an ultrasonic sensor to collect data and predict objects using deep learning for environment perception and navigation. We adopted this approach using a pair of smart glasses, called LidSonic V2.0, to enable the identification of obstacles for the visually impaired. The LidSonic system consists of an Arduino Uno edge computing device integrated into the smart glasses and a smartphone app that transmits data via Bluetooth. Arduino gathers data, operates the sensors on the smart glasses, detects obstacles using simple data processing, and provides buzzer feedback to visually impaired users. The smartphone application collects data from Arduino, detects and classifies items in the spatial environment, and gives spoken feedback to the user on the detected objects. In comparison to image-processing-based glasses, LidSonic uses far less processing time and energy to classify obstacles using simple LiDAR data, according to several integer measurements. We comprehensively describe the proposed system's hardware and software design, having constructed their prototype implementations and tested them in real-world environments. Using the open platforms, WEKA and TensorFlow, the entire LidSonic system is built with affordable off-the-shelf sensors and a microcontroller board costing less than USD 80. Essentially, we provide designs of an inexpensive, miniature green device that can be built into, or mounted on, any pair of glasses or even a wheelchair to help the visually impaired. Our approach enables faster inference and decision-making using relatively low energy with smaller data sizes, as well as faster communications for edge, fog, and cloud computing.}, } @article {pmid36236536, year = {2022}, author = {Lei, L and Kou, L and Zhan, X and Zhang, J and Ren, Y}, title = {An Anomaly Detection Algorithm Based on Ensemble Learning for 5G Environment.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {19}, pages = {}, pmid = {36236536}, issn = {1424-8220}, mesh = {*Algorithms ; Cloud Computing ; Learning ; Machine Learning ; *Software ; }, abstract = {With the advent of the digital information age, new data services such as virtual reality, industrial Internet, and cloud computing have proliferated in recent years. As a result, it increases operator demand for 5G bearer networks by providing features such as high transmission capacity, ultra-long transmission distance, network slicing, and intelligent management and control. Software-defined networking, as a new network architecture, intends to increase network flexibility and agility and can better satisfy the demands of 5G networks for network slicing. Nevertheless, software-defined networking still faces the challenge of network intrusion. We propose an abnormal traffic detection method based on the stacking method and self-attention mechanism, which makes up for the shortcoming of the inability to track long-term dependencies between data samples in ensemble learning. Our method utilizes a self-attention mechanism and a convolutional network to automatically learn long-term associations between traffic samples and provide them to downstream tasks in sample embedding. 
In addition, we design a novel stacking ensemble method, which computes the sample embedding and the predicted values of the heterogeneous base learner through the fusion module to obtain the final outlier results. This paper conducts experiments on abnormal traffic datasets in the software-defined network environment, calculates precision, recall and F1-score, and compares and analyzes them with other algorithms. The experimental results show that the method designed in this paper achieves 0.9972, 0.9996, and 0.9984 in multiple indicators of precision, recall, and F1-score, respectively, which are better than the comparison methods.}, } @article {pmid36236523, year = {2022}, author = {Yi, F and Zhang, L and Xu, L and Yang, S and Lu, Y and Zhao, D}, title = {WSNEAP: An Efficient Authentication Protocol for IIoT-Oriented Wireless Sensor Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {19}, pages = {}, pmid = {36236523}, issn = {1424-8220}, mesh = {Computer Communication Networks ; *Computer Security ; *Internet of Things ; }, abstract = {With the development of the Industrial Internet of Things (IIoT), industrial wireless sensors need to upload the collected private data to the cloud servers, resulting in a large amount of private data being exposed on the Internet. Private data are vulnerable to hacking. Many complex wireless-sensor-authentication protocols have been proposed. In this paper, we proposed an efficient authentication protocol for IIoT-oriented wireless sensor networks. The protocol introduces the PUF chip, and uses the Bloom filter to save and query the challenge-response pairs generated by the PUF chip. It ensures the security of the physical layer of the device and reduces the computing cost and communication cost of the wireless sensor side. The protocol introduces a pre-authentication mechanism to achieve continuous authentication between the gateway and the cloud server. The overall computational cost of the protocol is reduced. Formal security analysis and informal security analysis proved that our proposed protocol has more security features. We implemented various security primitives using the MIRACL cryptographic library and GMP large number library. Our proposed protocol was compared in-depth with related work. Detailed experiments show that our proposed protocol significantly reduces the computational cost and communication cost on the wireless sensor side and the overall computational cost of the protocol.}, } @article {pmid36236264, year = {2022}, author = {Thirumalaisamy, M and Basheer, S and Selvarajan, S and Althubiti, SA and Alenezi, F and Srivastava, G and Lin, JC}, title = {Interaction of Secure Cloud Network and Crowd Computing for Smart City Data Obfuscation.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {19}, pages = {}, pmid = {36236264}, issn = {1424-8220}, mesh = {*Algorithms ; *Cloud Computing ; Software ; }, abstract = {There can be many inherent issues in the process of managing cloud infrastructure and the platform of the cloud. The platform of the cloud manages cloud software and legality issues in making contracts. The platform also handles the process of managing cloud software services and legal contract-based segmentation. In this paper, we tackle these issues directly with some feasible solutions. 
For these constraints, the Averaged One-Dependence Estimators (AODE) classifier and the SELECT Applicable Only to Parallel Server (SELECT-APSL ASA) method are proposed to separate the data related to the place. ASA is made up of the AODE and SELECT Applicable Only to Parallel Server. The AODE classifier is used to separate the data from smart city data based on the hybrid data obfuscation technique. The data from the hybrid data obfuscation technique manages 50% of the raw data, and 50% of hospital data is masked using the proposed transmission. The analysis of energy consumption before the cryptosystem shows the total packet delivered by about 71.66% compared with existing algorithms. The analysis of energy consumption after cryptosystem assumption shows 47.34% consumption, compared to existing state-of-the-art algorithms. The average energy consumption before data obfuscation decreased by 2.47%, and the average energy consumption after data obfuscation was reduced by 9.90%. The analysis of the makespan time before data obfuscation decreased by 33.71%. Compared to existing state-of-the-art algorithms, the study of makespan time after data obfuscation decreased by 1.3%. These impressive results show the strength of our methodology.}, } @article {pmid36227021, year = {2022}, author = {Yang, DM and Chang, TJ and Hung, KF and Wang, ML and Cheng, YF and Chiang, SH and Chen, MF and Liao, YT and Lai, WQ and Liang, KH}, title = {Smart healthcare: A prospective future medical approach for COVID-19.}, journal = {Journal of the Chinese Medical Association : JCMA}, volume = {}, number = {}, pages = {}, doi = {10.1097/JCMA.0000000000000824}, pmid = {36227021}, issn = {1728-7731}, abstract = {COVID-19 has greatly affected human life for over 3 years. In this review, we focus on smart healthcare solutions that address major requirements for coping with the COVID-19 pandemic, including (1) the continuous monitoring of severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), (2) patient stratification with distinct short-term outcomes (e.g. mild or severe diseases) and long-term outcomes (e.g. long COVID), and (3) adherence to medication and treatments for patients with COVID-19. Smart healthcare often utilizes medical artificial intelligence (AI) and cloud computing and integrates cutting-edge biological and optoelectronic techniques. These are valuable technologies for addressing the unmet needs in the management of COVID. By leveraging deep/machine learning (DL/ML) capabilities and big data, medical AI can perform precise prognosis predictions and provide reliable suggestions for physicians' decision-making. Through the assistance of the Internet of Medical Things (IoMT), which encompasses wearable devices, smartphone apps, Internet-based drug delivery systems, and telemedicine technologies, the status of mild cases can be continuously monitored and medications provided at home without the need for hospital care. In cases that develop into severe cases, emergency feedback can be provided through the hospital for rapid treatment. 
Smart healthcare can possibly prevent the development of severe COVID-19 cases and therefore lower the burden on intensive care units.}, } @article {pmid36225544, year = {2022}, author = {Li, H}, title = {Cloud Computing Image Processing Application in Athlete Training High-Resolution Image Detection.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {7423411}, pmid = {36225544}, issn = {1687-5273}, mesh = {Algorithms ; *Artificial Intelligence ; Athletes ; *Cloud Computing ; Humans ; Image Processing, Computer-Assisted/methods ; }, abstract = {The rapid development of Internet of things mobile application technology and artificial intelligence technology has given birth to a lot of services that can meet the needs of modern life, such as augmented reality technology, face recognition services, and language recognition and translation, which are often applied to various fields, and some other aspects of information communication and processing services. It has been used on various mobile phone, computer, or tablet user clients. Terminal equipment is subject to the ultralow latency and low energy consumption requirements of the above-mentioned applications. Therefore, the gap between resource-demanding application services and resource-limited mobile devices will bring great problems to the current and future development of IoT mobile applications. Based on the local image features of depth images, this paper designs an image detection method for athletes' motion posture. First, according to the characteristics of the local image, the depth image of the athlete obtained through Kinect is converted into bone point data. Next, a 3-stage exploration algorithm is used to perform block matching calculations on the athlete's bone point image to predict the athlete's movement posture. At the same time, using the characteristics of the Euclidean distance of the bone point image, the movement behavior is recognized. According to the experimental results, for some external environmental factors, such as sun illumination and other factors, the image detection method designed in this paper can effectively avoid their interference and influence and show the movement posture of athletes, showing excellent accuracy and robustness in predicting the movement posture of athletes and action recognition. This method can simplify a series of calibration tasks in the initial stage of 3D video surveillance and infer the posture of the observation target and recognize it in real time. The one that has good application values has specific reference values for the same job.}, } @article {pmid36210997, year = {2022}, author = {B, D and M, L and R, A and Kallimani, JS and Walia, R and Belete, B}, title = {A Novel Feature Selection with Hybrid Deep Learning Based Heart Disease Detection and Classification in the e-Healthcare Environment.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {1167494}, pmid = {36210997}, issn = {1687-5273}, mesh = {Cloud Computing ; *Deep Learning ; *Heart Diseases/diagnosis ; Humans ; Neural Networks, Computer ; *Telemedicine ; }, abstract = {With the advancements in data mining, wearables, and cloud computing, online disease diagnosis services have been widely employed in the e-healthcare environment and improved the quality of the services. The e-healthcare services help to reduce the death rate by the earlier identification of the diseases. 
Simultaneously, heart disease (HD) is a deadly disorder, and patient survival depends on early diagnosis of HD. Early HD diagnosis and categorization play a key role in the analysis of clinical data. In the context of e-healthcare, we provide a novel feature selection with hybrid deep learning-based heart disease detection and classification (FSHDL-HDDC) model. The two primary preprocessing processes of the FSHDL-HDDC approach are data normalisation and the replacement of missing values. The FSHDL-HDDC method also necessitates the development of a feature selection method based on the elite opposition-based squirrel searchalgorithm (EO-SSA) in order to determine the optimal subset of features. Moreover, an attention-based convolutional neural network (ACNN) with long short-term memory (LSTM), called (ACNN-LSTM) model, is utilized for the detection of HD by using medical data. An extensive experimental study is performed to ensure the improved classification performance of the FSHDL-HDDC technique. A detailed comparison study reported the betterment of the FSHDL-HDDC method on existing techniques interms of different performance measures. The suggested system, the FSHDL-HDDC, has reached its maximum level of accuracy, which is 0.9772.}, } @article {pmid36210990, year = {2022}, author = {Chen, X and Huang, X}, title = {Application of Price Competition Model Based on Computational Neural Network in Risk Prediction of Transnational Investment.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {8906385}, pmid = {36210990}, issn = {1687-5273}, mesh = {Algorithms ; Commerce ; Industry ; *Investments ; *Neural Networks, Computer ; }, abstract = {Aiming at the scenario where edge devices rely on cloud servers for collaborative computing, this paper proposes an efficient edge-cloud collaborative reasoning method. In order to meet the application's specific requirements for delay or accuracy, an optimal division point selection algorithm is proposed. A kind of multichannel supply chain price game model is constructed, and nonlinear dynamics theory is introduced into the research of the multichannel supply chain market. According to the actual competition situation, the different business strategies of retailers are considered in the modeling, which makes the model closer to the actual competition situation. Taking the retailer's profit as an indicator, the influence of the chaos phenomenon on the market performance is analyzed. Compared with the previous studies, this thesis uses nonlinear theory to better reveal the operating laws of the economic system. This paper selects company A in the financial industry to acquire company B in Sweden. It is concluded that company B is currently facing financial difficulties, but its brand and technical advantages are far superior to company A. The indirect financial risk index of company B, that is, the investment environment, is analyzed, and the final investment environment score of the country where company B is located is 90 points, which is an excellent grade by scoring the investment environment of the target enterprise. 
Combining the investment environment score and the alarm situation prediction score, it is concluded that the postmerger financial risk warning level of company A is at the serious-alarm level.}, } @article {pmid36207705, year = {2022}, author = {Zhao, Y and Rokhani, FZ and Sazlina, SG and Devaraj, NK and Su, J and Chew, BH}, title = {Defining the concepts of a smart nursing home and its potential technology utilities that integrate medical services and are acceptable to stakeholders: a scoping review.}, journal = {BMC geriatrics}, volume = {22}, number = {1}, pages = {787}, pmid = {36207705}, issn = {1471-2318}, mesh = {Aged ; Humans ; *Nursing Homes ; *Quality of Life ; Skilled Nursing Facilities ; Technology ; }, abstract = {BACKGROUND AND OBJECTIVES: Smart technology in nursing home settings has the potential to improve operations that manage a larger number of older residents. However, the concepts, definitions, and types of smart technology, integrated medical services, and stakeholders' acceptability of smart nursing homes are less clear. This scoping review aims to define a smart nursing home and examine the qualitative evidence on technological feasibility, integration of medical services, and acceptability of the stakeholders.

METHODS: Comprehensive searches were conducted on stakeholders' websites (Phase 1) and 11 electronic databases (Phase 2) for existing concepts of the smart nursing home, for what technologies and medical services were implemented in nursing home settings and how, and for acceptability assessments by the stakeholders. Publications from January 1999 to September 2021 were included. The language was limited to English and Chinese. Included articles had to report on nursing home settings for older adults ≥ 60 years old, with or without medical needs, who were not bed-bound. Technology Readiness Levels were used to measure the readiness of new technologies and system designs. The analysis was guided by the Framework Method and the smart technology adoption behaviours of elder consumers theoretical model. The results were reported according to the PRISMA-ScR.

RESULTS: A total of 177 publications (13 website documents and 164 journal articles) were selected. Smart nursing homes are technology-assisted nursing homes that support their residents' enjoyment of life. They used IoT, computing technologies, cloud computing, big data and AI, information management systems, and digital health to integrate medical services in monitoring abnormal events, assisting daily living, conducting teleconsultation, managing health information, and improving the interaction between providers and residents. Fifty-five percent of the new technologies were ready for use in nursing homes (levels 6-7), and the remainder had demonstrated technical feasibility (levels 1-5). Healthcare professionals with higher education, better tech-savviness, and fewer years at work, and older adults with more severe illnesses, were more accepting of smart technologies.

CONCLUSIONS: Smart nursing homes with integrated medical services have great potential to improve the quality of care and ensure older residents' quality of life.}, } @article {pmid36206751, year = {2022}, author = {Chen, L and Yu, L and Liu, Y and Xu, H and Ma, L and Tian, P and Zhu, J and Wang, F and Yi, K and Xiao, H and Zhou, F and Yang, Y and Cheng, Y and Bai, L and Wang, F and Zhu, Y}, title = {Space-time-regulated imaging analyzer for smart coagulation diagnosis.}, journal = {Cell reports. Medicine}, volume = {3}, number = {10}, pages = {100765}, pmid = {36206751}, issn = {2666-3791}, mesh = {*Artificial Intelligence ; Prospective Studies ; *Blood Coagulation ; Blood Coagulation Factors ; Fibrinogen/analysis ; }, abstract = {Intelligent blood coagulation diagnosis is needed to meet today's large, time-sensitive clinical caseloads, owing to its efficient and automated diagnoses. Herein, a method that realizes this through artificial intelligence (AI)-assisted optical clotting biophysics (OCB) property identification is reported and validated. Image differential calculation is used for precise acquisition of OCB properties while eliminating initial differences, and a space-time regulation strategy allows on-demand space-time OCB property identification and enables diverse blood function diagnoses. The integrated applications of smartphones and cloud computing offer a user-friendly automated analysis for accurate and convenient diagnoses. Prospective assays of clinical cases (n = 41) show that the system achieves 97.6%, 95.1%, and 100% accuracy for coagulation factors, fibrinogen function, and comprehensive blood coagulation diagnoses, respectively. This method should enable more low-cost and convenient diagnoses and provide a path toward the discovery of potential diagnostic markers.}, } @article {pmid36206264, year = {2022}, author = {Fu, Z}, title = {Computer cyberspace security mechanism supported by cloud computing.}, journal = {PloS one}, volume = {17}, number = {10}, pages = {e0271546}, pmid = {36206264}, issn = {1932-6203}, mesh = {Algorithms ; *Cloud Computing ; Computer Security ; Computers ; Internet ; *NAD ; }, abstract = {To improve the cybersecurity of Cloud Computing (CC) systems, this paper first proposes a Network Anomaly Detection (NAD) model based on the Fuzzy C-Means (FCM) clustering algorithm. Secondly, a Cybersecurity Assessment Model (CAM) based on Grey Relational Grade (GRG) is constructed. Finally, combining the Rivest-Shamir-Adleman (RSA) algorithm, this work proposes a data encryption technology oriented to CC networks, selects different datasets for the different models, and tests each model through designed experiments. The results show that the average Correct Detection Rate (CDR) of the NAD model for different types of abnormal data is 93.33%. The average False Positive Rate (FPR) and the average Unreported Rate (UR) are 6.65% and 16.27%, respectively. Thus, the NAD model can ensure high detection accuracy when sufficient data are available. Meanwhile, the cybersecurity situation predicted by the CAM is in good agreement with the actual situation. The error between the average predicted cybersecurity situation value and the actual value is only 0.82%, and the prediction accuracy is high. The RSA algorithm keeps the average encryption time for very large texts at about 12 s. The decryption time is slightly longer but within a reasonable range. For texts of other sizes, the encryption time stays within 0.5 s.
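As a concrete illustration of the clustering step underlying the NAD model in the entry above, a minimal fuzzy C-means sketch in plain NumPy follows; the toy feature vectors, the cluster count, and the distance-percentile anomaly flag are illustrative assumptions, not the paper's implementation.

```python
import numpy as np

def fuzzy_c_means(X, n_clusters=2, m=2.0, n_iter=100, tol=1e-5, seed=0):
    """Plain fuzzy C-means: returns cluster centers and the membership matrix."""
    rng = np.random.default_rng(seed)
    U = rng.random((X.shape[0], n_clusters))
    U /= U.sum(axis=1, keepdims=True)          # memberships sum to 1 per sample
    for _ in range(n_iter):
        Um = U ** m
        centers = (Um.T @ X) / Um.sum(axis=0)[:, None]
        dist = np.linalg.norm(X[:, None, :] - centers[None, :, :], axis=2) + 1e-10
        new_U = dist ** (-2.0 / (m - 1.0))     # u_ij proportional to d_ij^(-2/(m-1))
        new_U /= new_U.sum(axis=1, keepdims=True)
        if np.abs(new_U - U).max() < tol:
            U = new_U
            break
        U = new_U
    return centers, U

# Toy traffic feature vectors with a few injected outliers.
rng = np.random.default_rng(1)
X = np.vstack([rng.normal(0, 1, (200, 4)), rng.normal(8, 1, (5, 4))])
centers, U = fuzzy_c_means(X)
d_min = np.linalg.norm(X[:, None, :] - centers[None, :, :], axis=2).min(axis=1)
anomalies = np.where(d_min > np.percentile(d_min, 97.5))[0]  # flag far-out samples
```

Records whose distance to every learned cluster center is unusually large are the natural anomaly candidates in such a scheme; the 97.5th-percentile threshold here is only a placeholder.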
This work aims to provide important technical support for anomaly detection, overall security situation analysis, and data transmission security protection of CC systems to improve their cybersecurity.}, } @article {pmid36204298, year = {2022}, author = {Zhang, C and Cheng, T and Li, D and Yu, X and Chen, F and He, Q}, title = {Low-host double MDA workflow for uncultured ASFV positive blood and serum sample sequencing.}, journal = {Frontiers in veterinary science}, volume = {9}, number = {}, pages = {936781}, pmid = {36204298}, issn = {2297-1769}, abstract = {African swine fever (ASF) is a highly lethal and contagious disease caused by African swine fever virus (ASFV). Whole-genome sequencing of ASFV is necessary to study its mutation and recombination and to trace its transmission. Uncultured samples contain a considerable amount of background DNA, which wastes sequencing throughput, storage space, and computing resources. Sequencing methods previously attempted for uncultured samples have various drawbacks. In this study, we improved a C18 spacer MDA (Multiple Displacement Amplification)-combined host DNA exhaustion strategy to remove background DNA and suit both NGS and TGS sequencing. Using this workflow, we successfully sequenced two uncultured ASFV positive samples. The results show that this method can significantly reduce the percentage of background DNA. We also developed software that performs real-time base calling and interval-based analyses of ASFV TGS sequencing reads on a cloud server.}, } @article {pmid36197869, year = {2022}, author = {Guo, MH and Liu, ZN and Mu, TJ and Hu, SM}, title = {Beyond Self-Attention: External Attention Using Two Linear Layers for Visual Tasks.}, journal = {IEEE transactions on pattern analysis and machine intelligence}, volume = {PP}, number = {}, pages = {}, doi = {10.1109/TPAMI.2022.3211006}, pmid = {36197869}, issn = {1939-3539}, abstract = {Attention mechanisms, especially self-attention, have played an increasingly important role in deep feature representation for visual tasks. Self-attention updates the feature at each position by computing a weighted sum of features using pair-wise affinities across all positions to capture the long-range dependency within a single sample. However, self-attention has quadratic complexity and ignores potential correlation between different samples. This paper proposes a novel attention mechanism which we call external attention, based on two external, small, learnable, shared memories, which can be implemented easily by simply using two cascaded linear layers and two normalization layers; it conveniently replaces self-attention in existing popular architectures. External attention has linear complexity and implicitly considers the correlations between all data samples. We further incorporate the multi-head mechanism into external attention to provide an all-MLP architecture, external attention MLP (EAMLP), for image classification.
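Because the external-attention entry above pins the mechanism down precisely (two cascaded linear layers acting as small shared memories, plus a double normalization), a minimal PyTorch sketch is easy to give; the memory size s = 64 and the tensor shapes are illustrative assumptions.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class ExternalAttention(nn.Module):
    """External attention: affinities to a learned memory replace pair-wise
    token affinities, giving linear complexity in the number of tokens."""
    def __init__(self, d_model: int, s: int = 64):
        super().__init__()
        self.mk = nn.Linear(d_model, s, bias=False)  # memory-key unit M_k
        self.mv = nn.Linear(s, d_model, bias=False)  # memory-value unit M_v

    def forward(self, x):                    # x: (batch, n_tokens, d_model)
        attn = self.mk(x)                    # (batch, n_tokens, s)
        attn = F.softmax(attn, dim=1)        # normalize over tokens...
        attn = attn / (attn.sum(dim=2, keepdim=True) + 1e-9)  # ...then over memory slots
        return self.mv(attn)                 # (batch, n_tokens, d_model)

y = ExternalAttention(256)(torch.randn(2, 196, 256))  # drop-in shape check
```

Since the affinity matrix is n_tokens by s with s fixed, cost grows linearly in the number of tokens, which is the complexity advantage the abstract claims over self-attention.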
Extensive experiments on image classification, object detection, semantic segmentation, instance segmentation, image generation, and point cloud analysis reveal that our method provides results comparable or superior to the self-attention mechanism and some of its variants, with much lower computational and memory costs.}, } @article {pmid36194325, year = {2022}, author = {Zhou, Y and Hu, Z and Geng, Q and Ma, J and Liu, J and Wang, M and Wang, Y}, title = {Monitoring and analysis of desertification surrounding Qinghai Lake (China) using remote sensing big data.}, journal = {Environmental science and pollution research international}, volume = {}, number = {}, pages = {}, pmid = {36194325}, issn = {1614-7499}, abstract = {Desertification is one of the most serious ecological environmental problems in the world. Monitoring the spatiotemporal dynamics of desertification is crucial for its control. The region around Qinghai Lake, in the northeastern part of the Qinghai-Tibet Plateau in China, is a special ecological function area and a climate change sensitive area, making its environmental conditions a great concern. Using cloud computing via Google Earth Engine (GEE), we collected Landsat 5 TM, Landsat 8 OLI/TIRS, and MODIS Albedo images from 2000 to 2020 in the region around Qinghai Lake, derived land surface albedo (Albedo) and the normalized difference vegetation index (NDVI), and built a remote sensing monitoring model of desertification. Our results showed that the desertification difference index based on the Albedo-NDVI feature space could reflect the degree of desertification in the region around Qinghai Lake. GEE offers significant advantages, such as massive data processing and long-term dynamic monitoring. The desertified land area fluctuated downward in the study area from 2000 to 2020, and the overall desertification status improved. Natural factors, such as the change from a warm-dry to a warm-wet climate and decreased wind speed, together with human factors, improved the desertification situation. The findings indicate that desertification in the region around Qinghai Lake has been effectively controlled, and the overall desertification trend is improving.}, } @article {pmid36190152, year = {2022}, author = {Greene, D}, title = {Landlords of the internet: Big data and big real estate.}, journal = {Social studies of science}, volume = {52}, number = {6}, pages = {904-927}, doi = {10.1177/03063127221124943}, pmid = {36190152}, issn = {1460-3659}, mesh = {Humans ; *Big Data ; *Internet ; Commerce ; Technology ; }, abstract = {Who owns the internet? It depends where you look. The physical assets at the core of the internet, the warehouses that store the cloud's data and interlink global networks, are owned not by technology firms like Google and Facebook but by commercial real estate barons who compete with malls and property storage empires. Granted an empire by the US at the moment of the internet's commercialization, these internet landlords shaped how the network of networks that we call the internet physically connects, and how personal and business data is stored and transmitted. Under their governance, internet exchanges, colocation facilities, and data centers take on a double life as financialized real estate assets that circle the globe even as their servers and cables are firmly rooted in place. The history of internet landlords forces a fundamental reconsideration of the business model at the base of the internet.
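A minimal sketch of the kind of GEE workflow described in the Qinghai Lake desertification entry above follows, using the Earth Engine Python API; the area of interest, date range, and the DDI coefficient are illustrative assumptions, and the DDI formula follows the common Albedo-NDVI feature-space construction rather than the paper's exact recipe.

```python
import ee

ee.Initialize()  # assumes an authenticated Earth Engine account

# Hypothetical area of interest around Qinghai Lake.
aoi = ee.Geometry.Point([100.18, 36.89]).buffer(60000)

# Median Landsat 8 surface-reflectance composite for one year.
l8 = (ee.ImageCollection('LANDSAT/LC08/C02/T1_L2')
      .filterBounds(aoi)
      .filterDate('2020-01-01', '2020-12-31')
      .median())
ndvi = l8.normalizedDifference(['SR_B5', 'SR_B4']).rename('NDVI')

# MODIS black-sky shortwave albedo, rescaled by its 0.001 factor.
albedo = (ee.ImageCollection('MODIS/061/MCD43A3')
          .filterDate('2020-01-01', '2020-12-31')
          .select('Albedo_BSA_shortwave')
          .median()
          .multiply(0.001)
          .rename('ALBEDO'))

# Desertification difference index in the Albedo-NDVI feature space:
# DDI = a * NDVI - Albedo, with a normally fitted from the feature-space
# slope (a placeholder value is used here).
a = 2.0
ddi = ndvi.multiply(a).subtract(albedo).rename('DDI')
print(ddi.reduceRegion(ee.Reducer.mean(), aoi, scale=500).getInfo())
```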
This history makes clear that the internet was never an exogenous shock to capitalist social relations, but rather a touchstone example of an economic system increasingly ruled by asset owners like landlords.}, } @article {pmid36188195, year = {2022}, author = {Zhou, Y and Varzaneh, MG}, title = {Efficient and scalable patients clustering based on medical big data in cloud platform.}, journal = {Journal of cloud computing (Heidelberg, Germany)}, volume = {11}, number = {1}, pages = {49}, pmid = {36188195}, issn = {2192-113X}, abstract = {With the worldwide outbreak of the COVID-19 pandemic, patient volumes are increasing rapidly all over the world, which brings big risks and challenges to the maintenance of public healthcare. In this situation, quick integration and analysis of the medical records of patients in a cloud platform are of real value for the accurate recognition and scientific diagnosis of the health conditions of potential patients. However, because large volumes of patient medical data are distributed across different platforms (e.g., multiple hospitals), integrating these data for patient clustering and analysis in a time-efficient and scalable manner in a cloud platform, while guaranteeing privacy preservation, is still a challenging task. Motivated by this fact, a time-efficient, scalable, and privacy-guaranteed patient clustering method for cloud platforms is proposed in this work. Finally, we demonstrate the competitive advantages of our method via a set of simulated experiments. Experimental comparisons with competing methods from the current research literature prove the feasibility of our proposal.}, } @article {pmid36185458, year = {2022}, author = {Moser, N and Yu, LS and Rodriguez Manzano, J and Malpartida-Cardenas, K and Au, A and Arkell, P and Cicatiello, C and Moniri, A and Miglietta, L and Wang, WH and Wang, SF and Holmes, A and Chen, YH and Georgiou, P}, title = {Quantitative detection of dengue serotypes using a smartphone-connected handheld lab-on-chip platform.}, journal = {Frontiers in bioengineering and biotechnology}, volume = {10}, number = {}, pages = {892853}, pmid = {36185458}, issn = {2296-4185}, abstract = {Dengue is one of the most prevalent infectious diseases in the world. Rapid, accurate and scalable diagnostics are key to patient management and epidemiological surveillance of the dengue virus (DENV), however current technologies do not match required clinical sensitivity and specificity or rely on large laboratory equipment. In this work, we report the translation of our smartphone-connected handheld Lab-on-Chip (LoC) platform for the quantitative detection of two dengue serotypes. At its core, the approach relies on the combination of Complementary Metal-Oxide-Semiconductor (CMOS) microchip technology to integrate an array of 78 × 56 potentiometric sensors, and a label-free reverse-transcriptase loop mediated isothermal amplification (RT-LAMP) assay. The platform communicates to a smartphone app which synchronises results in real time with a secure cloud server hosted by Amazon Web Services (AWS) for epidemiological surveillance. The assay on our LoC platform (RT-eLAMP) was shown to match performance on a gold-standard fluorescence-based real-time instrument (RT-qLAMP) with synthetic DENV-1 and DENV-2 RNA and extracted RNA from 9 DENV-2 clinical isolates, achieving quantitative detection in under 15 min.
To validate the portability of the platform and the geo-tagging capabilities, we conducted our study in the laboratories at Imperial College London, UK, and Kaohsiung Medical Hospital, Taiwan. This approach carries high potential for application in low resource settings at the point of care (PoC).}, } @article {pmid36179156, year = {2022}, author = {Sun, J and Endo, S and Lin, H and Hayden, P and Vedral, V and Yuan, X}, title = {Perturbative Quantum Simulation.}, journal = {Physical review letters}, volume = {129}, number = {12}, pages = {120505}, doi = {10.1103/PhysRevLett.129.120505}, pmid = {36179156}, issn = {1079-7114}, abstract = {Approximation based on perturbation theory is the foundation for most of the quantitative predictions of quantum mechanics, whether in quantum many-body physics, chemistry, quantum field theory, or other domains. Quantum computing provides an alternative to the perturbation paradigm, yet state-of-the-art quantum processors with tens of noisy qubits are of limited practical utility. Here, we introduce perturbative quantum simulation, which combines the complementary strengths of the two approaches, enabling the solution of large practical quantum problems using limited noisy intermediate-scale quantum hardware. The use of a quantum processor eliminates the need to identify a solvable unperturbed Hamiltonian, while the introduction of perturbative coupling permits the quantum processor to simulate systems larger than the available number of physical qubits. We present an explicit perturbative expansion that mimics the Dyson series expansion and involves only local unitary operations, and show its optimality over other expansions under certain conditions. We numerically benchmark the method for interacting bosons, fermions, and quantum spins in different topologies, and study different physical phenomena, such as information propagation, charge-spin separation, and magnetism, on systems of up to 48 qubits only using an 8+1 qubit quantum hardware. We demonstrate our scheme on the IBM quantum cloud, verifying its noise robustness and illustrating its potential for benchmarking large quantum processors with smaller ones.}, } @article {pmid36174081, year = {2022}, author = {Mul, E and Ancin Murguzur, FJ and Hausner, VH}, title = {Impact of the COVID-19 pandemic on human-nature relations in a remote nature-based tourism destination.}, journal = {PloS one}, volume = {17}, number = {9}, pages = {e0273354}, pmid = {36174081}, issn = {1932-6203}, mesh = {*COVID-19/epidemiology ; Human Characteristics ; Humans ; Pandemics ; *Tourism ; Travel ; }, abstract = {Tourism and nature-based recreation has changed dramatically during the COVID-19 pandemic. Travel restrictions caused sharp declines in visitation numbers, particularly in remote areas, such as northern Norway. In addition, the pandemic may have altered human-nature relationships by changing visitor behaviour and preferences. We studied visitor numbers and behaviour in northern Norway, based on user-generated data, in the form of photographic material that was uploaded to the popular online platform Flickr. A total of 195,200 photographs, taken by 5,247 photographers, were subjected to Google's "Cloud Vision" automatic content analysis algorithm. The resulting collection of labels assigned to each photograph was analysed in structural topic models, using photography date (relative to the start of the pandemic measures in Norway) and reported or estimated photographers' nationality as explanatory variables.
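The content-analysis step in the Flickr study above maps directly onto Cloud Vision's label-detection endpoint; a minimal sketch of that step follows, with the file name and the ambient credential setup as assumptions.

```python
from google.cloud import vision  # pip install google-cloud-vision

def label_photo(path: str):
    """Return (description, score) label pairs for one photo via Cloud Vision."""
    client = vision.ImageAnnotatorClient()  # assumes application credentials are set
    with open(path, 'rb') as f:
        image = vision.Image(content=f.read())
    response = client.label_detection(image=image)
    return [(l.description, l.score) for l in response.label_annotations]

# Hypothetical usage: build a per-photo bag of labels for later topic modelling.
print(label_photo('flickr_photo_0001.jpg'))
```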
Our results show that nature-based recreation relating to "mountains" and "winter" became more prevalent during the pandemic, amongst both domestic and international photographers. Shifts in preferences due to the pandemic outbreak strongly depended on nationality, with domestic visitors demonstrating a wide interest in topics while international visitors maintained their preference for nature-based experiences. Among the activities that suffered most from the decline in international tourism were northern-lights viewing and cruises, as indicated by the topic models. In contrast, images depicting mountains and flora and fauna increased in prevalence during the pandemic. Domestic visitors, meanwhile, spent more time in urban settings as a result of restrictions, which resulted in a higher prevalence of non-nature-related images. Our results underscore the need to consider the dynamic nature of human-nature relationships. The contrast in flexibility to adapt to changing conditions and travel restrictions should be incorporated in collaborative efforts of municipalities and tour operators to develop sustainable local nature-based tourism products, particularly in remote areas.}, } @article {pmid36172315, year = {2022}, author = {Jiang, Y and Lei, Y}, title = {Implementation of Trusted Traceability Query Using Blockchain and Deep Reinforcement Learning in Resource Management.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {6559517}, pmid = {36172315}, issn = {1687-5273}, mesh = {Algorithms ; *Blockchain ; Cloud Computing ; Technology ; }, abstract = {To better track the source of goods and maintain their quality, the present work uses blockchain technology to establish a system for trusted traceability queries and information management. First, the shortcomings of current traceability systems in the field of agricultural products are analyzed, the application of blockchain technology to traceability systems is studied, and a new model of agricultural product traceability system is established based on blockchain technology. Then, the task scheduling problem of resource clusters in cloud computing resource management is studied. The present work expands the task model and uses the deep Q network algorithm from deep reinforcement learning to address the various optimization objectives preset in the task scheduling problem. Next, a resource management algorithm based on a deep Q network is proposed. Finally, the performance of the algorithm is analyzed in terms of parameters, structure, and task load. Experiments show that the algorithm outperforms Shortest Job First (SJF), Tetris*, Packer, and other classic task scheduling algorithms on different optimization objectives. In the traceability system test, the traceability accuracy is 99% for the constructed system in the first group of samples. In the second group, the traceability accuracy reaches 98% for the constructed system. In general, the traceability accuracy of the system proposed here is above 98% in 8 groups of experimental samples, and the traceability accuracy is similar across the experimental groups.
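The deep Q network scheduler described in the traceability entry above rests on a standard temporal-difference update; a minimal PyTorch sketch of that update follows, where the state/action sizes, network width, and hyperparameters are placeholder assumptions rather than the paper's settings.

```python
import random
from collections import deque

import torch
import torch.nn as nn

class QNet(nn.Module):
    """Maps a task/cluster state vector to one Q-value per scheduling action."""
    def __init__(self, n_state: int, n_action: int):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(n_state, 64), nn.ReLU(), nn.Linear(64, n_action))

    def forward(self, x):
        return self.net(x)

n_state, n_action, gamma = 8, 4, 0.99
qnet, target = QNet(n_state, n_action), QNet(n_state, n_action)
target.load_state_dict(qnet.state_dict())   # target network starts as a copy
opt = torch.optim.Adam(qnet.parameters(), lr=1e-3)
replay = deque(maxlen=10000)                # holds (s, a, r, s', done) tensor tuples

def train_step(batch_size: int = 32):
    """One TD update: target is r + gamma * max_a' Q_target(s', a')."""
    if len(replay) < batch_size:
        return
    s, a, r, s2, done = map(torch.stack, zip(*random.sample(replay, batch_size)))
    q = qnet(s).gather(1, a.long().unsqueeze(1)).squeeze(1)
    with torch.no_grad():
        y = r + gamma * target(s2).max(dim=1).values * (1 - done)
    loss = nn.functional.mse_loss(q, y)
    opt.zero_grad()
    loss.backward()
    opt.step()
```

In a scheduling setting the reward r would encode the preset optimization objectives (e.g., slowdown or completion time); that mapping is the paper's contribution and is not reproduced here.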
The resource management approach of the traceability system constructed here provides some ideas for the application of reinforcement learning technology in the construction of traceability systems.}, } @article {pmid36171329, year = {2022}, author = {Wolf, K and Dawson, RJ and Mills, JP and Blythe, P and Morley, J}, title = {Towards a digital twin for supporting multi-agency incident management in a smart city.}, journal = {Scientific reports}, volume = {12}, number = {1}, pages = {16221}, pmid = {36171329}, issn = {2045-2322}, mesh = {*Ambulances ; Cities ; Cloud Computing ; *Emergency Medical Services ; Floods ; }, abstract = {Cost-effective on-demand computing resources can help to process the increasing number of large, diverse datasets generated from smart internet-enabled technology, such as sensors, CCTV cameras, and mobile devices, with high temporal resolution. Category 1 emergency services (Ambulance, Fire and Rescue, and Police) can benefit from access to (near) real-time traffic- and weather data to coordinate multiple services, such as reassessing a route on the transport network affected by flooding or road incidents. However, there is a tendency not to utilise available smart city data sources, due to the heterogeneous data landscape, lack of real-time information, and communication inefficiencies. Using a systems engineering approach, we identify the current challenges faced by stakeholders involved in incident response and formulate future requirements for an improved system. Based on these initial findings, we develop a use case using Microsoft Azure cloud computing technology for analytical functionalities that can better support stakeholders in their response to an incident. Our prototype allows stakeholders to view available resources, send automatic updates and integrate location-based real-time weather and traffic data. We anticipate our study will provide a foundation for the future design of a data ontology for multi-agency incident response in smart cities of the future.}, } @article {pmid36164525, year = {2022}, author = {Roy, B and Bari, E}, title = {Examining the relationship between land surface temperature and landscape features using spectral indices with Google Earth Engine.}, journal = {Heliyon}, volume = {8}, number = {9}, pages = {e10668}, pmid = {36164525}, issn = {2405-8440}, abstract = {Land surface temperature (LST) is strongly influenced by landscape features as they change the thermal characteristics of the surface greatly. Normalized Difference Vegetation Index (NDVI), Normalized Difference Water Index (NDWI), Normalized Difference Built-up Index (NDBI), and Normalized Difference Bareness Index (NDBAI) correspond to vegetation cover, water bodies, impervious build-ups, and bare lands, respectively. These indices, derived from Landsat 5 Thematic Mapper (TM) and Landsat 8 Operational Land Imager (OLI) images of Sylhet Sadar Upazila (2000-2018), were utilized to demonstrate the relationship between multiple landscape features and LST. The Google Earth Engine (GEE) cloud computing platform was used to filter, process, and analyze trends with logistic regression. LST and other spectral indices were calculated. Changes in LST (2000-2018) range from -6 °C to +4 °C in the study area. Because of higher vegetation cover and reserve forest, the north-eastern part of the study region had the greatest variations in LST.
The spectral indices corresponding to landscape features have a considerable explanatory capacity for describing LST scenarios. The correlation of these indices with LST ranges from -0.52 (NDBI) to +0.57 (NDVI).}, } @article {pmid36161827, year = {2022}, author = {Huemer, J and Kronschläger, M and Ruiss, M and Sim, D and Keane, PA and Findl, O and Wagner, SK}, title = {Diagnostic accuracy of code-free deep learning for detection and evaluation of posterior capsule opacification.}, journal = {BMJ open ophthalmology}, volume = {7}, number = {1}, pages = {}, pmid = {36161827}, issn = {2397-3269}, mesh = {Area Under Curve ; *Capsule Opacification/diagnosis ; *Deep Learning ; Humans ; Retrospective Studies ; Vision Disorders ; }, abstract = {OBJECTIVE: To train and validate a code-free deep learning system (CFDLS) on classifying high-resolution digital retroillumination images of posterior capsule opacification (PCO) and to discriminate between clinically significant and non-significant PCOs.

METHODS AND ANALYSIS: For this retrospective registry study, three expert observers graded two independent datasets of 279 images three separate times, from no PCO to severe PCO, providing binary labels for clinical significance. The CFDLS was trained and internally validated using 179 images of a training dataset and externally validated with 100 images. Model development was through Google Cloud AutoML Vision. Intraobserver and interobserver variabilities were assessed using Fleiss kappa (κ) coefficients and model performance through sensitivity, specificity and area under the curve (AUC).

RESULTS: Intraobserver variability κ values for observers 1, 2 and 3 were 0.90 (95% CI 0.86 to 0.95), 0.94 (95% CI 0.90 to 0.97) and 0.88 (95% CI 0.82 to 0.93). Interobserver agreement was high, ranging from 0.85 (95% CI 0.79 to 0.90) between observers 1 and 2 to 0.90 (95% CI 0.85 to 0.94) for observers 1 and 3. On internal validation, the AUC of the CFDLS was 0.99 (95% CI 0.92 to 1.0); sensitivity was 0.89 at a specificity of 1. On external validation, the AUC was 0.97 (95% CI 0.93 to 0.99); sensitivity was 0.84 and specificity was 0.92.
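For readers who want to reproduce the kind of summary statistics reported above, a minimal scikit-learn sketch follows; the toy labels, scores, and the 0.5 threshold are illustrative stand-ins for the study's validation data.

```python
import numpy as np
from sklearn.metrics import confusion_matrix, roc_auc_score

# Toy binary labels and model scores standing in for an external validation set.
y_true = np.array([0, 0, 1, 1, 1, 0, 1, 0, 1, 0])
y_score = np.array([0.1, 0.4, 0.8, 0.9, 0.6, 0.2, 0.7, 0.3, 0.95, 0.05])

auc = roc_auc_score(y_true, y_score)
tn, fp, fn, tp = confusion_matrix(y_true, y_score >= 0.5).ravel()
sensitivity = tp / (tp + fn)   # true positive rate
specificity = tn / (tn + fp)   # true negative rate
print(f"AUC={auc:.2f} sensitivity={sensitivity:.2f} specificity={specificity:.2f}")
```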

CONCLUSION: This CFDLS provides highly accurate discrimination between clinically significant and non-significant PCO equivalent to human expert graders. The clinical value as a potential decision support tool in different models of care warrants further research.}, } @article {pmid36160943, year = {2022}, author = {Sulis, E and Amantea, IA and Aldinucci, M and Boella, G and Marinello, R and Grosso, M and Platter, P and Ambrosini, S}, title = {An ambient assisted living architecture for hospital at home coupled with a process-oriented perspective.}, journal = {Journal of ambient intelligence and humanized computing}, volume = {}, number = {}, pages = {1-19}, pmid = {36160943}, issn = {1868-5137}, abstract = {The growing number of next-generation applications offers a relevant opportunity for healthcare services, generating an urgent need for architectures for systems integration. Moreover, the huge amount of stored information related to events can be explored by adopting a process-oriented perspective. This paper discusses an Ambient Assisted Living healthcare architecture to manage hospital home-care services. The proposed solution relies on adopting an event manager to integrate sources ranging from personal devices to web-based applications. Data are processed on a federated cloud platform offering computing infrastructure and storage resources to improve scientific research. In a second step, a business process analysis of telehealth and telemedicine applications is considered. An initial study explored the business process flow to capture the main sequences of tasks, activities, and events. This step paves the way for the integration of process mining techniques into compliance monitoring in an AAL architecture framework.}, } @article {pmid36157083, year = {2022}, author = {Ahmad, I and Abdullah, S and Ahmed, A}, title = {IoT-fog-based healthcare 4.0 system using blockchain technology.}, journal = {The Journal of supercomputing}, volume = {}, number = {}, pages = {1-22}, pmid = {36157083}, issn = {0920-8542}, abstract = {Real-time tracking and surveillance of patients' health has become ubiquitous in the healthcare sector as a result of the development of fog, cloud computing, and Internet of Things (IoT) technologies. Medical IoT (MIoT) equipment often transfers health data to a pharmaceutical data center, where it is saved, evaluated, and made available to relevant stakeholders or users. Fog layers have been utilized to increase the scalability and flexibility of IoT-based healthcare services, by providing quick response times and low latency. Our proposed solution focuses on an electronic healthcare system that manages both critical and non-critical patients simultaneously. The fog layer is divided into two parts: a critical fog cluster and a non-critical fog cluster. Critical patients are handled at the critical fog cluster for quick response, while non-critical patients are handled using blockchain technology at the non-critical fog cluster, which protects the privacy of patient health records. The suggested solution requires little modification to the current IoT ecosystem while decreasing the response time for critical messages and offloading the cloud infrastructure.
The results show that the suggested approach successfully protects privacy while retaining standard network settings. Reduced storage requirements for cloud data centers benefit users in addition to saving money on construction and operating expenses. In addition, we examined the proposed work for recall, accuracy, precision, and F-score. Moreover, the suggested system and a benchmark are evaluated in terms of system response time, drop rate, throughput, and fog and cloud utilization. The evaluation results clearly indicate that the proposed system performs better than the benchmark.}, } @article {pmid36156947, year = {2022}, author = {Yue, Q}, title = {Dynamic Database Design of Sports Quality Based on Genetic Data Algorithm and Artificial Intelligence.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {7473109}, pmid = {36156947}, issn = {1687-5273}, mesh = {*Artificial Intelligence ; Exercise ; Humans ; *Sports ; Students ; Surveys and Questionnaires ; }, abstract = {Traditional data mining methods are no longer adequate for obtaining knowledge from databases, and knowledge mined in the past must be constantly updated. In the last few years, Internet technology and cloud computing have emerged, and their emergence has brought about sweeping changes in certain industries. In order to efficiently retrieve and analyze large amounts of data at lower cost, big data technology was proposed. Big data technology plays an important role for data of diverse types, huge volumes, and extremely fast change rates. However, big data technology still has limitations, and researchers still cannot extract the value of data quickly, cheaply, and efficiently. The sports database constructed in this paper supports effective statistics and analysis of sports learning data. In the prototype system, log files can be mined, classified, and preprocessed. For the incremental data obtained by preprocessing, incremental data mining can be performed, a classification model can be established, and the database can be updated to provide users with personalized services. Through a survey, the author studied students' exercise status; the feedback data show that college students lack awareness of physical exercise and have no fitness habit. It is necessary to accelerate the reform of college sports and cultivate students' good sports awareness.}, } @article {pmid36156946, year = {2022}, author = {Zhu, J}, title = {The Usage of Designing the Urban Sculpture Scene Based on Edge Computing.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {9346771}, pmid = {36156946}, issn = {1687-5273}, mesh = {*Algorithms ; *Computers ; Electrocardiography ; Humans ; }, abstract = {To achieve the goal of urban cultural construction while also reducing the cost of urban sculpture space design, EC (edge computing) is first combined with urban sculpture space design and planning. The paper then briefly discusses the service category, system architecture, advantages, and characteristics of urban sculpture, as well as the key points and difficulties of its construction, and proposes a layered EC architecture for urban sculpture spaces. Second, cloud-edge combination technology is adopted, and the urban sculpture is treated as a specific function of the edge system node and analyzed in depth to build an urban sculpture safety supervision system architecture platform.
Finally, the actual energy required for implementation is predicted and evaluated, the monitoring system coverage is set up, and equations are derived for calculating the energy consumption of the monitored machines according to the number of devices and the route planning required by the urban sculpture safety supervision system. An energy consumption optimization algorithm based on reinforcement learning is proposed and compared with three control groups. The results show that when the seven monitoring devices cover fewer than 800 detection points, the required energy consumption increases linearly. When they cover more than 800 detection points, the required energy consumption is stable, varying between 10,000 and 12,000; that is, with seven monitoring devices, the optimal number of monitoring points is about 800. When the number of detection points is fixed, increasing the number of monitoring devices within a small range can reduce the total energy consumption. The proposed reinforcement learning-based optimization algorithm can obtain an approximately optimal solution. The research results show that the combination of edge computing and urban sculpture can expand the function of urban sculpture and make it serve people better.}, } @article {pmid36156942, year = {2022}, author = {Zheng, M and Liu, B and Sun, L}, title = {LawRec: Automatic Recommendation of Legal Provisions Based on Legal Text Analysis.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {6313161}, pmid = {36156942}, issn = {1687-5273}, mesh = {*Artificial Intelligence ; Humans ; *Neural Networks, Computer ; Technology ; }, abstract = {Smart court technologies make full use of modern science, for example artificial intelligence, the Internet of Things, and cloud computing, to promote the modernization of the trial system and trial capabilities. These technologies can improve the efficiency of case handling and provide convenience for the public. Article recommendation is an important part of intelligent trials. For ordinary people without a legal background, traditional information retrieval systems that search laws and regulations by keyword are unsuitable, because such users cannot extract professional legal vocabulary from complex case descriptions. This paper proposes a law recommendation framework, called LawRec, based on Bidirectional Encoder Representation from Transformers (BERT) and Skip-Recurrent Neural Network (Skip-RNN) models. It intends to integrate the knowledge of legal provisions with the case description and uses the BERT model to learn the case description text and legal knowledge, respectively. Finally, laws and regulations can be recommended for cases. Experimental results show that the proposed LawRec can achieve better performance than state-of-the-art methods.}, } @article {pmid36153857, year = {2022}, author = {Park, JY and Lee, K and Chung, DR}, title = {Public interest in the digital transformation accelerated by the COVID-19 pandemic and perception of its future impact.}, journal = {The Korean journal of internal medicine}, volume = {37}, number = {6}, pages = {1223-1233}, pmid = {36153857}, issn = {2005-6648}, mesh = {Humans ; Pandemics ; *COVID-19/epidemiology ; Artificial Intelligence ; *Virtual Reality ; Perception ; }, abstract = {BACKGROUND/AIMS: The coronavirus disease 2019 (COVID-19) pandemic has accelerated digital transformation (DT).
We investigated trends in public interest in DT-related technologies, as well as Koreans' experiences with these technologies and their perceptions of their future impact.

METHODS: Using Google Trends, the relative search volumes (RSVs) for topics including "coronavirus," "artificial intelligence," "cloud," "big data," and "metaverse" were retrieved for the period from January 2020 to January 2022. A survey was conducted to assess the population's knowledge, experience, and perceptions regarding the DT.
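This kind of RSV retrieval can be scripted with the unofficial pytrends client; a minimal sketch follows. Note the assumptions: pytrends queries keyword strings rather than the curated Google Trends "topics" the study used, and the geography and locale settings here are illustrative.

```python
from pytrends.request import TrendReq  # pip install pytrends (unofficial client)

pytrends = TrendReq(hl='en-US', tz=360)
keywords = ['coronavirus', 'artificial intelligence', 'cloud',
            'big data', 'metaverse']          # Google Trends caps payloads at 5 terms
pytrends.build_payload(keywords, timeframe='2020-01-01 2022-01-31', geo='KR')
rsv = pytrends.interest_over_time()           # weekly RSVs scaled 0-100 per term
print(rsv.head())
```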

RESULTS: The RSV for "metaverse" showed an increasing trend, in contrast to those for "cloud," "big data," and "coronavirus." The RSVs for DT-related keywords had a negative correlation with the number of new weekly COVID-19 cases. In our survey, 78.1% responded that the positive impact of the DT on future lives would outweigh the negative impact. The predictors for this positive perception included experiences with the metaverse (4.0-fold) and virtual reality (VR)/augmented reality (AR) education (3.8-fold). Respondents predicted that the biggest change would occur in the healthcare sector after transportation/communication.

CONCLUSION: Koreans' search interest in "metaverse" showed an increasing trend during the COVID-19 pandemic. Koreans believe that DT will bring about big changes in the healthcare sector. Most of the survey respondents have a positive outlook on the impact of DT on future life, and the predictors for this positive perception include experiences with the metaverse or VR/AR education. Healthcare professionals need to accelerate the adoption of DT in clinical practice, education, and training.}, } @article {pmid36151775, year = {2022}, author = {Zhao, XG and Cao, H}, title = {Linking research of biomedical datasets.}, journal = {Briefings in bioinformatics}, volume = {23}, number = {6}, pages = {}, doi = {10.1093/bib/bbac373}, pmid = {36151775}, issn = {1477-4054}, mesh = {Humans ; *Ecosystem ; *Algorithms ; Knowledge ; }, abstract = {Biomedical data preprocessing and efficient computing can be as important as the statistical methods used to fit the data; data processing needs to consider application scenarios, data acquisition and individual rights and interests. We review common principles, knowledge, and methods of integrated research according to a whole-pipeline processing mechanism that is diverse, coherent, shared, auditable, and ecological. First, neuromorphic and native algorithms integrate diverse datasets, providing linear scalability and high visualization. Second, the mechanism for choosing among different preprocessing, analysis, and transaction methods, from raw to neuromorphic, is summarized for the node and coordinator platforms. Third, the combination of node, network, cloud, edge, swarm, and graph builds an ecosystem of integrated cohort research and clinical diagnosis and treatment. Looking forward, it is vital to simultaneously combine deep computing, mass data storage and massively parallel communication.}, } @article {pmid36146408, year = {2022}, author = {Jeong, Y and Kim, T}, title = {A Cluster-Driven Adaptive Training Approach for Federated Learning.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {18}, pages = {}, pmid = {36146408}, issn = {1424-8220}, mesh = {*Algorithms ; Humans ; *Learning ; Machine Learning ; }, abstract = {Federated learning (FL) is a promising collaborative learning approach in edge computing, reducing communication costs and addressing the data privacy concerns of traditional cloud-based training. Owing to this, diverse studies have been conducted to bring FL into industry. However, practical issues of FL (e.g., handling non-IID data and stragglers) still remain to be solved for an actual implementation. To address these issues, in this paper, we propose a cluster-driven adaptive training approach (CATA-Fed) to enhance the performance of FL training in a practical environment. CATA-Fed employs adaptive training during the local model updates to enhance training efficiency, reducing the time and resources wasted on stragglers, and also provides a straggler mitigation scheme that can reduce the workload of straggling clients. In addition, CATA-Fed clusters the clients by data size and selects the training participants within a cluster to reduce the magnitude differences of the local gradients collected in the global model update under statistically heterogeneous conditions (e.g., non-IID data). During this client selection process, proportional fair scheduling is employed to secure data diversity as well as to balance the client load.
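Two of the building blocks just described, grouping clients by local data size and size-weighted aggregation of their updates, can be sketched compactly; the following is a simplified NumPy illustration of those generic ideas, not the CATA-Fed algorithm itself, and the client sizes and parameter vectors are toy assumptions.

```python
import numpy as np

def cluster_by_data_size(client_sizes, n_clusters=3):
    """Group client indices into quantile buckets by local dataset size
    (a simplified echo of cluster-driven client selection)."""
    edges = np.quantile(client_sizes, np.linspace(0, 1, n_clusters + 1)[1:-1])
    return [int(np.searchsorted(edges, s)) for s in client_sizes]

def fedavg(client_params, client_sizes):
    """Size-weighted average of per-client parameter vectors (FedAVG-style)."""
    w = np.asarray(client_sizes, dtype=float)
    w /= w.sum()
    return sum(wi * p for wi, p in zip(w, client_params))

sizes = [120, 90, 3000, 150, 2800, 60]          # toy local dataset sizes
print(cluster_by_data_size(sizes))              # e.g., [0, 0, 2, 1, 2, 0]
params = [np.random.randn(10) for _ in sizes]   # stand-ins for model weights
global_update = fedavg(params, sizes)
```

Selecting participants within one quantile bucket keeps the per-round gradient magnitudes comparable, which is the motivation the entry gives for clustering by data size.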
We conduct extensive experiments using three benchmark datasets (MNIST, Fashion-MNIST, and CIFAR-10), and the results show that CATA-Fed outperforms the previous FL schemes (FedAVG, FedProx, and TiFL) with regard to the training speed and test accuracy under the diverse FL conditions.}, } @article {pmid36146382, year = {2022}, author = {Caro-Via, S and Vidaña-Vila, E and Ginovart-Panisello, GJ and Martínez-Suquía, C and Freixes, M and Alsina-Pagès, RM}, title = {Edge-Computing Meshed Wireless Acoustic Sensor Network for Indoor Sound Monitoring.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {18}, pages = {}, pmid = {36146382}, issn = {1424-8220}, mesh = {*Acoustics ; Humans ; Monitoring, Physiologic ; }, abstract = {This work presents the design of a wireless acoustic sensor network (WASN) that monitors indoor spaces. The proposed network would enable the acquisition of valuable information on the behavior of the inhabitants of the space. This WASN has been conceived to work in any type of indoor environment, including houses, hospitals, universities or even libraries, where the tracking of people can give relevant insight, with a focus on ambient assisted living environments. The proposed WASN has several priorities and differences compared to the literature: (i) presenting a low-cost flexible sensor able to monitor wide indoor areas; (ii) balance between acoustic quality and microphone cost; and (iii) good communication between nodes to increase the connectivity coverage. A potential application of the proposed network could be the generation of a sound map of a certain location (house, university, offices, etc.) or, in the future, the acoustic detection of events, giving information about the behavior of the inhabitants of the place under study. Each node of the network comprises an omnidirectional microphone and a computation unit, which processes acoustic information locally following the edge-computing paradigm to avoid sending raw data to a cloud server, mainly for privacy and connectivity purposes. Moreover, this work explores the placement of acoustic sensors in a real scenario, following acoustic coverage criteria. The proposed network aims to encourage the use of real-time non-invasive devices to obtain behavioral and environmental information, in order to take decisions in real-time with the minimum intrusiveness in the location under study.}, } @article {pmid36146368, year = {2022}, author = {Barron, A and Sanchez-Gallegos, DD and Carrizales-Espinoza, D and Gonzalez-Compean, JL and Morales-Sandoval, M}, title = {On the Efficient Delivery and Storage of IoT Data in Edge-Fog-Cloud Environments.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {18}, pages = {}, pmid = {36146368}, issn = {1424-8220}, mesh = {*Cloud Computing ; *Computer Communication Networks ; Electrocardiography ; Internet ; }, abstract = {Cloud storage has become a keystone for organizations to manage large volumes of data produced by sensors at the edge as well as information produced by deep and machine learning applications. Nevertheless, the latency produced by geographically distributed systems deployed on any of the edge, the fog, or the cloud leads to delays that are observed by end-users in the form of high response times. In this paper, we present an efficient scheme for the management and storage of Internet of Things (IoT) data in edge-fog-cloud environments.
In our proposal, entities called data containers are coupled, in a logical manner, with nano/microservices deployed on any of the edge, the fog, or the cloud. The data containers implement a hierarchical cache file system including storage levels such as in-memory, file system, and cloud services for transparently managing the input/output data operations produced by nano/microservices (e.g., a sensor hub collecting data from sensors at the edge or machine learning applications processing data at the edge). Data containers are interconnected through a secure and efficient content delivery network, which transparently and automatically performs the continuous delivery of data through the edge-fog-cloud. A prototype of our proposed scheme was implemented and evaluated in a case study based on the management of electrocardiogram sensor data. The obtained results reveal the suitability and efficiency of the proposed scheme.}, } @article {pmid36146364, year = {2022}, author = {Alvear-Puertas, VE and Burbano-Prado, YA and Rosero-Montalvo, PD and Tözün, P and Marcillo, F and Hernandez, W}, title = {Smart and Portable Air-Quality Monitoring IoT Low-Cost Devices in Ibarra City, Ecuador.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {18}, pages = {}, pmid = {36146364}, issn = {1424-8220}, mesh = {*Air Pollution/analysis ; Ecuador ; Environmental Monitoring/methods ; Gases/analysis ; *Internet of Things ; }, abstract = {Nowadays, increasing air-pollution levels are a public health concern that affects all living beings, with the most polluting gases being present in urban environments. For this reason, this research presents portable Internet of Things (IoT) environmental monitoring devices that can be installed in vehicles and that send message queuing telemetry transport (MQTT) messages to a server, with a time series database allocated in edge computing. The visualization stage is performed in cloud computing to determine the city air-pollution concentration using three different labels: low, normal, and high. To determine the environmental conditions in Ibarra, Ecuador, a data analysis scheme is used with outlier detection and supervised classification stages. In terms of relevant results, the classification performance of the IoT nodes used to infer air quality exceeded 90%. In addition, the memory consumption was 14 Kbytes of flash and 3 Kbytes of RAM, reducing the power consumption and bandwidth needed in traditional air-pollution measuring stations.}, } @article {pmid36146329, year = {2022}, author = {Maruta, K and Nishiuchi, H and Nakazato, J and Tran, GK and Sakaguchi, K}, title = {5G/B5G mmWave Cellular Networks with MEC Prefetching Based on User Context Information.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {18}, pages = {}, pmid = {36146329}, issn = {1424-8220}, mesh = {*Algorithms ; *Cloud Computing ; Computer Simulation ; Diffusion Magnetic Resonance Imaging ; Records ; }, abstract = {To deal with the recent increase in mobile traffic, ultra-broadband communication with millimeter-wave (mmWave) has been regarded as a key technology for 5G cellular networks. In a previous study, a mmWave heterogeneous network was composed of several mmWave small cells overlaid on the coverage of a macro cell. However, as seen from optical fiber penetration rates worldwide, it is difficult to say that Gbps-order backhaul is available everywhere.
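The telemetry path in the air-quality entry above (vehicle node publishing MQTT messages toward an edge-hosted time-series database) can be sketched in a few lines with the paho-mqtt client; the broker host, topic, and payload fields are hypothetical.

```python
import json
import paho.mqtt.client as mqtt  # pip install "paho-mqtt<2" for this 1.x-style API

BROKER = 'edge-broker.example.org'   # hypothetical edge broker fronting the time-series DB
TOPIC = 'ibarra/vehicles/42/air-quality'

client = mqtt.Client()
client.connect(BROKER, 1883, keepalive=60)
reading = {'co_ppm': 0.4, 'no2_ppm': 0.021, 'o3_ppm': 0.012, 'label': 'low'}
client.publish(TOPIC, json.dumps(reading), qos=1)   # at-least-once delivery
client.disconnect()
```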
When mmWave access is used under limited backhaul capacity, the backhaul becomes a bottleneck; thus, mmWave access cannot fully demonstrate its potential. On the other hand, the concept of multi-access edge computing (MEC) has been proposed to decrease the response latency compared to cloud computing by deploying storage and computation resources at the user side of mobile networks. This paper introduces MEC into mmWave heterogeneous networks and proposes a content prefetching algorithm to resolve such backhaul issues. Context information, such as destination, mobility, and traffic tendency, is shared through the macro cell to prefetch the applications and data that the users request. Prefetched data are stored in the MEC and then transmitted via mmWave without a backhaul bottleneck. The effectiveness is verified through computer simulations in which we implement realistic user mobility as well as traffic and backhauling models. The results show that the proposed framework achieved 95% system capacity even under the constraint of a 1 Gbps backhaul link.}, } @article {pmid36146134, year = {2022}, author = {Alghamdi, A and Zhu, J and Yin, G and Shorfuzzaman, M and Alsufyani, N and Alyami, S and Biswas, S}, title = {Blockchain Empowered Federated Learning Ecosystem for Securing Consumer IoT Features Analysis.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {18}, pages = {}, pmid = {36146134}, issn = {1424-8220}, mesh = {*Blockchain ; Computer Security ; Ecosystem ; *Internet of Things ; Privacy ; }, abstract = {Resource-constrained Consumer Internet of Things (CIoT) devices are controlled through gateway devices (e.g., smartphones, computers, etc.) that are connected to Mobile Edge Computing (MEC) servers or a cloud regulated by a third party. Recently, Machine Learning (ML) has been widely used in automation, consumer behavior analysis, device quality upgrading, etc. Typical ML makes predictions by analyzing customers' raw data in a centralized system, which raises security and privacy issues such as data leakage, privacy violations, and single points of failure. To overcome these problems, Federated Learning (FL) provided an initial solution that ensures services without sharing personal data. In FL, a centralized aggregator collaborates with clients and averages their updates into a global model used for the next round of training. However, the centralized aggregator raises similar issues: a single point of control can leak the updated model and interrupt the entire process. Additionally, research has claimed that data can be retrieved from model parameters. Beyond that, since the Gateway (GW) device has full access to the raw data, it can also threaten the entire ecosystem. This research contributes a blockchain-controlled, edge-intelligence federated learning framework for a distributed learning platform for CIoT. The federated learning platform allows collaborative learning with users' shared data, and the blockchain network replaces the centralized aggregator and ensures the secure participation of gateway devices in the ecosystem. Furthermore, blockchain is trustless, immutable, and anonymous, encouraging CIoT end users to participate. We evaluated the framework and federated learning outcomes using the well-known Stanford Cars dataset.
Experimental results prove the effectiveness of the proposed framework.}, } @article {pmid36146113, year = {2022}, author = {Liu, X and Zhao, X and Liu, G and Huang, F and Huang, T and Wu, Y}, title = {Collaborative Task Offloading and Service Caching Strategy for Mobile Edge Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {18}, pages = {}, pmid = {36146113}, issn = {1424-8220}, mesh = {*Algorithms ; Computer Simulation ; }, abstract = {Mobile edge computing (MEC), which pushes the functions of cloud servers toward the network edge, has become an emerging paradigm to resolve the contradiction between delay-sensitive tasks and resource-constrained terminals. Task offloading assisted by service caching in a collaborative manner can reduce delay and balance the edge load in MEC. Because of the limited storage resources of edge servers, developing a dynamic service caching strategy that follows actual, variable user demands in task offloading is a significant issue. Therefore, this paper investigates the collaborative task offloading problem assisted by a dynamic caching strategy in MEC. Furthermore, a two-level computing strategy called joint task offloading and service caching (JTOSC) is proposed to solve the optimization problem. The outer layer in JTOSC iteratively updates the service caching decisions based on Gibbs sampling. The inner layer in JTOSC adopts a fairness-aware allocation algorithm and an offloading revenue preference-based bilateral matching algorithm to obtain a good computing resource allocation and task offloading scheme. The simulation results indicate that the proposed strategy outperforms the other four comparison strategies in terms of maximum offloading delay, service cache hit rate, and edge load balance.}, } @article {pmid36146069, year = {2022}, author = {Li, D and Mao, Y and Chen, X and Li, J and Liu, S}, title = {Deployment and Allocation Strategy for MEC Nodes in Complex Multi-Terminal Scenarios.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {18}, pages = {}, pmid = {36146069}, issn = {1424-8220}, abstract = {Mobile edge computing (MEC) has become an effective solution to the insufficient computing and communication capabilities available to Internet of Things (IoT) applications, owing to its rich computing resources at the edge. In multi-terminal scenarios, the deployment scheme of edge nodes has an important impact on system performance and has become an essential issue in end-edge-cloud architectures. In this article, we consider specific factors, such as the spatial location, power supply, and urgency requirements of terminals, to build an evaluation model for the allocation problem. An evaluation model based on reward, energy consumption, and cost factors is proposed. A genetic algorithm is applied to determine the optimal edge node deployment and allocation strategies. Moreover, we compare the proposed method with the k-means and ant colony algorithms. The results show that the obtained strategies achieve good evaluation results under the problem constraints.
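A genetic search for node placement of the kind described in the MEC deployment entry above can be sketched compactly; the toy fitness below scores a placement only by terminal-to-node distance, standing in for the paper's reward/energy/cost evaluation model, and all sizes and rates are placeholder assumptions.

```python
import numpy as np

rng = np.random.default_rng(1)
terminals = rng.random((40, 2))           # terminal positions in a unit square (toy)
N_NODES, POP, GENS = 4, 60, 200

def fitness(plan):
    """Negative sum of each terminal's distance to its nearest MEC node
    (a stand-in for an evaluation model over reward, energy, and cost)."""
    nodes = plan.reshape(N_NODES, 2)
    d = np.linalg.norm(terminals[:, None, :] - nodes[None, :, :], axis=2)
    return -d.min(axis=1).sum()

pop = rng.random((POP, N_NODES * 2))      # each individual: flattened node coords
for _ in range(GENS):
    scores = np.array([fitness(p) for p in pop])
    parents = pop[np.argsort(scores)[-POP // 2:]]          # truncation selection
    cut = rng.integers(1, N_NODES * 2, size=POP // 2)
    kids = np.array([np.concatenate((a[:c], b[c:]))        # one-point crossover
                     for a, b, c in zip(parents, parents[::-1], cut)])
    kids += rng.normal(0, 0.02, kids.shape) * (rng.random(kids.shape) < 0.1)
    pop = np.vstack([parents, np.clip(kids, 0, 1)])        # mutation + elitism
best = pop[np.argmax([fitness(p) for p in pop])].reshape(N_NODES, 2)
```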
Furthermore, we conduct comparison tests with different attributes to further assess the performance of the proposed method.}, } @article {pmid36141163, year = {2022}, author = {Tang, X and Xu, L and Chen, G}, title = {Research on the Rapid Diagnostic Method of Rolling Bearing Fault Based on Cloud-Edge Collaboration.}, journal = {Entropy (Basel, Switzerland)}, volume = {24}, number = {9}, pages = {}, pmid = {36141163}, issn = {1099-4300}, abstract = {Recent deep-learning methods for fault diagnosis of rolling bearings need a significant amount of computing time and resources. Most of them cannot meet the requirements of real-time fault diagnosis of rolling bearings under the cloud computing framework. This paper proposes a quick cloud-edge collaborative bearing fault diagnostic method based on the tradeoff between the advantages and disadvantages of cloud and edge computing. First, a collaborative cloud-based framework and an improved DSCNN-GAP algorithm are suggested to build a general model using the public bearing fault dataset. Second, the general model is distributed to each edge node, and a limited number of unique fault samples acquired by each edge node are used to quickly adjust the parameters of the model before running diagnostic tests. Finally, the diagnostic results of the edge nodes are fused using DS evidence theory. Experimental results show that the proposed method not only improves diagnostic accuracy through DSCNN-GAP and multi-sensor fusion, but also decreases diagnosis time through transfer learning within the cloud-edge collaborative framework. Additionally, the method can effectively enhance data security and privacy protection.}, } @article {pmid36124594, year = {2022}, author = {Lin, HY and Tsai, TT and Wu, HR and Ku, MS}, title = {Secure access control using updateable attribute keys.}, journal = {Mathematical biosciences and engineering : MBE}, volume = {19}, number = {11}, pages = {11367-11379}, doi = {10.3934/mbe.2022529}, pmid = {36124594}, issn = {1551-0018}, mesh = {*Algorithms ; Cloud Computing ; *Computer Security ; Confidentiality ; Humans ; }, abstract = {In the era of cloud computing, the technique of access control is vital to protecting the confidentiality and integrity of cloud data. From the perspective of servers, they should only allow authenticated clients to gain access to the data. Specifically, the server will share a communication channel with the client by generating a common session key, which is thus regarded as a symmetric key for encrypting data in the current channel. An access control mechanism using attribute-based encryption is highly flexible, since the decryption privilege can be granted to those who have sufficient attributes. In the paper, the authors propose a secure access control scheme consisting of attribute-based mutual authentication and attribute-based encryption. The most appealing property of our system is that the attribute keys associated with each user are periodically updatable.
Moreover, we will also show that our system fulfills fuzzy selective-ID security assuming the hardness of the Decisional Modified Bilinear Diffie-Hellman (DMBDH) problem.}, } @article {pmid36124579, year = {2022}, author = {Liu, D and Li, Z and Wang, C and Ren, Y}, title = {Enabling secure mutual authentication and storage checking in cloud-assisted IoT.}, journal = {Mathematical biosciences and engineering : MBE}, volume = {19}, number = {11}, pages = {11034-11046}, doi = {10.3934/mbe.2022514}, pmid = {36124579}, issn = {1551-0018}, abstract = {The Internet of Things (IoT) is a technology that can collect the data sensed by devices for further real-time services. Using cloud computing to assist IoT devices with data storage can eliminate the disadvantage of constrained local storage and computing capability. However, the complex network environment makes cloud servers vulnerable to attacks, and adversaries may pretend to be legitimate IoT clients trying to access the cloud server. Hence, it is necessary to provide a mutual authentication mechanism for the cloud system to enhance storage security. In this paper, a secure mutual authentication scheme is proposed for cloud-assisted IoT. The technique of chameleon hash signatures is used to construct the authentication. Moreover, the proposed scheme can provide storage checking with the assistance of a fully trusted entity, which greatly improves checking fairness and efficiency. Security analysis proves that the proposed scheme is correct. Performance analysis demonstrates that the proposed scheme can be executed with high efficiency.}, } @article {pmid36124116, year = {2022}, author = {Wu, Y and Zheng, C and Xie, L and Hao, M}, title = {Cloud-Based English Multimedia for Universities Test Questions Modeling and Applications.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {4563491}, pmid = {36124116}, issn = {1687-5273}, mesh = {*Cloud Computing ; Computers ; Humans ; *Multimedia ; Software ; Universities ; }, abstract = {This study constructs a cloud computing-based model and application of college English multimedia test questions through an in-depth study of cloud computing and college English multimedia test questions. The emergence of cloud computing technology undoubtedly provides a new and ideal method to solve test data and paper management problems. This study analyzes the advantages of the Hadoop computing platform and the MapReduce computing model and builds a distributed computing platform based on Hadoop using universities' existing hardware and software resources. The UML model of the system is given; the system is then implemented, functionally tested, and the test results analyzed. Multimedia is the critical link to realizing the optimization of English test questions.
The proper use of multimedia test questions will undoubtedly become an inevitable trend in the future development of English test questions. This requires every worker on the education front to continuously analyze and study the problems arising from multimedia teaching, summarize the experience of multimedia teaching, and explore new methods, so that multimedia teaching can better promote the optimization of English test questions in colleges and universities and better serve education and teaching.}, } @article {pmid36118826, year = {2022}, author = {Zhang, F and Zhang, Z and Xiao, H}, title = {Research on Medical Big Data Analysis and Disease Prediction Method Based on Artificial Intelligence.}, journal = {Computational and mathematical methods in medicine}, volume = {2022}, number = {}, pages = {4224287}, pmid = {36118826}, issn = {1748-6718}, mesh = {*Artificial Intelligence ; *Big Data ; Cloud Computing ; Data Analysis ; Humans ; }, abstract = {In recent years, the continuous development of big data, cloud services, Internet+, artificial intelligence, and other technologies has accelerated the improvement of data communication services in the traditional pharmaceutical industry. This plays a leading role in the development of China's pharmaceutical industry, deepening the reform of the health system, improving the efficiency and quality of medical services, and developing new technologies. In this context, we conducted the following research and drew the following conclusions: (1) The scale of China's medical big data market is constantly increasing, as is the global medical big data market; moreover, China's medical big data market has grown at a faster rate, rising from 10.33% of the global market in 2015 to 38.7% seven years later, an increase of 28.37 percentage points. (2) Generally speaking, urine is mainly slightly acidic, with a pH of around 6.0; the normal range is 5.0 to 7.0, and it can also be neutral or slightly alkaline, while values such as 7.5 and 8 generally indicate people with some physical problems. In recent years, the pharmaceutical industry has continuously developed technologies such as big data, cloud computing, Internet+, and artificial intelligence by improving data transmission services. As an important national strategic resource, the generation of medical big data and information is of great significance to the development of China's pharmaceutical industry and the deepening of the reform of the national medical system, as it can improve the efficiency and level of medical services, establish new formats and services, and accelerate economic growth.
It is in this sense that we set out to explore these issues.}, } @article {pmid36108415, year = {2022}, author = {Shoeibi, A and Moridian, P and Khodatars, M and Ghassemi, N and Jafari, M and Alizadehsani, R and Kong, Y and Gorriz, JM and Ramírez, J and Khosravi, A and Nahavandi, S and Acharya, UR}, title = {An overview of deep learning techniques for epileptic seizures detection and prediction based on neuroimaging modalities: Methods, challenges, and future works.}, journal = {Computers in biology and medicine}, volume = {149}, number = {}, pages = {106053}, doi = {10.1016/j.compbiomed.2022.106053}, pmid = {36108415}, issn = {1879-0534}, mesh = {Algorithms ; *Deep Learning ; Electroencephalography/methods ; *Epilepsy/diagnostic imaging ; Humans ; Neuroimaging ; Seizures/diagnostic imaging ; }, abstract = {Epilepsy is a disorder of the brain characterized by frequent seizures. The symptoms of seizure include confusion, abnormal staring, and rapid, sudden, and uncontrollable hand movements. Epileptic seizure detection methods involve neurological exams, blood tests, neuropsychological tests, and neuroimaging modalities. Among these, neuroimaging modalities have received considerable attention from specialist physicians. One method to facilitate the accurate and fast diagnosis of epileptic seizures is to employ computer-aided diagnosis systems (CADS) based on deep learning (DL) and neuroimaging modalities. This paper presents a comprehensive overview of DL methods employed for epileptic seizures detection and prediction using neuroimaging modalities. First, DL-based CADS for epileptic seizures detection and prediction using neuroimaging modalities are discussed. Also, descriptions of various datasets, preprocessing algorithms, and DL models which have been used for epileptic seizures detection and prediction are included. Then, research on rehabilitation tools is presented, covering brain-computer interfaces (BCI), cloud computing, the internet of things (IoT), hardware implementation of DL techniques on field-programmable gate arrays (FPGA), etc. In the discussion section, a comparison is carried out between research on epileptic seizure detection and research on seizure prediction. The challenges in epileptic seizures detection and prediction using neuroimaging modalities and DL models are described. In addition, possible directions for future works in this field, specifically for solving challenges in datasets, DL, rehabilitation, and hardware models, are proposed.
The final section is dedicated to the conclusion, which summarizes the significant findings of the paper.}, } @article {pmid36107981, year = {2022}, author = {Kim, YK and Kim, HJ and Lee, H and Chang, JW}, title = {Correction: Privacy-preserving parallel kNN classification algorithm using index-based filtering in cloud computing.}, journal = {PloS one}, volume = {17}, number = {9}, pages = {e0274981}, pmid = {36107981}, issn = {1932-6203}, abstract = {[This corrects the article DOI: 10.1371/journal.pone.0267908.].}, } @article {pmid36107827, year = {2022}, author = {Zhuang, Y and Jiang, N}, title = {Progressive privacy-preserving batch retrieval of lung CT image sequences based on edge-cloud collaborative computation.}, journal = {PloS one}, volume = {17}, number = {9}, pages = {e0274507}, pmid = {36107827}, issn = {1932-6203}, mesh = {*Computer Security ; Lung/diagnostic imaging ; *Privacy ; Tomography, X-Ray Computed ; }, abstract = {BACKGROUND: A computed tomography image (CI) sequence can be regarded as time-series data composed of a great many nearby and similar CIs. Since the computational and I/O costs of similarity measurement, encryption, and decryption during a similarity retrieval of large CI sequences (CIS) are extremely high, deploying all retrieval tasks in the cloud will lead to an excessive computing load on the cloud, which will greatly and negatively affect the retrieval performance.

METHODOLOGIES: To tackle the above challenges, the paper proposes a progressive privacy-preserving Batch Retrieval scheme for the lung CISs based on edge-cloud collaborative computation called the BRS method. There are four supporting techniques to enable the BRS method, such as: 1) batch similarity measure for CISs, 2) CIB-based privacy preserving scheme, 3) uniform edge-cloud index framework, and 4) edge buffering.

RESULTS: The experimental results reveal that our method outperforms the state-of-the-art approaches in terms of efficiency and scalability, drastically reducing response time by lowering network communication costs while enhancing retrieval safety and accuracy.}, } @article {pmid36105640, year = {2022}, author = {Veeraiah, D and Mohanty, R and Kundu, S and Dhabliya, D and Tiwari, M and Jamal, SS and Halifa, A}, title = {Detection of Malicious Cloud Bandwidth Consumption in Cloud Computing Using Machine Learning Techniques.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {4003403}, pmid = {36105640}, issn = {1687-5273}, mesh = {*Cloud Computing ; Fuzzy Logic ; Humans ; *Machine Learning ; }, abstract = {The Internet of Things (IoT) is a relatively new kind of Internet connectivity that connects physical objects to the Internet in a way that was not possible in the past. The IoT has a larger attack surface as a result of its hyperconnectivity and heterogeneity. In addition, since IoT devices are deployed in both managed and uncontrolled contexts, it is conceivable for malicious actors to build new attacks that target these devices. As a result, the IoT requires self-protection security systems that are able to autonomously interpret attacks in IoT traffic and efficiently handle the attack scenario by triggering appropriate reactions at a pace that is faster than what is currently available. To fulfill this requirement, fog computing must be utilised; this type of computing is capable of integrating an intelligent self-protection mechanism into the distributed fog nodes, allowing the IoT application to be protected with the least amount of human intervention while also allowing for faster management of attack scenarios. Implementing a self-protection mechanism against malicious activity at the fog nodes is the primary objective of this research work. This mechanism should be able to detect and predict known attacks based on predefined attack patterns, predict novel attacks for which no attack patterns are predefined, and then choose the most appropriate response to neutralise the identified attack. In the IoT environment, a distributed Gaussian process regression is used at the fog nodes to anticipate attack patterns that have not been established in the past, allowing new cyberattacks in the environment to be predicted. It predicts attacks in an uncertain IoT setting at a faster rate and with greater precision than prior techniques, and it can effectively anticipate both low-rate and high-rate assaults in a more timely manner within the dispersed fog nodes, which enables a more accurate defence. In conclusion, a fog computing-based self-protection system is developed that uses fuzzy logic to choose the most appropriate reaction to the assaults detected or anticipated by the suggested detection and prediction mechanisms.
The findings of the experimental investigation indicate that the proposed system identifies threats, lowers bandwidth usage, and thwarts assaults at a rate that is twenty-five percent faster than the cloud-based system implementation.}, } @article {pmid36103218, year = {2022}, author = {Huang, H and Aschettino, S and Lari, N and Lee, TH and Rosenberg, SS and Ng, X and Muthuri, S and Bakshi, A and Bishop, K and Ezzeldin, H}, title = {A Versatile and Scalable Platform That Streamlines Data Collection for Patient-Centered Studies: Usability and Feasibility Study.}, journal = {JMIR formative research}, volume = {6}, number = {9}, pages = {e38579}, pmid = {36103218}, issn = {2561-326X}, abstract = {BACKGROUND: The Food and Drug Administration Center for Biologics Evaluation and Research (CBER) established the Biologics Effectiveness and Safety (BEST) Initiative with several objectives, including the expansion and enhancement of CBER's access to fit-for-purpose data sources, analytics, tools, and infrastructures to improve the understanding of patient experiences with conditions related to CBER-regulated products. Owing to existing challenges in data collection, especially for rare disease research, CBER recognized the need for a comprehensive platform where study coordinators can engage with study participants and design and deploy studies while patients or caregivers could enroll, consent, and securely participate as well.

OBJECTIVE: This study aimed to increase awareness and describe the design, development, and novelty of the Survey of Health and Patient Experience (SHAPE) platform, its functionality and application, quality improvement efforts, open-source availability, and plans for enhancement.

METHODS: SHAPE is hosted in a Google Cloud environment and comprises 3 parts: the administrator application, participant app, and application programming interface. The administrator can build a study comprising a set of questionnaires and self-report entries through the app. Once the study is deployed, the participant can access the app, consent to the study, and complete its components. To build SHAPE to be scalable and flexible, we leveraged the open-source software development kit, Ionic Framework. This enabled the building and deploying of apps across platforms, including iOS, Android, and progressive web applications, from a single codebase by using standardized web technologies. SHAPE has been integrated with a leading Health Level 7 (HL7®) Fast Healthcare Interoperability Resources (FHIR®) application programming interface platform, 1upHealth, which allows participants to consent to 1-time data pull of their electronic health records. We used an agile-based process that engaged multiple stakeholders in SHAPE's design and development.

RESULTS: SHAPE allows study coordinators to plan, develop, and deploy questionnaires to obtain important end points directly from patients or caregivers. Electronic health record integration enables access to patient health records, which can validate and enhance the accuracy of data-capture methods. The administrator can then download the study data into HL7® FHIR®-formatted JSON files. In this paper, we illustrate how study coordinators can use SHAPE to design patient-centered studies. We demonstrate its broad applicability through a hypothetical type 1 diabetes cohort study and an ongoing pilot study on metachromatic leukodystrophy to implement best practices for designing a regulatory-grade natural history study for rare diseases.

CONCLUSIONS: SHAPE is an intuitive and comprehensive data-collection tool for a variety of clinical studies. Further customization of this versatile and scalable platform allows for multiple use cases. SHAPE can capture patient perspectives and clinical data, thereby providing regulators, clinicians, researchers, and patient advocacy organizations with data to inform drug development and improve patient outcomes.}, } @article {pmid36100587, year = {2022}, author = {Wang, C and Kon, WY and Ng, HJ and Lim, CC}, title = {Experimental symmetric private information retrieval with measurement-device-independent quantum network.}, journal = {Light, science & applications}, volume = {11}, number = {1}, pages = {268}, pmid = {36100587}, issn = {2047-7538}, abstract = {Secure information retrieval is an essential task in today's highly digitised society. In some applications, it may be necessary that user query's privacy and database content's security are enforced. For these settings, symmetric private information retrieval (SPIR) could be employed, but its implementation is known to be demanding, requiring a private key-exchange network as the base layer. Here, we report for the first time a realisation of provably-secure SPIR supported by a quantum-secure key-exchange network. The SPIR scheme looks at biometric security, offering secure retrieval of 582-byte fingerprint files from a database with 800 entries. Our experimental results clearly demonstrate the feasibility of SPIR with quantum secure communications, thereby opening up new possibilities in secure distributed data storage and cloud computing over the future Quantum Internet.}, } @article {pmid36093501, year = {2022}, author = {Ahamed Ahanger, T and Aldaej, A and Atiquzzaman, M and Ullah, I and Yousufudin, M}, title = {Distributed Blockchain-Based Platform for Unmanned Aerial Vehicles.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {4723124}, pmid = {36093501}, issn = {1687-5273}, mesh = {*Blockchain ; Computer Communication Networks ; Computer Security ; Delivery of Health Care ; Unmanned Aerial Devices ; }, abstract = {Internet of Things (IoT)-inspired drone environment is having a greater influence on daily lives in the form of drone-based smart electricity monitoring, traffic routing, and personal healthcare. However, communication between drones and ground control systems must be protected to avoid potential vulnerabilities and improve coordination among scattered UAVs in the IoT context. In the current paper, a distributed UAV scheme is proposed that uses blockchain technology and a network topology similar to the IoT and cloud server to secure communications during data collection and transmission and reduce the likelihood of attack by maliciously manipulated UAVs. As an alternative to relying on a traditional blockchain approach, a unique, safe, and lightweight blockchain architecture is proposed that reduces computing and storage requirements while keeping privacy and security advantages. In addition, a unique reputation-based consensus protocol is built to assure the dependability of the decentralized network. Numerous types of transactions are established to characterize diverse data access. 
To validate the presented blockchain-based distributed system, performance evaluations are conducted to estimate the statistical effectiveness in the form of temporal delay, packet flow efficacy, precision, specificity, sensitivity, and security efficiency.}, } @article {pmid36093500, year = {2022}, author = {Zhu, G and Li, X and Zheng, C and Wang, L}, title = {Multimedia Fusion Privacy Protection Algorithm Based on IoT Data Security under Network Regulations.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {3574812}, pmid = {36093500}, issn = {1687-5273}, mesh = {Algorithms ; Computer Security ; Data Collection ; *Multimedia ; *Privacy ; }, abstract = {This study provides an in-depth analysis of and research on multimedia fusion privacy protection algorithms based on IoT data security in a network regulation environment. Aiming at the problem of collusion and conspiracy to deceive users in the process of outsourced computing and outsourced verification, a safe, reliable, and collusion-resistant scheme based on blockchain is studied for IoT outsourced data computing and public verification. With the help of distributed storage methods, smart devices encrypt the collected data and upload them to the DHT for storage, along with the results for these data given by the cloud server. After testing, the constructed model has a privacy-preserving budget value of 0.6 and the smallest information leakage ratio of multimedia fusion data based on IoT data security when the decision tree depth is 6. After using this model under this condition, the maximum value of the information leakage ratio of multimedia fusion data based on IoT data security is reduced from 0.0865 to 0.003, and data security is significantly improved. In the consensus verification process, to reduce the consensus time and ensure the operating efficiency of the system, a consensus node selection algorithm is proposed, thereby reducing the time complexity of the consensus. Based on the smart grid application scenario, the security and performance of the proposed model are analyzed. This study proves the correctness of the scheme by using BAN logic and proves its security under the random oracle model. Finally, this study compares the security and performance aspects of the scheme with some existing similar schemes and shows that the scheme is feasible in the IoT.}, } @article {pmid36093488, year = {2022}, author = {Alyami, J and Sadad, T and Rehman, A and Almutairi, F and Saba, T and Bahaj, SA and Alkhurim, A}, title = {Cloud Computing-Based Framework for Breast Tumor Image Classification Using Fusion of AlexNet and GLCM Texture Features with Ensemble Multi-Kernel Support Vector Machine (MK-SVM).}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {7403302}, pmid = {36093488}, issn = {1687-5273}, mesh = {Aged ; *Breast Neoplasms/diagnostic imaging ; Cloud Computing ; Diagnosis, Computer-Assisted/methods ; Female ; Humans ; Image Processing, Computer-Assisted/methods ; *Support Vector Machine ; }, abstract = {Breast cancer is common among women all over the world. Early identification of breast cancer lowers death rates. However, it is difficult to determine whether lesions are cancerous or noncancerous due to inconsistencies in their image appearance. Machine learning techniques are widely employed in imaging analysis as a diagnostic method for breast cancer classification.
However, patients in remote areas cannot take advantage of such systems, as they are unavailable in the cloud. Thus, breast cancer detection for remote patients is indispensable, and it is only possible through cloud computing. The user is allowed to feed images into the cloud system, which are further investigated through the computer-aided diagnosis (CAD) system. Such systems could also be used to track patients, especially older adults with disabilities, particularly in remote areas of developing countries that do not have medical facilities and paramedic staff. In the proposed CAD system, a fusion of the AlexNet architecture and GLCM (gray-level cooccurrence matrix) features is used to extract distinguishable texture features from breast tissues. Finally, to attain higher precision, an ensemble of MK-SVMs is used. For testing purposes, the proposed model is applied to the MIAS dataset, a commonly used breast image database, and achieved 96.26% accuracy.}, } @article {pmid36093280, year = {2022}, author = {Xie, Y and Zhang, K and Kou, H and Mokarram, MJ}, title = {Private anomaly detection of student health conditions based on wearable sensors in mobile cloud computing.}, journal = {Journal of cloud computing (Heidelberg, Germany)}, volume = {11}, number = {1}, pages = {38}, pmid = {36093280}, issn = {2192-113X}, abstract = {With the continuous spread of the COVID-19 virus, how to guarantee the healthy living of people, especially students of relatively weak physique, is becoming a key research issue of significant value. Specifically, precise recognition of anomalies in student health conditions is beneficial to the quick discovery of potential patients. However, there are so many students in each school that education managers cannot know the health conditions of students in real time or accurately and quickly recognize possible anomalies among them. Fortunately, the rapid development of mobile cloud computing technologies and wearable sensors has provided a promising way to monitor the real-time health conditions of students and find anomalies in a timely manner. However, two challenges are present in the above anomaly detection issue. First, the health data monitored by wearable sensors are often massive and updated frequently, which probably leads to high sensor-cloud transmission costs for anomaly detection. Second, the health data of students are often sensitive, which probably impedes the integration of health data in the cloud environment and may even render health data-based anomaly detection infeasible. In view of these challenges, we propose a time-efficient and privacy-aware anomaly detection solution for students with wearable sensors in a mobile cloud computing environment. Finally, we validate the effectiveness and efficiency of our work via a set of simulated experiments.}, } @article {pmid36092002, year = {2022}, author = {Vadde, U and Kompalli, VS}, title = {Energy efficient service placement in fog computing.}, journal = {PeerJ. Computer science}, volume = {8}, number = {}, pages = {e1035}, pmid = {36092002}, issn = {2376-5992}, abstract = {The Internet of Things (IoT) concept has evolved into a slew of applications. To satisfy the requests of these applications, using cloud computing is troublesome because of the high latency caused by the distance between IoT devices and cloud resources.
Fog computing has become promising with its geographically distributed infrastructure for providing resources using fog nodes near IoT devices, thereby reducing the bandwidth and latency. A geographical distribution, heterogeneity and resource constraints of fog nodes introduce the key challenge of placing application modules/services in such a large scale infrastructure. In this work, we propose an improved version of the JAYA approach for optimal placement of modules that minimizes the energy consumption of a fog landscape. We analyzed the performance in terms of energy consumption, network usage, delays and execution time. Using iFogSim, we ran simulations and observed that our approach reduces on average 31% of the energy consumption compared to modern methods.}, } @article {pmid36091662, year = {2022}, author = {Singh, A and Chatterjee, K}, title = {Edge computing based secure health monitoring framework for electronic healthcare system.}, journal = {Cluster computing}, volume = {}, number = {}, pages = {1-16}, pmid = {36091662}, issn = {1386-7857}, abstract = {Nowadays, Smart Healthcare Systems (SHS) are frequently used by people for personal healthcare observations using various smart devices. The SHS uses IoT technology and cloud infrastructure for data capturing, transmitting it through smart devices, data storage, processing, and healthcare advice. Processing such a huge amount of data from numerous IoT devices in a short time is quite challenging. Thus, technological frameworks such as edge computing or fog computing can be used as a middle layer between cloud and user in SHS. It reduces the response time for data processing at the lower level (edge level). But, Edge of Things (EoT) also suffers from security and privacy issues. A robust healthcare monitoring framework with secure data storage and access is needed. It will provide a quick response in case of the production of abnormal data and store/access the sensitive data securely. This paper proposed a Secure Framework based on the Edge of Things (SEoT) for Smart healthcare systems. This framework is mainly designed for real-time health monitoring, maintaining the security and confidentiality of the healthcare data in a controlled manner. This paper included clustering approaches for analyzing bio-signal data for abnormality detection and Attribute-Based Encryption (ABE) for bio-signal data security and secure access. The experimental results of the proposed framework show improved performance with maintaining the accuracy of up to 98.5% and data security.}, } @article {pmid36091551, year = {2022}, author = {Guo, C and Li, H}, title = {Application of 5G network combined with AI robots in personalized nursing in China: A literature review.}, journal = {Frontiers in public health}, volume = {10}, number = {}, pages = {948303}, pmid = {36091551}, issn = {2296-2565}, mesh = {Artificial Intelligence ; China ; Delivery of Health Care ; Humans ; *Robotics ; *Telemedicine ; }, abstract = {The medical and healthcare industry is currently developing into digitization. Attributed to the rapid development of advanced technologies such as the 5G network, cloud computing, artificial intelligence (AI), and big data, and their wide applications in the medical industry, the medical model is shifting into an intelligent one. By combining the 5G network with cloud healthcare platforms and AI, nursing robots can effectively improve the overall medical efficacy. 
Meanwhile, patients can enjoy personalized medical services, the supply and sharing of medical and healthcare services are promoted, and the digital transformation of the healthcare industry is accelerated. In this paper, the application and practice of 5G network technology in the medical industry are introduced, including telecare, 5G first-aid remote medical services, and remote robot applications. Also, by combining the application characteristics of AI with the development requirements of smart healthcare, the overall planning, intelligence, and personalization of the 5G network in the medical industry, as well as the opportunities and challenges of its application in the field of nursing, are discussed. This paper provides references for the development and application of 5G network technology in the field of medical service.}, } @article {pmid36086197, year = {2022}, author = {Amin, AB and Wang, S and David, U and Noh, Y}, title = {Applicability of Cloud Native-based Healthcare Monitoring Platform (CN-HMP) in Older Adult Facilities.}, journal = {Annual International Conference of the IEEE Engineering in Medicine and Biology Society. IEEE Engineering in Medicine and Biology Society. Annual International Conference}, volume = {2022}, number = {}, pages = {2684-2688}, doi = {10.1109/EMBC48229.2022.9871998}, pmid = {36086197}, issn = {2694-0604}, mesh = {Aged ; *Cloud Computing ; Computer Communication Networks ; *Delivery of Health Care ; Electrocardiography ; Health Facilities ; Humans ; }, abstract = {Over the past few decades, the world has faced a huge demographic change in the aging population, which creates significant challenges for healthcare systems. The increasing older adult population, along with the current health workforce shortage, makes it a struggle for current facilities and personnel to meet demand. To tackle this situation, cloud computing, a fast-growing area in digital healthcare, allows setting up a modern distributed system environment capable of scaling to tens of thousands of self-healing multitenant nodes for healthcare applications. In addition, cloud native architecture has recently drawn attention as an ideal structure for multi-node-based healthcare monitoring systems due to its high scalability, low latency, and rapid and stable maintainability. In this study, we proposed a cloud native-based rapid, robust, and productive digital healthcare platform that allows managing and caring for a large number of patient groups. To validate our platform, we simulated our Cloud Native-based Healthcare Monitoring Platform (CN-HMP) with a real-time setup and evaluated its performance in terms of request response time, data packet delivery, and end-to-end latency. We found that it showed a response time of less than 0.1 ms in at least 92.5% of total requests up to 3K requests, no data packet loss, more than 28% of total data packets with no latency, and only ≈ 0.6% of those with maximum latency (3 ms) in a 24-hour observation.
Clinical Relevance- This study and the relevant experiment demonstrate the suitability of the CN-HMP to support providers and nurses in the regular monitoring of elderly patients' health in older adult facilities.}, } @article {pmid36082003, year = {2021}, author = {Aghababaei, M and Ebrahimi, A and Naghipour, AA and Asadi, E and Verrelst, J}, title = {Vegetation Types Mapping Using Multi-Temporal Landsat Images in the Google Earth Engine Platform.}, journal = {Remote sensing}, volume = {13}, number = {22}, pages = {4683}, pmid = {36082003}, issn = {2072-4292}, support = {755617/ERC_/European Research Council/International ; }, abstract = {Vegetation Types (VTs) are important managerial units, and their identification serves as an essential tool for the conservation of land covers. Despite a long history of Earth observation applications to assess and monitor land covers, the quantitative detection of sparse VTs remains problematic, especially in arid and semiarid areas. This research aimed to identify appropriate multi-temporal datasets to improve the accuracy of VTs classification in a heterogeneous landscape in Central Zagros, Iran. To do so, first the Normalized Difference Vegetation Index (NDVI) temporal profile of each VT was identified in the study area for the period of 2018, 2019, and 2020. These data revealed strong seasonal phenological patterns and key periods of VTs separation. It led us to select the optimal time series images to be used in the VTs classification. We then compared single-date and multi-temporal datasets of Landsat 8 images within the Google Earth Engine (GEE) platform as the input to the Random Forest classifier for VTs detection. The single-date classification gave a median Overall Kappa (OK) and Overall Accuracy (OA) of 51% and 64%, respectively. Instead, using multi-temporal images led to an overall kappa accuracy of 74% and an overall accuracy of 81%. Thus, the exploitation of multi-temporal datasets favored accurate VTs classification. In addition, the presented results underline that available open access cloud-computing platforms such as the GEE facilitate identifying optimal periods and multitemporal imagery for VTs classification.}, } @article {pmid36081832, year = {2022}, author = {Estévez, J and Salinero-Delgado, M and Berger, K and Pipia, L and Rivera-Caicedo, JP and Wocher, M and Reyes-Muñoz, P and Tagliabue, G and Boschetti, M and Verrelst, J}, title = {Gaussian processes retrieval of crop traits in Google Earth Engine based on Sentinel-2 top-of-atmosphere data.}, journal = {Remote sensing of environment}, volume = {273}, number = {}, pages = {112958}, pmid = {36081832}, issn = {0034-4257}, support = {755617/ERC_/European Research Council/International ; }, abstract = {The unprecedented availability of optical satellite data in cloud-based computing platforms, such as Google Earth Engine (GEE), opens new possibilities to develop crop trait retrieval models from the local to the planetary scale. Hybrid retrieval models are of interest to run in these platforms as they combine the advantages of physically-based radiative transfer models (RTM) with the flexibility of machine learning regression algorithms. Previous research with GEE primarily relied on processing bottom-of-atmosphere (BOA) reflectance data, which requires atmospheric correction. In the present study, we implemented hybrid models directly into GEE for processing Sentinel-2 (S2) Level-1C (L1C) top-of-atmosphere (TOA) reflectance data into crop traits.
To achieve this, a training dataset was generated using the leaf-canopy RTM PROSAIL in combination with the atmospheric model 6SV. Gaussian process regression (GPR) retrieval models were then established for eight essential crop traits namely leaf chlorophyll content, leaf water content, leaf dry matter content, fractional vegetation cover, leaf area index (LAI), and upscaled leaf variables (i.e., canopy chlorophyll content, canopy water content and canopy dry matter content). An important pre-requisite for implementation into GEE is that the models are sufficiently light in order to facilitate efficient and fast processing. Successful reduction of the training dataset by 78% was achieved using the active learning technique Euclidean distance-based diversity (EBD). With the EBD-GPR models, highly accurate validation results of LAI and upscaled leaf variables were obtained against in situ field data from the validation study site Munich-North-Isar (MNI), with normalized root mean square errors (NRMSE) from 6% to 13%. Using an independent validation dataset of similar crop types (Italian Grosseto test site), the retrieval models showed moderate to good performances for canopy-level variables, with NRMSE ranging from 14% to 50%, but failed for the leaf-level estimates. Obtained maps over the MNI site were further compared against Sentinel-2 Level 2 Prototype Processor (SL2P) vegetation estimates generated from the ESA Sentinels' Application Platform (SNAP) Biophysical Processor, proving high consistency of both retrievals (R [2] from 0.80 to 0.94). Finally, thanks to the seamless GEE processing capability, the TOA-based mapping was applied over the entirety of Germany at 20 m spatial resolution including information about prediction uncertainty. The obtained maps provided confidence of the developed EBD-GPR retrieval models for integration in the GEE framework and national scale mapping from S2-L1C imagery. In summary, the proposed retrieval workflow demonstrates the possibility of routine processing of S2 TOA data into crop traits maps at any place on Earth as required for operational agricultural applications.}, } @article {pmid36081813, year = {2021}, author = {Salinero-Delgado, M and Estévez, J and Pipia, L and Belda, S and Berger, K and Gómez, VP and Verrelst, J}, title = {Monitoring Cropland Phenology on Google Earth Engine Using Gaussian Process Regression.}, journal = {Remote sensing}, volume = {14}, number = {1}, pages = {146}, pmid = {36081813}, issn = {2072-4292}, support = {755617/ERC_/European Research Council/International ; }, abstract = {Monitoring cropland phenology from optical satellite data remains a challenging task due to the influence of clouds and atmospheric artifacts. Therefore, measures need to be taken to overcome these challenges and gain better knowledge of crop dynamics. The arrival of cloud computing platforms such as Google Earth Engine (GEE) has enabled us to propose a Sentinel-2 (S2) phenology end-to-end processing chain. To achieve this, the following pipeline was implemented: (1) the building of hybrid Gaussian Process Regression (GPR) retrieval models of crop traits optimized with active learning, (2) implementation of these models on GEE (3) generation of spatiotemporally continuous maps and time series of these crop traits with the use of gap-filling through GPR fitting, and finally, (4) calculation of land surface phenology (LSP) metrics such as the start of season (SOS) or end of season (EOS). 
Overall, performance ranging from good to high was achieved, in particular for the estimation of canopy-level traits such as leaf area index (LAI) and canopy chlorophyll content, with normalized root mean square errors (NRMSE) of 9% and 10%, respectively. By means of the GPR gap-filling time series of S2, entire tiles were reconstructed, and the resulting maps were demonstrated over an agricultural area in Castile and Leon, Spain, where crop calendar data were available to assess the validity of LSP metrics derived from crop traits. In addition, phenology derived from the normalized difference vegetation index (NDVI) was used as reference. NDVI not only proved to be a robust indicator for the calculation of LSP metrics, but also served to demonstrate the good phenology quality of the quantitative trait products. Thanks to the GEE framework, the proposed workflow can be realized anywhere in the world and for any time window, thus representing a shift in the satellite data processing paradigm. We anticipate that the produced LSP metrics can provide meaningful insights into crop seasonal patterns in a changing environment that demands adaptive agricultural production.}, } @article {pmid36081177, year = {2022}, author = {Kum, S and Oh, S and Yeom, J and Moon, J}, title = {Optimization of Edge Resources for Deep Learning Application with Batch and Model Management.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {17}, pages = {}, pmid = {36081177}, issn = {1424-8220}, mesh = {*Deep Learning ; Workload ; }, abstract = {As deep learning technology matures, real-world applications that make use of it are becoming popular. Edge computing architecture, which makes use of resources near the data source or client, is one of the service architectures for realizing deep learning-based services. In edge computing architecture, it becomes important to manage resource usage, and there is research on the optimization of deep learning, such as pruning or binarization, which makes deep learning models more lightweight, along with research on the efficient distribution of workloads across cloud or edge resources; both aim to reduce the workload on edge resources. In this paper, a usage optimization method with batch and model management is proposed. The proposed method increases the utilization of GPU resources by modifying the batch size of the input of an inference application. To this end, the inference pipelines are identified to see how the different kinds of resources are used, and then the effect of batch inference on the GPU is measured. The proposed method consists of a few modules, including a tool for batch size management, which is able to change the batch size with respect to the available resources, and another for model management, which supports on-the-fly updates of a model. The proposed methods are implemented on a real-time video analysis application and deployed in a Kubernetes cluster as a Docker container.
The result shows that the proposed method can optimize the usage of edge resources for real-time video analysis deep learning applications.}, } @article {pmid36081143, year = {2022}, author = {Strigaro, D and Cannata, M and Lepori, F and Capelli, C and Lami, A and Manca, D and Seno, S}, title = {Open and Cost-Effective Digital Ecosystem for Lake Water Quality Monitoring.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {17}, pages = {}, pmid = {36081143}, issn = {1424-8220}, mesh = {Cost-Benefit Analysis ; *Ecosystem ; *Lakes ; Software ; Water Quality ; }, abstract = {In some sectors of the water resources management, the digital revolution process is slowed by some blocking factors such as costs, lack of digital expertise, resistance to change, etc. In addition, in the era of Big Data, many are the sources of information available in this field, but they are often not fully integrated. The adoption of different proprietary solutions to sense, collect and manage data is one of the main problems that hampers the availability of a fully integrated system. In this context, the aim of the project is to verify if a fully open, cost-effective and replicable digital ecosystem for lake monitoring can fill this gap and help the digitalization process using cloud based technology and an Automatic High-Frequency Monitoring System (AHFM) built using open hardware and software components. Once developed, the system is tested and validated in a real case scenario by integrating the historical databases and by checking the performance of the AHFM system. The solution applied the edge computing paradigm in order to move some computational work from server to the edge and fully exploiting the potential offered by low power consuming devices.}, } @article {pmid36081126, year = {2022}, author = {Azamuddin, WMH and Aman, AHM and Hassan, R and Mansor, N}, title = {Comparison of Named Data Networking Mobility Methodology in a Merged Cloud Internet of Things and Artificial Intelligence Environment.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {17}, pages = {}, pmid = {36081126}, issn = {1424-8220}, mesh = {Artificial Intelligence ; Cloud Computing ; *Internet of Things ; Technology ; }, abstract = {In-network caching has evolved into a new paradigm, paving the way for the creation of Named Data Networking (NDN). Rather than simply being typical Internet technology, NDN serves a range of functions, with a focus on consumer-driven network architecture. The NDN design has been proposed as a method for replacing Internet Protocol (IP) addresses with identified content. This study adds to current research on NDN, artificial intelligence (AI), cloud computing, and the Internet of Things (IoT). The core contribution of this paper is the merging of cloud IoT (C-IoT) and NDN-AI-IoT. To be precise, this study provides possible methodological and parameter explanations of the technologies via three methods: KITE, a producer mobility support scheme (PMSS), and hybrid network mobility (hybrid NeMO). KITE uses the indirection method to transmit content using simple NDN communication; the PMSS improves producer operation by reducing handover latency; and hybrid NeMO provides a binding information table to replace the base function of forwarding information. This study also describes mathematical equations for signaling cost and handover latency. Using the network simulator ndnSIM NS-3, this study highlights producer mobility operation. 
Mathematical equations for each methodology are developed based on the mobility scenario to measure handover latency and signaling cost. The results show that the efficiency of signaling cost for hybrid NeMO is approximately 4% better than that of KITE and the PMSS, while the handover latency for hybrid NeMO is 46% lower than that of KITE and approximately 60% lower than that of the PMSS.}, } @article {pmid36080827, year = {2022}, author = {McRae, MP and Rajsri, KS and Alcorn, TM and McDevitt, JT}, title = {Smart Diagnostics: Combining Artificial Intelligence and In Vitro Diagnostics.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {17}, pages = {}, pmid = {36080827}, issn = {1424-8220}, support = {R01DE031319-01/NH/NIH HHS/United States ; 5U54EB027690-04/NH/NIH HHS/United States ; 3 U01 DE017793-02S1/NH/NIH HHS/United States ; 5 U01 DE017793-2/NH/NIH HHS/United States ; 1RC2DE020785-01/NH/NIH HHS/United States ; 4R44DE 025798-02/NH/NIH HHS/United States ; R01DE024392/NH/NIH HHS/United States ; }, mesh = {Artificial Intelligence ; *Biosensing Techniques ; *COVID-19/diagnosis ; COVID-19 Testing ; Humans ; Microfluidics ; Point-of-Care Systems ; }, abstract = {We are beginning a new era of Smart Diagnostics-integrated biosensors powered by recent innovations in embedded electronics, cloud computing, and artificial intelligence (AI). Universal and AI-based in vitro diagnostics (IVDs) have the potential to exponentially improve healthcare decision making in the coming years. This perspective covers current trends and challenges in translating Smart Diagnostics. We identify essential elements of Smart Diagnostics platforms through the lens of a clinically validated platform for digitizing biology and its ability to learn disease signatures. This platform for biochemical analyses uses a compact instrument to perform multiclass and multiplex measurements using fully integrated microfluidic cartridges compatible with the point of care. Image analysis digitizes biology by transforming fluorescence signals into inputs for learning disease/health signatures. The result is an intuitive Score reported to the patients and/or providers. This AI-linked universal diagnostic system has been validated through a series of large clinical studies and used to identify signatures for early disease detection and disease severity in several applications, including cardiovascular diseases, COVID-19, and oral cancer. The utility of this Smart Diagnostics platform may extend to multiple cell-based oncology tests via cross-reactive biomarkers spanning oral, colorectal, lung, bladder, esophageal, and cervical cancers, and is well-positioned to improve patient care, management, and outcomes through deployment of this resilient and scalable technology. Lastly, we provide a future perspective on the direction and trajectory of Smart Diagnostics and the transformative effects they will have on health care.}, } @article {pmid36079676, year = {2022}, author = {Shi, F and Zhou, B and Zhou, H and Zhang, H and Li, H and Li, R and Guo, Z and Gao, X}, title = {Spatial Autocorrelation Analysis of Land Use and Ecosystem Service Value in the Huangshui River Basin at the Grid Scale.}, journal = {Plants (Basel, Switzerland)}, volume = {11}, number = {17}, pages = {}, pmid = {36079676}, issn = {2223-7747}, abstract = {The Huangshui River Basin is one of the most densely populated areas on the Qinghai-Tibet Plateau and is characterized by a high level of human activity. 
The contradiction between ecological protection and socioeconomic development has become increasingly prominent; determining how to achieve the balanced and coordinated development of the Huangshui River Basin is an important task. Thus, this study used the Google Earth Engine (GEE) cloud-computing platform and Sentinel-1/2 data, supplemented with an ALOS digital elevation model (ALOS DEM) and field survey data, and combined a remote sensing classification method, grid method, and ecosystem service value (ESV) evaluation method to study the spatial correlation and interaction between land use (LU) and ESV in the Huangshui River Basin. The following results were obtained: (1) on the GEE platform, Sentinel-1/2 active and passive remote sensing data, combined with the gradient tree-boosting algorithm, can efficiently produce highly accurate LU data with a spatial resolution of 10 m in the Huangshui River Basin; the overall accuracy (OA) reached 88%. (2) The total ESV in the Huangshui River Basin in 2020 was CNY 33.18 billion (USD 4867.2 million), of which woodland and grassland were the main contributors to ESV. In the Huangshui River Basin, the LU type, LU degree, and ESV have significant positive spatial correlations, with urban and agricultural areas showing an H-H agglomeration in terms of LU degree, with woodlands, grasslands, reservoirs, and wetlands showing an H-H agglomeration in terms of ESV. (3) There is a significant negative spatial correlation between the LU degree and ESV in the Huangshui River Basin, indicating that the enhancement of the LU degree in the basin could have a negative spatial spillover effect on the ESV of surrounding areas. Thus, green development should be the future direction of progress in the Huangshui River Basin, i.e., while maintaining and expanding the land for ecological protection and restoration, and the LU structure should be actively adjusted to ensure ecological security and coordinated and sustainable socioeconomic development in the Basin.}, } @article {pmid36078329, year = {2022}, author = {Feng, H and Wang, F and Song, G and Liu, L}, title = {Digital Transformation on Enterprise Green Innovation: Effect and Transmission Mechanism.}, journal = {International journal of environmental research and public health}, volume = {19}, number = {17}, pages = {}, pmid = {36078329}, issn = {1660-4601}, mesh = {China ; Financing, Government ; *Government ; *Sustainable Development ; }, abstract = {With the development of blockchain, big data, cloud computing and other new technologies, how to achieve innovative development and green sustainable development in digital transformation has become one of the key issues for enterprises to obtain and maintain core competitiveness. However, little of the literature has paid attention to the impact of digital transformation on enterprise green innovation. Using the data of Chinese A-share listed companies from 2010 to 2020, this paper empirically analyzes the impact of enterprise digital transformation on green innovation and its transmission mechanism, by constructing double fixed-effect models. The results show that digital transformation has remarkably promoted the green innovation of enterprises. R&D investment, government subsidies, and income tax burden have played a conductive role between digital transformation and enterprise green innovation. 
Furthermore, digital transformation can significantly promote the high-quality green innovation of enterprises and also plays a more significant role in promoting the green innovation of high-tech enterprises and state-owned enterprises. A robustness test is carried out by using the lag data and changing the measurement methods of the dependent variable and independent variables, and the research conclusions are still valid. Based on resource-based theory and dynamic capability theory, this paper reveals the impact path of digital transformation on enterprise green innovation, further expanding the research field of digital transformation and enriching the research on the influencing factors of enterprise green innovation. This paper provides policy suggestions for the government to improve the enterprise green innovation level by increasing government subsidies and providing tax incentives and also provides reference for digital transformation enterprises to accelerate green innovation by increasing R&D investment, obtaining government subsidies, and acquiring tax policy support.}, } @article {pmid36075919, year = {2022}, author = {Sheffield, NC and Bonazzi, VR and Bourne, PE and Burdett, T and Clark, T and Grossman, RL and Spjuth, O and Yates, AD}, title = {From biomedical cloud platforms to microservices: next steps in FAIR data and analysis.}, journal = {Scientific data}, volume = {9}, number = {1}, pages = {553}, pmid = {36075919}, issn = {2052-4463}, abstract = {The biomedical research community is investing heavily in biomedical cloud platforms. Cloud computing holds great promise for addressing challenges with big data and ensuring reproducibility in biology. However, despite their advantages, cloud platforms in and of themselves do not automatically support FAIRness. The global push to develop biomedical cloud platforms has led to new challenges, including platform lock-in, difficulty integrating across platforms, and duplicated effort for both users and developers. Here, we argue that these difficulties are systemic and emerge from incentives that encourage development effort on self-sufficient platforms and data repositories instead of interoperable microservices. We argue that many of these issues would be alleviated by prioritizing microservices and access to modular data in smaller chunks or summarized form. We propose that emphasizing modularity and interoperability would lead to a more powerful Unix-like ecosystem of web services for biomedical analysis and data retrieval. We challenge funders, developers, and researchers to support a vision to improve interoperability through microservices as the next generation of cloud-based bioinformatics.}, } @article {pmid36072746, year = {2022}, author = {Cheng, Q and Dang, CN}, title = {Using GIS Remote Sensing Image Data for Wetland Monitoring and Environmental Simulation.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {7886358}, pmid = {36072746}, issn = {1687-5273}, mesh = {Artificial Intelligence ; Environmental Monitoring/methods ; Geographic Information Systems ; *Remote Sensing Technology ; *Wetlands ; }, abstract = {Through a comprehensive theoretical basis and actual test analysis of the application system design and functional efficiency of the cloud platform, this paper puts forward an artificial intelligence environmental data monitoring and wetland environmental simulation method based on GIS remote sensing images. 
First, the basic storage and computing functions have been enhanced at the physical layer. Second, the middleware layer is more flexible in the use of management methods and strategies, and many strategies and methods can be used in combination. Finally, based on this, the application system design framework is more convenient and faster, so that developers can focus on business logic, and the strategic advantages of certain functions are very obvious. The method of object-oriented classification and visual interpretation using UAV image data and satellite remote sensing images from the typical recovery area and treatment area of wetland from 2016 to 2020 is described in detail to extract wetland information and use GIS software for dynamic calculation. Using the wetland transmission matrix method, the distribution map of the characteristic types of the survey areas in the four periods and the conversion status of the characteristic types at each stage were obtained, and the effect of wetland treatment was quantitatively studied.}, } @article {pmid36072717, year = {2022}, author = {Aggarwal, A and Kumar, S and Bhatt, A and Shah, MA}, title = {Solving User Priority in Cloud Computing Using Enhanced Optimization Algorithm in Workflow Scheduling.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {7855532}, pmid = {36072717}, issn = {1687-5273}, mesh = {*Algorithms ; *Cloud Computing ; Workflow ; }, abstract = {Cloud computing is a procedure for the storage as well as retrieval of data or computer services over the Internet that allows all its users to remotely access the data centers. Cloud computing provides all required services to the users, but every platform has its share of pros and cons, and another major problem in the cloud is task scheduling or workflow scheduling. Multiple factors are becoming a challenge for scheduling in cloud computing, namely the heterogeneity of resources, tasks, and user priority. User priority has been encountered as the most challenging problem during the last decade as the number of users is increasing worldwide. This issue has been resolved by an advanced encryption standard (AES) algorithm, which decreases the response time and execution delay of the user request. There are multifarious tasks, for instance, deploying the data on the cloud, that will be executed according to first come first serve (FCFS) and not on a payment basis, which provides ease to the users. The investigated techniques are 30.21%, 25.20%, 25.30%, 30.25%, 24.26%, and 36.98% improved in comparison with the traditional FFOA, DE, ABC, PSO, GA, and ETC, respectively. Moreover, during iteration number 5, this approach is 15.20%, 20.22%, 30.56%, 26.30%, and 36.23% improved compared with the traditional techniques FFOA, DE, ABC, PSO, GA, and ETC, respectively.
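To make the first-come-first-serve dispatch mentioned above concrete, here is a toy FCFS task queue; the task names and durations are made up, and this is not the paper's enhanced optimization algorithm.

from collections import deque

# Toy FCFS dispatcher: tasks run strictly in arrival order, regardless of payment tier.
tasks = deque([("deploy-data", 3.0), ("train-model", 5.0), ("backup", 1.0)])  # (name, duration in s)
clock = 0.0
while tasks:
    name, duration = tasks.popleft()  # first come, first served
    clock += duration
    print(f"{name} finished at t={clock:.1f}s")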
This investigated method is more efficient and applicable in certain arenas where user priority is the primary concern and can offer all the required services to the users without any interruption.}, } @article {pmid36065132, year = {2022}, author = {Feser, M and König, P and Fiebig, A and Arend, D and Lange, M and Scholz, U}, title = {On the way to plant data commons - a genotyping use case.}, journal = {Journal of integrative bioinformatics}, volume = {}, number = {}, pages = {}, doi = {10.1515/jib-2022-0033}, pmid = {36065132}, issn = {1613-4516}, abstract = {Over the last years it has been observed that the progress in data collection in life science has created increasing demand and opportunities for advanced bioinformatics. This includes data management as well as the individual data analysis and often covers the entire data life cycle. A variety of tools have been developed to store, share, or reuse the data produced in the different domains such as genotyping. Especially imputation, as a subfield of genotyping, requires good Research Data Management (RDM) strategies to enable use and re-use of genotypic data. To aim for sustainable software, it is necessary to develop tools and surrounding ecosystems, which are reusable and maintainable. Reusability in the context of streamlined tools can e.g. be achieved by standardizing the input and output of the different tools and adapting to open and broadly used file formats. By using such established file formats, the tools can also be connected with others, improving the overall interoperability of the software. Finally, it is important to build strong communities that maintain the tools by developing and contributing new features and maintenance updates. In this article, concepts for this will be presented for an imputation service.}, } @article {pmid36062125, year = {2022}, author = {Guan, J and Xu, H and Wang, Y and Ma, Y and Wang, Y and Gao, R and Yu, K}, title = {Digital Economy and Health: A Case Study of a Leading Enterprise's Value Mining Mode in the Global Big Health Market.}, journal = {Frontiers in public health}, volume = {10}, number = {}, pages = {904186}, pmid = {36062125}, issn = {2296-2565}, mesh = {Aged ; Bayes Theorem ; *COVID-19/epidemiology ; *Ecosystem ; Humans ; Industry ; }, abstract = {Coronavirus disease 2019 (COVID-19) swept across the world and posed a serious threat to human health. Health and elderly care enterprises are committed to continuously improving people's health. With the rapid development of the digital economy, many enterprises have established digital product-service ecosystems after combining "Internet +," big data, cloud computing, and the big health industry. This paper uses the case study method to analyze the overseas market value mining mode of health and elderly care enterprises through in-depth research on leading health and elderly care enterprises. This study explores the value mining mode of the leading enterprise's global big health market using a cluster analysis and Bayesian model with the support of data on geographical characteristics, users' sleep habits, and national big health. 
This paper theoretically summarizes the successful cases of health and elderly care enterprises through digital transformation, which provides a useful reference for the intelligent transformation of the health and elderly care industry.}, } @article {pmid36062066, year = {2022}, author = {Rufin, P and Bey, A and Picoli, M and Meyfroidt, P}, title = {Large-area mapping of active cropland and short-term fallows in smallholder landscapes using PlanetScope data.}, journal = {International journal of applied earth observation and geoinformation : ITC journal}, volume = {112}, number = {}, pages = {102937}, pmid = {36062066}, issn = {1569-8432}, abstract = {Cropland mapping in smallholder landscapes is challenged by complex and fragmented landscapes, labor-intensive and unmechanized land management causing high within-field variability, rapid dynamics in shifting cultivation systems, and substantial proportions of short-term fallows. To overcome these challenges, we here present a large-area mapping framework to identify active cropland and short-term fallows in smallholder landscapes for the 2020/2021 growing season at 4.77 m spatial resolution. Our study focuses on Northern Mozambique, an area comprising 381,698 km[2]. The approach is based on Google Earth Engine and time series of PlanetScope mosaics made openly available through Norway's International Climate and Forest Initiative (NICFI) data program. We conducted multi-temporal coregistration of the PlanetScope data using seasonal Sentinel-2 base images and derived consistent and gap-free seasonal time series metrics to classify active cropland and short-term fallows. An iterative active learning framework based on Random Forest class probabilities was used for training rare classes and uncertain regions. The map was accurate (area-adjusted overall accuracy 88.6% ± 1.5%), with the main error type being the commission of active cropland. Error-adjusted area estimates of active cropland extent (61,799.5 km[2] ± 4,252.5 km[2]) revealed that existing global and regional land cover products tend to under- or over-estimate active cropland extent, respectively. Short-term fallows occupied 28.9% of the cropland in our reference sample (13% of the mapped cropland), with consolidated agricultural regions showing the highest shares of short-term fallows. Our approach relies on openly available PlanetScope data and cloud-based processing in Google Earth Engine, which minimizes financial constraints and maximizes replicability of the methods. All code and maps were made available for further use.}, } @article {pmid36061493, year = {2022}, author = {Zhou, D}, title = {Mobility and interlinkage: the transformation and new approaches for anthropological research.}, journal = {International journal of anthropology and ethnology}, volume = {6}, number = {1}, pages = {13}, doi = {10.1186/s41257-022-00072-x}, pmid = {36061493}, issn = {2366-1003}, abstract = {Mobility and interlinkage have become the most important characteristics of our time. The mobility and interlinkage of people, material and information constitute the way and rules of the operation of today's world. Internet links, cloud computing, complex databases and human computation have changed the way people relate to the world, and thus the anthropology for understanding and interpretation of human cultures has changed correspondingly.
Cultures in the state of mobility and interlinkage, such as spatial changes, the evolution of interpersonal relationships and the new cultural order, have become a new subject.}, } @article {pmid36060618, year = {2022}, author = {Katal, A and Dahiya, S and Choudhury, T}, title = {Energy efficiency in cloud computing data centers: a survey on software technologies.}, journal = {Cluster computing}, volume = {}, number = {}, pages = {1-31}, pmid = {36060618}, issn = {1386-7857}, abstract = {Cloud computing is a commercial and economic paradigm that has gained traction since 2006 and is presently the most significant technology in the IT sector. From the notion of cloud computing to its energy efficiency, the cloud has been the subject of much discussion. The energy consumption of data centres alone will rise from 200 TWh in 2016 to 2967 TWh in 2030. The data centres require a lot of power to provide services, which increases CO2 emissions. In this survey paper, software-based technologies that can be used for building green data centers and that include power management at the individual software level are discussed. The paper discusses the energy efficiency in containers and problem-solving approaches used for reducing power consumption in data centers. Further, the paper also gives details about the impact of data centers on the environment, including e-waste and the various standards adopted by different countries for rating data centers. This article goes beyond just demonstrating new green cloud computing possibilities. Instead, it focuses the attention and resources of academia and society on a critical issue: long-term technological advancement. The article covers the new technologies that can be applied at the individual software level, including techniques applied at the virtualization, operating system, and application levels. It clearly defines different measures at each level to reduce energy consumption, which clearly adds value to the current environmental problem of pollution reduction. This article also addresses the difficulties, concerns, and needs that cloud data centres and cloud organisations must grasp, as well as some of the factors and case studies that influence green cloud usage.}, } @article {pmid36059591, year = {2022}, author = {Moqurrab, SA and Tariq, N and Anjum, A and Asheralieva, A and Malik, SUR and Malik, H and Pervaiz, H and Gill, SS}, title = {A Deep Learning-Based Privacy-Preserving Model for Smart Healthcare in Internet of Medical Things Using Fog Computing.}, journal = {Wireless personal communications}, volume = {126}, number = {3}, pages = {2379-2401}, pmid = {36059591}, issn = {0929-6212}, abstract = {With the emergence of COVID-19, smart healthcare, the Internet of Medical Things, and big data-driven medical applications have become even more important. The biomedical data produced is highly confidential and private. Unfortunately, conventional health systems cannot support such a colossal amount of biomedical data. Hence, data is typically stored and shared through the cloud. The shared data is then used for different purposes, such as research and discovery of unprecedented facts. Typically, biomedical data appear in textual form (e.g., test reports, prescriptions, and diagnosis). Unfortunately, such data is prone to several security threats and attacks, for example, privacy and confidentiality breaches.
Although significant progress has been made on securing biomedical data, most existing approaches yield long delays and cannot accommodate real-time responses. This paper proposes a novel fog-enabled privacy-preserving model called δr-sanitizer, which uses deep learning to improve the healthcare system. The proposed model is based on a Convolutional Neural Network with Bidirectional-LSTM and effectively performs Medical Entity Recognition. The experimental results show that δr-sanitizer outperforms the state-of-the-art models with 91.14% recall, 92.63% precision, and a 92% F1-score. The sanitization model shows 28.77% improved utility preservation as compared to the state-of-the-art.}, } @article {pmid36059392, year = {2022}, author = {Srivastava, DK and Tiwari, PK and Srivastava, M and Dawadi, BR}, title = {An Energy-Efficient Strategy and Secure VM Placement Algorithm in Cloud Computing.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {5324202}, pmid = {36059392}, issn = {1687-5273}, abstract = {One of the important and challenging tasks in cloud computing is to obtain the usefulness of the cloud by implementing several specifications for our needs, to meet the present growing demands, and to minimize energy consumption as much as possible and ensure proper utilization of computing resources. An excellent mapping scheme has been derived which maps virtual machines (VMs) to physical machines (PMs), which is also known as virtual machine (VM) placement, and this needs to be implemented. The tremendous diversity of computing resources, tasks, and virtualization processes in the cloud causes the consolidation method to be more complex, tedious, and problematic. An algorithm for reducing energy use and resource allocation is proposed for implementation in this article. This algorithm was developed with the help of a Cloud System Model, which enables mapping between VMs and PMs and among tasks of VMs. The methodology used in this algorithm also supports lowering the number of PMs that are in an active state and optimizes the total time taken to process a set of tasks (also known as makespan time). Using the CloudSim Simulator tool, we evaluated and assessed the energy consumption and makespan time. The results are compiled and then compared graphically with respect to other existing energy-efficient VM placement algorithms.}, } @article {pmid36052034, year = {2022}, author = {Gan, B and Zhang, C}, title = {An Improved Model of Product Classification Feature Extraction and Recognition Based on Intelligent Image Recognition.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {2926669}, pmid = {36052034}, issn = {1687-5273}, mesh = {*Algorithms ; Cloud Computing ; Commerce ; Humans ; *Software ; }, abstract = {With the development of the new generation of technological revolution, the manufacturing industry has entered the era of intelligent manufacturing, and people have higher and higher requirements for the technology, industry, and application of product manufacturing. At present, some factories have introduced intelligent image recognition technology into the production process in order to meet the needs of customers' personalized customization. However, the current image recognition technology has limited capabilities.
When faced with many special customized products or complex types of small batch products in the market, it is still impossible to perfectly analyze the product requirements and put them into production. Therefore, this paper conducts in-depth research on the improved model of product classification feature extraction and recognition based on intelligent image recognition: 3D modeling of the target product is carried out, and various data of the model are analyzed and recorded to facilitate subsequent work. Use the tools and the established 3D model to simulate the parameters of the product in the real scene, and record them. At the same time, various methods such as image detection and edge analysis are used to maximize the accuracy of the obtained parameters, and various algorithms are used for cross-validation to obtain the correct rate of the obtained data, and the standard is 90% and above. Build a data platform, compare simulated data with display data by software and algorithm, and check by cloud computing force, so that the model data can be as close to the parameters of the real product as possible. Experimental results show that the algorithm has high accuracy and can meet the requirements of different classification prospects in actual production.}, } @article {pmid36048352, year = {2022}, author = {Jiang, F and Deng, M and Tang, J and Fu, L and Sun, H}, title = {Integrating spaceborne LiDAR and Sentinel-2 images to estimate forest aboveground biomass in Northern China.}, journal = {Carbon balance and management}, volume = {17}, number = {1}, pages = {12}, pmid = {36048352}, issn = {1750-0680}, abstract = {BACKGROUND: Fast and accurate forest aboveground biomass (AGB) estimation and mapping is the basic work of forest management and ecosystem dynamic investigation, which is of great significance to evaluate forest quality, resource assessment, and carbon cycle and management. The Ice, Cloud, and Land Elevation Satellite-2 (ICESat-2), as one of the latest launched spaceborne light detection and ranging (LiDAR) sensors, can penetrate the forest canopy and has the potential to obtain accurate forest vertical structure parameters on a large scale. However, the along-track segments of canopy height provided by ICESat-2 cannot be used to obtain comprehensive AGB spatial distribution. To make up for the deficiency of spaceborne LiDAR, the Sentinel-2 images provided by google earth engine (GEE) were used as the medium to integrate with ICESat-2 for continuous AGB mapping in our study. Ensemble learning can summarize the advantages of estimation models and achieve better estimation results. A stacking algorithm consisting of four non-parametric base models which are the backpropagation (BP) neural network, k-nearest neighbor (kNN), support vector machine (SVM), and random forest (RF) was proposed for AGB modeling and estimating in Saihanba forest farm, northern China.
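The stacking design just described can be illustrated with scikit-learn; the sketch below uses MLPRegressor as a stand-in for the BP neural network and synthetic arrays in place of the Sentinel-2/ICESat-2 predictors, so it is illustrative rather than the authors' pipeline.

import numpy as np
from sklearn.ensemble import RandomForestRegressor, StackingRegressor
from sklearn.linear_model import LinearRegression
from sklearn.neighbors import KNeighborsRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.svm import SVR

# Synthetic stand-ins: 8 spectral/LiDAR metrics per plot, AGB in Mg/ha.
rng = np.random.default_rng(42)
X, y = rng.random((200, 8)), rng.random(200) * 300

# Four non-parametric base learners; a linear meta-learner combines their
# cross-validated predictions, as in a standard stacking design.
stack = StackingRegressor(
    estimators=[("bp", MLPRegressor(max_iter=2000)),
                ("knn", KNeighborsRegressor()),
                ("svm", SVR()),
                ("rf", RandomForestRegressor())],
    final_estimator=LinearRegression(),
    cv=5,
)
stack.fit(X, y)
print(stack.predict(X[:3]))  # AGB estimates for three plots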

RESULTS: The results show that stacking achieved the best AGB estimation accuracy among the models, with an R[2] of 0.71 and a root mean square error (RMSE) of 45.67 Mg/ha. Stacking resulted in the lowest estimation error, decreasing RMSE by 22.6%, 27.7%, 23.4%, and 19.0% compared with the BP, kNN, SVM, and RF models, respectively.

CONCLUSION: Compared with using Sentinel-2 alone, the estimation errors of all models have been significantly reduced after adding the LiDAR variables of ICESat-2 in AGB estimation. The research demonstrated that ICESat-2 has the potential to improve the accuracy of AGB estimation and provides a reference for dynamic forest resources management and monitoring.}, } @article {pmid36048148, year = {2022}, author = {Krissinel, E and Lebedev, AA and Uski, V and Ballard, CB and Keegan, RM and Kovalevskiy, O and Nicholls, RA and Pannu, NS and Skubák, P and Berrisford, J and Fando, M and Lohkamp, B and Wojdyr, M and Simpkin, AJ and Thomas, JMH and Oliver, C and Vonrhein, C and Chojnowski, G and Basle, A and Purkiss, A and Isupov, MN and McNicholas, S and Lowe, E and Triviño, J and Cowtan, K and Agirre, J and Rigden, DJ and Uson, I and Lamzin, V and Tews, I and Bricogne, G and Leslie, AGW and Brown, DG}, title = {CCP4 Cloud for structure determination and project management in macromolecular crystallography.}, journal = {Acta crystallographica. Section D, Structural biology}, volume = {78}, number = {Pt 9}, pages = {1079-1089}, pmid = {36048148}, issn = {2059-7983}, support = {BB/L007037/1/BB_/Biotechnology and Biological Sciences Research Council/United Kingdom ; BB/S007040/1/BB_/Biotechnology and Biological Sciences Research Council/United Kingdom ; BB/S007083/1/BB_/Biotechnology and Biological Sciences Research Council/United Kingdom ; BB/S005099/1/BB_/Biotechnology and Biological Sciences Research Council/United Kingdom ; BB/S007105/1/BB_/Biotechnology and Biological Sciences Research Council/United Kingdom ; BBF020384/1/BB_/Biotechnology and Biological Sciences Research Council/United Kingdom ; MC_UP_A025_1012/MRC_/Medical Research Council/United Kingdom ; MC_U105184325/MRC_/Medical Research Council/United Kingdom ; }, mesh = {*Cloud Computing ; Crystallography, X-Ray ; Macromolecular Substances/chemistry ; *Software ; }, abstract = {Nowadays, progress in the determination of three-dimensional macromolecular structures from diffraction images is achieved partly at the cost of increasing data volumes. This is due to the deployment of modern high-speed, high-resolution detectors, the increased complexity and variety of crystallographic software, the use of extensive databases and high-performance computing. This limits what can be accomplished with personal, offline, computing equipment in terms of both productivity and maintainability. There is also an issue of long-term data maintenance and availability of structure-solution projects as the links between experimental observations and the final results deposited in the PDB. In this article, CCP4 Cloud, a new front-end of the CCP4 software suite, is presented which mitigates these effects by providing an online, cloud-based environment for crystallographic computation. CCP4 Cloud was developed for the efficient delivery of computing power, database services and seamless integration with web resources. It provides a rich graphical user interface that allows project sharing and long-term storage for structure-solution projects, and can be linked to data-producing facilities. 
The system is distributed with the CCP4 software suite version 7.1 and higher, and an online publicly available instance of CCP4 Cloud is provided by CCP4.}, } @article {pmid36046635, year = {2022}, author = {Nickel, S and Bremer, K and Dierks, ML and Haack, M and Wittmar, S and Borgetto, B and Kofahl, C}, title = {Digitization in health-related self-help - Results of an online survey among self-help organizations in Germany.}, journal = {Digital health}, volume = {8}, number = {}, pages = {20552076221120726}, pmid = {36046635}, issn = {2055-2076}, abstract = {BACKGROUND: Nowadays, much hope and expectations are associated with digitization in the health sector. The digital change also affects health-related self-help. A nationwide survey of self-help organizations (SHOs) aimed to show chances and limitations in the use of interactive IT tools like webforums, online meetings or social media as well as digital infrastructures for their organizational management. In this survey, we also determined whether SHO staff themselves have support and qualification needs with regard to this topic.

DESIGN: The online survey was conducted between 14 November and 8 December 2019, i.e., immediately before the outbreak of the Covid-19 pandemic. The questionnaire comprised 50 questions with 180 single items and could be answered in 30-40 min. After two reminder letters, 119 questionnaires from the SHOs were gathered and analysed.

RESULTS: SHOs already have a lot of experience with digital media/tools (e.g., own homepage, social media, cloud computing). Some tools are rated as having a "high" or "very high" benefit by more than 80% of their users. The perceived benefits, however, are accompanied by a number of problems, ranging from lack of resources to data protection issues. Despite, or even because of, the limits of digitization, there is great desire and need for support and further training in SHOs (and self-help groups).

CONCLUSIONS: At many points in the survey it was shown that digital media can be a useful extension of "traditional" collective self-help. Taking into account the risks and limitations associated with digital tools, SHOs can be central stakeholders in digitization in health-related self-help.

The study was financially supported by the Federal Ministry of Health, Germany. A detailed representation of the results is publicly available at: https://www.uke.de/dish.}, } @article {pmid36035822, year = {2022}, author = {Zala, K and Thakkar, HK and Jadeja, R and Dholakia, NH and Kotecha, K and Jain, DK and Shukla, M}, title = {On the Design of Secured and Reliable Dynamic Access Control Scheme of Patient E-Healthcare Records in Cloud Environment.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {3804553}, pmid = {36035822}, issn = {1687-5273}, mesh = {*Computer Security ; Confidentiality ; Delivery of Health Care ; Humans ; Privacy ; *Telemedicine ; }, abstract = {Traditional healthcare services have changed into modern ones in which doctors can diagnose patients from a distance. All stakeholders, including patients, ward boys, life insurance agents, physicians, and others, have easy access to patients' medical records due to cloud computing. The cloud's services are very cost-effective and scalable, and provide various mobile access options for a patient's electronic health records (EHRs). EHR privacy and security are critical concerns despite the many benefits of the cloud. Patient health information is extremely sensitive and important, and sending it over an unencrypted wireless medium raises a number of security hazards. This study suggests an innovative and secure access system for cloud-based electronic healthcare services storing patient health records in a third-party cloud service provider. The research considers the remote healthcare requirements for maintaining patient information integrity, confidentiality, and security. There will be fewer attacks on e-healthcare records now that stakeholders will have a safe interface and data on the cloud will not be accessible to them. End-to-end encryption is ensured by using multiple keys generated by the key conclusion function (KCF), and access to cloud services is granted based on a person's identity and the relationship between the parties involved, which protects their personal information; this is the methodology used in the proposed scheme. The proposed scheme is best suited for cloud-based e-healthcare services because of its simplicity and robustness. Using different Amazon EC2 hosting options, we examine how well our cloud-based web application service works when the number of requests linearly increases. The performance of our web application service that runs in the cloud is based on how many requests it can handle per second while keeping its response time constant. The proposed secure access scheme for cloud-based web applications was compared to the Ethereum blockchain platform, which uses internet of things (IoT) devices, in terms of execution time, throughput, and latency.}, } @article {pmid36033780, year = {2022}, author = {Deng, C and Yu, Q and Luo, G and Zhao, Z and Li, Y}, title = {Big data-driven intelligent governance of college students' physical health: System and strategy.}, journal = {Frontiers in public health}, volume = {10}, number = {}, pages = {924025}, pmid = {36033780}, issn = {2296-2565}, mesh = {*Artificial Intelligence ; *Big Data ; Exercise ; Humans ; Students ; Surveys and Questionnaires ; }, abstract = {With the development of information technology, the application of a new generation of information technologies, such as big data, Internet Plus, and artificial intelligence, in the sports field is an emerging, novel trend.
This paper examined the relevant research results and literature on physical education, computer science, pedagogy, management, and other disciplines, then used a self-made questionnaire to investigate the physical health status of Chinese college students. The big data were subsequently analyzed, which provided a scientific basis for the construction of an intelligent governance system for college students' physical health. Intelligent devices may be used to obtain big data resources, master the physical sports development and psychological status of college students, and push personalized sports prescriptions to solve the problems existing in college students' physical health. Research shows that there are four reasons for the continuous decline in Chinese college students' physical health levels. These are students' lack of positive exercise consciousness and healthy sports values (85.43%), a weak family sports concept and lack of physical exercise habits (62.76%), poor implementation of school sports policies (55.35%), and people's distorted sports value orientation (42.27%). Through the connecting effect of data, we can bring together the positive role of the government, school, society, family, and students so as to create an interlinked impact to promote students' physical health. The problems of insufficient platform utilization, lack of teaching resources, lagging research, and insufficient combination with big data in the intelligent governance of physical health of Chinese college students can be solved by building an intelligent governance system of physical health. Such a system would be composed of school infrastructure, data resources and technology processing, and intelligent service applications. Among these, school infrastructure refers to the material foundation and technical support. The material foundation includes perceptions, storage, computing, networks, and other equipment, and the technical support includes cloud computing, mobile Internet, the Internet of Things, artificial intelligence, and deep learning. Data resources refer to smart data, such as stadium data, physical health management data, and students' sports behavior data, which are mined from data resources such as students' physical development, physical health, and sports through big data technology and intelligent wearable devices. Intelligent managers provide efficient, intelligent, accurate, and personalized intelligent sports services for college students through data resource value mining, venue space-time optimization, health knowledge discovery, sports prescription pushes, etc. Finally, we put forward the development strategy for further deepening and improving the big data-driven intelligent governance system for college students' physical health. 
The intelligent governance system of physical health driven by big data and its development strategy can not only accurately guide and improve the physical health level of college students but also realize integrated teaching inside and outside physical education classes.}, } @article {pmid36033031, year = {2022}, author = {Liu, Y and Chen, L and Yao, Z}, title = {The application of artificial intelligence assistant to deep learning in teachers' teaching and students' learning processes.}, journal = {Frontiers in psychology}, volume = {13}, number = {}, pages = {929175}, pmid = {36033031}, issn = {1664-1078}, abstract = {With the emergence of big data, cloud computing, and other technologies, artificial intelligence (AI) technology has set off a new wave in the field of education. The application of AI technology to deep learning in university teachers' teaching and students' learning processes is an innovative way to promote the quality of teaching and learning. This study proposed the deep learning-based assessment to measure whether students experienced an improvement in terms of their mastery of knowledge, development of abilities, and emotional experiences. It also used comparative analysis of pre-tests and post-tests through online questionnaires to test the results. The study analyzed the impact of technology on teachers' teaching and students' learning processes, identified the problems in the teaching and learning processes in the context of the application of AI technology, and proposed strategies for reforming and optimizing teaching and learning. It recommends the application of software and platforms, such as Watson and Knewton, under the orientation of AI technology to improve efficiency in teaching and learning, optimize course design, and engage students in deep learning. The contribution of this research is that the teaching and learning processes will be enhanced by the use of intelligent and efficient teaching models on the teachers' side and personalized and in-depth learning on the students' side. On the one hand, the findings are helpful for teachers to better grasp the actual conditions of in-class teaching in real time, carry out intelligent lesson preparations, enrich teaching methods, improve teaching efficiency, and achieve personalized and precision teaching. On the other hand, it also provides a space of intelligent support for students with different traits in terms of learning and effectively improves students' innovation ability, ultimately achieving the purpose of "artificial intelligence + education."}, } @article {pmid36032802, year = {2022}, author = {Mi, J and Sun, X and Zhang, S and Liu, N}, title = {Residential Environment Pollution Monitoring System Based on Cloud Computing and Internet of Things.}, journal = {International journal of analytical chemistry}, volume = {2022}, number = {}, pages = {1013300}, pmid = {36032802}, issn = {1687-8760}, abstract = {In order to solve the problems of single monitoring factor, weak comprehensive analysis ability, and poor real-time performance in traditional environmental monitoring systems, a research method of residential environment pollution monitoring system based on cloud computing and Internet of Things is proposed. The method mainly includes two parts: an environmental monitoring terminal and an environmental pollution monitoring and management platform. Through the Wi-Fi module, the data is sent to the environmental pollution monitoring and management platform in real time.
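The abstract does not name the transport protocol behind that Wi-Fi link; the sketch below shows one conventional choice, an MQTT publish of a single sensor reading, with a hypothetical broker hostname and topic.

import json
import paho.mqtt.publish as publish

# One-shot publish from the monitoring terminal to the platform's broker
# (hostname, port, and topic are hypothetical; reading values are made up).
reading = {"pm25": 38.2, "noise_db": 52.1, "gas_ppm": 0.4}
publish.single("home/env/readings", json.dumps(reading),
               hostname="monitor.example.com", port=1883)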
The environmental monitoring management platform is mainly composed of an environmental pollution monitoring server, a web server, and a mobile terminal. The results are as follows. The data measured by the system is close to the data measured by the instrument, and the overall error is small: the measurement error is about 6% for harmful gases, about 6.5% for PM 2.5, and about 1% for noise. The average time for sensor data update is 0.762 s. The average alarm response time is 2 s. The average data transfer time is 2 s. Practice has proved that the environmental pollution monitoring and alarm system operates stably and can realize real-time collection and transmission of data such as noise, PM 2.5, harmful gas concentration, illumination, GPS, and video images, providing a reliable guarantee for timely environmental pollution control.}, } @article {pmid36017455, year = {2022}, author = {Venkateswarlu, Y and Baskar, K and Wongchai, A and Gauri Shankar, V and Paolo Martel Carranza, C and Gonzáles, JLA and Murali Dharan, AR}, title = {An Efficient Outlier Detection with Deep Learning-Based Financial Crisis Prediction Model in Big Data Environment.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {4948947}, pmid = {36017455}, issn = {1687-5273}, mesh = {Algorithms ; *Big Data ; Cloud Computing ; *Deep Learning ; Machine Learning ; }, abstract = {Big Data, Internet of Things (IoT), cloud computing (CC), and other ideas and technologies are being combined for social interactions. Big data technologies improve the treatment of financial data for businesses. At present, an effective tool can be used to forecast the financial failures and crises of small and medium-sized enterprises. Financial crisis prediction (FCP) plays a major role in the country's economic phenomenon. Accurate forecasting of the number and probability of failure is an indication of the development and strength of national economies. Normally, distinct approaches are planned for an effective FCP. However, classifier efficiency, predictive accuracy, and data legality may not be optimal for practical application. In this view, this study develops an oppositional ant lion optimizer-based feature selection with a machine learning-enabled classification (OALOFS-MLC) model for FCP in a big data environment. For big data management in the financial sector, the Hadoop MapReduce tool is used. In addition, the presented OALOFS-MLC model designs a new OALOFS algorithm to choose an optimal subset of features which helps to achieve improved classification results. In addition, the deep random vector functional links network (DRVFLN) model is used to perform the grading process.
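The "oppositional" ingredient in optimizers of this family usually refers to opposition-based learning, in which each random candidate is compared against its mirror point within the search bounds; the sketch below shows only that generic step under assumed bounds and a placeholder objective, not the authors' OALOFS algorithm.

import numpy as np

# Opposition-based initialization: for each candidate x in [lb, ub],
# also evaluate its opposite lb + ub - x and keep the fitter of the two.
rng = np.random.default_rng(0)
lb, ub, n, dim = 0.0, 1.0, 20, 10
pop = rng.uniform(lb, ub, (n, dim))      # random candidate feature weights
opp = lb + ub - pop                      # the "opposite" population

def fitness(p):
    # Placeholder objective standing in for classification accuracy.
    return -np.abs(p.sum(axis=1) - dim / 2)

keep = fitness(pop) >= fitness(opp)
pop = np.where(keep[:, None], pop, opp)  # per-candidate selection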
Experimental validation of the OALOFS-MLC approach was conducted using a baseline dataset, and the results demonstrated the supremacy of the OALOFS-MLC algorithm over recent approaches.}, } @article {pmid36016907, year = {2022}, author = {Reyes-Muñoz, P and Pipia, L and Salinero-Delgado, M and Belda, S and Berger, K and Estévez, J and Morata, M and Rivera-Caicedo, JP and Verrelst, J}, title = {Quantifying Fundamental Vegetation Traits over Europe Using the Sentinel-3 OLCI Catalogue in Google Earth Engine.}, journal = {Remote sensing}, volume = {14}, number = {6}, pages = {1347}, pmid = {36016907}, issn = {2072-4292}, support = {755617/ERC_/European Research Council/International ; }, abstract = {Thanks to the emergence of cloud-computing platforms and the ability of machine learning methods to solve prediction problems efficiently, this work presents a workflow to automate spatiotemporal mapping of essential vegetation traits from Sentinel-3 (S3) imagery. The traits included leaf chlorophyll content (LCC), leaf area index (LAI), fraction of absorbed photosynthetically active radiation (FAPAR), and fractional vegetation cover (FVC), being fundamental for assessing photosynthetic activity on Earth. The workflow involved Gaussian process regression (GPR) algorithms trained on top-of-atmosphere (TOA) radiance simulations generated by the coupled canopy radiative transfer model (RTM) SCOPE and the atmospheric RTM 6SV. The retrieval models, named S3-TOA-GPR-1.0, were directly implemented in Google Earth Engine (GEE) to enable the quantification of the traits from TOA data as acquired from the S3 Ocean and Land Colour Instrument (OLCI) sensor. Following good to high theoretical validation results with normalized root mean square error (NRMSE) ranging from 5% (FAPAR) to 19% (LAI), a threefold evaluation approach over diverse sites and land cover types was pursued: (1) temporal comparison against LAI and FAPAR products obtained from Moderate Resolution Imaging Spectroradiometer (MODIS) for the time window 2016-2020, (2) spatial difference mapping with Copernicus Global Land Service (CGLS) estimates, and (3) direct validation using interpolated in situ data from the VALERI network. For all three approaches, promising results were achieved. Selected sites demonstrated coherent seasonal patterns compared to LAI and FAPAR MODIS products, with differences between spatially averaged temporal patterns of only 6.59%. With respect to the spatial mapping comparison, estimates provided by the S3-TOA-GPR-1.0 models indicated highest consistency with FVC and FAPAR CGLS products. Moreover, the direct validation of our S3-TOA-GPR-1.0 models against VALERI estimates indicated good retrieval performance for LAI, FAPAR and FVC.
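A minimal sketch of the core retrieval step described above: training a Gaussian process regressor on RTM-simulated TOA radiance to retrieve a trait such as LAI. The arrays are random stand-ins for the SCOPE/6SV simulations, so this is illustrative only, not the S3-TOA-GPR-1.0 models.

import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, WhiteKernel

# Random stand-ins: 21 OLCI-like TOA bands per simulated spectrum, LAI targets.
rng = np.random.default_rng(1)
X_sim = rng.random((500, 21))
y_lai = rng.random(500) * 7.0

# A GPR retrieval model returns a per-pixel mean and an uncertainty estimate.
gpr = GaussianProcessRegressor(kernel=RBF() + WhiteKernel(), normalize_y=True)
gpr.fit(X_sim, y_lai)
lai_mean, lai_std = gpr.predict(X_sim[:5], return_std=True)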
We conclude that our retrieval workflow, which processes spatiotemporal S3 TOA data in GEE, opens the path towards global monitoring of fundamental vegetation traits, accessible to the whole research community.}, } @article {pmid36016060, year = {2022}, author = {Thilakarathne, NN and Bakar, MSA and Abas, PE and Yassin, H}, title = {A Cloud Enabled Crop Recommendation Platform for Machine Learning-Driven Precision Farming.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {16}, pages = {}, pmid = {36016060}, issn = {1424-8220}, mesh = {*Agriculture ; *Artificial Intelligence ; Crops, Agricultural ; Farms ; Machine Learning ; }, abstract = {Modern agriculture has incorporated a portfolio of technologies to meet the current demand for agricultural food production, in terms of both quality and quantity. In this technology-driven farming era, this portfolio of technologies has aided farmers to overcome many of the challenges associated with their farming activities by enabling precise and timely decision making on the basis of data that are observed and subsequently converged. In this regard, Artificial Intelligence (AI) holds a key place, whereby it can assist key stakeholders in making precise decisions regarding the conditions on their farms. Machine Learning (ML), which is a branch of AI, enables systems to learn and improve from their experience without explicitly being programmed, by imitating intelligent behavior in solving tasks in a manner that requires low computational power. For the time being, ML is involved in a variety of aspects of farming, assisting ranchers in making smarter decisions on the basis of the observed data. In this study, we provide an overview of AI-driven precision farming/agriculture with related work and then propose a novel cloud-based ML-powered crop recommendation platform to assist farmers in deciding which crops need to be harvested based on a variety of known parameters. Moreover, in this paper, we compare five predictive ML algorithms-K-Nearest Neighbors (KNN), Decision Tree (DT), Random Forest (RF), Extreme Gradient Boosting (XGBoost) and Support Vector Machine (SVM)-to identify the best-performing ML algorithm on which to build our recommendation platform as a cloud-based service, with the intention of offering precision farming solutions that are free and open source, as this will lead to the growth and adoption of precision farming solutions in the long run.}, } @article {pmid36016017, year = {2022}, author = {Rocha Filho, GP and Brandão, AH and Nobre, RA and Meneguette, RI and Freitas, H and Gonçalves, VP}, title = {HOsT: Towards a Low-Cost Fog Solution via Smart Objects to Deal with the Heterogeneity of Data in a Residential Environment.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {16}, pages = {}, pmid = {36016017}, issn = {1424-8220}, mesh = {*Environment ; }, abstract = {With the fast and unstoppable development of technology, the number of available technological devices and the data they produce are overwhelming. In analyzing the context of a smart home, a diverse group of intelligent devices generating constant reports of its environment information is needed for the proper control of the house. Due to this demand, many possible solutions have been developed in the literature to assess the need for processing power and storage capacity.
This work proposes HOsT (home-context-aware fog-computing solution)-a solution that addresses the problems of data heterogeneity and the interoperability of smart objects in the context of a smart home. HOsT was modeled to compose a set of intelligent objects to form a computational infrastructure in fog. A publish/subscribe communication module was implemented to abstract the details of communication between objects to disseminate heterogeneous information. A performance evaluation was carried out to validate HOsT. The results show evidence of efficiency in the communication infrastructure and of the impact of HOsT compared with a cloud infrastructure. Furthermore, HOsT provides scalability with respect to the number of devices acting simultaneously and demonstrates its ability to work with different devices.}, } @article {pmid36016014, year = {2022}, author = {Bemani, A and Björsell, N}, title = {Aggregation Strategy on Federated Machine Learning Algorithm for Collaborative Predictive Maintenance.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {16}, pages = {}, pmid = {36016014}, issn = {1424-8220}, mesh = {*Algorithms ; Computer Simulation ; *Machine Learning ; Privacy ; Support Vector Machine ; }, abstract = {Industry 4.0 lets the industry build compact, precise, and connected assets and also has made modern industrial assets a massive source of data that can be used in process optimization, defining product quality, and predictive maintenance (PM). Large amounts of data are collected from machines, processed, and analyzed by different machine learning (ML) algorithms to achieve effective PM. These machines, assumed as edge devices, transmit their data readings to the cloud for processing and modeling. Transmitting massive amounts of data between edge and cloud is costly, increases latency, and causes privacy concerns. To address this issue, efforts have been made to use edge computing in PM applications, reducing data transmission costs and increasing processing speed. Federated learning (FL) has been proposed as a mechanism that provides the ability to create a model from distributed data in edge, fog, and cloud layers without violating privacy and offers new opportunities for a collaborative approach to PM applications. However, FL faces challenges in industrial asset management, especially in PM applications, which need to be considered in order for it to be fully compatible with these applications. This study describes distributed ML for PM applications and proposes two federated algorithms: federated support vector machine (FedSVM) with memory for anomaly detection and federated long-short term memory (FedLSTM) for remaining useful life (RUL) estimation, which enable factories at the fog level to maximize their PM models' accuracy without compromising their privacy. A global model at the cloud level has also been generated based on these algorithms.
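The cloud-level aggregation step in schemes like this is commonly a FedAvg-style weighted average of client parameters; the sketch below shows only that generic averaging step with hypothetical layer shapes and client sizes, not the FedSVM/FedLSTM specifics.

import numpy as np

def federated_average(client_weights, client_sizes):
    # Weighted average of parameter arrays; one list of layer arrays per client,
    # weighted by each client's local sample count.
    total = sum(client_sizes)
    return [sum(w[i] * (n / total) for w, n in zip(client_weights, client_sizes))
            for i in range(len(client_weights[0]))]

w_a = [np.ones((4, 4)), np.zeros(4)]       # factory A's layer weights (hypothetical)
w_b = [np.full((4, 4), 3.0), np.ones(4)]   # factory B's layer weights (hypothetical)
global_w = federated_average([w_a, w_b], client_sizes=[100, 300])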
We have evaluated the approach using the Commercial Modular Aero-Propulsion System Simulation (CMAPSS) dataset to predict engines' RUL. Experimental results demonstrate the advantage of FedSVM and FedLSTM in terms of model accuracy, model convergence time, and network resource usage.}, } @article {pmid36015736, year = {2022}, author = {Chen, YS and Cheng, KH and Hsu, CS and Zhang, HL}, title = {MiniDeep: A Standalone AI-Edge Platform with a Deep Learning-Based MINI-PC and AI-QSR System.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {16}, pages = {}, pmid = {36015736}, issn = {1424-8220}, mesh = {*Artificial Intelligence ; *Deep Learning ; Software ; }, abstract = {In this paper, we present a new AI (Artificial Intelligence) edge platform, called "MiniDeep", which provides a standalone deep learning platform based on the cloud-edge architecture. This AI-Edge platform provides developers with a whole deep learning development environment to set up their deep learning life cycle processes, such as model training, model evaluation, model deployment, model inference, ground truth collecting, data pre-processing, and training data management. To the best of our knowledge, such a whole deep learning development environment has not been built before. MiniDeep uses Amazon Web Services (AWS) as the backend platform of a deep learning tuning management model. On the edge device, OpenVINO enables deep learning inference acceleration at the edge. To perform a deep learning life cycle job, MiniDeep proposes a mini deep life cycle (MDLC) system which is composed of several microservices from the cloud to the edge. MiniDeep provides Train Job Creator (TJC) for training dataset management and the models' training schedule and Model Packager (MP) for model package management. All of them are based on several AWS cloud services. On the edge device, MiniDeep provides Inference Handler (IH) to handle deep learning inference by hosting RESTful API (Application Programming Interface) requests/responses from the end device. Data Provider (DP) is responsible for ground truth collection and dataset synchronization for the cloud. With the deep learning ability, this paper uses the MiniDeep platform to implement a recommendation system for the AI-QSR (Quick Service Restaurant) KIOSK (interactive kiosk) application. AI-QSR uses the MiniDeep platform to train an LSTM (Long Short-Term Memory)-based recommendation system. The LSTM-based recommendation system converts KIOSK UI (User Interface) flow to the flow sequence and performs sequential recommendations with food suggestions. At the end of this paper, the efficiency of the proposed MiniDeep is verified through real experiments.
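A minimal sketch of an LSTM-based next-item recommender over UI-flow sequences of the kind described; the vocabulary size, shapes, and synthetic training data are assumptions for illustration, not the MiniDeep implementation.

import numpy as np
import tensorflow as tf

n_items, seq_len = 50, 10  # hypothetical UI-flow vocabulary and window length
model = tf.keras.Sequential([
    tf.keras.layers.Embedding(n_items, 32),                # item embeddings
    tf.keras.layers.LSTM(64),                              # sequence encoder
    tf.keras.layers.Dense(n_items, activation="softmax"),  # next-item probabilities
])
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy")

X = np.random.randint(0, n_items, (256, seq_len))  # synthetic UI-flow sequences
y = np.random.randint(0, n_items, 256)             # synthetic next item clicked
model.fit(X, y, epochs=1, verbose=0)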
The experiment results have demonstrated that the proposed LSTM-based scheme performs better than the rule-based scheme in terms of purchase hit accuracy, categorical cross-entropy, precision, recall, and F1 score.}, } @article {pmid36015727, year = {2022}, author = {Alzahrani, A and Alyas, T and Alissa, K and Abbas, Q and Alsaawy, Y and Tabassum, N}, title = {Hybrid Approach for Improving the Performance of Data Reliability in Cloud Storage Management.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {16}, pages = {}, pmid = {36015727}, issn = {1424-8220}, mesh = {*Cloud Computing ; Computers ; *Information Storage and Retrieval ; Reproducibility of Results ; }, abstract = {The digital transformation disrupts the various professional domains in different ways, though one aspect is common: the unified platform known as cloud computing. Corporate solutions, IoT systems, analytics, business intelligence, and numerous tools, solutions and systems use cloud computing as a global platform. The migrations to the cloud are increasing, causing it to face new challenges and complexities. One of the essential segments is related to data storage. Data storage on the cloud is neither simplistic nor conventional; rather, it is becoming more and more complex due to the versatility and volume of data. The inspiration of this research is based on the development of a framework that can provide a comprehensive solution for cloud computing storage in terms of replication, and instead of using formal recovery channels, erasure coding has been proposed for this framework, which in the past proved itself as a trustworthy mechanism for the job. The proposed framework provides a hybrid approach to combine the benefits of replication and erasure coding to attain the optimal solution for storage, specifically focused on reliability and recovery. Learning and training mechanisms were developed to provide dynamic structure building in the future and test the data model. RAID architecture is used to formulate different configurations for the experiments. RAID-1 to RAID-6 are divided into two groups, with RAID-1 to 4 in the first group while RAID-5 and 6 are in the second group, further categorized based on FTT, parity, failure range and capacity. Reliability and recovery are evaluated on the rest of the data on the server side, and for the data in transit at the virtual level. The overall results show the significant impact of the proposed hybrid framework on cloud storage performance. RAID-6c at the server side came out as the best configuration for optimal performance. The mirroring for replication using RAID-6 and erasure coding for recovery work in complete coherence provide good results for the current framework while highlighting the interesting and challenging paths for future research.}, } @article {pmid36015699, year = {2022}, author = {Lakhan, A and Mohammed, MA and Abdulkareem, KH and Jaber, MM and Nedoma, J and Martinek, R and Zmij, P}, title = {Delay Optimal Schemes for Internet of Things Applications in Heterogeneous Edge Cloud Computing Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {16}, pages = {}, pmid = {36015699}, issn = {1424-8220}, mesh = {*Cloud Computing ; Delivery of Health Care ; *Internet of Things ; }, abstract = {Over the last decade, the usage of Internet of Things (IoT) enabled applications, such as healthcare, intelligent vehicles, and smart homes, has increased progressively. 
These IoT applications generate delay-sensitive data and require quick resources for execution. Recently, software-defined networks (SDN) have begun to offer an edge computing paradigm (e.g., fog computing) to run these applications with minimum end-to-end delays. Offloading and scheduling are promising schemes of edge computing to run delay-sensitive IoT applications while satisfying their requirements. However, in the dynamic environment, existing offloading and scheduling techniques are not ideal and decrease the performance of such applications. This article formulates the joint offloading and scheduling problem as a combinatorial integer linear program (CILP). We propose a joint task offloading and scheduling (JTOS) framework based on the problem. JTOS consists of task offloading, sequencing, scheduling, searching, and failure components. The study's goal is to minimize the hybrid delay of all applications. The performance evaluation shows that JTOS outperforms all existing baseline methods in hybrid delay for all applications in the dynamic environment. It also shows that JTOS reduces the processing delay by 39% and the communication delay by 35% for IoT applications compared to existing schemes.}, } @article {pmid36009026, year = {2022}, author = {Lin, PC and Tsai, YS and Yeh, YM and Shen, MR}, title = {Cutting-Edge AI Technologies Meet Precision Medicine to Improve Cancer Care.}, journal = {Biomolecules}, volume = {12}, number = {8}, pages = {}, pmid = {36009026}, issn = {2218-273X}, mesh = {Artificial Intelligence ; Computational Biology/methods ; Data Mining ; Genomics/methods ; Humans ; *Neoplasms/diagnosis/genetics/therapy ; *Precision Medicine/methods ; }, abstract = {To provide precision medicine for better cancer care, researchers must work on clinical patient data, such as electronic medical records, physiological measurements, biochemistry, computerized tomography scans, digital pathology, and the genetic landscape of cancer tissue. To interpret big biodata in cancer genomics, an operational flow based on artificial intelligence (AI) models and medical management platforms with high-performance computing must be set up for precision cancer genomics in clinical practice. To work in the fast-evolving fields of patient care, clinical diagnostics, and therapeutic services, clinicians must understand the fundamentals of the AI tool approach. Therefore, the present article covers the following five themes: (i) computational prediction of pathogenic variants of cancer susceptibility genes; (ii) AI model for mutational analysis; (iii) single-cell genomics and computational biology; (iv) text mining for identifying gene targets in cancer; and (v) the NVIDIA graphics processing units, DRAGEN field programmable gate arrays systems and AI medical cloud platforms in clinical next-generation sequencing laboratories. Based on AI medical platforms and visualization, large amounts of clinical biodata can be rapidly copied and understood using an AI pipeline.
The use of innovative AI technologies can deliver more accurate and rapid cancer therapy targets.}, } @article {pmid35996679, year = {2022}, author = {Alsalemi, A and Amira, A and Malekmohamadi, H and Diao, K}, title = {Lightweight Gramian Angular Field classification for edge internet of energy applications.}, journal = {Cluster computing}, volume = {}, number = {}, pages = {1-13}, pmid = {35996679}, issn = {1386-7857}, abstract = {UNLABELLED: With adverse industrial effects on the global landscape, climate change is compelling the global economy to adopt sustainable solutions. The ongoing evolution of energy efficiency targets massive data collection and Artificial Intelligence (AI) for big data analytics. Besides, with the emergence of the Internet of Energy (IoE) paradigm, edge computing is playing a rising role in liberating private data from cloud centralization. In this direction, a creative visual approach to understanding energy data is introduced. Building upon micro-moments, which are time series of small contextual data points, the power of pictorial representations to encapsulate rich information in a small two-dimensional (2D) space is harnessed through a novel Gramian Angular Fields (GAF) classifier for energy micro-moments. Designed with edge computing efficiency in mind, current testing results on the ODROID-XU4 can classify up to 7 million GAF-converted datapoints with ~90% accuracy in less than 30 s, paving the path towards industrial adoption of edge IoE.

SUPPLEMENTARY INFORMATION: The online version contains supplementary material available at 10.1007/s10586-022-03704-1.}, } @article {pmid35994872, year = {2022}, author = {Yeung, S and Kim, HK and Carleton, A and Munro, J and Ferguson, D and Monk, AP and Zhang, J and Besier, T and Fernandez, J}, title = {Integrating wearables and modelling for monitoring rehabilitation following total knee joint replacement.}, journal = {Computer methods and programs in biomedicine}, volume = {225}, number = {}, pages = {107063}, doi = {10.1016/j.cmpb.2022.107063}, pmid = {35994872}, issn = {1872-7565}, mesh = {*Arthroplasty, Replacement, Knee/rehabilitation ; Biomechanical Phenomena ; Gait ; Humans ; Knee Joint/surgery ; *Knee Prosthesis ; *Wearable Electronic Devices ; }, abstract = {BACKGROUND AND OBJECTIVE: Wearable inertial devices integrated with modelling and cloud computing have been widely adopted in the sports sector; however, their use in the health and medical field has yet to be fully realised. To date, there have been no reported studies concerning the use of wearables as a surrogate tool to monitor knee joint loading during recovery following a total knee joint replacement. The objective of this study is to firstly evaluate whether peak tibial acceleration from wearables during gait is a good surrogate metric for computer-model-predicted functional knee loading, and secondly to evaluate whether traditional clinical patient-related outcome measures are consistent with wearable predictions.

METHODS: Following ethical approval, four healthy participants were used to establish the relationship between computer-model-predicted knee joint loading and wearable-measured tibial acceleration. Ten patients who had total knee joint replacements were then followed during their 6-week rehabilitation. Gait analysis, wearable acceleration, computer models of knee joint loading, and patient-related outcome measures, including the Oxford knee score and range of motion, were recorded.

RESULTS: A linear correlation (R[2] of 0.7-0.97) was first observed between peak tibial acceleration (from wearables) and musculoskeletal-model-predicted knee joint loading during gait in healthy participants. Whilst patient-related outcome measures (Oxford knee score and patient range of motion) were observed to improve consistently during rehabilitation, this was not consistent with all patients' tibial acceleration. Only those patients who exhibited increasing peak tibial acceleration over the 6-week rehabilitation were positively correlated with the Oxford knee score (R[2] of 0.51 to 0.97). Wearable-predicted tibial acceleration revealed three patients with consistent knee loading, five patients with improving knee loading, and two patients with declining knee loading during recovery. Hence, 20% of patients did not present with satisfactory joint loading following total knee joint replacement, and this was not detected with current patient-related outcome measures.

CONCLUSIONS: The use of inertial measurement units or wearables in this study provided additional insight into patients who were not exhibiting functional improvements in joint loading, and offers clinicians an 'off-site' early warning metric to identify potential complications during recovery and provide the opportunity for early intervention. This study has important implications for improving patient outcomes, equity, and for those who live in rural regions.}, } @article {pmid35992348, year = {2022}, author = {Xu, J and Xu, Z and Shi, B}, title = {Deep Reinforcement Learning Based Resource Allocation Strategy in Cloud-Edge Computing System.}, journal = {Frontiers in bioengineering and biotechnology}, volume = {10}, number = {}, pages = {908056}, pmid = {35992348}, issn = {2296-4185}, abstract = {The rapid development of mobile device applications puts tremendous pressure on edge nodes with limited computing capabilities, which may cause poor user experience. To solve this problem, collaborative cloud-edge computing is proposed. In collaborative cloud-edge computing, an edge node with limited local resources can rent more resources from a cloud node. According to the nature of the service, cloud services can be divided into private clouds and public clouds. In a private cloud environment, the edge node must allocate resources between the cloud node and the edge node. In a public cloud environment, since public cloud service providers offer various pricing modes for users' different computing demands, the edge node must also select the appropriate pricing mode of cloud service, which is a sequential decision problem. In this study, we model it as a Markov decision process and a parameterized-action Markov decision process, and we propose two resource allocation algorithms, cost-efficient resource allocation with private cloud (CERAI) and cost-efficient resource allocation with public cloud (CERAU), for the collaborative cloud-edge environment, based on the deep reinforcement learning algorithms deep deterministic policy gradient and P-DQN. Next, we evaluated CERAI and CERAU against three typical resource allocation algorithms based on synthetic and real data from Google datasets. The experimental results demonstrate that CERAI and CERAU can effectively reduce the long-term operating cost of collaborative cloud-edge computing in various demanding settings. Our analysis can provide some useful insights for enterprises designing resource allocation strategies in collaborative cloud-edge computing systems.}, } @article {pmid35991356, year = {2022}, author = {de Oliveira, MEG and da Silva, MV and de Almeida, GLP and Pandorfi, H and Oliveira Lopes, PM and Manrique, DRC and Dos Santos, A and Jardim, AMDRF and Giongo, PR and Montenegro, AAA and da Silva Junior, CA and de Oliveira-Júnior, JF}, title = {Investigation of pre and post environmental impact of the lockdown (COVID-19) on the water quality of the Capibaribe and Tejipió rivers, Recife metropolitan region, Brazil.}, journal = {Journal of South American earth sciences}, volume = {118}, number = {}, pages = {103965}, pmid = {35991356}, issn = {0895-9811}, abstract = {The coronavirus pandemic has seriously affected human health, although some improvements in environmental indexes have temporarily occurred due to changes in socio-cultural and economic standards.
The objective of this study was to evaluate the impacts of the coronavirus and the influence of the lockdown associated with rainfall on the water quality of the Capibaribe and Tejipió rivers, Recife, Northeast Brazil, using cloud remote sensing on the Google Earth Engine (GEE) platform. The study was carried out based on eight representative images from Sentinel-2. Among the selected images, two refer to the year 2019 (before the pandemic), three refer to 2020 (during the pandemic), two are from the lockdown period (2020), and one refers to the year 2021. The land use and land cover (LULC) and slope of the study region were determined and classified. Water turbidity data were subjected to descriptive and multivariate statistics. When analyzing the data on LULC for the riparian margin of the Capibaribe and Tejipió rivers, a small permanent preservation area was found, with urban area predominating at almost 100%, so that the deposition of soil particles in the rivers is minimal. The results indicated that turbidity values in the water bodies varied from 6 mg L[-1] up to 40 mg L[-1]. Overall, the reduction in human-based activities generated by the lockdown enabled improvements in the water quality of these urban rivers.}, } @article {pmid35990146, year = {2022}, author = {Li, J and Liu, L}, title = {The Reform of University Education Teaching Based on Cloud Computing and Big Data Background.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {8169938}, pmid = {35990146}, issn = {1687-5273}, mesh = {*Big Data ; *Cloud Computing ; Humans ; Teaching ; Universities ; }, abstract = {In the era of big data and cloud computing, the traditional college teaching model needs to be revolutionized in order to adapt to the needs of the present generation. The traditional college teaching model is currently facing unprecedented, severe challenges, which could optimistically be considered a huge development opportunity. In order to promote the gradual transformation of college teaching toward digitization, intelligence, and modernization, this paper comprehensively analyzes the impact of science and technology on college teaching. It further encourages the omnidirectional and multifaceted amalgamation of education with big data and cloud computing technology, with the objective of improving the overall teaching level of colleges and universities. In order to realize the accurate evaluation of university teaching reform and improve teaching quality, the study presents an evaluation method for university teaching reform based on a deep learning network. It further analyzes the main contents of university teaching reform, establishes the evaluation system of university teaching reform, and then establishes the evaluation model of university education reform. This is achieved by analyzing the relationship between university education reform and its indicators using a deep learning network, followed by simulation experiments pertinent to the evaluation of university education reform.
The results show that this method is helpful in improving the teaching quality.}, } @article {pmid35990138, year = {2022}, author = {Zhao, J and Zhang, L and Zhao, Y}, title = {Informatization of Accounting Systems in Small- and Medium-Sized Enterprises Based on Artificial Intelligence-Enabled Cloud Computing.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {6089195}, pmid = {35990138}, issn = {1687-5273}, mesh = {*Artificial Intelligence ; *Cloud Computing ; }, abstract = {Against the backdrop of China's growing market economy, small- and medium-sized enterprises (SMEs) have taken advantage of this opportunity to develop rapidly. At present, SMEs have become an important part of the market economy. An accounting information management system is an advanced form of management, and improving the degree of accounting informatization is the key to improving the management mode of SMEs. This study applies cloud computing to enterprise accounting management systems. The results show that realizing SME accounting information management can effectively improve economic settlements. With the development of cloud computing, its improvement of accounting management efficiency cannot be ignored. Besides, to address the risks of accounting informatization, enterprises can secure their development by establishing a protective network wall and relying on strict relevant laws and regulations.}, } @article {pmid35989835, year = {2022}, author = {Datta, PK and Chowdhury, SR and Aravindan, A and Nath, S and Sen, P}, title = {Looking for a Silver Lining to the Dark Cloud: A Google Trends Analysis of Contraceptive Interest in the United States Post Roe vs. Wade Verdict.}, journal = {Cureus}, volume = {14}, number = {7}, pages = {e27012}, pmid = {35989835}, issn = {2168-8184}, abstract = {Background In the wake of the recent Roe vs. Wade judgment, we performed a Google Trends analysis to identify the impact of this decision on the interests regarding contraceptive choices in the United States. Methods A Google Trends search between April 6 and July 5, 2022, with the United States as the area of interest, was performed using the five most popular contraception choices. In addition, a second trend search was performed using oral and injectable hormonal birth control measures. Results Trends showed a spike in interest regarding various contraceptive methods immediately following the verdict. The highest increase in interest was noted for "vasectomy," followed by "tubal ligation." With respect to oral and injectable birth control measures, "morning after pill" showed a marked spike in interest. Conclusion This verdict has triggered increased interest in contraceptive practices, which can be translated into better reproductive health with proper public health initiatives.}, } @article {pmid35978910, year = {2022}, author = {Tang, H and Jiang, G and Wang, Q}, title = {Prediction of College Students' Sports Performance Based on Improved BP Neural Network.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {5872384}, pmid = {35978910}, issn = {1687-5273}, mesh = {Algorithms ; *Athletic Performance ; Humans ; Neural Networks, Computer ; *Students ; Universities ; }, abstract = {Sports performance prediction has gradually become a research hotspot in various colleges and universities, and colleges and universities pay more and more attention to the development of college students' comprehensive quality.
Aiming at the problems of low accuracy and slow convergence of existing college students' sports performance prediction models, a method of college students' sports performance prediction based on an improved BP neural network is proposed. First, the students' sports performance data are preprocessed; then a BP neural network is trained on the data samples, with the selection of weights and thresholds in the network optimized through the DE algorithm, to establish an optimal prediction model for college students' sports performance; finally, the prediction model is implemented and run on a cloud computing platform, which speeds up the prediction of sports performance. The results show that the model can improve the accuracy of college students' sports performance prediction, provide more reliable prediction results, and provide valuable information for sports training.}, } @article {pmid35974742, year = {2022}, author = {Deumer, J and Pauw, BR and Marguet, S and Skroblin, D and Taché, O and Krumrey, M and Gollwitzer, C}, title = {Small-angle X-ray scattering: characterization of cubic Au nanoparticles using Debye's scattering formula.}, journal = {Journal of applied crystallography}, volume = {55}, number = {Pt 4}, pages = {993-1001}, pmid = {35974742}, issn = {0021-8898}, abstract = {A versatile software package in the form of a Python extension, named CDEF (computing Debye's scattering formula for extraordinary form factors), is proposed to calculate approximate scattering profiles of arbitrarily shaped nanoparticles for small-angle X-ray scattering (SAXS). CDEF generates a quasi-randomly distributed point cloud in the desired particle shape and then applies the open-source software DEBYER for efficient evaluation of Debye's scattering formula to calculate the SAXS pattern (https://github.com/j-from-b/CDEF). If self-correlation of the scattering signal is not omitted, the quasi-random distribution provides faster convergence compared with a true-random distribution of the scatterers, especially at higher momentum transfer. The usage of the software is demonstrated for the evaluation of scattering data of Au nanocubes with rounded edges, which were measured at the four-crystal monochromator beamline of PTB at the synchrotron radiation facility BESSY II in Berlin. The implementation is fast enough to run on a single desktop computer and perform model fits within minutes. The accuracy of the method was analyzed by comparison with analytically known form factors and verified with another implementation, the SPONGE, based on a similar principle with fewer approximations. Additionally, the SPONGE coupled to McSAS3 allows one to retrieve information on the uncertainty of the size distribution using a Monte Carlo uncertainty estimation algorithm.}, } @article {pmid35972790, year = {2022}, author = {Ngu, AH and Metsis, V and Coyne, S and Srinivas, P and Salad, T and Mahmud, U and Chee, KH}, title = {Personalized Watch-Based Fall Detection Using a Collaborative Edge-Cloud Framework.}, journal = {International journal of neural systems}, volume = {32}, number = {12}, pages = {2250048}, doi = {10.1142/S0129065722500484}, pmid = {35972790}, issn = {1793-6462}, mesh = {Humans ; Aged ; *Accidental Falls/prevention & control ; *Smartphone ; Automation ; Software ; }, abstract = {The majority of current smart health applications are deployed on a smartphone paired with a smartwatch.
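For reference on the CDEF entry above: the Debye scattering formula that CDEF and the SPONGE evaluate over a point cloud of N scatterers is, in its standard form,

    I(q) = \sum_{i=1}^{N} \sum_{j=1}^{N} f_i(q)\, f_j(q)\, \frac{\sin(q r_{ij})}{q r_{ij}}

where f_i(q) are the scattering amplitudes of the individual point scatterers and r_{ij} is the distance between scatterers i and j; the i = j terms are the self-correlation contribution mentioned in that abstract.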
The phone is used as the computation platform or the gateway for connecting to the cloud while the watch is used mainly as the data sensing device. In the case of fall detection applications for older adults, this kind of setup is not very practical since it requires users to always keep their phones in proximity while doing the daily chores. When a person falls, in a moment of panic, it might be difficult to locate the phone in order to interact with the Fall Detection App for the purpose of indicating whether they are fine or need help. This paper demonstrates the feasibility of running a real-time personalized deep-learning-based fall detection system on a smartwatch device using a collaborative edge-cloud framework. In particular, we present the software architecture we used for the collaborative framework, demonstrate how we automate the fall detection pipeline, design an appropriate UI on the small screen of the watch, and implement strategies for the continuous data collection and automation of the personalization process with the limited computational and storage resources of a smartwatch. We also present the usability of such a system with nine real-world older adult participants.}, } @article {pmid35972192, year = {2022}, author = {Poolman, TM and Townsend-Nicholson, A and Cain, A}, title = {Teaching genomics to life science undergraduates using cloud computing platforms with open datasets.}, journal = {Biochemistry and molecular biology education : a bimonthly publication of the International Union of Biochemistry and Molecular Biology}, volume = {50}, number = {5}, pages = {446-449}, doi = {10.1002/bmb.21646}, pmid = {35972192}, issn = {1539-3429}, mesh = {*COVID-19/epidemiology ; *Cloud Computing ; Genomics ; Humans ; Software ; Students ; }, abstract = {The final year of a biochemistry degree is usually a time to experience research. However, laboratory-based research projects were not possible during COVID-19. Instead, we used open datasets to provide computational research projects in metagenomics to biochemistry undergraduates (80 students with limited computing experience). We aimed to give the students a chance to explore any dataset, rather than use a small number of artificial datasets (~60 published datasets were used). To achieve this, we utilized Google Colaboratory (Colab), a virtual computing environment. Colab was used as a framework to retrieve raw sequencing data (analyzed with QIIME2) and generate visualizations. Setting up the environment requires no prior experience; all students have the same drive structure and notebooks can be shared (for synchronous sessions). We also used the platform to combine multiple datasets, perform a meta-analysis, and allowed the students to analyze large datasets with 1000s of subjects and factors. Projects that required increased computational resources were integrated with Google Cloud Compute. In future, all research projects can include some aspects of reanalyzing public data, providing students with data science experience. 
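To give a flavor of the zero-setup workflow described in the entry above, a minimal pair of Colab cells might look like the following sketch; the dataset URL is a placeholder and the QIIME 2 invocation is illustrative, not the course's actual pipeline:

    # Illustrative Colab cells: mount shared storage, fetch a public dataset,
    # and run one QIIME 2 import step (QIIME 2 assumed installed in the runtime).
    from google.colab import drive      # Colab-only helper for persistent storage
    drive.mount('/content/drive')       # every student sees the same drive structure

    !wget -q -O raw_reads.zip https://example.org/dataset.zip   # placeholder URL
    !unzip -q raw_reads.zip -d raw_reads
    !qiime tools import \
        --type 'SampleData[SequencesWithQuality]' \
        --input-path manifest.tsv \
        --input-format SingleEndFastqManifestPhred33V2 \
        --output-path demux.qza

Because each notebook carries its own setup cells, the same workflow can be shared with a whole cohort or re-run against any of the ~60 published datasets.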
Colab is also an excellent environment in which to develop data skills in multiple languages (e.g., Perl, Python, Julia).}, } @article {pmid35970834, year = {2022}, author = {Kim, M and Jiang, X and Lauter, K and Ismayilzada, E and Shams, S}, title = {Secure human action recognition by encrypted neural network inference.}, journal = {Nature communications}, volume = {13}, number = {1}, pages = {4799}, pmid = {35970834}, issn = {2041-1723}, support = {R13 HG009072/HG/NHGRI NIH HHS/United States ; R01 AG066749/AG/NIA NIH HHS/United States ; }, mesh = {*Activities of Daily Living ; Algorithms ; Cloud Computing ; *Computer Security ; Humans ; Neural Networks, Computer ; Pattern Recognition, Automated ; }, abstract = {Advanced computer vision technology can provide near real-time home monitoring to support "aging in place" by detecting falls and symptoms related to seizures and stroke. Affordable webcams, together with cloud computing services (to run machine learning algorithms), can potentially bring significant social benefits. However, such systems have not been deployed in practice because of privacy concerns. In this paper, we propose a strategy that uses homomorphic encryption to resolve this dilemma, which guarantees information confidentiality while retaining action detection. Our protocol for secure inference can distinguish falls from activities of daily living with 86.21% sensitivity and 99.14% specificity, with an average inference latency of 1.2 seconds and 2.4 seconds on real-world test datasets using small and large neural nets, respectively. We show that our method enables a 613x speedup over the latency-optimized LoLa and achieves an average of 3.1x throughput increase in secure inference compared to the throughput-optimized nGraph-HE2.}, } @article {pmid35968406, year = {2022}, author = {Gupta, YP and Mukul, and Gupta, N}, title = {Deep learning model based multimedia retrieval and its optimization in augmented reality applications.}, journal = {Multimedia tools and applications}, volume = {}, number = {}, pages = {1-20}, pmid = {35968406}, issn = {1380-7501}, abstract = {With the uproar of touchless technology, the Virtual Continuum has seen a spark in upcoming products. Today, numerous gadgets support the use of Mixed Reality/Augmented Reality (AR)/Virtual Reality. Head-Mounted Displays (HMDs) such as the HoloLens, Google Lens, and Jio Glass manifest reality into virtuality. Other than HMDs, many organizations develop mobile AR applications to support numerous industries such as medicine, education, and construction. Currently, the major issue lies in the performance parameters of these applications when deployed as mobile applications: graphics performance, latency, and CPU functioning. Many industries pose real-time computation requirements in AR but do not implement an efficient algorithm in their frameworks. Offloading the computation of the deep learning models involved in the application to cloud servers will highly affect the processing parameters. For our use case, we will be using the Multi-Task Cascaded Convolutional Neural Network (MTCNN), a modern tool for face detection that uses a 3-stage neural network detector. Therefore, the communication between the local application and the cloud computing framework needs to be optimized.
The proposed framework defines how the parameters involved in the complete deployment of a mobile AR application can be optimized in terms of multimedia retrieval, its processing, and the augmentation of graphics, eventually enhancing the performance. To implement the proposed algorithm, a mobile application is created in Unity3D. The mobile application virtually augments a 3D model of a skeleton on a target face. After the mentioned experimentation, it is found that the average Media Retrieval Time (1.1471 μs) and Client Time (1.1207 μs) in the local application are extremely low compared with the average API process time (288.934 ms). The highest latency occurs at frame rates higher than 80 fps.}, } @article {pmid35968403, year = {2022}, author = {Finnegan, A and Potenziani, DD and Karutu, C and Wanyana, I and Matsiko, N and Elahi, C and Mijumbi, N and Stanley, R and Vota, W}, title = {Deploying machine learning with messy, real world data in low- and middle-income countries: Developing a global health use case.}, journal = {Frontiers in big data}, volume = {5}, number = {}, pages = {553673}, pmid = {35968403}, issn = {2624-909X}, abstract = {The rapid emergence of machine learning in the form of large-scale computational statistics and accumulation of data offers global health implementing partners an opportunity to adopt, adapt, and apply these techniques and technologies to low- and middle-income country (LMIC) contexts where we work. These benefits reside just out of the reach of many implementing partners because they lack the experience and specific skills to use them. Yet the growth of available analytical systems and exponential growth of data require the global digital health community to become conversant in this technology to continue to make contributions to help fulfill our missions. In this community case study, we describe the approach we took at IntraHealth International to inform the use case for machine learning in global health and development. We found that the data needed to take advantage of machine learning were plentiful and that an international, interdisciplinary team can be formed to collect, clean, and analyze the data at hand using cloud-based (e.g., Dropbox, Google Drive) and open source tools (e.g., R). We organized our work as a "sprint" lasting roughly 10 weeks so that we could rapidly prototype these approaches in order to achieve institutional buy-in. Our initial sprint resulted in two requests in subsequent workplans for analytics using the data we compiled and directly impacted program implementation.}, } @article {pmid35967636, year = {2022}, author = {Liu, S}, title = {Anti-monopoly supervision model of platform economy based on big data and sentiment.}, journal = {Frontiers in psychology}, volume = {13}, number = {}, pages = {953271}, pmid = {35967636}, issn = {1664-1078}, abstract = {With the advent of the cloud computing era, big data technology has also developed rapidly. Due to the huge volume, variety, fast processing speed and low value density of big data, traditional data storage, extraction, transformation and analysis technologies are not suitable, so new solutions for big data application technologies are needed. However, with the development of economic theory and the practice of the market economy, some links in the industrial chain of natural monopoly industries already have a certain degree of competitiveness.
In this context, the article conducts research on the anti-monopoly supervision mode of the platform economy based on big data and sentiment analysis. This paper introduces the main idea of MapReduce: the user specifies a Map function that maps a set of key-value pairs into a set of intermediate key-value pairs, and a concurrent Reduce function that merges all intermediate values associated with the same key. The paper establishes a vector space model and basically realizes the extraction of textual emotional elements. It introduces the theoretical controversy of antitrust regulation of predatory pricing behavior of third-party payment platforms, and conducts model experiments. The experimental results show that the throughput of 40 test users in 1 h of testing is determined by two factors, QPS and the number of concurrent users, where QPS = 40/(60*60) transactions per second. The time for each test user to log in to the system is 10 min, i.e., an average response time of 10*60 s, so the number of concurrent users = QPS * average response time = 40/(60*60)*10*60 = 6.66. This paper has successfully completed the research on the anti-monopoly supervision model of the platform economy based on big data and sentiment analysis.}, } @article {pmid35966392, year = {2022}, author = {Berisha, B and Mëziu, E and Shabani, I}, title = {Big data analytics in Cloud computing: an overview.}, journal = {Journal of cloud computing (Heidelberg, Germany)}, volume = {11}, number = {1}, pages = {24}, pmid = {35966392}, issn = {2192-113X}, abstract = {Big Data and Cloud Computing, as two mainstream technologies, are at the center of concern in the IT field. Every day a huge amount of data is produced from different sources. This data is so big in size that traditional processing tools are unable to deal with it. Besides being big, this data moves fast and has a lot of variety. Big Data is a concept that deals with storing, processing and analyzing large amounts of data. Cloud computing on the other hand is about offering the infrastructure to enable such processes in a cost-effective and efficient manner. Many sectors, including among others businesses (small or large), healthcare, education, etc. are trying to leverage the power of Big Data. In healthcare, for example, Big Data is being used to reduce costs of treatment, predict outbreaks of pandemics, prevent diseases etc. This paper presents an overview of Big Data Analytics as a crucial process in many fields and sectors. We start with a brief introduction to the concept of Big Data, the amount of data generated on a daily basis, and the features and characteristics of Big Data. We then delve into Big Data Analytics, where we discuss issues such as the analytics cycle, analytics benefits, and the movement from the ETL to the ELT paradigm as a result of Big Data analytics in the Cloud. As a case study we analyze Google's BigQuery, which is a fully managed, serverless data warehouse that enables scalable analysis over petabytes of data. As a Platform as a Service (PaaS), it supports querying using ANSI SQL.
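As an illustration of the serverless, ANSI-SQL querying model just described, a minimal client-side sketch with Google's Python library might look like this (the public dataset queried here is an arbitrary example, not one used in the paper):

    # Minimal BigQuery sketch: submit an ANSI SQL query and stream back results.
    from google.cloud import bigquery

    client = bigquery.Client()  # picks up credentials/project from the environment
    query = """
        SELECT name, SUM(number) AS total
        FROM `bigquery-public-data.usa_names.usa_1910_2013`
        GROUP BY name
        ORDER BY total DESC
        LIMIT 10
    """
    for row in client.query(query).result():  # executes serverlessly inside BigQuery
        print(row.name, row.total)

No cluster provisioning or capacity planning is involved, which is the PaaS property the abstract highlights.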
We use the tool to perform different experiments, such as average read, average compute, and average write, on datasets of different sizes.}, } @article {pmid35965760, year = {2022}, author = {Sadad, T and Bukhari, SAC and Munir, A and Ghani, A and El-Sherbeeny, AM and Rauf, HT}, title = {Detection of Cardiovascular Disease Based on PPG Signals Using Machine Learning with Cloud Computing.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {1672677}, pmid = {35965760}, issn = {1687-5273}, mesh = {Bayes Theorem ; *COVID-19/diagnosis ; *Cardiovascular Diseases/diagnosis ; Cloud Computing ; Humans ; Machine Learning ; Pandemics ; Photoplethysmography/methods ; }, abstract = {Hypertension, i.e., abnormally high blood pressure (BP), is a major cause of various cardiovascular diseases (CVDs). The recent COVID-19 pandemic raised the burden on the healthcare system and also limited the resources available to these patients. The treatment of chronic patients, especially those who suffer from CVD, has fallen behind, resulting in increased deaths from CVD around the world. Regular monitoring of BP is crucial to prevent CVDs, as it can be controlled and diagnosed through constant monitoring. To find an effective and convenient procedure for the early diagnosis of CVDs, photoplethysmography (PPG) is recognized as a low-cost technology. Through PPG technology, various cardiovascular parameters, including blood pressure, heart rate, blood oxygen saturation, etc., are detected. Merging the healthcare domain with information technology (IT) is in demand to reduce the rehospitalization of CVD patients. In the proposed model, PPG signals from the Internet of things (IoT)-enabled wearable patient monitoring (WPM) devices are used to monitor the heart rate (HR), etc., of the patients remotely. This article investigates various machine learning techniques such as decision tree (DT), naïve Bayes (NB), and support vector machine (SVM) and the deep learning model one-dimensional convolutional neural network-long short-term memory (1D CNN-LSTM) to develop a system that assists physicians during continuous monitoring, which achieved an accuracy of 99.5% using the PPG-BP data set. The proposed system provides cost-effective, efficient, and fully connected monitoring systems for cardiac patients.}, } @article {pmid35963375, year = {2022}, author = {Palomeque-Mangut, S and Meléndez, F and Gómez-Suárez, J and Frutos-Puerto, S and Arroyo, P and Pinilla-Gil, E and Lozano, J}, title = {Wearable system for outdoor air quality monitoring in a WSN with cloud computing: Design, validation and deployment.}, journal = {Chemosphere}, volume = {307}, number = {Pt 3}, pages = {135948}, doi = {10.1016/j.chemosphere.2022.135948}, pmid = {35963375}, issn = {1879-1298}, mesh = {*Air Pollutants/analysis ; *Air Pollution/analysis ; Cloud Computing ; Environmental Monitoring/methods ; Humans ; Oxides ; *Wearable Electronic Devices ; }, abstract = {Breathing poor-quality air is a global threat at the same level as unhealthy diets or tobacco smoking, so the availability of affordable instruments for the measurement of air pollutant levels is highly relevant for human and environmental protection. We developed an air quality monitoring platform that comprises a wearable device embedding low-cost metal oxide semiconductor (MOS) gas sensors, a PM sensor, and a smartphone for collecting the data using Bluetooth Low Energy (BLE) communication.
Our self-developed app displays information about the air surrounding the user and sends the gathered geolocalized data to a cloud, where the users can map the air quality levels measured in the network. The resulting device is small, lightweight, compact, and belt-worn, with a user-friendly interface and a low cost. The data collected by the sensor array are validated in two experimental setups, first in laboratory-controlled conditions and then against referential pollutant concentrations measured by standard instruments in an outdoor environment. The performance of our air quality platform was tested in a field testing campaign in Barcelona with six moving devices acting as wireless sensor nodes. Devices were trained by means of machine learning algorithms to differentiate between air quality index (AQI) referential concentration values (97% success in the laboratory, 82.3% success in the field). Humidity correction was applied to all data.}, } @article {pmid35958753, year = {2022}, author = {Qi, W and Wang, H and Chen, T}, title = {Multimedia System Design and Data Storage Optimization Based on Machine Learning Algorithm.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {6426551}, pmid = {35958753}, issn = {1687-5273}, mesh = {Algorithms ; *Information Storage and Retrieval ; Machine Learning ; *Multimedia ; Reproducibility of Results ; }, abstract = {With the advancement of science and technology, digital technology and Internet of Things network technology have developed rapidly, and multimedia technology has also been widely used. Multimedia formats such as digital TV and elevator posters are shaking up traditional media. At the same time, many media operation models and multimedia technologies are combined to plan operational strategies, determine operational goals, and change the traditional media structure to achieve commercial profits and societal benefit. However, due to limitations in the existing operating model or unreasonable technical solutions, it is not easy to maximize the value of multimedia technology. An XML-based database is proposed to carry out the business requirements of the transaction network and its business platform, and an integrated management mechanism is analyzed and applied. The framework design includes a parallel quota processing module, an update processing module, a result processing module, and a storage library and database connection management module. These modules run the multiple parts of the system together and complete the database. The development of cloud databases is based on cloud computing. Cloud databases can effectively fill the shortcomings and gaps of traditional database storage and processing, provide high-reciprocity databases for storage and management services, and offer high reliability. Cloud servers use fair weighted round-robin algorithms to achieve load balancing and use the in-memory database Redis to realize terminal data caching.
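The "fair weighted round-robin" balancing mentioned above has several variants; a minimal sketch of the smooth weighted round-robin form (node names and weights invented for illustration) is:

    # Smooth weighted round-robin: each tick, every server gains its weight;
    # the current leader is picked and penalized by the total weight, yielding
    # a smooth interleaving proportional to the weights.
    def smooth_wrr(servers):
        current = {name: 0 for name in servers}
        total = sum(servers.values())
        while True:
            for name, weight in servers.items():
                current[name] += weight
            chosen = max(current, key=current.get)
            current[chosen] -= total
            yield chosen

    picker = smooth_wrr({"node-a": 5, "node-b": 1, "node-c": 1})
    print([next(picker) for _ in range(7)])
    # -> ['node-a', 'node-a', 'node-b', 'node-a', 'node-c', 'node-a', 'node-a']

The smooth variant avoids sending long bursts to the heaviest node, which matters when the balanced backends feed a shared cache such as Redis.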
After comprehensive testing, the system performs all functions normally, with good performance and stable operation.}, } @article {pmid35958748, year = {2022}, author = {Rahman, AU and Asif, RN and Sultan, K and Alsaif, SA and Abbas, S and Khan, MA and Mosavi, A}, title = {ECG Classification for Detecting ECG Arrhythmia Empowered with Deep Learning Approaches.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {6852845}, pmid = {35958748}, issn = {1687-5273}, mesh = {Arrhythmias, Cardiac/diagnosis ; Cloud Computing ; *Deep Learning ; Electrocardiography/methods ; Humans ; Machine Learning ; }, abstract = {According to the World Health Organization (WHO) report, heart disease is spreading throughout the world very rapidly and the situation is becoming alarming in people aged 40 or above (Xu, 2020). Different methods and procedures are adopted to detect and diagnose heart abnormalities, and data scientists are working on finding methods with the required accuracy (Strodthoff et al., 2021). Electrocardiography (ECG) is the procedure for capturing the heart's condition in a waveform. For years, feature-based machine learning techniques have played a vital role in the medical sciences, centralizing data in cloud computing with access throughout the world. Furthermore, deep learning and transfer learning widen the vision, introducing different transfer learning methods that ensure accuracy and time management and detect ECG abnormalities better than previous machine learning methods. Hence, transfer learning has made research more appropriate and innovative. Here, we propose a comparison and accuracy analysis of different transfer learning methods using ECG classification for detecting ECG arrhythmia (CAA-TL). The CAA-TL model performs multiclassification on the ECG dataset, which has been taken from Kaggle. Some of the healthy and unhealthy datasets have been taken in real-time, augmented, and fused with the Kaggle dataset, i.e., the Massachusetts Institute of Technology-Beth Israel Hospital (MIT-BIH) dataset. The CAA-TL worked on the accuracy of heart problem detection by using different methods like ResNet50, AlexNet, and SqueezeNet. All three deep learning methods showed remarkable accuracy, improved over previous research. The comparison of different deep learning approaches with respect to layers widens the research and gives more clarity and accuracy, while also revealing that multiclassification on a massive ECG dataset is time-consuming. The implementation of the proposed method showed an accuracy of 98.8%, 90.08%, and 91% for AlexNet, SqueezeNet, and ResNet50, respectively.}, } @article {pmid35958385, year = {2022}, author = {Jiang, S}, title = {Hotspot Mining in the Field of Library and Information Science under the Environment of Big Data.}, journal = {Journal of environmental and public health}, volume = {2022}, number = {}, pages = {2802835}, pmid = {35958385}, issn = {1687-9813}, mesh = {*Artificial Intelligence ; *Big Data ; Data Mining/methods ; }, abstract = {Currently, with the implementation of big data strategies in countries all over the world, big data has achieved vigorous development in various fields. Big data research and application practices have also rapidly attracted the attention of the library and information field. Objective.
The study explored the current state of research and research hotspots of big data in the library and information field and further discussed future research trends. Methods. In the CNKI database, 16 CSSCI source journals in the discipline of library information and digital library were selected as data sources, and the relevant literature was retrieved with the theme of "big data." The collected literature was screened and expanded according to citation relationships. Then, with the help of Bicomb and SPSS, co-word analysis and cluster analysis were carried out on these literature results. Results. According to the findings of the data analysis, the research hotspots on the topic mainly focus on five major research themes, namely, big data and smart library, big data and intelligence research, data mining and cloud computing, big data and information analysis, and library innovation and services. Limitations. At present, the research scope and coverage on this topic are wide, which leads to the research still staying at the macro level. Conclusions. Big data research will remain one of the hotspots in the future. However, most studies are still limited to the library and information perspective and have not yet analyzed the research status, research hotspots, and development trends in this field from the perspective of the big data knowledge structure. Moreover, machine learning, artificial intelligence, knowledge services, AR, and VR may be new directions for future attention and development.}, } @article {pmid35957481, year = {2022}, author = {Foroughimehr, N and Vilagosh, Z and Yavari, A and Wood, A}, title = {The Impact of Base Cell Size Setup on the Finite Difference Time Domain Computational Simulation of Human Cornea Exposed to Millimeter Wave Radiation at Frequencies above 30 GHz.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {15}, pages = {}, pmid = {35957481}, issn = {1424-8220}, mesh = {Cell Size ; Computer Simulation ; *Cornea ; Electricity ; *Electromagnetic Fields ; Humans ; }, abstract = {Mobile communication has achieved enormous technology innovations over many generations of progression. New cellular technology, including 5G cellular systems, is being deployed and making use of higher frequencies, including the Millimetre Wave (MMW) range (30-300 GHz) of the electromagnetic spectrum. Numerical computational techniques such as the Finite Difference Time Domain (FDTD) method have been used extensively as an effective approach for assessing the biological impacts of electromagnetic fields. This study demonstrates the variation in the accuracy of the FDTD computational simulation system when different meshing sizes are used, by examining the interaction of the critically sensitive human cornea with electromagnetic fields in the 30 to 100 GHz range. Different approaches to base cell size specification were compared. The accuracy of the computation is determined by applying planar sensors showing the detail of the electric field distribution as well as the absolute values of the electric field collected by point sensors. It was found that manually defining the base cell sizes reduces the model size as well as the computation time. However, the accuracy of the computation decreases in an unpredictable way.
The results indicated that using cloud computing capacity plays a crucial role in minimizing the computation time.}, } @article {pmid35957453, year = {2022}, author = {Bahache, M and Tahari, AEK and Herrera-Tapia, J and Lagraa, N and Calafate, CT and Kerrache, CA}, title = {Towards an Accurate Faults Detection Approach in Internet of Medical Things Using Advanced Machine Learning Techniques.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {15}, pages = {}, pmid = {35957453}, issn = {1424-8220}, mesh = {Humans ; Internet ; *Machine Learning ; *Wireless Technology ; }, abstract = {Remotely monitoring people's healthcare is still among the most important research topics for researchers from both industry and academia. In addition, with the emergence of Wireless Body Area Networks (WBANs), it becomes possible to supervise patients through an implanted set of body sensors that can communicate through wireless interfaces. These body sensors are characterized by their tiny sizes and limited resources (power, computing, and communication capabilities), which makes these devices prone to faults and susceptible to damage. Thus, it is necessary to establish an efficient system to detect any faults or anomalies when receiving sensed data. In this paper, we propose a novel, optimized, hybrid solution combining machine learning and statistical techniques for detecting faults in WBANs that does not affect the devices' resources and functionality. Experimental results illustrate that our approach can detect unwanted measurement faults with a high detection accuracy ratio that exceeds 99.62%, and a low mean absolute error of 0.61%, clearly outperforming the existing state-of-the-art solutions.}, } @article {pmid35957452, year = {2022}, author = {Kim, M and Joo, S}, title = {Time-Constrained Adversarial Defense in IoT Edge Devices through Kernel Tensor Decomposition and Multi-DNN Scheduling.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {15}, pages = {}, pmid = {35957452}, issn = {1424-8220}, abstract = {The development of deep learning technology has resulted in great contributions in many artificial intelligence services, but adversarial attack techniques on deep learning models are also becoming more diverse and sophisticated. IoT edge devices adopt cloud-independent, on-device DNN (deep neural network) processing technology to exhibit fast response times. However, if the computational complexity of the denoizer for adversarial noises is high, or if a single embedded GPU is shared by multiple DNN models, adversarial defense at the on-device level is bound to incur long latency. To solve this problem, eDenoizer is proposed in this paper. First, it applies Tucker decomposition to reduce the computational amount required for the convolutional kernel tensors in the denoizer. Second, eDenoizer effectively orchestrates both the denoizer and the model defended by the denoizer simultaneously. In addition, the priority of the CPU side can be projected onto the GPU, which is completely priority-agnostic, so that the delay can be minimized when the denoizer and the defense target model are assigned a high priority.
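For readers unfamiliar with the Tucker step used by eDenoizer, a minimal sketch with the TensorLy library conveys the idea (the kernel shape and target ranks below are illustrative, not the paper's choices):

    # Tucker-decompose a convolutional kernel tensor to shrink its parameter
    # count; shape is (out_ch, in_ch, kH, kW), ranks are example values.
    import numpy as np
    import tensorly as tl
    from tensorly.decomposition import tucker

    kernel = tl.tensor(np.random.rand(64, 32, 3, 3))
    core, factors = tucker(kernel, rank=[16, 8, 3, 3])   # compressed core + 4 factor matrices
    approx = tl.tucker_to_tensor((core, factors))        # reconstruction for an error check
    print(float(tl.norm(kernel - approx) / tl.norm(kernel)))  # relative approximation error

In a network, the core and factors are typically realized as a chain of smaller convolutions, which is where the latency saving comes from.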
Extensive experiments confirmed that the reduction in classification accuracy was very marginal, at most 1.78%, while the inference speed with adversarial defense improved by up to 51.72%.}, } @article {pmid35957450, year = {2022}, author = {Liutkevičius, A and Morkevičius, N and Venčkauskas, A and Toldinas, J}, title = {Distributed Agent-Based Orchestrator Model for Fog Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {15}, pages = {}, pmid = {35957450}, issn = {1424-8220}, mesh = {*Algorithms ; *Cloud Computing ; Delivery of Health Care ; }, abstract = {Fog computing is an extension of cloud computing that provides computing services closer to user end-devices at the network edge. One of the challenging topics in fog networks is the placement of tasks on fog nodes to obtain the best performance and resource usage. The process of mapping tasks for resource-constrained devices is known as the service or fog application placement problem (SPP, FAPP). The highly dynamic fog infrastructures with mobile user end-devices and constantly changing fog node resources (e.g., battery life, security level) require distributed/decentralized service placement (orchestration) algorithms to ensure better resilience, scalability, and optimal real-time performance. However, recently proposed service placement algorithms rarely support user end-device mobility, constantly changing fog node resource availability, and the ability to recover from fog node failures at the same time. In this article, we propose a distributed agent-based orchestrator model capable of flexible service provisioning in a dynamic fog computing environment by considering the constraints on the central processing unit (CPU), memory, battery level, and security level of fog nodes. Distributing the decision-making to multiple orchestrator fog nodes instead of relying on the mapping of a single central entity helps to spread the load and increase scalability and, most importantly, resilience. The prototype system based on the proposed orchestrator model was implemented and tested with real hardware. The results show that the proposed model is efficient in terms of response latency and computational overhead, which are minimal compared to the placement algorithm itself. The research confirms that the proposed orchestrator approach is suitable for various fog network applications when scalability, mobility, and fault tolerance must be guaranteed.}, } @article {pmid35957307, year = {2022}, author = {Ismail, L and Buyya, R}, title = {Artificial Intelligence Applications and Self-Learning 6G Networks for Smart Cities Digital Ecosystems: Taxonomy, Challenges, and Future Directions.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {15}, pages = {}, pmid = {35957307}, issn = {1424-8220}, mesh = {*Artificial Intelligence ; Cities ; *Ecosystem ; Technology/methods ; Wireless Technology ; }, abstract = {The recent upsurge of smart cities' applications and their building blocks in terms of the Internet of Things (IoT), Artificial Intelligence (AI), federated and distributed learning, big data analytics, blockchain, and edge-cloud computing has urged the design of the upcoming 6G network generation, due to their stringent requirements in terms of the quality of services (QoS), availability, and dependability to satisfy a Service-Level-Agreement (SLA) for the end users.
Industries and academia have started to design 6G networks and propose the use of AI in their protocols and operations. Published papers on the topic discuss either the requirements of applications via a top-down approach or the network requirements in terms of agility, performance, and energy saving using a bottom-up perspective. In contrast, this paper adopts a holistic outlook, considering the applications, the middleware, the underlying technologies, and the 6G network systems towards an intelligent and integrated computing, communication, coordination, and decision-making ecosystem. In particular, we discuss the temporal evolution of the wireless network generations' development to capture the applications, middleware, and technological requirements that led to the development of the network generation systems from 1G to AI-enabled 6G and its employed self-learning models. We provide a taxonomy of the technology-enabled smart city applications' systems and present insights into those systems for the realization of a trustworthy and efficient smart city ecosystem. We propose future research directions in 6G networks for smart city applications.}, } @article {pmid35957281, year = {2022}, author = {Alwaheidi, MKS and Islam, S}, title = {Data-Driven Threat Analysis for Ensuring Security in Cloud Enabled Systems.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {15}, pages = {}, pmid = {35957281}, issn = {1424-8220}, mesh = {*Cloud Computing ; Computer Security ; *Ecosystem ; }, abstract = {Cloud computing offers many benefits, including business flexibility, scalability, and cost savings, but despite these benefits, there exist threats that require adequate attention for secure service delivery. Threats in a cloud-based system need to be considered from a holistic perspective that accounts for data, application, infrastructure and service, which can pose potential risks. Data certainly plays a critical role within the whole ecosystem, and organisations should take account of and protect data from any potential threats. Due to the variation of data types, status, and location, understanding the potential security concerns in cloud-based infrastructures is more complex than in a traditional system. The existing threat modeling approaches lack the ability to analyse and prioritise data-related threats. The main contribution of the paper is a novel data-driven threat analysis (d-TM) approach for cloud-based systems. The main motivation of d-TM is the integration of data from three levels of abstraction, i.e., management, control, and business, and three phases, i.e., storage, process, and transmittance, within each level. The d-TM provides a systematic flow of attack surface analysis from the user agent to the cloud service provider based on the threat layers in cloud computing. Finally, a cloud-based use case scenario was used to demonstrate the applicability of the proposed approach. The result shows that d-TM revealed four critical threats out of the seven threats based on the identified assets.
The threats targeted management and business data in general, while targeting data in process and transit more specifically.}, } @article {pmid35945076, year = {2022}, author = {Jones, HE and Wilson, PB}, title = {Progress and opportunities through use of genomics in animal production.}, journal = {Trends in genetics : TIG}, volume = {38}, number = {12}, pages = {1228-1252}, doi = {10.1016/j.tig.2022.06.014}, pmid = {35945076}, issn = {0168-9525}, mesh = {Animals ; Humans ; *Animal Husbandry ; *Livestock/genetics ; Animal Welfare ; Genomics ; Genome/genetics ; }, abstract = {The rearing of farmed animals is a vital component of global food production systems, but its impact on the environment, human health, animal welfare, and biodiversity is being increasingly challenged. Developments in genetic and genomic technologies have had a key role in improving the productivity of farmed animals for decades. Advances in genome sequencing, annotation, and editing offer a means not only to continue that trend, but also, when combined with advanced data collection, analytics, cloud computing, appropriate infrastructure, and regulation, to take precision livestock farming (PLF) and conservation to an advanced level. Such an approach could generate substantial additional benefits in terms of reducing use of resources, health treatments, and environmental impact, while also improving animal health and welfare.}, } @article {pmid35942755, year = {2022}, author = {Chiang, TW and Chiang, DL and Chen, TS and Lin, FY and Shen, VRL and Wang, MC}, title = {Novel Lagrange interpolation polynomials for dynamic access control in a healthcare cloud system.}, journal = {Mathematical biosciences and engineering : MBE}, volume = {19}, number = {9}, pages = {9200-9219}, doi = {10.3934/mbe.2022427}, pmid = {35942755}, issn = {1551-0018}, mesh = {Algorithms ; Cloud Computing ; *Computer Security ; *Confidentiality ; Delivery of Health Care ; }, abstract = {The authority of user personal health records (PHRs) is usually determined by the owner of a cloud computing system. When a PHR file is accessed, a dynamic access control algorithm must be used to authenticate the users. The proposed dynamic access control algorithm is based on a novel Lagrange interpolation polynomial with timestamps, mainly functioning to authenticate the users with key information. Moreover, the inclusion of timestamps allows user access within an approved time slot to enhance the security of the healthcare cloud system. According to the security analysis results, this healthcare cloud system can effectively resist common attacks, including external attacks, internal attacks, collaborative attacks and equation-based attacks. 
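For orientation on the entry above: the classical univariate Lagrange interpolation polynomial, on which such key-assignment schemes build, is

    L(x) = \sum_{i=0}^{m} y_i \prod_{\substack{j=0 \\ j \neq i}}^{m} \frac{x - x_j}{x_i - x_j},

the unique degree-m polynomial passing through the m+1 points (x_i, y_i). In access-control constructions of this kind, authorized key material is typically encoded in the interpolated values; as the abstract indicates, timestamps are additionally folded in so that a correct evaluation is only possible within an approved time slot.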
Furthermore, the overall computational complexity of establishing and updating the polynomials is O(n*m*(log m)^2), which is a promising result, where m denotes the degree of the polynomial G(x, y) and n denotes the number of secure users in the hierarchy.}, } @article {pmid35942754, year = {2022}, author = {Cui, D and Huang, H and Peng, Z and Li, Q and He, J and Qiu, J and Luo, X and Ou, J and Fan, C}, title = {Next-generation 5G fusion-based intelligent health-monitoring platform for ethylene cracking furnace tube.}, journal = {Mathematical biosciences and engineering : MBE}, volume = {19}, number = {9}, pages = {9168-9199}, doi = {10.3934/mbe.2022426}, pmid = {35942754}, issn = {1551-0018}, mesh = {*Artificial Intelligence ; Automation ; Ethylenes ; *Intelligence ; }, abstract = {This study aimed to develop a 5G + "mixed computing" + deep learning-based next-generation intelligent health-monitoring platform for an ethylene cracking furnace tube based on 5G communication technology, with the goal of improving the health management of the cracking furnace tube, a key component of ethylene production, and focusing on the key common technical difficulties of tube outer-surface temperature sensing and tube slagging diagnosis. It also integrated the edge-fog-cloud "mixed computing" technology and deep learning technology in artificial intelligence, achieving a higher degree of automation and intelligence in research and development and greater versatility in an industrial environment. The platform included a 5G-based tube intelligent temperature-measuring device, a 5G-based intelligent peep door gearing, a 5G-based edge-fog-cloud collaboration mechanism, and a mixed deep learning-related application. The platform enhanced the automation and intelligence of the enterprise, which could not only promote the quality and efficiency of the enterprise but also protect the safe operation of the cracking furnace device and lead the technological progress and transformation and upgrading of the industry through the application.}, } @article {pmid35942147, year = {2022}, author = {Zhang, T and Han, Q and Zhang, Z}, title = {Sport Resource Classification Algorithm for Health Promotion Based on Cloud Computing: Rhythmic Gymnastics' Example.}, journal = {Journal of environmental and public health}, volume = {2022}, number = {}, pages = {2587169}, pmid = {35942147}, issn = {1687-9813}, mesh = {Algorithms ; *Cloud Computing ; *Gymnastics ; Health Promotion ; }, abstract = {In the processing of rhythmic gymnastics resources, there are inefficiency problems such as confusion of teaching resources and lack of individuation. To improve access to teaching resource data, such as videos and documents, this study proposes a cloud computing-based personalized rhythmic gymnastics teaching resource classification algorithm for health promotion. First, a personalized rhythmic gymnastics teaching resource database is designed based on cloud computing technology, and the teaching resources in the database are preprocessed to obtain a meta-sample set. Then, the characteristics of teaching resources are selected by the information acquisition method, and a vector space model is established to calculate the similarity of teaching resources. Finally, the distance-weighted k-NN method is used to classify the teaching resources for health promotion.
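The distance-weighted k-NN step described above has a compact off-the-shelf form; a sketch with scikit-learn (toy documents and labels invented purely for illustration) is:

    # Distance-weighted k-NN over a vector space model of teaching resources.
    from sklearn.feature_extraction.text import TfidfVectorizer
    from sklearn.neighbors import KNeighborsClassifier

    docs   = ["ball routine demonstration video",
              "ribbon routine demonstration video",
              "hoop apparatus rules document"]
    labels = ["video", "video", "document"]

    vec = TfidfVectorizer()
    X = vec.fit_transform(docs)          # vector space model of the resources
    clf = KNeighborsClassifier(n_neighbors=2, weights="distance").fit(X, labels)
    print(clf.predict(vec.transform(["ball apparatus rules document"])))

Here "distance" weighting means each of the k neighbors votes with weight 1/d, so nearer resources dominate the classification.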
The experimental results show that the proposed algorithm achieves high classification accuracy, high recall, and a high F-measure, verifying its effectiveness.}, } @article {pmid35937323, year = {2022}, author = {Huang, C and Li, W and Zhang, Z and Hua, X and Yang, J and Ye, J and Duan, L and Liang, X and Yang, W}, title = {An Intelligent Rice Yield Trait Evaluation System Based on Threshed Panicle Compensation.}, journal = {Frontiers in plant science}, volume = {13}, number = {}, pages = {900408}, pmid = {35937323}, issn = {1664-462X}, abstract = {High-throughput phenotyping of yield-related traits is meaningful and necessary for rice breeding and genetic study. The conventional method for rice yield-related trait evaluation faces the problems of rice threshing difficulties, measurement process complexity, and low efficiency. To solve these problems, a novel intelligent system, which includes an integrated threshing unit, grain conveyor-imaging units, a threshed panicle conveyor-imaging unit, and specialized image analysis software, has been proposed to achieve rice yield trait evaluation with high throughput and high accuracy. To improve the threshed panicle detection accuracy, the Region of Interest Align, Convolution Batch normalization activation with Leaky Relu module, Squeeze-and-Excitation unit, and optimal anchor size have been adopted to optimize the Faster-RCNN architecture, termed 'TPanicle-RCNN'; the new model achieved an F1 score of 0.929, an increase of 0.044, and was robust to indica and japonica varieties. Additionally, AI cloud computing was adopted, which dramatically reduced the system cost and improved flexibility. To evaluate the system accuracy and efficiency, 504 panicle samples were tested, and the total spikelet measurement error decreased from 11.44% to 2.99% with threshed panicle compensation. The average measuring efficiency was approximately 40 s per sample, approximately twenty times faster than manual measurement. In this study, an automatic and intelligent system for rice yield-related trait evaluation was developed, which would provide an efficient and reliable tool for rice breeding and genetic research.}, } @article {pmid35931501, year = {2022}, author = {Kumon, RE}, title = {Teaching an advanced undergraduate acoustics laboratory without a laboratory: Course developments enabling teaching during the COVID-19 pandemic.}, journal = {The Journal of the Acoustical Society of America}, volume = {152}, number = {1}, pages = {9}, doi = {10.1121/10.0011808}, pmid = {35931501}, issn = {1520-8524}, mesh = {Acoustics ; *COVID-19/epidemiology ; Humans ; Learning ; Pandemics ; Students ; Teaching ; }, abstract = {This paper describes ongoing developments to an advanced laboratory course at Kettering University, which is targeted to students in engineering and engineering physics and emphasizes theoretical, computational, and experimental components in the context of airborne acoustics and modal testing [cf. D. A. Russell and D. O. Ludwigsen, J. Acoust. Soc. Am. 131, 2515-2524 (2012)]. These developments have included a transition to electronic laboratory notebooks and cloud-based computing resources, incorporation of updated hardware and software, and creation and testing of a multiple-choice assessment instrument for the course.
When Kettering University suddenly shifted to exclusively remote teaching in March 2020 due to the COVID-19 pandemic, many of these changes proved to be essential for enabling rapid adaptation to a situation in which a laboratory was not available for the course. Laboratory activities were rewritten by crowdsourcing archived data, videos were incorporated to illustrate dynamic phenomena, and computer simulations were used to retain student interactivity. The comparison of multiple measures, including the assessment instrument, team-based grades on project papers, and individual grades on final exams, indicates that most students were successful at learning the course material and adapting to work on team-based projects in the midst of challenging remote learning conditions.}, } @article {pmid35930042, year = {2022}, author = {Mokhtarzadeh, H and Jiang, F and Zhao, S and Malekipour, F}, title = {OpenColab project: OpenSim in Google colaboratory to explore biomechanics on the web.}, journal = {Computer methods in biomechanics and biomedical engineering}, volume = {}, number = {}, pages = {1-9}, doi = {10.1080/10255842.2022.2104607}, pmid = {35930042}, issn = {1476-8259}, abstract = {OpenSim is an open-source biomechanical package with a variety of applications. It is available for many users with bindings in MATLAB, Python, and Java via its application programming interfaces (APIs). Although the developers described well the OpenSim installation on different operating systems (Windows, Mac, and Linux), it is time-consuming and complex since each operating system requires a different configuration. This project aims to demystify the development of neuro-musculoskeletal modeling in OpenSim with zero configuration on any operating system for installation (thus cross-platform), easy to share models while accessing free graphical processing units (GPUs) on a web-based platform of Google Colab. To achieve this, OpenColab was developed where OpenSim source code was used to build a Conda package that can be installed on the Google Colab with only one block of code in less than 7 min. To use OpenColab, one requires a connection to the internet and a Gmail account. Moreover, OpenColab accesses vast libraries of machine learning methods available within free Google products, e.g. TensorFlow. Next, we performed an inverse problem in biomechanics and compared OpenColab results with OpenSim graphical user interface (GUI) for validation. The outcomes of OpenColab and GUI matched well (r≥0.82). OpenColab takes advantage of the zero-configuration of cloud-based platforms, accesses GPUs, and enables users to share and reproduce modeling approaches for further validation, innovative online training, and research applications. Step-by-step installation processes and examples are available at: https://simtk.org/projects/opencolab.}, } @article {pmid35928494, year = {2022}, author = {Amanat, A and Rizwan, M and Maple, C and Zikria, YB and Almadhor, AS and Kim, SW}, title = {Blockchain and cloud computing-based secure electronic healthcare records storage and sharing.}, journal = {Frontiers in public health}, volume = {10}, number = {}, pages = {938707}, pmid = {35928494}, issn = {2296-2565}, mesh = {*Blockchain ; Cloud Computing ; Delivery of Health Care ; Electronic Health Records ; Electronics ; Humans ; }, abstract = {Healthcare information is essential for both service providers and patients. Further secure sharing and maintenance of Electronic Healthcare Records (EHR) are imperative. 
EHR systems in healthcare have traditionally relied on a centralized system (e.g., the cloud) to exchange health data across healthcare stakeholders, which may expose private and sensitive patient information. EHR systems have struggled to meet the demands of several stakeholders and systems in terms of safety, isolation, and other regulatory constraints. Blockchain is a distributed, decentralized ledger technology that can provide secured, validated, and immutable data-sharing facilities. Blockchain creates a distributed ledger system using cryptographic techniques (hashes) that are consistent and permit actions to be carried out in a distributed manner without needing a centralized authority. Data exploitation is difficult, and readily evident, in a blockchain network due to its immutability. We propose an architecture based on blockchain technology that authenticates the user identity using a Proof of Stake (POS) consensus mechanism and the Secure Hash Algorithm (SHA256) to secure EHR sharing among different electronic healthcare systems. An Elliptic Curve Digital Signature Algorithm (ECDSA) is used to verify EHR sensors to assemble and transmit data to cloud infrastructure. Results indicate that the proposed solution performs exceptionally well when compared with existing solutions, which include Proof-of-Work (POW), Secure Hash Algorithm (SHA-1), and Message Digest (MD5), in terms of power consumption, authenticity, and security of healthcare records.}, } @article {pmid35923220, year = {2022}, author = {Qi, L and Wu, F and Ge, Z and Sun, Y}, title = {DeepMatch: Toward Lightweight in Point Cloud Registration.}, journal = {Frontiers in neurorobotics}, volume = {16}, number = {}, pages = {891158}, pmid = {35923220}, issn = {1662-5218}, abstract = {From source to target, point cloud registration solves for a rigid body transformation that aligns the two point clouds. Iterative Closest Point (ICP) and other traditional algorithms require a long registration time and are prone to falling into local optima. Learning-based algorithms such as Deep Closest Point (DCP) perform better than those traditional algorithms and escape from local optimality. However, they are still not perfectly robust and rely on complex model designs because the extracted local features are susceptible to noise. In this study, we propose a lightweight point cloud registration algorithm, DeepMatch. DeepMatch extracts a point feature for each point, which is a spatial structure composed of each point itself, the center point of the point cloud, and the farthest point of each point. Because of the superiority of this per-point feature, the computing resources and time required by DeepMatch to complete the training are less than one-tenth of other learning-based algorithms with similar performance. In addition, experiments show that our algorithm achieves state-of-the-art (SOTA) performance on clean, Gaussian-noise, and unseen-category datasets. Among them, on the unseen categories, compared to the previous best learning-based point cloud registration algorithms, the registration error of DeepMatch is reduced by two orders of magnitude, achieving the same performance as on the categories seen in training, which proves DeepMatch is generalizable in point cloud registration tasks.
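As an aside, the per-point feature described above is easy to reproduce in a few lines; the following NumPy sketch reflects one reading of the abstract, not the authors' reference code:

```python
# NumPy sketch of the per-point feature described above: each point is
# paired with the cloud centroid and with its own farthest point.
import numpy as np

def per_point_features(points):
    """points: (N, 3) array -> (N, 9) rows of [point | centroid | farthest]."""
    centroid = points.mean(axis=0)
    dists = np.linalg.norm(points[:, None] - points[None], axis=-1)  # (N, N)
    farthest = points[dists.argmax(axis=1)]  # farthest point of each point
    return np.hstack([points, np.tile(centroid, (len(points), 1)), farthest])

cloud = np.random.default_rng(1).random((100, 3))  # a synthetic point cloud
print(per_point_features(cloud).shape)             # (100, 9)
```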
Finally, only our DeepMatch achieves 100% recall on all three test sets.}, } @article {pmid35922695, year = {2022}, author = {Pouya, S and Aghlmand, M}, title = {Evaluation of urban green space per capita with new remote sensing and geographic information system techniques and the importance of urban green space during the COVID-19 pandemic.}, journal = {Environmental monitoring and assessment}, volume = {194}, number = {9}, pages = {633}, pmid = {35922695}, issn = {1573-2959}, mesh = {*COVID-19/epidemiology ; Cities ; Environmental Monitoring/methods ; *Geographic Information Systems ; Humans ; Pandemics ; Parks, Recreational ; Remote Sensing Technology ; Urbanization ; }, abstract = {A recently conducted study by the Centers for Disease Control and Prevention encouraged public access to urban green space during the prevalence of COVID-19, in that exposure to urban green space can positively affect physical and mental health, including reduced rates of heart disease, obesity, stress, stroke, and depression. COVID-19 has foregrounded the inadequacy of green space in populated cities. It has also highlighted existing inequities in access to urban green space, both quantitative and qualitative. In this regard, it seems that one of the problems related to Malatya is the uncoordinated distribution of green space in different parts of the city. Therefore, knowing the quantity and quality of these spaces in each region can play an effective role in urban planning. The aim of the present study has been to evaluate urban green space per capita and to investigate its distribution based on the population of the districts of Battalgazi county in Malatya city through an integrated methodology (remote sensing and geographic information system). Accordingly, different indexes (NDVI, EVI, PSSR, GNDVI, and NDWI) were calculated in Google Earth Engine from images of the Sentinel-1 and PlanetScope satellites. The data set was prepared, and then, by combining the different data, classification was performed with a support vector machine algorithm. From the maps obtained, the one with the highest accuracy was selected (overall accuracy: 94.43; kappa coefficient: 90.5). Finally, using this map, the per capita distribution of urban green space and its functions in Battalgazi county and its districts were evaluated. The results of the study showed that the existing urban green spaces in Battalgazi/Malatya were not distributed evenly across the districts. Per capita urban green space exceeds 9 m[2] in twenty-four districts and falls below 9 m[2] in twenty-three. This study recommends that city planners and landscape designers in Türkiye replan and redesign urban green spaces for quality and equal distribution, especially during and following the COVID-19 pandemic. Additionally, the Google Earth Engine cloud system, which has revolutionized GIS and remote sensing, is recommended for land use/land cover modeling. It is straightforward to access information and analyze it quickly in Google Earth Engine.
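For flavour, a minimal Earth Engine Python sketch of computing a spectral index in the cloud; Sentinel-2 and NDVI serve purely as an API illustration, and the buffered point is a rough, hypothetical stand-in for the study area, so the study's own imagery and index set differ:

```python
# Minimal Google Earth Engine sketch of cloud-side index computation.
# Everything here is illustrative: dataset, dates, and AOI are assumptions.
import ee

ee.Initialize()  # assumes an already-authenticated Earth Engine account
aoi = ee.Geometry.Point([38.31, 38.35]).buffer(5000)  # hypothetical AOI

image = (ee.ImageCollection("COPERNICUS/S2_SR")
         .filterBounds(aoi)
         .filterDate("2021-06-01", "2021-09-01")
         .sort("CLOUDY_PIXEL_PERCENTAGE")  # least cloudy scene first
         .first())

ndvi = image.normalizedDifference(["B8", "B4"]).rename("NDVI")
print(ndvi.reduceRegion(ee.Reducer.mean(), aoi, 10).getInfo())
```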
The code published with this study makes it possible to conduct further relevant studies.}, } @article {pmid35920716, year = {2022}, author = {Petrović, D and Scott, JS and Bodnarchuk, MS and Lorthioir, O and Boyd, S and Hughes, GM and Lane, J and Wu, A and Hargreaves, D and Robinson, J and Sadowski, J}, title = {Virtual Screening in the Cloud Identifies Potent and Selective ROS1 Kinase Inhibitors.}, journal = {Journal of chemical information and modeling}, volume = {62}, number = {16}, pages = {3832-3843}, doi = {10.1021/acs.jcim.2c00644}, pmid = {35920716}, issn = {1549-960X}, mesh = {*Carcinoma, Non-Small-Cell Lung ; Cloud Computing ; Drug Evaluation, Preclinical ; Humans ; *Lung Neoplasms ; Molecular Docking Simulation ; Prospective Studies ; Protein Kinase Inhibitors/chemistry/pharmacology ; Protein-Tyrosine Kinases ; Proto-Oncogene Proteins ; Receptor Protein-Tyrosine Kinases ; }, abstract = {ROS1 rearrangements account for 1-2% of non-small cell lung cancer patients, yet there are no specifically designed, selective ROS1 therapies in the clinic. Previous knowledge of potent ROS1 inhibitors with selectivity over TrkA, a selected antitarget, enabled virtual screening as a hit finding approach in this project. The ligand-based virtual screening was focused on identifying molecules with a similar 3D shape and pharmacophore to the known actives. To that end, we turned to the AstraZeneca virtual library, estimated to cover 10[15] synthesizable make-on-demand molecules. We used cloud computing-enabled FastROCS technology to search the enumerated 10[10] subset of the full virtual space. A small number of specific libraries were prioritized based on the compound properties and a medicinal chemistry assessment and further enumerated with available building blocks. Following the docking evaluation to the ROS1 structure, the most promising hits were synthesized and tested, resulting in the identification of several potent and selective series. The best among them gave a nanomolar ROS1 inhibitor with over 1000-fold selectivity over TrkA and, from the preliminary established SAR, these have the potential to be further optimized. Our prospective study describes how conceptually simple shape-matching approaches can identify potent and selective compounds by searching ultralarge virtual libraries, demonstrating the applicability of such workflows and their importance in early drug discovery.}, } @article {pmid35912308, year = {2022}, author = {Qie, D}, title = {The Relevance of Virtual-Assisted Early Childhood Education and Occupational Psychotherapy Based on Emotional Interaction.}, journal = {Occupational therapy international}, volume = {2022}, number = {}, pages = {2785987}, pmid = {35912308}, issn = {1557-0703}, mesh = {Child, Preschool ; Emotions ; Health Education ; Humans ; *Occupational Therapy ; Psychotherapy ; School Teachers ; }, abstract = {This paper presents an in-depth study and analysis of the relevance of early childhood education to occupational psychotherapy using a virtual-assisted affective interaction approach. Starting from the educational theory of interactive cognitive psychology, the theoretical basis of parent-child picture book education for interactive learning is explored, as well as its theoretical development after the introduction of AR technology. Firstly, the analysis of young children's emotions involves massive image processing, and the use of a cloud computing architecture leads to high latency, while young children's safety is a latency-sensitive service.
Secondly, face recognition accuracy based on static images is low due to problems such as the inconspicuous facial features of toddlers and low-quality kindergarten surveillance videos. In this paper, a face identity correction model based on location features is proposed, and the superiority of the model is demonstrated through experiments. Finally, this paper analyzes and mines the emotional data of young children. Kindergarten teachers' awareness of early childhood mental health education generally rose with their professional titles, with significant differences across the seven dimensions examined (early childhood mental health and the purpose and meaning, implementers, targets, content, pathways, and effects of early childhood mental health education); significant differences also existed between teachers from different types of kindergarten. Therefore, this paper proposes a face identity correction model based on position information, which considers both the correlation between pixel values in the spatial domain and the correlation between frames in the temporal domain. An emotion analysis system for kindergartens was developed and put into use to meet the needs of monitoring the safety of young children and evaluating early childhood education, and it has received good feedback from users, demonstrating the system's effectiveness.}, } @article {pmid35910077, year = {2022}, author = {Lutnick, B and Manthey, D and Becker, JU and Zuckerman, JE and Rodrigues, L and Jen, KY and Sarder, P}, title = {A tool for federated training of segmentation models on whole slide images.}, journal = {Journal of pathology informatics}, volume = {13}, number = {}, pages = {100101}, pmid = {35910077}, issn = {2229-5089}, abstract = {The largest bottleneck to the development of convolutional neural network (CNN) models in the computational pathology domain is the collection and curation of diverse training datasets. Training CNNs requires large cohorts of image data, and model generalizability is dependent on training data heterogeneity. Including data from multiple centers enhances the generalizability of CNN-based models, but this is hindered by the logistical challenges of sharing medical data. In this paper, we explore the feasibility of training our recently developed cloud-based segmentation tool (Histo-Cloud) using federated learning. Using a dataset of renal tissue biopsies, we show that federated training to segment interstitial fibrosis and tubular atrophy (IFTA) using datasets from three institutions performs no differently from training with the data pooled on one server, when tested on a fourth (holdout) institution's data.
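As an aside, a minimal NumPy sketch of federated averaging (FedAvg), the canonical aggregation rule behind this style of multi-institution training, follows; the abstract does not state Histo-Cloud's exact rule, so this is illustrative only:

```python
# NumPy sketch of FedAvg: average per-site model weights, weighted by each
# site's sample count. The three "institutions" below hold random toy
# weights, not pathology models.
import numpy as np

def fedavg(site_weights, site_sizes):
    """Average per-layer weights across sites, weighted by sample counts."""
    total = sum(site_sizes)
    return [
        sum(w[k] * (n / total) for w, n in zip(site_weights, site_sizes))
        for k in range(len(site_weights[0]))
    ]

rng = np.random.default_rng(2)
sites = [[rng.random((4, 4)), rng.random(4)] for _ in range(3)]  # 2-layer toys
global_model = fedavg(sites, site_sizes=[120, 80, 200])
print([layer.shape for layer in global_model])  # [(4, 4), (4,)]
```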
Further, training a model to segment glomeruli for a federated dataset (split by staining) demonstrates similar performance.}, } @article {pmid35909867, year = {2022}, author = {Zhang, H and Feng, Y and Wang, L}, title = {Cloud Computing to Tourism Economic Data Scheduling Algorithm under the Background of Image and Video.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {3948221}, pmid = {35909867}, issn = {1687-5273}, mesh = {Algorithms ; *Cloud Computing ; *Tourism ; }, abstract = {With the rapid development of image video and tourism economy, tourism economic data are gradually becoming big data. Therefore, how to schedule between data has become a hot topic. This paper first summarizes the research results on image video, cloud computing, tourism economy, and data scheduling algorithms. Secondly, the origin, structure, development, and service types of cloud computing are expounded in detail. And in order to solve the problem of tourism economic data scheduling, this paper regards the completion time and cross-node transmission delay as the constraints of tourism economic data scheduling. The constraint model of data scheduling is established, the fitness function is improved on the basis of an artificial immune algorithm combined with the constraint model, and the directional recombination of excellent antibodies is carried out by using the advantages of gene recombination so as to obtain the optimal solution to the problem more appropriately. When the resource node scale is 100, the response time of EDSA is 107.92 seconds.}, } @article {pmid35909865, year = {2022}, author = {Yan, S and Shi, L and Wang, L}, title = {Influence of the Urban Built Environment on Physical and Mental Health of the Elderly under the Background of Big Data.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {4266723}, pmid = {35909865}, issn = {1687-5273}, mesh = {Aged ; *Big Data ; Built Environment ; Cities ; City Planning ; Humans ; Male ; *Mental Health ; }, abstract = {With the advent of the information technology revolution and the Internet era, information technology is gradually occupying an important position and becoming an important strategic factor in economic development. As an emerging technology that has been developing continuously in recent years, big data is becoming an important industry to improve the innovation and development of the urban economy. Like AI technology, cloud computing, and the Internet, big data has become an important application technology for economic growth and economic efficiency improvement in today's world. It is an effective means of progress and development in a region and an important strategic resource. As a new technology, big data has attracted more and more attention from all walks of life. Many companies have turned their attention to developing big data for economic benefits. "Enjoy your old age" is the yearning of every old man and his family. In recent years, the national level has been committed to "creating an urban built environment for the elderly to achieve healthy aging." From the perspective of promoting the physical and mental health of the elderly, this paper analyzes the impact of the urban built environment on the physical and mental health of the elderly based on the needs of the elderly and puts forward countermeasures and suggestions based on the current status and existing problems of the urban built environment for the elderly. 
Using combined big data analysis methods and technology, this paper conducted a field questionnaire survey of a total of 4,000 elderly people in urban and rural areas. The existing problems of the built environment in older cities are found to include scattered content, one-sided understanding, and rigid design. In response to these problems, the solutions of building consensus, paying attention to planning, incorporating urban characteristics, and joint efforts by all sectors of society are put forward. Programming tools are used to combine formulas and analyze the related data in detail. The analysis results show that the physical and mental health index of the elderly is highly correlated with factors such as changes in the consensus degree of the urban built environment, urban built environment planning, urban built environment policy support, and multiparty efforts in the urban built environment, and these correlations are positive.}, } @article {pmid35903800, year = {2022}, author = {Mishra, N and Singh, RK and Yadav, SK}, title = {Detection of DDoS Vulnerability in Cloud Computing Using the Perplexed Bayes Classifier.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {9151847}, pmid = {35903800}, issn = {1687-5273}, mesh = {*Algorithms ; Bayes Theorem ; *Cloud Computing ; Machine Learning ; }, abstract = {Cloud computing security has been a critical issue with its increase in demand. One of the most challenging problems in cloud computing is detecting distributed denial-of-service (DDoS) attacks. The attack detection framework for the DDoS attack is tricky because of the nonlinear nature of interruption activities, atypical system traffic behaviour, and the many features in the problem space. As a result, creating defensive solutions against these attacks is critical for mainstream cloud computing adoption. In this research, perplexed-based classifiers with and without feature selection are compared, using performance parameters, with existing machine learning algorithms such as naïve Bayes and random forest to demonstrate the efficacy of the perplexed-based classification algorithm. On performance parameters such as accuracy, sensitivity, and specificity, the proposed algorithm achieves an accuracy of 99%, higher than the existing algorithms, showing that it is highly effective at detecting DDoS attacks in cloud computing systems. To extend our research in the area of nature-inspired computing, we compared our perplexed Bayes classifier feature selection with nature-inspired feature selection such as the genetic algorithm (GA) and particle swarm optimization (PSO) and found that our classifier is highly efficient in comparison: the accuracies of GA and PSO are 2% and 8% lower, respectively, than that of the perplexed Bayes classifier.}, } @article {pmid35901084, year = {2022}, author = {Ali-Eldin, AMT}, title = {A hybrid trust computing approach for IoT using social similarity and machine learning.}, journal = {PloS one}, volume = {17}, number = {7}, pages = {e0265658}, pmid = {35901084}, issn = {1932-6203}, mesh = {Algorithms ; Humans ; Machine Learning ; *Privacy ; *Trust ; }, abstract = {Every year, millions of new devices are added to the Internet of Things, which brings both great benefits and serious security risks for user data privacy.
It is the device owners' responsibility to ensure that the ownership settings of Internet of Things devices are maintained, allowing them to communicate with other user devices autonomously. The ultimate goal of the future Internet of Things is for it to be able to make decisions on its own, without the need for human intervention. Therefore, trust computing and prediction have become more vital in the processing and handling of data as well as in the delivery of services. In this paper, we compute trust in social IoT scenarios using a hybrid approach that combines a distributed computation technique and a global machine learning approach. The approach considers social similarity while assessing other users' ratings and utilizes a cloud-based architecture. Further, we propose a dynamic way to aggregate the different computed trust values. The experimental results show that the proposed approaches outperform related work. The use of machine learning also provides slightly better performance than the distributed computing model. Both proposed approaches were found successful in degrading malicious ratings without the need for more complex algorithms.}, } @article {pmid35898787, year = {2022}, author = {Lin, K}, title = {Big Data Technology in the Macrodecision-Making Model of Regional Industrial Economic Information Applied Research.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {7400797}, pmid = {35898787}, issn = {1687-5273}, mesh = {*Artificial Intelligence ; *Big Data ; Data Mining ; Industry ; Technology ; }, abstract = {In the era of Internet+, modern industry has developed rapidly; the network economy has driven great development of the industrial economy, and traditional industrial economic statistics methods no longer suit the development needs of modern enterprises. Today's society can be described as the era of big data: using big data technology for industrial economic statistics is needed for industrial modernization and is also a new requirement placed on industrial economic statistics by social development. With the wide application of the Internet of Things, cloud computing, the mobile Internet, remote sensing, and geographic information technology in the economic field, precision economic policy has gradually developed and matured. In particular, for different industries in a regional economy, big data mining and analysis technologies can be applied to regional data to obtain the development situation and future trend of the industrial economy in a timely and effective manner. Applying big data technology to macro-level decisions on regional economic information is an effective way to make macroeconomic decisions. Based on this background, this paper proposes a macroeconomic decision-making method for regional industries based on big data technology. Using data mining and time-series analysis methods combined with artificial intelligence, the development trend of regional industries is obtained, and macroeconomic decisions are then made on that basis. Taking agriculture as an example, an analysis of the price trend of a given agricultural product provides an effective reference for that product's development strategy.
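The price-trend analysis mentioned here can be sketched with a simple moving average; the daily price series below is synthetic and the 30-day window is an arbitrary illustrative choice:

```python
# Pandas sketch of simple price-trend extraction on a synthetic series.
import numpy as np
import pandas as pd

dates = pd.date_range("2021-01-01", periods=120, freq="D")
noise = np.random.default_rng(3).normal(0, 0.5, 120)
price = pd.Series(10 + 0.02 * np.arange(120) + noise, index=dates)

trend = price.rolling(window=30).mean()          # 30-day moving average
slope = (trend.iloc[-1] - trend.iloc[-31]) / 30  # crude trend direction
print(f"price trend is {'rising' if slope > 0 else 'falling'}")
```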
The results show that the proposed method can effectively apply big data technology to macro-level decision-making for the regional industrial economy, and it has good potential for wider adoption.}, } @article {pmid35898480, year = {2022}, author = {Gao, S}, title = {Network Security Problems and Countermeasures of Hospital Information System after Going to the Cloud.}, journal = {Computational and mathematical methods in medicine}, volume = {2022}, number = {}, pages = {9725741}, pmid = {35898480}, issn = {1748-6718}, mesh = {*Cloud Computing ; *Computer Security ; Delivery of Health Care ; *Hospital Information Systems ; Hospitals ; Humans ; }, abstract = {In the current social context, information technology, network technology, and cloud computing have been widely used in all walks of life. Analysis of the specific results of applying these technologies shows that their use has changed the working state of various industries and improved work efficiency and quality. It should be noted that although the application of some technologies brings many positive effects, the potential risks they carry cannot be ignored. As far as hospitals are concerned, an information system using cloud computing technology can make better use of the hospital's information and data, but after the information system moves to the cloud, new network security problems appear, potentially resulting in the leakage of patient or research information. Based on this, in practice it is necessary to analyze the network security problems that arise after a hospital information system goes to the cloud and to build and implement corresponding strategies. The author analyzes and discusses these issues through work practice and in combination with previous articles, in order to provide guidance and help for peers.}, } @article {pmid35897994, year = {2022}, author = {Wang, B and Ben, K and Lin, H and Zuo, M and Zhang, F}, title = {EP-ADTA: Edge Prediction-Based Adaptive Data Transfer Algorithm for Underwater Wireless Sensor Networks (UWSNs).}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {15}, pages = {}, pmid = {35897994}, issn = {1424-8220}, abstract = {The underwater wireless sensor network is an important component of the underwater three-dimensional monitoring system. Due to the high bit error rate, high delay, low bandwidth, limited energy, and high dynamics of underwater networks, it is very difficult to realize efficient and reliable data transmission. Therefore, this paper posits that it is not enough to design the routing algorithm only from the perspective of the transmission environment; the comprehensive design of the data transmission algorithm should also be combined with the application. An edge prediction-based adaptive data transfer algorithm (EP-ADTA) is proposed that can dynamically adapt to the needs of underwater monitoring applications and the changes in the transmission environment. EP-ADTA uses the end-edge-cloud architecture to define the underwater wireless sensor networks. The algorithm uses communication nodes as the agents, realizes monitoring data prediction and compression according to the edge prediction, dynamically selects the transmission route, and controls the data transmission accuracy based on reinforcement learning.
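As an aside, the abstract does not give EP-ADTA's reinforcement learning formulation; the generic tabular Q-learning sketch below, with an invented single state, candidate hops, and rewards, only illustrates the kind of update such a design could build on:

```python
# Generic tabular Q-learning for next-hop selection; all values are toys.
import random

ALPHA, GAMMA, EPS = 0.1, 0.9, 0.2
hops = ["n1", "n2", "n3"]                      # candidate next-hop nodes
Q = {h: 0.0 for h in hops}                     # one toy state, so Q is per-hop
reward = {"n1": -1.0, "n2": -0.4, "n3": -2.5}  # e.g. -(energy + delay)

random.seed(0)
for _ in range(500):
    a = random.choice(hops) if random.random() < EPS else max(hops, key=Q.get)
    target = reward[a] + GAMMA * max(Q.values())  # bootstrapped return
    Q[a] += ALPHA * (target - Q[a])               # standard Q-update

print(max(hops, key=Q.get))  # converges to "n2", the cheapest hop here
```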
The simulation results show that EP-ADTA can meet the accuracy requirements of underwater monitoring applications, dynamically adapt to the changes in the transmission environment, and ensure efficient and reliable data transmission in underwater wireless sensor networks.}, } @article {pmid35891110, year = {2022}, author = {Qiu, S and Li, A}, title = {Application of Chaos Mutation Adaptive Sparrow Search Algorithm in Edge Data Compression.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {14}, pages = {}, pmid = {35891110}, issn = {1424-8220}, mesh = {Algorithms ; Cloud Computing ; *Data Compression ; Mutation ; }, abstract = {In view of the large amount of data collected by an edge server, when compression technology is used for data compression, data classification accuracy is reduced and data loss is large. This paper proposes a data compression algorithm based on the chaotic mutation adaptive sparrow search algorithm (CMASSA). Constructing a new fitness function, CMASSA optimizes the hyperparameters of the Convolutional Auto-Encoder Network (CAEN) on the cloud service center, aiming to obtain the optimal CAEN model. The model is sent to the edge server to compress the data at the lower level of edge computing. The effectiveness of CMASSA performance is tested on ten high-dimensional benchmark functions, and the results show that CMASSA outperforms other comparison algorithms. Subsequently, experiments are compared with other literature on the Multi-class Weather Dataset (MWD). Experiments show that under the premise of ensuring a certain compression ratio, the proposed algorithm not only has better accuracy in classification tasks than other algorithms but also maintains a high degree of data reconstruction.}, } @article {pmid35891007, year = {2022}, author = {Alatoun, K and Matrouk, K and Mohammed, MA and Nedoma, J and Martinek, R and Zmij, P}, title = {A Novel Low-Latency and Energy-Efficient Task Scheduling Framework for Internet of Medical Things in an Edge Fog Cloud System.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {14}, pages = {}, pmid = {35891007}, issn = {1424-8220}, mesh = {*Algorithms ; *Cloud Computing ; Computer Simulation ; Electrocardiography ; Internet ; }, abstract = {In healthcare, there are rapid emergency response systems that necessitate real-time actions where speed and efficiency are critical; this may suffer as a result of cloud latency because of the delay caused by the cloud. Therefore, fog computing is utilized in real-time healthcare applications. There are still limitations in response time, latency, and energy consumption. Thus, a proper fog computing architecture and good task scheduling algorithms should be developed to minimize these limitations. In this study, an Energy-Efficient Internet of Medical Things to Fog Interoperability of Task Scheduling (EEIoMT) framework is proposed. This framework schedules tasks in an efficient way by ensuring that critical tasks are executed in the shortest possible time within their deadline while balancing energy consumption when processing other tasks. In our architecture, Electrocardiogram (ECG) sensors are used to monitor heart health at home in a smart city. ECG sensors send the sensed data continuously to the ESP32 microcontroller through Bluetooth (BLE) for analysis. ESP32 is also linked to the fog scheduler via Wi-Fi to send the results data of the analysis (tasks). 
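As the abstract explains next, tasks go to the fog node with the lowest weight, a combination of expected energy consumption and latency; a minimal sketch, assuming a simple linear weighting with invented coefficients and per-node estimates:

```python
# Lowest-weight fog-node selection under an assumed linear combination.
def node_weight(energy_j, latency_ms, w_e=0.5, w_l=0.5):
    return w_e * energy_j + w_l * latency_ms

# Hypothetical (energy in J, latency in ms) estimates for one ECG task.
nodes = {"fog-1": (2.0, 35.0), "fog-2": (1.2, 60.0), "fog-3": (1.5, 30.0)}
best = min(nodes, key=lambda n: node_weight(*nodes[n]))
print(best)  # "fog-3" under these toy numbers
```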
The appropriate fog node is carefully selected to execute the task by giving each node a special weight, which is formulated on the basis of the expected amount of energy consumed and latency in executing this task and choosing the node with the lowest weight. Simulations were performed in iFogSim2. The simulation outcomes show that the suggested framework has a superior performance in reducing the usage of energy, latency, and network utilization when weighed against CHTM, LBS, and FNPA models.}, } @article {pmid35890918, year = {2022}, author = {Khanna, A and Sah, A and Bolshev, V and Burgio, A and Panchenko, V and Jasiński, M}, title = {Blockchain-Cloud Integration: A Survey.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {14}, pages = {}, pmid = {35890918}, issn = {1424-8220}, mesh = {*Blockchain ; Cloud Computing ; Computer Security ; Data Management ; Technology ; }, abstract = {Over the last couple of years, Blockchain technology has emerged as a game-changer for various industry domains, ranging from FinTech and the supply chain to healthcare and education, thereby enabling them to meet the competitive market demands and end-user requirements. Blockchain technology gained its popularity after the massive success of Bitcoin, of which it constitutes the backbone technology. While blockchain is still emerging and finding its foothold across domains, Cloud computing is comparatively well defined and established. Organizations such as Amazon, IBM, Google, and Microsoft have extensively invested in Cloud and continue to provide a plethora of related services to a wide range of customers. The pay-per-use policy and easy access to resources are some of the biggest advantages of Cloud, but it continues to face challenges like data security, compliance, interoperability, and data management. In this article, we present the advantages of integrating Cloud and blockchain technology along with applications of Blockchain-as-a-Service. The article presents itself with a detailed survey illustrating recent works combining the amalgamation of both technologies. The survey also talks about blockchain-cloud services being offered by existing Cloud Service providers.}, } @article {pmid35890848, year = {2022}, author = {Khalil, U and Malik, OA and Uddin, M and Chen, CL}, title = {A Comparative Analysis on Blockchain versus Centralized Authentication Architectures for IoT-Enabled Smart Devices in Smart Cities: A Comprehensive Review, Recent Advances, and Future Research Directions.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {14}, pages = {}, pmid = {35890848}, issn = {1424-8220}, mesh = {Artificial Intelligence ; *Blockchain ; Cities ; Computer Security ; *Internet of Things ; }, abstract = {Smart devices have become an essential part of the architectures such as the Internet of Things (IoT), Cyber-Physical Systems (CPSs), and Internet of Everything (IoE). In contrast, these architectures constitute a system to realize the concept of smart cities and, ultimately, a smart planet. The adoption of these smart devices expands to different cyber-physical systems in smart city architecture, i.e., smart houses, smart healthcare, smart transportation, smart grid, smart agriculture, etc. The edge of the network connects these smart devices (sensors, aggregators, and actuators) that can operate in the physical environment and collects the data, which is further used to make an informed decision through actuation. 
Here, the security of these devices is immensely important, specifically from an authentication standpoint, as in the case of unauthenticated/malicious assets, the whole infrastructure would be at stake. We provide an updated review of authentication mechanisms by categorizing centralized and distributed architectures. We discuss the security issues regarding the authentication of these IoT-enabled smart devices. We evaluate and analyze the study of the proposed literature schemes that pose authentication challenges in terms of computational costs, communication overheads, and models applied to attain robustness. Hence, lightweight solutions in managing, maintaining, processing, and storing authentication data of IoT-enabled assets are an urgent need. From an integration perspective, cloud computing has provided strong support. In contrast, decentralized ledger technology, i.e., blockchain, light-weight cryptosystems, and Artificial Intelligence (AI)-based solutions, are the areas with much more to explore. Finally, we discuss the future research challenges, which will eventually help address the ambiguities for improvement.}, } @article {pmid35890825, year = {2022}, author = {Nakazato, J and Li, Z and Maruta, K and Kubota, K and Yu, T and Tran, GK and Sakaguchi, K and Masuko, S}, title = {MEC/Cloud Orchestrator to Facilitate Private/Local Beyond 5G with MEC and Proof-of-Concept Implementation.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {14}, pages = {}, pmid = {35890825}, issn = {1424-8220}, mesh = {*Cloud Computing ; Communication ; *Ecosystem ; }, abstract = {The emergence of 5G-IoT opens up unprecedented connectivity possibilities for new service use cases and players. Multi-access edge computing (MEC) is a crucial technology and enabler for Beyond 5G, supporting next-generation communications with service guarantees (e.g., ultra-low latency, high security) from an end-to-end (E2E) perspective. On the other hand, one notable advance is the platform that supports virtualization from RAN to applications. Deploying Radio Access Networks (RAN) and MEC, including third-party applications on virtualization platforms, and renting other equipment from legacy telecom operators will make it easier for new telecom operators, called Private/Local Telecom Operators, to join the ecosystem. Our preliminary studies have discussed the ecosystem for private and local telecom operators regarding business potential and revenue and provided numerical results. What remains is how Private/Local Telecom Operators can manage and deploy their MEC applications. In this paper, we designed the architecture for fully virtualized MEC 5G cellular networks with local use cases (e.g., stadiums, campuses). We propose an MEC/Cloud Orchestrator implementation for intelligent deployment selection. In addition, we provide implementation schemes in several cases held by either existing cloud owners or private and local operators. In order to verify the proposal's feasibility, we designed the system level in E2E and constructed a Beyond 5G testbed at the Ōokayama Campus of the Tokyo Institute of Technology. Through proof-of-concept in the outdoor field, the proposed system's feasibility is verified by E2E performance evaluation. 
The verification results prove that the proposed approach can reduce latency and provide a more stable throughput than conventional cloud services.}, } @article {pmid35890793, year = {2022}, author = {Wang, Q and Jiang, L and Sun, X and Zhao, J and Deng, Z and Yang, S}, title = {An Efficient LiDAR Point Cloud Map Coding Scheme Based on Segmentation and Frame-Inserting Network.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {14}, pages = {}, pmid = {35890793}, issn = {1424-8220}, abstract = {In this article, we present an efficient coding scheme for LiDAR point cloud maps. As a point cloud map consists of numerous single scans spliced together, by recording the time stamp and quaternion matrix of each scan during map building, we cast the point cloud map compression into the point cloud sequence compression problem. The coding architecture includes two techniques: intra-coding and inter-coding. For intra-frames, a segmentation-based intra-prediction technique is developed. For inter-frames, an interpolation-based inter-frame coding network is explored to remove temporal redundancy by generating virtual point clouds based on the decoded frames. We only need to code the difference between the original LiDAR data and the intra/inter-predicted point cloud data. The point cloud map can be reconstructed according to the decoded point cloud sequence and quaternion matrices. Experiments on the KITTI dataset show that the proposed coding scheme can largely eliminate the temporal and spatial redundancies. The point cloud map can be encoded to 1/24 of its original size with 2 mm-level precision. Our algorithm also obtains better coding performance compared with the octree and Google Draco algorithms.}, } @article {pmid35890787, year = {2022}, author = {Hussein, M and Mohammed, YS and Galal, AI and Abd-Elrahman, E and Zorkany, M}, title = {Smart Cognitive IoT Devices Using Multi-Layer Perception Neural Network on Limited Microcontroller.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {14}, pages = {}, pmid = {35890787}, issn = {1424-8220}, mesh = {Algorithms ; *Artificial Intelligence ; Cognition ; *Internet of Things ; Neural Networks, Computer ; }, abstract = {The Internet of Things (IoT) era is mainly dependent on the word "Smart", such as smart cities, smart homes, and smart cars. This aspect can be achieved through the merging of machine learning algorithms with IoT computing models. By adding the Artificial Intelligence (AI) algorithms to IoT, the result is the Cognitive IoT (CIoT). In the automotive industry, many researchers worked on self-diagnosis systems using deep learning, but most of them performed this process on the cloud due to the hardware limitations of the end-devices, and the devices obtain the decision via the cloud servers. Others worked with simple traditional algorithms of machine learning to solve these limitations of the processing capabilities of the end-devices. In this paper, a self-diagnosis smart device is introduced with fast responses and little overhead using the Multi-Layer Perceptron Neural Network (MLP-NN) as a deep learning technique. The MLP-NN learning stage is performed using a Tensorflow framework to generate an MLP model's parameters. Then, the MLP-NN model is implemented using these model's parameters on a low cost end-device such as ARM Cortex-M Series architecture. After implementing the MLP-NN model, the IoT implementation is built to publish the decision results. 
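As an aside, one plausible route from the TensorFlow-trained MLP described above to a Cortex-M class device is conversion to a TensorFlow Lite flat buffer; the layer sizes and data below are hypothetical, and the authors may have exported their trained parameters differently:

```python
# Train a small Keras MLP, then convert it to a TFLite flat buffer that an
# embedded runtime can execute. Everything here is an illustrative toy.
import numpy as np
import tensorflow as tf

X = np.random.default_rng(4).random((256, 6)).astype("float32")  # 6 sensors
y = (X.sum(axis=1) > 3).astype("int32")                          # toy label

model = tf.keras.Sequential([
    tf.keras.layers.Input(shape=(6,)),
    tf.keras.layers.Dense(16, activation="relu"),
    tf.keras.layers.Dense(2, activation="softmax"),
])
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy")
model.fit(X, y, epochs=3, verbose=0)

tflite_bytes = tf.lite.TFLiteConverter.from_keras_model(model).convert()
with open("mlp_selfdiag.tflite", "wb") as fh:
    fh.write(tflite_bytes)  # ship this buffer to the microcontroller runtime
```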
With the proposed method, the smart device can take the output decision based on sensor values at the IoT node itself, without returning to the cloud. For comparison, another solution is proposed for the cloud-based architecture, where the MLP-NN model is implemented on the cloud. The results demonstrate a successfully implemented MLP-NN model for low-capability end-devices, where the smart device solution has lower traffic and latency than the cloud-based solution.}, } @article {pmid35880010, year = {2022}, author = {Hemalatha, M}, title = {A hybrid random forest deep learning classifier empowered edge cloud architecture for COVID-19 and pneumonia detection.}, journal = {Expert systems with applications}, volume = {210}, number = {}, pages = {118227}, pmid = {35880010}, issn = {0957-4174}, abstract = {COVID-19 is a global pandemic that mostly affects patients' respiratory systems, and the only way to protect oneself against the virus at the present moment is to diagnose the illness, isolate the patient, and provide immunization. In the present situation, the testing used to predict COVID-19 is inefficient and results in more false positives. This difficulty can be solved by developing a remote medical decision support system that detects illness using CT scans or X-ray images with less manual interaction and is less prone to errors. State-of-the-art techniques mainly use complex deep learning architectures, which are not very effective when deployed on resource-constrained edge devices. To overcome this problem, a multi-objective Modified Heat Transfer Search (MOMHTS) optimized hybrid Random Forest Deep learning (HRFDL) classifier is proposed in this paper. The MOMHTS algorithm mainly optimizes the deep learning model in the HRFDL architecture by optimizing its hyperparameters to support resource-constrained edge devices. To evaluate the efficiency of this technique, extensive experimentation is conducted on two real-time datasets, namely the COVID19 lung CT scan dataset and the Chest X-ray images (Pneumonia) dataset. The proposed methodology mainly offers increased communication speed between the IoT devices and COVID-19 detection via the MOMHTS-optimized HRFDL classifier, which is modified to support resources that allow only minimal computation and storage. The proposed methodology offers an accuracy of 99% on both the COVID19 lung CT scan dataset and the Chest X-ray images (Pneumonia) dataset with minimal computational time, cost, and storage. Based on the simulation outcomes, we can conclude that the proposed methodology is an appropriate fit for edge computing detection to identify COVID-19 and pneumonia with higher detection accuracy.}, } @article {pmid35879937, year = {2022}, author = {Siriborvornratanakul, T}, title = {Human behavior in image-based Road Health Inspection Systems despite the emerging AutoML.}, journal = {Journal of big data}, volume = {9}, number = {1}, pages = {96}, pmid = {35879937}, issn = {2196-1115}, abstract = {INTRODUCTION: The emergence of automated machine learning, or AutoML, has raised an interesting trend of no-code and low-code machine learning, where most tasks in the machine learning pipeline can be automated without support from human data scientists. While it sounds reasonable to leave the repetitive trial-and-error tasks of designing complex network architectures and tuning many hyperparameters to AutoML, leading research using AutoML is still scarce.
The overall purpose of this case study is therefore to investigate the gap between current AutoML frameworks and practical machine learning development.

CASE DESCRIPTION: First, this paper confirms the increasing trend of AutoML via an indirect indicator: the numbers of search results in Google Trends, IEEE Xplore, and ACM Digital Library during 2012-2021. Then, the three most popular AutoML frameworks (i.e., Auto-Sklearn, AutoKeras, and Google Cloud AutoML) are inspected as AutoML's representatives; the inspection covers six comparative aspects. Based on the features available in the three AutoML frameworks investigated, our case study then surveys recent machine learning research against the background of image-based machine learning. This is because the field of computer vision spans several levels of machine learning, from basic to advanced, and has lately been one of the most popular fields for studying machine learning and artificial intelligence. Our study is specific to the context of image-based road health inspection systems, which have a long history in computer vision, allowing us to observe solution transitions from past to present.

DISCUSSION AND EVALUATION: After confirming the rising numbers of AutoML search results in the three search engines, our study of the three AutoML representatives further reveals that many features can be used to automate the development pipeline of image-based road health inspection systems. Nevertheless, we find that recent works in image-based road health inspection have not used any form of AutoML. Digging into these recent works, two main problems best explain why most researchers do not yet use AutoML in their image-based road health inspection systems. First, AutoML's trial-and-error decisions involve much extra computation compared with human-guided decisions. Second, using AutoML adds another layer of non-interpretability to a model. As these two problems are the major pain points in modern neural networks and deep learning, they may require years to resolve, delaying the mass adoption of AutoML in image-based road health inspection systems.

CONCLUSIONS: In conclusion, although AutoML's utilization is not mainstream at this moment, we believe that the trend of AutoML will continue to grow. This is because there exists a demand for AutoML currently, and in the future, more demand for no-code or low-code machine learning development alternatives will grow together with the expansion of machine learning solutions. Nevertheless, this case study focuses on selected papers whose authors are researchers who can publish their works in academic conferences and journals. In the future, the study should continue to include observing novice users, non-programmer users, and machine learning practitioners in order to discover more insights from non-research perspectives.}, } @article {pmid35875731, year = {2022}, author = {Hameed Abdulkareem, K and Awad Mutlag, A and Musa Dinar, A and Frnda, J and Abed Mohammed, M and Hasan Zayr, F and Lakhan, A and Kadry, S and Ali Khattak, H and Nedoma, J}, title = {Smart Healthcare System for Severity Prediction and Critical Tasks Management of COVID-19 Patients in IoT-Fog Computing Environments.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {5012962}, pmid = {35875731}, issn = {1687-5273}, mesh = {Algorithms ; *COVID-19 ; Delivery of Health Care ; Humans ; *Internet of Things ; }, abstract = {COVID-19 has depleted healthcare systems around the world. Extreme conditions must be defined as soon as possible so that services and treatment can be deployed and intensified. Many biomarkers are being investigated in order to track the patient's condition. Unfortunately, this may interfere with the symptoms of other diseases, making it more difficult for a specialist to diagnose or predict the severity level of the case. This research develops a Smart Healthcare System for Severity Prediction and Critical Tasks Management (SHSSP-CTM) for COVID-19 patients. On the one hand, a machine learning (ML) model is projected to predict the severity of COVID-19 disease. On the other hand, a multi-agent system is proposed to prioritize patients according to the seriousness of the COVID-19 condition and then provide complete network management from the edge to the cloud. Clinical data, including Internet of Medical Things (IoMT) sensors and Electronic Health Record (EHR) data of 78 patients from one hospital in the Wasit Governorate, Iraq, were used in this study. Different data sources are fused to generate new feature pattern. Also, data mining techniques such as normalization and feature selection are applied. Two models, specifically logistic regression (LR) and random forest (RF), are used as baseline severity predictive models. A multi-agent algorithm (MAA), consisting of a personal agent (PA) and fog node agent (FNA), is used to control the prioritization process of COVID-19 patients. The highest prediction result is achieved based on data fusion and selected features, where all examined classifiers observe a significant increase in accuracy. Furthermore, compared with state-of-the-art methods, the RF model showed a high and balanced prediction performance with 86% accuracy, 85.7% F-score, 87.2% precision, and 86% recall. In addition, as compared to the cloud, the MAA showed very significant performance where the resource usage was 66% in the proposed model and 34% in the traditional cloud, the delay was 19% in the proposed model and 81% in the cloud, and the consumed energy was 31% in proposed model and 69% in the cloud. 
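As an aside, the random-forest severity baseline reported above (86% accuracy) is straightforward to outline in scikit-learn; the features and labels below are synthetic stand-ins for the fused IoMT/EHR data, so the printed score carries no meaning beyond illustrating usage:

```python
# Outline of a random-forest severity classifier on synthetic stand-in data.
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split

rng = np.random.default_rng(5)
X = rng.random((78, 12))     # 78 patients, 12 selected/fused features
y = rng.integers(0, 3, 78)   # three hypothetical severity classes

X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.25, random_state=0)
clf = RandomForestClassifier(n_estimators=200, random_state=0).fit(X_tr, y_tr)
print(accuracy_score(y_te, clf.predict(X_te)))
```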
The findings of this study will allow for the early detection of the three severity classes, lowering mortality rates.}, } @article {pmid35875729, year = {2022}, author = {Zhang, L}, title = {B/S-Based Construction of a Big Data Logistics Platform.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {6873062}, pmid = {35875729}, issn = {1687-5273}, mesh = {*Big Data ; *Cloud Computing ; Humans ; }, abstract = {Due to the overwhelming reach of the Internet of Things, devices belonging to these networks are utilized in almost every domain of real life in order to improve the lifestyle of humans. However, these networks produce a huge amount of data related to different application domains, leading to another important research aspect, i.e., big data and cloud computing. Big data and cloud computing technologies in the logistics field have experienced initial contact, gradual penetration, and widespread application. Moreover, they support the upgrade of traditional logistics to smart logistics, aiming to meet the fundamental requirements of today's logistics industry and to reduce costs while enhancing efficiency. However, the big data and cloud computing smart logistics model still has many problems in the construction of public logistics information platforms, end coordination development, government platform construction, and so on; these must be addressed in order to solve traditional logistics' problems of low efficiency, high cost, and low service satisfaction. In this article, we have designed a new big data-enabled logistics detection system based on B/S architecture, constructed a smart logistics model consisting of a supply subsystem, demand subsystem, and supervision subsystem, and finally realized the operation process of the smart logistics model based on big data cloud computing.}, } @article {pmid35875634, year = {2022}, author = {Chen, X and Gao, T and Gao, H and Liu, B and Chen, M and Wang, B}, title = {A multi-stage heuristic method for service caching and task offloading to improve the cooperation between edge and cloud computing.}, journal = {PeerJ. Computer science}, volume = {8}, number = {}, pages = {e1012}, pmid = {35875634}, issn = {2376-5992}, abstract = {Edge-cloud computing has attracted increasing attention recently due to its efficiency in providing services for not only delay-sensitive applications but also resource-intensive requests, by combining low-latency edge resources and abundant cloud resources. A carefully designed strategy of service caching and task offloading helps to improve user satisfaction and resource efficiency. Thus, in this article, we focus on the joint service caching and task offloading problem in edge-cloud computing environments, to improve the cooperation between edge and cloud resources. First, we formulated the problem as a mixed-integer nonlinear program, which is proved to be NP-hard. Then, we proposed a three-stage heuristic method for solving the problem in polynomial time. In the first stage, our method tried to make full use of abundant cloud resources by pre-offloading as many tasks as possible to the cloud. In the second stage, it aimed to make full use of low-latency edge resources by offloading the remaining tasks and caching the corresponding services on edge resources. In the last stage, it focused on improving the performance of tasks offloaded to the cloud by re-offloading some tasks from cloud resources to edge resources.
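The three-stage heuristic just summarized can be skeletonized as follows; the task and capacity structures are invented, and the paper's actual model prices caching, latency, and resource limits far more carefully:

```python
# Skeleton of the three-stage offloading idea on invented toy structures.
def three_stage_offload(tasks, edge_slots):
    """tasks: list of (task_id, delay_sensitive); returns (edge, cloud)."""
    # Stage 1: pre-offload every task to the abundant cloud.
    cloud, edge = list(tasks), []
    # Stage 2: pull delay-sensitive tasks onto scarce low-latency edge slots.
    for t in [t for t in cloud if t[1]][:edge_slots]:
        cloud.remove(t)
        edge.append(t)
    # Stage 3: re-offload remaining cloud tasks into any spare edge capacity.
    while cloud and len(edge) < edge_slots:
        edge.append(cloud.pop(0))
    return edge, cloud

tasks = [("t1", True), ("t2", False), ("t3", True), ("t4", False)]
print(three_stage_offload(tasks, edge_slots=3))  # t1, t3, t2 on edge; t4 stays
```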
The performance of our method was evaluated by extensive simulated experiments. The results show that our method achieves up to 155%, 56.1%, and 155% better performance in user satisfaction, resource efficiency, and processing efficiency, respectively, compared with several classical and state-of-the-art task scheduling methods.}, } @article {pmid35874097, year = {2022}, author = {Huang, CW and Chuang, WH and Lin, CY and Chen, SH}, title = {Elegancy: Digitizing the wisdom from laboratories to the cloud with free no-code platform.}, journal = {iScience}, volume = {25}, number = {8}, pages = {104710}, pmid = {35874097}, issn = {2589-0042}, abstract = {One of the top priorities in any laboratory is archiving experimental data in the most secure, efficient, and error-free way. This is especially important for those in chemical and biological research, where experiment records are more easily damaged. In addition, transferring experiment results from paper to electronic devices is time-consuming and redundant. Therefore, we introduce an open-source no-code electronic laboratory notebook, Elegancy, a cloud-based/standalone web service distributed as a Docker image. Elegancy fits all laboratories but is specially equipped with several features benefitting biochemical laboratories. It can be accessed via various web browsers, allowing researchers to upload photos or audio recordings directly from their mobile devices. Elegancy also contains a meeting arrangement module, audit/revision control, and a laboratory supply management system. We believe Elegancy could help the scientific research community gather evidence, share information, reorganize knowledge, and digitize laboratory work with greater ease and security.}, } @article {pmid35873307, year = {2022}, author = {Rodas-Martinez, AK and Altamirano-Yupanqui, JR}, title = {[Mass vaccinations against COVID-19 through the use of technologies for the management of appointment scheduling and data of large volumes of vaccinated].}, journal = {Vacunas}, volume = {23}, number = {}, pages = {S111-S120}, pmid = {35873307}, issn = {1576-9887}, abstract = {Mass vaccination poses a challenge for health authorities due to the high volume of people who need to be vaccinated in a short period of time. Manual processes in vaccination centres, where records are entered on paper, delay the timely input of information and render the vaccination process inefficient. The proposed prototype, conceived as a strategy for mass COVID-19 vaccination, uses mobile technology, QR codes, and cloud computing to generate appointments and to record and control entry to vaccination centres, automating these data-driven processes.
Technology-based processes help people by giving them the flexibility to choose the most convenient vaccination centre and provide health authorities with data-driven tools for management, control, and real-time decision-making.}, } @article {pmid35870448, year = {2022}, author = {Abe, T and Kinsella, I and Saxena, S and Buchanan, EK and Couto, J and Briggs, J and Kitt, SL and Glassman, R and Zhou, J and Paninski, L and Cunningham, JP}, title = {Neuroscience Cloud Analysis As a Service: An open-source platform for scalable, reproducible data analysis.}, journal = {Neuron}, volume = {110}, number = {17}, pages = {2771-2789.e7}, pmid = {35870448}, issn = {1097-4199}, support = {T32 NS064929/NS/NINDS NIH HHS/United States ; UF1 NS107696/NS/NINDS NIH HHS/United States ; RF1 MH120680/MH/NIMH NIH HHS/United States ; U19 NS107613/NS/NINDS NIH HHS/United States ; U19 NS104649/NS/NINDS NIH HHS/United States ; UF1 NS108213/NS/NINDS NIH HHS/United States ; U19 NS123716/NS/NINDS NIH HHS/United States ; U01 NS103489/NS/NINDS NIH HHS/United States ; }, mesh = {Cloud Computing ; *Data Analysis ; *Neurosciences ; Reproducibility of Results ; Software ; }, abstract = {A key aspect of neuroscience research is the development of powerful, general-purpose data analyses that process large datasets. Unfortunately, modern data analyses have a hidden dependence upon complex computing infrastructure (e.g., software and hardware), which acts as an unaddressed deterrent to analysis users. Although existing analyses are increasingly shared as open-source software, the infrastructure and knowledge needed to deploy these analyses efficiently still pose significant barriers to use. In this work, we develop Neuroscience Cloud Analysis As a Service (NeuroCAAS): a fully automated open-source analysis platform offering automatic infrastructure reproducibility for any data analysis. We show how NeuroCAAS supports the design of simpler, more powerful data analyses and that many popular data analysis tools offered through NeuroCAAS outperform counterparts on typical infrastructure. Pairing rigorous infrastructure management with cloud resources, NeuroCAAS dramatically accelerates the dissemination and use of new data analyses for neuroscientific discovery.}, } @article {pmid35867406, year = {2022}, author = {Merdan, O and Şişman, AS and Aksoy, SA and Kızıl, S and Tüzemen, NÜ and Yılmaz, E and Ener, B}, title = {Investigation of the Defective Growth Pattern and Multidrug Resistance in a Clinical Isolate of Candida glabrata Using Whole-Genome Sequencing and Computational Biology Applications.}, journal = {Microbiology spectrum}, volume = {10}, number = {4}, pages = {e0077622}, pmid = {35867406}, issn = {2165-0497}, mesh = {*Amphotericin B/metabolism/pharmacology ; Animals ; Antifungal Agents/pharmacology ; Artificial Intelligence ; Azoles/metabolism/pharmacology ; *Candida glabrata/genetics ; Cholesterol/metabolism/pharmacology ; Computational Biology ; Drug Resistance, Fungal/genetics ; Drug Resistance, Multiple ; Ergosterol/metabolism ; Microbial Sensitivity Tests ; Sheep ; }, abstract = {Candida glabrata is increasingly isolated from blood cultures, and multidrug-resistant isolates have important implications for therapy. This study describes a cholesterol-dependent clinical C. glabrata isolate (ML72254) that did not grow without blood (containing cholesterol) on routine mycological media and that showed azole and amphotericin B (AmB) resistance. 
Matrix-assisted laser desorption ionization-time of flight (MALDI-TOF) and whole-genome sequencing (WGS) were used for species identification. A modified Etest method (Mueller-Hinton agar supplemented with 5% sheep blood) was used for antifungal susceptibility testing. WGS data were processed via the Galaxy platform, and the genomic variations of ML72254 were retrieved. A computational biology workflow utilizing web-based applications (PROVEAN, AlphaFold Colab, and Missense3D) was constructed to predict possible deleterious effects of these missense variations on protein functions. The predictive ability of this workflow was tested with previously reported missense variations in ergosterol synthesis genes of C. glabrata. ML72254 was identified as C. glabrata sensu stricto with MALDI-TOF, and WGS confirmed this identification. The MICs of fluconazole, voriconazole, and amphotericin B were >256, >32, and >32 μg/mL, respectively. A novel frameshift mutation in the ERG1 gene (Pro314fs) and many missense variations were detected in the ergosterol synthesis genes. None of the missense variations in the ML72254 ergosterol synthesis genes were deleterious, and the Pro314fs mutation was identified as the causative molecular change for the cholesterol-dependent and multidrug-resistant phenotype. This study verified that web-based computational biology solutions can be powerful tools for examining the possible impacts of missense mutations in C. glabrata. IMPORTANCE In this study, a cholesterol-dependent C. glabrata clinical isolate that confers azole and AmB resistance was investigated using artificial intelligence (AI) technologies and cloud computing applications. This is the first known cholesterol-dependent C. glabrata isolate to be found in Turkey. Cholesterol-dependent C. glabrata isolates are rarely found in clinical samples; they can easily be overlooked during routine laboratory procedures. Microbiologists therefore need to be alert when discrepancies occur between microscopic examination and growth on routine media. In addition, because these isolates confer antifungal resistance, patient management requires extra care.}, } @article {pmid35866176, year = {2021}, author = {Zhou, H and Ouyang, X and Su, J and de Laat, C and Zhao, Z}, title = {Enforcing trustworthy cloud SLA with witnesses: A game theory-based model using smart contracts.}, journal = {Concurrency and computation : practice & experience}, volume = {33}, number = {14}, pages = {e5511}, doi = {10.1002/cpe.5511}, pmid = {35866176}, issn = {1532-0626}, abstract = {Trust is lacking between cloud customers and providers when enforcing traditional cloud SLAs (Service Level Agreements), and the blockchain technique seems a promising solution. However, current explorations still face challenges in proving that off-chain SLO (Service Level Objective) violations really happened before they are recorded in on-chain transactions. In this paper, a witness model implemented with smart contracts is proposed to solve this trust issue. The introduced role, the "Witness", gains rewards as an incentive for reporting SLO violations, and the payoff function is carefully designed such that the witness has to tell the truth in order to maximize its rewards. The fact that the witness has to be honest is analyzed and proved using the Nash Equilibrium principle of game theory. To ensure that the chosen witnesses are random and independent, an unbiased selection algorithm is proposed to avoid possible collusion.
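The random, collusion-resistant witness selection described above can be illustrated with one common construction: seeding a PRNG with an unpredictable on-chain value such as a recent block hash, so that no single party controls the draw. The sketch below shows the idea only and is not the paper's exact algorithm; the registry and block hash are hypothetical:

    # Hedged sketch: sample k witnesses uniformly from a registry, seeded by an
    # unpredictable beacon (e.g., a recent block hash) so no party controls the draw.
    import hashlib
    import random

    def select_witnesses(registry, block_hash, k):
        seed = int.from_bytes(hashlib.sha256(block_hash).digest(), "big")
        return random.Random(seed).sample(sorted(registry), k)

    pool = {f"witness_{i:03d}" for i in range(100)}        # hypothetical registry
    print(select_witnesses(pool, b"0xabc123-recent-block-hash", k=5))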
An auditing mechanism is also introduced to detect potential malicious witnesses. Specifically, we define three types of malicious behaviors and propose quantitative indicators to audit and detect these behaviors. Moreover, experimental studies based on the Ethereum blockchain demonstrate that the proposed model is feasible and indicate that the performance, i.e., the transaction fee, of each interface meets the design expectations.}, } @article {pmid35865872, year = {2022}, author = {Zhou, Y}, title = {The Application Trend of Digital Finance and Technological Innovation in the Development of Green Economy.}, journal = {Journal of environmental and public health}, volume = {2022}, number = {}, pages = {1064558}, pmid = {35865872}, issn = {1687-9813}, mesh = {*Artificial Intelligence ; Conservation of Energy Resources ; Economic Development ; *Inventions ; Sustainable Development ; }, abstract = {Based on the perspective of digital finance and technological innovation, this paper analyzes their application in economic development, the green economy, and sustainable development. With the continuous development of the technological economy, methods such as artificial intelligence, the Internet of Things, big data, and cloud computing have become increasingly mature. Economic development is inseparable from the empowerment of technology. In this paper, we first introduce the basic concepts and main forms of digital finance and the technological economy and list cutting-edge technologies and modes including blockchain, VR, and the sharing economy. Then, we analyze the application trends of the technological economy. Finally, we analyze examples of digital finance and technological innovation in detail, including the tourism economy, digital marketing, the sharing economy, smart cities, digital healthcare, and personalized education, all hot topics at the intersection and integration of technologies. In the end, we put forward prospects for the development of a digital economy, digital finance, and technological innovation.}, } @article {pmid35860795, year = {2022}, author = {Yan, L and Chen, Y and Caixia, G and Jiangying, W and Xiaoying, L and Zhe, L}, title = {Medical Big Data and Postoperative Nursing of Fracture Patients Based on Cloud Computing.}, journal = {BioMed research international}, volume = {2022}, number = {}, pages = {4090235}, pmid = {35860795}, issn = {2314-6141}, mesh = {*Big Data ; Cloud Computing ; *Fractures, Bone/surgery ; Humans ; *Postoperative Care/nursing ; Reproducibility of Results ; Wireless Technology ; }, abstract = {Based on the standards for wireless sensor system identification, the sensor node identity OID and the management object OID in the SNMP MIB are merged, and a management object OID coding mechanism for the SNMP-based wireless sensor system is proposed, so that the identity, attributes, and multiple entities of a target sensor node in the wireless sensor network can be identified and managed through the node management object OID alone. Abnormal medical big data are generally detected and verified using two models: multidimensional data analysis and a sliding window. The sliding window alone detects abnormalities in medical big data with a detection rate above 95%, which is a very good result; across dimensions, the detection rate for four-dimensional data is 2.9% higher than for single-dimensional data.
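The sliding-window detection step mentioned above can be sketched as follows; the window length and the 3-sigma threshold are illustrative choices, not the paper's parameters:

    # Minimal sketch of sliding-window anomaly detection over a vital-sign
    # stream; window length and the 3-sigma rule are illustrative assumptions.
    from collections import deque
    from statistics import mean, stdev

    def detect_anomalies(stream, window=20, threshold=3.0):
        buf, flagged = deque(maxlen=window), []
        for i, x in enumerate(stream):
            if len(buf) == buf.maxlen:
                mu, sigma = mean(buf), stdev(buf)
                if sigma > 0 and abs(x - mu) > threshold * sigma:
                    flagged.append(i)   # reading deviates strongly from the window
            buf.append(x)
        return flagged

    readings = [72, 74, 71, 73, 75] * 5 + [140]   # synthetic stream with a spike
    print(detect_anomalies(readings))             # flags the index of the spike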
On the basis of a ZigBee wireless network, terminal signal transmission for fracture treatment can be realized. Combined with the actual needs of fracture treatment, a wireless network with the required basic functions can be built from ZigBee wireless modules. The nursing system was reformed on the basis of its safety and reliability, its efficiency was improved, and timely and safe nursing services were achieved.}, } @article {pmid35860647, year = {2022}, author = {Li, H}, title = {Computer Security Issues and Legal System Based on Cloud Computing.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {8112212}, pmid = {35860647}, issn = {1687-5273}, mesh = {*Cloud Computing ; *Computer Security ; Confidentiality ; Information Storage and Retrieval ; Software ; }, abstract = {To effectively improve the security and accuracy of computer information storage, a treatment of computer security issues and the legal system based on cloud computing is proposed. Firstly, this article details the evolution of cloud computing, its characteristics and architecture, and its current application status. Second, we discuss security strategies to ensure the confidentiality and integrity of cloud computing information, focus on data encryption technology for cloud data security, and design and implement a data backup and recovery system based on the cloud platform. The core layers of the system are the system layer and the data operation layer. The system uses multithreading technology based on epoll and a thread pool to improve the efficiency of data transmission. At the same time, a basic visual page is implemented, which gives users a convenient way to operate the system. Finally, the system was built in a laboratory environment and tested as a whole. The test results show that, in a performance comparison with commonly used systems, the proposed system improves the data transmission rate, but node CPU utilization is as high as 40%, which imposes certain requirements on node CPU performance. Therefore, the system meets the functional requirements proposed in the design. Compared with existing systems, its performance meets the actual requirements of use, proving that the system is accessible and efficient.}, } @article {pmid35858750, year = {2022}, author = {, }, title = {Diversifying the genomic data science research community.}, journal = {Genome research}, volume = {32}, number = {7}, pages = {1231-1241}, pmid = {35858750}, issn = {1549-5469}, support = {U24 HG010262/HG/NHGRI NIH HHS/United States ; U24 HG010263/HG/NHGRI NIH HHS/United States ; }, abstract = {Over the past 20 years, the explosion of genomic data collection and the cloud computing revolution have made computational and data science research accessible to anyone with a web browser and an internet connection. However, students at institutions with limited resources have received relatively little exposure to curricula or professional development opportunities that lead to careers in genomic data science. To broaden participation in genomics research, the scientific community needs to support these programs in local education and research at underserved institutions (UIs).
These include community colleges, historically Black colleges and universities, Hispanic-serving institutions, and tribal colleges and universities that support ethnically, racially, and socioeconomically underrepresented students in the United States. We have formed the Genomic Data Science Community Network to support students, faculty, and their networks to identify opportunities and broaden access to genomic data science. These opportunities include expanding access to infrastructure and data, providing UI faculty development opportunities, strengthening collaborations among faculty, recognizing UI teaching and research excellence, fostering student awareness, developing modular and open-source resources, expanding course-based undergraduate research experiences (CUREs), building curriculum, supporting student professional development and research, and removing financial barriers through funding programs and collaborator support.}, } @article {pmid35854299, year = {2022}, author = {Wang, R and Han, J and Liu, C and Wang, L}, title = {Relationship between medical students' perceived instructor role and their approaches to using online learning technologies in a cloud-based virtual classroom.}, journal = {BMC medical education}, volume = {22}, number = {1}, pages = {560}, pmid = {35854299}, issn = {1472-6920}, mesh = {Cloud Computing ; Cross-Sectional Studies ; *Education, Distance/methods ; Humans ; *Students, Medical ; Universities ; }, abstract = {BACKGROUND: Students can take different approaches to using online learning technologies: deep and surface. It is important to understand the relationship between instructor role and student approaches to using online learning technologies in online learning settings supported by cloud computing techniques.

METHODS: A descriptive, cross-sectional study was conducted to analyze the relationships between medical students' perceptions of instructor role (instructor support, instructor-student interaction, and instructor innovation) and students' approaches to using online learning technologies in cloud-based virtual classrooms. A 25-item online questionnaire, along with a sheet collecting basic demographic information, was administered to all medical students at the Qilu Medical School of Shandong University, China. Overall, 213 of 4000 medical students (5.34%) at the medical school participated in the survey.

RESULTS: The results showed high levels of perceived instructor support, instructor-student interaction, and instructor innovation among medical students. Most students adopted deep approaches to using online learning technologies. Instructor support, instructor-student interaction, and instructor innovation were positively related to students' deep approaches to using online learning technologies. Instructor support was negatively related to students' surface approaches to using online learning technologies.

CONCLUSIONS: The relationship between instructor role (instructor support, instructor-student interaction, and instructor innovation) and students' approaches to using online learning technologies highlights the importance of instructor support and innovation in facilitating students' adoption of desirable approaches to learning from the application of technologies.}, } @article {pmid35850085, year = {2022}, author = {Peng, Y and Sengupta, D and Duan, Y and Chen, C and Tian, B}, title = {Accurate mapping of Chinese coastal aquaculture ponds using biophysical parameters based on Sentinel-2 time series images.}, journal = {Marine pollution bulletin}, volume = {181}, number = {}, pages = {113901}, doi = {10.1016/j.marpolbul.2022.113901}, pmid = {35850085}, issn = {1879-3363}, mesh = {Aquaculture/methods ; *Environmental Monitoring ; *Ponds ; Time Factors ; Water ; }, abstract = {Aquaculture plays a crucial role in global food security and nutrition supply, and China accounts for the largest market share. Although some studies have focused on large-scale extraction of coastal aquaculture ponds from satellite images, they often have variable accuracies and encounter misclassification due to the similar geometric characteristics of various vivid water bodies. This paper proposes an efficient and novel method that integrates spatial characteristics and three biophysical parameters (Chlorophyll-a, Trophic State Index, and Floating Algae Index) to map coastal aquaculture ponds at a national scale. These parameters are derived from bio-optical models based on the Google Earth Engine (GEE) cloud computing platform and a time series of high-resolution Sentinel-2 images. Our proposed method effectively addresses the misclassification between aquaculture ponds and rivers, lakes, reservoirs, and salt pans and achieves an overall accuracy of 91% and a Kappa coefficient of 0.83 in the Chinese coastal zone. Our results indicate that the total area of Chinese coastal aquaculture ponds was 1,039,214 ha in 2019, mainly distributed in the Shandong and Guangdong provinces. The highest aquaculture intensity occurs within the 1 km coastal buffer zone, which accounts for 22.4% of the total area. Furthermore, more than half of the Chinese coastal aquaculture ponds are concentrated in the 0-5 km buffer zone. Our method is of general applicability and thus is suitable for large-scale aquaculture pond mapping projects. Moreover, the biophysical parameters we employ can be considered new indicators for the classification of various water bodies, even those with different aquaculture species.}, } @article {pmid35846728, year = {2022}, author = {Yi, J and Zhang, H and Mao, J and Chen, Y and Zhong, H and Wang, Y}, title = {Review on the COVID-19 pandemic prevention and control system based on AI.}, journal = {Engineering applications of artificial intelligence}, volume = {114}, number = {}, pages = {105184}, pmid = {35846728}, issn = {0952-1976}, abstract = {As a new technology, artificial intelligence (AI) has recently received increasing attention from researchers and has been successfully applied to many domains. Currently, the outbreak of the COVID-19 pandemic has not only put people's lives in jeopardy but has also interrupted social activities and stifled economic growth. Artificial intelligence, as the most cutting-edge science field, is critical in the fight against the pandemic.
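For the GEE-based aquaculture-mapping entry above, a schematic use of the Earth Engine Python API for one of the three biophysical parameters (a chlorophyll-a proxy via the normalized difference chlorophyll index) might look like the following; the region, dates, cloud filter, and band choices are illustrative assumptions, not the authors' exact pipeline:

    # Schematic Earth Engine (Python API) sketch: a Chl-a proxy (NDCI) from a
    # Sentinel-2 time series. Assumes prior authentication via ee.Authenticate().
    import ee
    ee.Initialize()

    region = ee.Geometry.Rectangle([119.5, 33.0, 121.5, 34.5])  # hypothetical coastal AOI
    s2 = (ee.ImageCollection("COPERNICUS/S2_SR")
            .filterBounds(region)
            .filterDate("2019-01-01", "2019-12-31")
            .filter(ee.Filter.lt("CLOUDY_PIXEL_PERCENTAGE", 20)))

    # NDCI = (RedEdge1 - Red) / (RedEdge1 + Red), a common Chl-a indicator.
    ndci = s2.map(lambda img: img.normalizedDifference(["B5", "B4"]).rename("NDCI"))
    annual_median = ndci.median().clip(region)   # annual composite for classification
    print(annual_median.getInfo()["bands"])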
To respond scientifically to major emergencies like COVID-19, this article reviews the use of artificial intelligence in combating the pandemic from the perspectives of COVID-19 big data, intelligent devices and systems, and intelligent robots. This article's primary contributions are two-fold: (1) we summarized the applications of AI in the pandemic, including virus spread prediction, patient diagnosis, vaccine development, excluding potential virus carriers, telemedicine services, economic recovery, material distribution, disinfection, and health care. (2) We identified the challenges faced during AI-based pandemic prevention, including multidimensional data, insufficiently intelligent algorithms, and a lack of systematization, and discussed corresponding solutions, such as 5G, cloud computing, and unsupervised learning algorithms. This article systematically surveys the applications and challenges of AI technology during the pandemic, which is of great significance for promoting the development of AI technology and can serve as a new reference for future emergencies.}, } @article {pmid35845885, year = {2022}, author = {Yao, Y and Li, S}, title = {Design and Analysis of Intelligent Robot Based on Internet of Things Technology.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {7304180}, pmid = {35845885}, issn = {1687-5273}, mesh = {Cloud Computing ; Humans ; Intelligence ; *Internet of Things ; *Robotics ; }, abstract = {This research uses the Auto-ID Labs radio frequency identification system to realize information dissemination from a destination node to the nodes in its neighborhood, with the purposes of forwarding messages, exploring typical applications, and realizing intelligent analysis and management of IoT devices and data. An edge video CDN system is designed; in the G1 data set, A = 9, p = 9, ℤp = 9, lℤp = 8, AES = 5, ES = 9. Some popular content is distributed in advance to public wireless hotspots closer to users, with A = 9, p = 7, ℤp = 9, lℤp = 9, AES = 9, ES = 8. At present, a large body of research mainly deploys an edge node between the end nodes of the Internet of Things and the cloud computing center to provide high-quality services. A stable dynamic system is learned from human teaching to ensure the robustness of the controller to spatial disturbances. For the FPP-SCA plan, FPP-SCA = 1.99, FPP-SCA = 1.86, FPP-SCA = 1.03, FPP-SCA = 1.18, FPP-SCA = 1.01, FPP-SCA = 1.46, FPP-SCA = 1.61. For robots working in unstructured environments with different scenarios and tasks, the comparison shows that the FPP-SCA scheme is the optimal model: F-S0 = 2.52, F-S5 = 2.38, F-S10 = 2.5, F-S15 = 2.09, F-S20 = 2.54, F-S25 = 2.8, F-S30 = 2.98.}, } @article {pmid35843990, year = {2022}, author = {Kölzsch, A and Davidson, SC and Gauggel, D and Hahn, C and Hirt, J and Kays, R and Lang, I and Lohr, A and Russell, B and Scharf, AK and Schneider, G and Vinciguerra, CM and Wikelski, M and Safi, K}, title = {MoveApps: a serverless no-code analysis platform for animal tracking data.}, journal = {Movement ecology}, volume = {10}, number = {1}, pages = {30}, pmid = {35843990}, issn = {2051-3933}, support = {80NSSC21K1182/NASA/NASA/United States ; }, abstract = {BACKGROUND: Bio-logging and animal tracking datasets continuously grow in volume and complexity, documenting animal behaviour and ecology in unprecedented extent and detail, but greatly increasing the challenge of extracting knowledge from the data obtained.
A large variety of analysis methods are being developed, but many are in effect inaccessible to potential users because they remain unpublished, depend on proprietary software, or require significant coding skills.

RESULTS: We developed MoveApps, an open analysis platform for animal tracking data, to make sophisticated analytical tools accessible to a global community of movement ecologists and wildlife managers. As part of the Movebank ecosystem, MoveApps allows users to design and share workflows composed of analysis modules (Apps) that access and analyse tracking data. Users browse Apps, build workflows, customise parameters, execute analyses and access results through an intuitive web-based interface. Apps, coded in R or other programming languages, have been developed by the MoveApps team and can be contributed by anyone developing analysis code. They become available to all users of the platform. To allow long-term and cross-system reproducibility, Apps have public source code and are compiled and run in Docker containers that form the basis of a serverless cloud computing system. To support reproducible science and help contributors document and benefit from their efforts, workflows of Apps can be shared, published and archived with DOIs in the Movebank Data Repository. The platform was beta launched in spring 2021 and currently contains 49 Apps that are used by 316 registered users. We illustrate its use through two workflows that (1) provide a daily report on active tag deployments and (2) segment and map migratory movements.
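The workflow-of-Apps design described above can be illustrated abstractly: each App maps a tracking dataset (plus parameters) to a tracking dataset, so Apps compose into a pipeline. The Python interface below is a hypothetical illustration of that module idea, not the actual MoveApps SDK:

    # Hypothetical sketch of chaining App-like modules over tracking data;
    # the data schema and function names are illustrative, not MoveApps APIs.
    from typing import Callable, Dict, List

    Track = List[Dict]  # e.g., [{"id": "stork1", "lat": 48.2, "lon": 9.1, "t": 0}, ...]

    def filter_by_animal(data: Track, animal_id: str) -> Track:
        return [fix for fix in data if fix["id"] == animal_id]

    def daily_report(data: Track) -> Track:
        print(f"active deployment fixes: {len(data)}")   # report-style App
        return data

    def run_workflow(data: Track, apps: List[Callable[[Track], Track]]) -> Track:
        for app in apps:          # Apps run in sequence, like a MoveApps workflow
            data = app(data)
        return data

    demo = [{"id": "stork1", "lat": 48.2, "lon": 9.1, "t": 0},
            {"id": "stork2", "lat": 47.9, "lon": 8.7, "t": 0}]
    run_workflow(demo, [lambda d: filter_by_animal(d, "stork1"), daily_report])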

CONCLUSIONS: The MoveApps platform is meant to empower the community to supply, exchange and use analysis code in an intuitive environment that allows fast and traceable results and feedback. By bringing together analytical experts developing movement analysis methods and code with those in need of tools to explore data, answer questions and inform decisions based on the data they collect, we intend to increase the pace of knowledge generation and integration to match the huge growth rate in bio-logging data acquisition.}, } @article {pmid35829789, year = {2022}, author = {Mozaffaree Pour, N and Karasov, O and Burdun, I and Oja, T}, title = {Simulation of land use/land cover changes and urban expansion in Estonia by a hybrid ANN-CA-MCA model and utilizing spectral-textural indices.}, journal = {Environmental monitoring and assessment}, volume = {194}, number = {8}, pages = {584}, pmid = {35829789}, issn = {1573-2959}, mesh = {Agriculture ; *Conservation of Natural Resources ; *Environmental Monitoring ; Estonia ; Wetlands ; }, abstract = {Over the recent two decades, land use/land cover (LULC) changed drastically in Estonia. Even though the population decreased by 11%, noticeable agricultural and forest land areas were turned into urban land. In this work, we analyzed those LULC changes by mapping the spatial characteristics of LULC and urban expansion in the years 2000-2019 in Estonia. Moreover, using the revealed spatiotemporal transitions of LULC, we simulated LULC and urban expansion for 2030. Landsat 5 and 8 data were used to estimate 147 spectral-textural indices in the Google Earth Engine cloud computing platform. After that, 19 selected indices were used to model LULC changes by applying a hybrid of artificial neural network, cellular automata, and Markov chain analysis (ANN-CA-MCA). While determining spectral-textural indices is quite common for LULC classification, the utilization of these continuous indices in LULC change detection and their examination at the landscape scale is still in its infancy. This country-wide modeling approach provided the first comprehensive projection of future LULC utilizing spectral-textural indices. In this work, we utilized the hybrid ANN-CA-MCA model to predict LULC in Estonia for 2030; we revealed that the predicted changes in LULC from 2019 to 2030 were similar to the observed changes from 2011 to 2019. The predicted change in the area of artificial surfaces was an increase at a rate of 1.33%, reaching 787.04 km[2] in total by 2030. Between 2019 and 2030, the other significant changes were a decrease of 34.57 km[2] in forest lands and increases of 14.90 km[2] in agricultural lands and 9.31 km[2] in wetlands. These findings can inform a proper course of action for long-term spatial planning in Estonia. Therefore, a key policy priority should be to plan for the stable care of forest lands to maintain biodiversity.}, } @article {pmid35816521, year = {2022}, author = {Singh, P and Gaba, GS and Kaur, A and Hedabou, M and Gurtov, A}, title = {Dew-Cloud-Based Hierarchical Federated Learning for Intrusion Detection in IoMT.}, journal = {IEEE journal of biomedical and health informatics}, volume = {PP}, number = {}, pages = {}, doi = {10.1109/JBHI.2022.3186250}, pmid = {35816521}, issn = {2168-2208}, abstract = {The coronavirus pandemic has overburdened medical institutions, forcing physicians to diagnose and treat their patients remotely.
Moreover, COVID-19 has made humans more conscious of their health, resulting in the extensive purchase of IoT-enabled medical devices. The rapid boom in the market worth of the internet of medical things (IoMT) has captured cyber attackers' attention. Like health itself, medical data is sensitive and worth a lot on the dark web. Despite this, patients' health details have not been protected appropriately, letting trespassers exploit them. System administrators are unable to fortify security measures due to the limited storage capacity and computation power of resource-constrained network devices. Although various supervised and unsupervised machine learning algorithms have been developed to identify anomalies, the primary undertaking is to detect swiftly progressing malicious attacks before they degrade the wellness system's integrity. In this paper, a Dew-Cloud-based model is designed to enable hierarchical federated learning (HFL). The proposed Dew-Cloud model provides a higher level of data privacy with greater availability of critical IoMT application(s). A hierarchical long short-term memory (HLSTM) model is deployed at distributed Dew servers with a backend supported by cloud computing. A data pre-processing feature helps the proposed model achieve high training accuracy (99.31%) with minimal training loss (0.034). The experimental results demonstrate that the proposed HFL-HLSTM model is superior to existing schemes in terms of performance metrics such as accuracy, precision, recall, and f-score.}, } @article {pmid35808479, year = {2022}, author = {Romeo, L and Marani, R and Perri, AG and D'Orazio, T}, title = {Microsoft Azure Kinect Calibration for Three-Dimensional Dense Point Clouds and Reliable Skeletons.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {13}, pages = {}, pmid = {35808479}, issn = {1424-8220}, mesh = {Calibration ; *Gestures ; Humans ; *Skeleton ; }, abstract = {Nowadays, the need for reliable and low-cost multi-camera systems is increasing for many potential applications, such as localization and mapping, human activity recognition, hand and gesture analysis, and object detection and localization. However, a precise camera calibration approach is mandatory for enabling further applications that require high precision. This paper analyzes the available two-camera calibration approaches to propose a guideline for calibrating multiple Azure Kinect RGB-D sensors to achieve the best alignment of point clouds in both color and infrared resolutions, and of skeletal joints returned by the Microsoft Azure Body Tracking library. Different calibration methodologies using 2D and 3D approaches, all exploiting the functionalities within the Azure Kinect devices, are presented. Experiments demonstrate that the best results are returned by applying 3D calibration procedures, which give an average distance between all couples of corresponding points of point clouds in color or infrared resolution of 21.426 mm and 9.872 mm for a static experiment and of 20.868 mm and 7.429 mm while framing a dynamic scene.
At the same time, the best results in body joint alignment are achieved by three-dimensional procedures on images captured by the infrared sensors, resulting in an average error of 35.410 mm.}, } @article {pmid35808459, year = {2022}, author = {Khan, A and Umar, AI and Shirazi, SH and Ishaq, W and Shah, M and Assam, M and Mohamed, A}, title = {QoS-Aware Cost Minimization Strategy for AMI Applications in Smart Grid Using Cloud Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {13}, pages = {}, pmid = {35808459}, issn = {1424-8220}, mesh = {*Cloud Computing ; Computer Simulation ; *Computer Systems ; Models, Theoretical ; Software ; }, abstract = {Cloud computing coupled with Internet of Things technology provides a wide range of cloud services, such as memory, storage, computational processing, network bandwidth, and database applications, to end users on demand over the Internet. More specifically, cloud computing provides efficient services on a "pay as per usage" basis. However, utility providers in the Smart Grid face challenges in the design and implementation of such architecture in order to minimize the cost of the underlying hardware, software, and network services. In the Smart Grid, smart meters generate large volumes of different traffic, so the available resources, such as buffer, storage, limited processing, and bandwidth, must be utilized efficiently and cost-effectively in the underlying network infrastructure. In this context, this article introduces a QoS-aware Hybrid Queue Scheduling (HQS) model that operates over an IoT-based network integrated with the cloud environment for different advanced metering infrastructure (AMI) application traffic, which has different QoS levels in the Smart Grid network. The proposed optimization model supports, classifies, and prioritizes the AMI application traffic. The main objective is to reduce the cost of the buffer, processing power, and network bandwidth utilized by AMI applications in the cloud environment. For this, we developed a simulation model in the CloudSim simulator that uses a simple mathematical model in order to achieve the objective function. During the simulations, the effects of various numbers of cloudlets on the cost of virtual machine resources such as RAM, CPU processing, and available bandwidth were investigated in cloud computing. The obtained simulation results show that our proposed model successfully competes with previous schemes, minimizing the processing, memory, and bandwidth cost by a significant margin. Moreover, the simulation results confirmed that the proposed optimization model behaves as expected and is realistic for AMI application traffic in the Smart Grid network using cloud computing.}, } @article {pmid35808452, year = {2022}, author = {Shen, X and Chang, Z and Niu, S}, title = {Mobile Edge Computing Task Offloading Strategy Based on Parking Cooperation in the Internet of Vehicles.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {13}, pages = {}, pmid = {35808452}, issn = {1424-8220}, mesh = {*Cloud Computing ; Computer Simulation ; *Internet ; }, abstract = {Due to their limited computing capacity, onboard devices can no longer meet the large computing demands placed on them. Therefore, mobile edge computing (MEC) provides more computing and storage capabilities for vehicles.
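The class-based prioritization in the HQS entry above is, at its core, priority queueing: more urgent AMI traffic drains first, with FIFO order within a class. A minimal sketch follows; the traffic classes and priority values are hypothetical, not the paper's exact model:

    # Hedged sketch of QoS-aware queue scheduling for AMI traffic; class names
    # and priorities are illustrative assumptions.
    import heapq
    import itertools

    PRIORITY = {"alarm": 0, "demand_response": 1, "meter_reading": 2}  # 0 = most urgent
    counter = itertools.count()   # tie-breaker preserves FIFO order within a class
    queue = []

    def enqueue(kind, payload):
        heapq.heappush(queue, (PRIORITY[kind], next(counter), kind, payload))

    enqueue("meter_reading", "kWh=4.2")
    enqueue("alarm", "tamper detected")
    enqueue("demand_response", "curtail 10%")

    while queue:
        _, _, kind, payload = heapq.heappop(queue)
        print(kind, "->", payload)   # alarms drain first, then demand response, then readings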
Inspired by the large number of roadside parked vehicles, this paper takes roadside parked vehicles with idle computing resources as the task offloading platform and proposes a mobile edge computing task offloading strategy based on roadside parking cooperation. Resource sharing and mutual utilization among roadside vehicles, roadside units (RSUs), and cloud servers are established, and the collaborative offloading problem of computing tasks is transformed into a constraint problem. A hybrid genetic algorithm with a hill-climbing operator (HHGA) is used to solve the multi-constraint problem, reducing the delay and energy consumption of computing tasks. The simulation results show that when the number of tasks is 25, the delay and energy consumption of the HHGA algorithm improve by 24.1% and 11.9%, respectively, compared with the traditional scheme. When the task size is 1.0 MB, the HHGA algorithm reduces the system overhead by 7.9% compared with the traditional scheme. Therefore, the proposed scheme can effectively reduce the total system cost during task offloading.}, } @article {pmid35808373, year = {2022}, author = {Loukatos, D and Lygkoura, KA and Maraveas, C and Arvanitis, KG}, title = {Enriching IoT Modules with Edge AI Functionality to Detect Water Misuse Events in a Decentralized Manner.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {13}, pages = {}, pmid = {35808373}, issn = {1424-8220}, mesh = {Agriculture ; *Artificial Intelligence ; Humans ; *Internet of Things ; Machine Learning ; Water ; }, abstract = {The digital transformation of agriculture is a promising necessity for tackling the increasing nutritional needs of the population on Earth and the degradation of natural resources. Focusing on the "hot" area of natural resource preservation, the recent appearance of more efficient and cheaper microcontrollers, advances in low-power and long-range radios, and the availability of accompanying software tools are exploited in order to monitor water consumption and to detect and report misuse events, with reduced power and network bandwidth requirements. Quite often, large quantities of water are wasted for a variety of reasons, from broken irrigation pipes to people's negligence. To tackle this problem, the necessary design and implementation details are highlighted for an experimental water usage reporting system that exhibits Edge Artificial Intelligence (Edge AI) functionality. By combining modern technologies, such as the Internet of Things (IoT), Edge Computing (EC) and Machine Learning (ML), the deployment of a compact automated detection mechanism can be easier than before, while the information that has to travel from the edges of the network to the cloud, and thus the corresponding energy footprint, is drastically reduced. In parallel, characteristic implementation challenges are discussed, and a first set of corresponding evaluation results is presented.}, } @article {pmid35808368, year = {2022}, author = {Sefati, SS and Halunga, S}, title = {A Hybrid Service Selection and Composition for Cloud Computing Using the Adaptive Penalty Function in Genetic and Artificial Bee Colony Algorithm.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {13}, pages = {}, pmid = {35808368}, issn = {1424-8220}, mesh = {*Algorithms ; *Cloud Computing ; Reproducibility of Results ; }, abstract = {The rapid development of Cloud Computing (CC) has led to the release of many services in the cloud environment.
Quality of Service (QoS)-aware service composition is a significant challenge in CC. A single service in the cloud environment cannot respond to the complex requests and diverse requirements of the real world. In some cases, one service cannot fulfill the user's needs, so it is necessary to combine different services to meet these requirements. The many available services span an enormous range of QoS, and selecting or composing the combined services is an NP-hard optimization problem. One of the significant challenges in CC is integrating existing services to meet the intricate necessities of different types of users. Due to the NP-hard complexity of service composition, many metaheuristic algorithms have been used so far. This article presents the combined Artificial Bee Colony and Genetic Algorithm (ABCGA) as a metaheuristic algorithm to achieve the desired goals. If the fitness of the services selected by the Genetic Algorithm (GA) is suitable, the resulting set of services is passed to the Artificial Bee Colony (ABC) algorithm, which chooses the appropriate service according to each user's needs. The proposed solution is evaluated through experiments using CloudSim simulation, and the numerical results prove the efficiency of the proposed method with respect to reliability, availability, and cost.}, } @article {pmid35808345, year = {2022}, author = {Shahzad, A and Gherbi, A and Zhang, K}, title = {Enabling Fog-Blockchain Computing for Autonomous-Vehicle-Parking System: A Solution to Reinforce IoT-Cloud Platform for Future Smart Parking.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {13}, pages = {}, pmid = {35808345}, issn = {1424-8220}, mesh = {*Blockchain ; Cloud Computing ; Computer Security ; Humans ; Privacy ; }, abstract = {With the advent of modern technologies, including the IoT and blockchain, smart-parking (SP) systems are becoming smarter and smarter. Like other automated systems, particularly those requiring minimal human interaction, an SP system is judged on the performance it delivers, such as throughput in terms of latency, efficiency, privacy, and security, and it is considered a long-term cost-effective solution. This study looks ahead to future trends and developments in SP systems and presents an inclusive, long-term, effective, and well-performing smart autonomous vehicle parking (SAVP) system that explores and employs the emerging fog-computing and blockchain technologies as robust solutions to strengthen the existing collaborative IoT-cloud platform for building and managing SP systems for autonomous vehicles (AVs). In other words, the proposed SAVP system offers a smart-parking solution, both indoors and outdoors, mainly for AVs looking for vacant parking, wherein the fog nodes act as a middleware layer that provides various parking operations closer to IoT-enabled edge devices. To address the challenges of privacy and security, a lightweight integrated blockchain and cryptography (LIBC) module is deployed, which is functional at each fog node, to authorize and grant access to the AVs in every phase of parking (e.g., from the parking entrance to the parking slot to the parking exit). A proof-of-concept implementation was conducted, wherein the overall computed results, such as the average response time, efficiency, privacy, and security, were found to be highly efficient, demonstrating a proven SAVP system.
This study also sets an innovative pace, with careful consideration given to combating existing SP-system challenges and, therefore, to building and managing future scalable SP systems.}, } @article {pmid35808322, year = {2022}, author = {Katayama, Y and Tachibana, T}, title = {Optimal Task Allocation Algorithm Based on Queueing Theory for Future Internet Application in Mobile Edge Computing Platform.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {13}, pages = {}, pmid = {35808322}, issn = {1424-8220}, mesh = {*Algorithms ; *Cloud Computing ; *Computer Heuristics ; Forecasting ; Internet/trends ; }, abstract = {For 5G and the future Internet, in this paper, we propose a task allocation method for future Internet applications to reduce the total latency in a mobile edge computing (MEC) platform with three types of servers: a dedicated MEC server, a shared MEC server, and a cloud server. For this platform, we first calculate the delay between sending a task and receiving a response for the dedicated MEC server, the shared MEC server, and the cloud server by considering the processing time and the transmission delay. Here, the transmission delay for the shared MEC server is derived using queueing theory. Then, we formulate an optimization problem for task allocation to minimize the total latency for all tasks. By solving this optimization problem, tasks can be allocated appropriately to the MEC servers and the cloud server. In addition, we propose a heuristic algorithm to obtain an approximate optimal solution in a shorter time. This heuristic algorithm consists of four algorithms: a main algorithm and three additional algorithms. In this algorithm, tasks are divided into two groups, and task allocation is executed for each group. We compare the performance of our proposed heuristic algorithm with the solutions obtained by three other methods and investigate its effectiveness. Numerical examples demonstrate the effectiveness of our proposed heuristic algorithm. From the results, we observe that our proposed heuristic algorithm can perform task allocation quickly and effectively reduce the total latency. We conclude that our proposed heuristic algorithm is effective for task allocation in a MEC platform with multiple types of MEC servers.}, } @article {pmid35808234, year = {2022}, author = {Chen, X and Liu, G}, title = {Federated Deep Reinforcement Learning-Based Task Offloading and Resource Allocation for Smart Cities in a Mobile Edge Network.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {13}, pages = {}, pmid = {35808234}, issn = {1424-8220}, abstract = {Mobile edge computing (MEC) has become an indispensable part of the era of intelligent manufacturing and Industry 4.0. In a smart city, computation-intensive tasks can be offloaded to the MEC server or the central cloud server for execution. However, a privacy disclosure issue may arise when raw data is migrated to other MEC servers or the central cloud server. Since federated learning protects privacy and improves training performance, it is introduced to solve this issue. In this article, we formulate the joint optimization problem of task offloading and resource allocation to minimize the energy consumption of all Internet of Things (IoT) devices subject to delay thresholds and limited resources.
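In the queueing-based task-allocation entry above, the shared MEC server's transmission delay is derived with queueing theory; a minimal sketch assuming an M/M/1 sojourn time W = 1/(mu - lambda) is given below, with all rates and processing speeds invented for illustration (the paper's exact queueing model may differ):

    # Hedged sketch: pick the minimum-latency server among a dedicated MEC
    # server, a shared MEC server (M/M/1 queueing delay), and a cloud server.
    def mm1_sojourn(arrival_rate, service_rate):
        assert arrival_rate < service_rate, "queue must be stable"
        return 1.0 / (service_rate - arrival_rate)   # W = 1 / (mu - lambda)

    def pick_server(task_load, arrival_rate):
        latencies = {
            "dedicated_mec": task_load / 8.0,                             # no queueing
            "shared_mec": task_load / 10.0 + mm1_sojourn(arrival_rate, 12.0),
            "cloud": task_load / 40.0 + 0.20,                             # fast CPU + WAN delay
        }
        return min(latencies, key=latencies.get), latencies

    print(pick_server(task_load=2.0, arrival_rate=6.0))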
A two-timescale federated deep reinforcement learning algorithm based on the Deep Deterministic Policy Gradient (DDPG) framework (FL-DDPG) is proposed. Simulation results show that the proposed algorithm can greatly reduce the energy consumption of all IoT devices.}, } @article {pmid35808224, year = {2022}, author = {Khanh, TT and Hai, TH and Hossain, MD and Huh, EN}, title = {Fuzzy-Assisted Mobile Edge Orchestrator and SARSA Learning for Flexible Offloading in Heterogeneous IoT Environment.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {13}, pages = {}, pmid = {35808224}, issn = {1424-8220}, mesh = {Algorithms ; Fuzzy Logic ; *Internet of Things ; Learning ; Reward ; }, abstract = {In the era of heterogeneous 5G networks, Internet of Things (IoT) devices have significantly altered our daily lives by providing innovative applications and services. However, these devices process large amounts of data traffic, and their applications require extremely fast response times and massive computational resources, leading to a high failure rate for task offloading and considerable latency due to congestion. To improve the quality of service (QoS) and performance given the dynamic flow of requests from devices, numerous task offloading strategies in the area of multi-access edge computing (MEC) have been proposed in previous studies. Nevertheless, neighboring edge servers with excess computational resources have not been considered, leading to unbalanced loads among edge servers in the same network tier. Therefore, in this paper, we propose a collaboration algorithm between a fuzzy-logic-based mobile edge orchestrator (MEO) and state-action-reward-state-action (SARSA) reinforcement learning, which we call the Fu-SARSA algorithm. We aim to minimize the failure rate and service time of tasks and to decide on the optimal resource allocation for offloading, such as a local edge server, the cloud server, or the best neighboring edge server in the MEC network. Four typical application types, healthcare, AR, infotainment, and compute-intensive applications, were used for the simulation. The performance results demonstrate that our proposed Fu-SARSA framework outperformed other algorithms in terms of service time and the task failure rate, especially when the system was overloaded.}, } @article {pmid35808184, year = {2022}, author = {Aldhyani, THH and Alkahtani, H}, title = {Artificial Intelligence Algorithm-Based Economic Denial of Sustainability Attack Detection Systems: Cloud Computing Environments.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {13}, pages = {}, pmid = {35808184}, issn = {1424-8220}, mesh = {Algorithms ; *Artificial Intelligence ; *Cloud Computing ; Neural Networks, Computer ; Support Vector Machine ; }, abstract = {Cloud computing is currently the most cost-effective means of providing commercial and consumer IT services online. However, it is prone to new flaws. An economic denial of sustainability (EDoS) attack specifically leverages the pay-per-use paradigm by building up resource demands over time, culminating in unanticipated usage charges to the cloud customer. We present an effective approach to mitigating EDoS attacks in cloud computing. To mitigate such distributed attacks, methods for detecting them on different cloud computing smart grids have been suggested.
These include hard-threshold approaches as well as machine and deep learning algorithms: support vector machine (SVM), K-nearest neighbors (KNN), random forest (RF) tree, convolutional neural network (CNN), and long short-term memory (LSTM). These algorithms achieve greater accuracy and lower false alarm rates and are essential for improving the cloud computing service provider's security system. The dataset of nine injection attacks for testing the machine and deep learning algorithms was obtained from the Cyber Range Lab at the University of New South Wales (UNSW), Canberra. The experiments were conducted in two categories: binary classification, which included normal and attack datasets, and multi-classification, which included nine classes of attack data. The results of the proposed algorithms showed that the RF approach achieved an accuracy of 98% with binary classification, whereas the SVM model achieved an accuracy of 97.54% with multi-classification. Moreover, statistical analyses, such as the mean square error (MSE), the Pearson correlation coefficient (R), and the root mean square error (RMSE), were applied to evaluate the prediction errors between the input data and the prediction values from the different machine and deep learning algorithms. The RF tree algorithm achieved a very low prediction error (MSE = 0.01465) and a correlation R[2] (R squared) level of 92.02% with the binary classification dataset, whereas the algorithm attained an R[2] level of 89.35% with the multi-classification dataset. The findings of the proposed system were compared with those of different existing EDoS attack detection systems. The proposed attack mitigation algorithms, which were developed based on artificial intelligence, outperformed the few existing systems. The goal of this research is to enable the detection and effective mitigation of EDoS attacks.}, } @article {pmid35801559, year = {2022}, author = {Mueen, A and Awedh, M and Zafar, B}, title = {Multi-obstacle aware smart navigation system for visually impaired people in fog connected IoT-cloud environment.}, journal = {Health informatics journal}, volume = {28}, number = {3}, pages = {14604582221112609}, doi = {10.1177/14604582221112609}, pmid = {35801559}, issn = {1741-2811}, mesh = {Algorithms ; Humans ; *Visually Impaired Persons ; }, abstract = {The design of smart navigation for visually impaired/blind people is a challenging task. Existing research has analyzed it in either indoor or outdoor environments and has failed to focus on optimal route selection, latency minimization, and the presence of multiple obstacles. To overcome these challenges and to provide precise assistance to visually impaired people, this paper proposes a smart navigation system for visually impaired people based on both the image and sensor outputs of a smart wearable. The proposed approach involves the following processes: (i) the input query of the visually impaired people (users) is improved by the query processor in order to achieve accurate assistance. (ii) The safest route from source to destination is provided by implementing the Environment-aware Bald Eagle Search Optimization algorithm, in which multiple routes are identified and classified into three different classes, from which the safest route is suggested to the users. (iii) The concept of fog computing is leveraged and the optimal fog node is selected in order to minimize latency. The fog node selection is executed using the Nearest Grey Absolute Decision Making Algorithm based on multiple parameters.
(iv) The retrieval of relevant information is performed by computing the Euclidean distance between the reference and database information. (v) Multi-obstacle detection is carried out by YOLOv3 Tiny, in which both static and dynamic obstacles are classified into small, medium, and large obstacles. (vi) The decision upon navigation is provided by implementing the Adaptive Asynchronous Advantage Actor-Critic (A3C) algorithm based on the fusion of both image and sensor outputs. (vii) Management of heterogeneous data is carried out by predicting and pruning faulty data in the sensor output with a minimum-distance-based extended Kalman filter for better accuracy, and by clustering similar information with the Spatial-Temporal OPTICS Clustering Algorithm to reduce complexity. The proposed model is implemented in NS 3.26, and the results prove that it outperforms other existing works in terms of obstacle detection and task completion time.}, } @article {pmid35800683, year = {2022}, author = {Chen, T}, title = {Deep Learning-Based Optimization Algorithm for Enterprise Personnel Identity Authentication.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {9662817}, pmid = {35800683}, issn = {1687-5273}, mesh = {Algorithms ; *Deep Learning ; Humans ; }, abstract = {Enterprise strategic management is not only an important part of enterprise work but also an important factor in deepening the reform of the management system and promoting centralized, unified management of enterprises. Enterprise strategic management studies the major problems of the survival and development of enterprises in a competitive environment from an overall and long-term point of view. It is the most important function of the senior leaders of modern enterprises. Starting from the characteristics of the recognition object, this paper analyzes individual differences in biometrics through intelligent face image recognition technology, by which different individuals can be identified. This paper studies the main problems of personnel identity authentication in current enterprise strategic management systems. Based on identity management and supported by face image recognition, deep learning, and cloud computing technology, a personnel management model for the management system is constructed, which solves the problems of authenticating personnel's real identities and controlling personnel safety behavior. Experiments show that the model can simplify the workflow, improve operational efficiency, and reduce management costs. From the perspective of enterprise system development, building a scientific enterprise strategic management system is of great significance for improving the scientific level of enterprise system management.}, } @article {pmid35799648, year = {2022}, author = {Ehsan, A and Haider, KZ and Faisal, S and Zahid, FM and Wangari, IM}, title = {Self-Adaptation Resource Allocation for Continuous Offloading Tasks in Pervasive Computing.}, journal = {Computational and mathematical methods in medicine}, volume = {2022}, number = {}, pages = {8040487}, pmid = {35799648}, issn = {1748-6718}, mesh = {Algorithms ; *Artificial Intelligence ; Cloud Computing ; Humans ; *Mobile Applications ; Resource Allocation ; }, abstract = {Advancement in technology has led to an increase in data. Consequently, techniques such as deep learning and artificial intelligence, which are used to decipher data, are becoming increasingly popular.
Further, advancement in technology increases user expectations of devices, including consumer interfaces such as mobile apps, virtual environments, and popular software systems. As a result, battery power is consumed quickly, as it is used to provide high-definition displays and to power the devices' sensors. Low latency requires more power consumption under certain conditions. Cloud computing eases the computational burden of smart devices through offloading: offloading decisions are made by optimizing the device's parameters, and a metaheuristic algorithm is used to transfer the data or offload the task. We offload the tasks to cloud servers and limit their resources by simulating them in a virtual environment; we then check the resource parameters and compare them using metaheuristic algorithms. When comparing the default FCFS algorithm with ACO and PSO, we find that PSO yields lower battery usage and makespan time than FCFS or ACO. The energy consumption of devices is reduced when their tasks are offloaded, so we compare the results of the metaheuristic algorithms in terms of battery usage and makespan time; PSO increases battery life and makes the system more efficient.}, } @article {pmid35795755, year = {2022}, author = {Li, J and Guo, B and Liu, K and Zhou, J}, title = {Low Power Scheduling Approach for Heterogeneous System Based on Heuristic and Greedy Method.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {9598933}, pmid = {35795755}, issn = {1687-5273}, abstract = {Big data, cloud computing, and artificial intelligence technologies supported by heterogeneous systems are constantly changing our lives and our cognition of the world. At the same time, their energy consumption affects operating costs and system reliability, which attracts the attention of architecture designers and researchers. In order to solve the energy problem in heterogeneous system environments, and inspired by the results of 0-1 programming, a heuristic and greedy energy saving (HGES) scheduling approach is proposed to allocate tasks reasonably to achieve the purpose of energy saving. Firstly, all tasks are assigned to each GPU in the system, and then the tasks are divided into high-value tasks and low-value tasks by the calculated average time and variance values of all tasks. Using the greedy method, the high-value tasks are assigned first, and then the low-value tasks are allocated. In order to verify the effectiveness and rationality of HGES, different tasks with different inputs and different comparison methods were designed and tested. The experimental results on different platforms show that HGES saves more energy than the existing method and obtains results faster than 0-1 programming.}, } @article {pmid35795749, year = {2022}, author = {Zhang, H and Zuo, F}, title = {Construction of Digital Teaching Resources of British and American Literature Using Few-Shot Learning and Cloud Computing.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {4526128}, pmid = {35795749}, issn = {1687-5273}, mesh = {*Cloud Computing ; Humans ; *Learning ; Reproducibility of Results ; United States ; }, abstract = {British and American literature is a compulsory course for English majors in Chinese colleges and universities.
It plays an important role in cultivating students' aesthetic consciousness and moral character, improving students' humanistic quality and cultural taste, and shaping students' complete personalities. With the rapid development of cloud technology and mobile Internet technology, mobile learning based on mobile devices will become an important direction of mobile Internet technology applications. Based on cloud computing, this paper studies the construction of digital teaching resources for British and American literature. Through an experiment on the perceived difficulty of literature courses for English majors, it was found that, among the 40 participants, the average proportion rating the material most difficult was 16.3%, the average proportion rating it second most difficult was 35.2%, and the average proportion rating it easier was 18.5%. The second-most-difficult rating thus accounts for the highest proportion, followed by the easier rating and finally the most difficult. As one of the core technologies of cloud computing, data split storage technology adopts measures such as isomorphism and interchangeability of computing nodes, redundant storage, and multicopy fault tolerance to ensure the high security and reliability of user data, so users do not have to worry about data loss or virus invasion. As a new generation of technical means, cloud computing can realize the unified management and scheduling of distributed and heterogeneous resources, providing a new development direction for promoting the co-construction and sharing of British and American literature digital teaching platforms in higher vocational colleges and truly realizing nationwide and lifelong learning.}, } @article {pmid35792609, year = {2022}, author = {Gause, G and Mokgaola, IO and Rakhudu, MA}, title = {Technology usage for teaching and learning in nursing education: An integrative review.}, journal = {Curationis}, volume = {45}, number = {1}, pages = {e1-e9}, pmid = {35792609}, issn = {2223-6279}, mesh = {*COVID-19 ; *Education, Nursing ; Humans ; Learning ; Technology ; }, abstract = {BACKGROUND: The increasing availability of technology devices or portable digital assistant devices continues to change the teaching-learning landscape, including technology-supported learning. Portable digital assistants and technology usage have become an integral part of teaching and learning nowadays. Cloud computing, which includes YouTube, Google Apps, Dropbox and Twitter, has become the reality of today's teaching and learning and has noticeably improved higher education, including nursing education.

OBJECTIVES:  The aim of this integrative literature review was to explore and describe technology usage for teaching and learning in nursing education.

METHOD:  A five-step integrative review framework by Whittemore and Knafl was used to attain the objective of this study. The authors searched for both empirical and non-empirical articles from the EBSCOhost (health information source and health science), ScienceDirect, and African Journals Online Library databases to establish what is already known about the keywords. Key terms included in the literature search were coronavirus disease 2019 (COVID-19), digital learning, online learning, nursing, teaching and learning, and technology use.

RESULTS:  Nineteen articles were selected for analysis. The themes that emerged from this review were (1) technology use in nursing education, (2) the manner in which technology is used in nursing education, (3) antecedents for technology use in nursing education, (4) advantages of technology use in nursing education, (5) disadvantages of technology use in nursing education and (6) technology use in nursing education amidst COVID-19.

CONCLUSION:  Technology in nursing education is used in both clinical and classroom teaching to complement learning. However, there is still a gap in its acceptance despite its upward trend. Contribution: The findings of this study contribute to the body of knowledge on the phenomenon of technology use for teaching and learning in nursing education.}, } @article {pmid35782725, year = {2022}, author = {Wang, X and Wang, C and Li, L and Ma, Q and Ma, A and Liu, B}, title = {DESSO-DB: A web database for sequence and shape motif analyses and identification.}, journal = {Computational and structural biotechnology journal}, volume = {20}, number = {}, pages = {3053-3058}, pmid = {35782725}, issn = {2001-0370}, abstract = {Cis-regulatory motif (motif for short) identification and analyses are essential steps in detecting gene regulatory mechanisms. Deep learning (DL) models have shown substantial advances in motif prediction. In parallel, intuitive and integrative web databases are needed to make effective use of DL models and ensure easy access to the identified motifs. Here, we present DESSO-DB, a web database developed to allow efficient access to the identified motifs and diverse motif analyses. DESSO-DB provides motif prediction results and visualizations of 690 ENCODE human Chromatin Immunoprecipitation sequencing (ChIP-seq) datasets (including 161 transcription factors (TFs) in 91 cell lines) and 1,677 human ChIP-seq datasets (including 547 TFs in 359 cell lines) from Cistrome DB using DESSO, which is an in-house developed DL tool for motif prediction. It also provides online motif finding and scanning functions for new ChIP-seq/ATAC-seq datasets, as well as downloadable motif results for the above 690 ENCODE datasets, 126 cancer ChIP-seq datasets, and 55 RNA Crosslinking-Immunoprecipitation and high-throughput sequencing (CLIP-seq) datasets. DESSO-DB is deployed on the Google Cloud Platform, providing stable and efficient resources freely to the public. DESSO-DB is free and available at http://cloud.osubmi.com/DESSO/.}, } @article {pmid35773889, year = {2022}, author = {Kiourtis, A and Karamolegkos, P and Karabetian, A and Voulgaris, K and Poulakis, Y and Mavrogiorgou, A and Kyriazis, D}, title = {An Autoscaling Platform Supporting Graph Data Modelling Big Data Analytics.}, journal = {Studies in health technology and informatics}, volume = {295}, number = {}, pages = {376-379}, doi = {10.3233/SHTI220743}, pmid = {35773889}, issn = {1879-8365}, mesh = {Big Data ; *COVID-19 ; Data Science ; Delivery of Health Care ; *Diastema ; Humans ; }, abstract = {Big Data has proved to be vast and complex, without being efficiently manageable through traditional architectures, whereas data analysis is considered crucial for both technical and non-technical stakeholders. Current analytics platforms are siloed for specific domains, whereas the requirements to enhance their use and lower their technical barriers are continuously increasing. This paper describes a domain-agnostic, single-access, autoscaling Big Data analytics platform, namely Diastema, as a collection of efficient and scalable components, offering user-friendly analytics through graph data modelling and supporting technical and non-technical stakeholders.
Diastema's applicability is evaluated in healthcare through a predictive classifier for a COVID-19 dataset, considering real-world constraints.}, } @article {pmid35759991, year = {2022}, author = {Wu, Z and Xuan, S and Xie, J and Lin, C and Lu, C}, title = {How to ensure the confidentiality of electronic medical records on the cloud: A technical perspective.}, journal = {Computers in biology and medicine}, volume = {147}, number = {}, pages = {105726}, doi = {10.1016/j.compbiomed.2022.105726}, pmid = {35759991}, issn = {1879-0534}, mesh = {Computer Security ; *Confidentiality ; *Electronic Health Records ; Humans ; }, abstract = {From a technical perspective, for electronic medical records (EMR), this paper proposes an effective confidentiality management solution on the cloud, whose basic idea is to deploy a trusted local server between the untrusted cloud and each trusted client of a medical information management system, responsible for running an EMR cloud hierarchical storage model and an EMR cloud segmentation query model. (1) The EMR cloud hierarchical storage model is responsible for storing light EMR data items (such as patient basic information) on the local server, while encrypting heavy EMR data items (such as patient medical images) and storing them on the cloud, to ensure the confidentiality of electronic medical records on the cloud. (2) The EMR cloud segmentation query model performs EMR-related query operations through the collaborative interaction between the local server and the cloud server, to ensure the accuracy and efficiency of each EMR query statement. Finally, both theoretical analysis and experimental evaluation demonstrate the effectiveness of the proposed solution for the confidentiality management of electronic medical records on the cloud: it can ensure the confidentiality of electronic medical records on the untrusted cloud without compromising the availability of an existing medical information management system.}, } @article {pmid35756852, year = {2022}, author = {Puneet, and Kumar, R and Gupta, M}, title = {Optical coherence tomography image based eye disease detection using deep convolutional neural network.}, journal = {Health information science and systems}, volume = {10}, number = {1}, pages = {13}, pmid = {35756852}, issn = {2047-2501}, abstract = {Over the past few decades, health care industries and medical practitioners have faced many obstacles in diagnosing medical problems due to inadequate technology and limited availability of equipment. In the present era, computer science technologies such as IoT, cloud computing, artificial intelligence, and their allied techniques play a crucial role in the identification of medical diseases, especially in the domain of ophthalmology. Despite this, ophthalmologists have to perform various disease diagnosis tasks manually, which is time-consuming, and the chance of error is high because some abnormalities of eye diseases present the same symptoms. Furthermore, multiple autonomous systems exist to categorize the diseases, but their prediction rates do not reach state-of-the-art accuracy. The proposed approach, which implements the concepts of attention and transfer learning with a deep convolutional neural network, accomplished accuracies of 97.79% and 95.6% on the training and testing data, respectively.
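As an aside on the attention and transfer-learning approach described in the Puneet et al. entry above, a minimal sketch of transfer learning for OCT image classification is given below. It is an illustration only, not the authors' implementation: the ResNet-18 backbone, the four class labels, and the data directory are assumptions.

```python
# Hedged sketch: transfer learning for OCT image classification (illustrative,
# not the paper's model). Assumes images arranged one class per subdirectory.
import torch
import torch.nn as nn
from torchvision import datasets, models, transforms

NUM_CLASSES = 4  # e.g., CNV, DME, drusen, normal (assumed labels)

# Standard ImageNet preprocessing expected by the pretrained backbone
preprocess = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])

# Pretrained ResNet-18; freeze the backbone and train only a new head
model = models.resnet18(weights=models.ResNet18_Weights.DEFAULT)
for param in model.parameters():
    param.requires_grad = False
model.fc = nn.Linear(model.fc.in_features, NUM_CLASSES)

train_set = datasets.ImageFolder("oct_images/train", transform=preprocess)
loader = torch.utils.data.DataLoader(train_set, batch_size=32, shuffle=True)

optimizer = torch.optim.Adam(model.fc.parameters(), lr=1e-3)
criterion = nn.CrossEntropyLoss()

model.train()
for images, labels in loader:  # a single epoch, for brevity
    optimizer.zero_grad()
    loss = criterion(model(images), labels)
    loss.backward()
    optimizer.step()
```

Freezing the backbone and training only the replacement head is the simplest form of transfer learning; an attention module, as used in the paper, would sit on top of the backbone features.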
This autonomous model efficiently classifies various ocular disorders, namely choroidal neovascularization, diabetic macular edema, and drusen, from optical coherence tomography images. It may provide a realistic solution for the healthcare sector to reduce the burden on ophthalmologists in screening for diabetic retinopathy.}, } @article {pmid35756406, year = {2022}, author = {Zhang, H and Li, M}, title = {Integrated Design and Development of Intelligent Scenic Area Rural Tourism Information Service Based on Hybrid Cloud.}, journal = {Computational and mathematical methods in medicine}, volume = {2022}, number = {}, pages = {5316304}, pmid = {35756406}, issn = {1748-6718}, mesh = {Humans ; Information Services ; *Tourism ; *Travel ; }, abstract = {Although the "Internet+" technologies (big data and cloud computing) have been implemented in many industries, each industry involved in rural tourism economic information services has its own database, and there are still vast economic information resources that have not been exploited. Through third-party information services for rural tourism enterprises and a mobile context-aware travel recommendation service, the Z travel agency has achieved good economic and social benefits by deeply mining the value of, and innovatively applying, its existing enterprise data. This clearly demonstrates that, in order to maximise the benefits of economic data, rural tourism businesses should focus not only on the application of new technologies and methodologies but also on demand- and data-driven operations, and should thoroughly investigate the potential value of current data. This paper analyzes how rural tourism can be upgraded on a smart tourism platform, with the aim of improving the development of China's rural tourism industry with the help of an integrated smart tourism platform, and proposes a hybrid cloud-based integrated system of smart scenic-area rural tourism information services, which can meet the actual needs of rural tourism, deliver a good shared-service effect and platform application performance, and promote the development of rural tourism and the utilization rate of resources.}, } @article {pmid35755764, year = {2022}, author = {Hu, Q}, title = {Optimization of Online Course Platform for Piano Preschool Education Based on Internet Cloud Computing System.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {6525866}, pmid = {35755764}, issn = {1687-5273}, mesh = {*Cloud Computing ; Humans ; Internet ; *Learning ; Students ; }, abstract = {This article introduces online piano teaching methods and describes the development and implementation of a preschool piano education online course platform. The system consists of four parts: backend, WeChat, client, and web page. Backend development uses the PHP language and the Laravel framework; WeChat and web development both use the JavaScript language and the React framework; client development uses the Objective-C language; and the system provides an internal RESTful API, mainly for the client, WeChat, and web parts. The client relies on the research group's existing voice sensors to recognize and evaluate the students' performances. The role of the client is to show the students their homework and demonstrate the activities performed by the teacher.
The WeChat terminal manages student work, user information, and user social interaction. The web page provides score management and data analysis. Based on knowledge of online course design, this article studies the design of a piano preschool education platform and adds the relevant components of the Internet cloud computing system and voice sensors to this platform, which makes it much more convenient for students to learn the piano.}, } @article {pmid35755732, year = {2022}, author = {Liu, B and Zhang, T and Hu, W}, title = {Intelligent Traffic Flow Prediction and Analysis Based on Internet of Things and Big Data.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {6420799}, pmid = {35755732}, issn = {1687-5273}, mesh = {Automobiles ; *Big Data ; Cities ; Humans ; *Internet of Things ; Transportation ; }, abstract = {Nowadays, the problem of road traffic safety cannot be ignored. Almost all major cities have problems such as a poor traffic environment and low road efficiency, and large-scale, long-lasting traffic congestion occurs almost every day. Transportation has developed rapidly, and more and more advanced means of transportation have emerged; however, the automobile remains one of the main means of transportation for people's travel, and there are serious traffic jams in almost all cities around the world. Excessive daily traffic flow paralyzes urban transportation systems, which brings great inconvenience to people's travel. Various countries have actively taken corresponding measures, e.g., traffic diversion, number restrictions, or expanding the scale of the road network, but these measures have had little effect. Traditional intelligent traffic flow forecasting also has problems, such as low accuracy and high delay. To address this problem, this paper applies a model combining the Internet of Things and big data to intelligent traffic flow forecasting, analyzes its social benefits, and describes its three-tier network architecture: the perception layer, the network layer, and the application layer. It also studies and analyzes the mode of combining cloud computing and edge computing. Using a multi-perspective linear discriminant analysis algorithm, which combines the similarities and differences between data items into multiple atomic services, intelligent traffic flow prediction based on the combination of the Internet of Things and big data is performed. Through the monitoring and extraction of relevant traffic flow data, together with data analysis, processing and storage, and visual display, the approach improves the accuracy and effectiveness of overall traffic flow prediction. The traffic flow prediction of the Internet of Things and big data system is demonstrated through a case experiment.
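Since the abstract above names linear discriminant analysis as its predictor, a generic sketch of discriminant-analysis-based traffic-state classification follows. This is an illustration under stated assumptions, not the paper's multiperspective algorithm: the synthetic features (flow, speed, occupancy) and the three traffic-state labels are invented for the example.

```python
# Hedged sketch: classifying traffic state with linear discriminant analysis
# (illustrative only; the paper's "multiperspective" variant is not public).
import numpy as np
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import train_test_split

rng = np.random.default_rng(0)

# Synthetic sensor records: [flow (veh/min), mean speed (km/h), occupancy (%)]
# Labels: 0 = free flow, 1 = slow, 2 = congested (assumed classes).
X = np.vstack([
    rng.normal([20, 60, 10], [5, 8, 3], size=(200, 3)),   # free flow
    rng.normal([40, 35, 30], [5, 8, 5], size=(200, 3)),   # slow
    rng.normal([55, 12, 60], [5, 5, 8], size=(200, 3)),   # congested
])
y = np.repeat([0, 1, 2], 200)

X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LinearDiscriminantAnalysis().fit(X_train, y_train)
print(f"held-out accuracy: {clf.score(X_test, y_test):.2f}")
```

In a real deployment, the synthetic records would be replaced with features extracted from IoT sensor streams.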
The method proposed in this paper can be applied in intelligent transportation services and can predict the stability of transportation and traffic flow in real time, so as to alleviate traffic congestion, reduce manual intervention, and achieve the goal of intelligent traffic management.}, } @article {pmid35755635, year = {2022}, author = {Sladky, V and Nejedly, P and Mivalt, F and Brinkmann, BH and Kim, I and St Louis, EK and Gregg, NM and Lundstrom, BN and Crowe, CM and Attia, TP and Crepeau, D and Balzekas, I and Marks, VS and Wheeler, LP and Cimbalnik, J and Cook, M and Janca, R and Sturges, BK and Leyde, K and Miller, KJ and Van Gompel, JJ and Denison, T and Worrell, GA and Kremen, V}, title = {Distributed brain co-processor for tracking spikes, seizures and behaviour during electrical brain stimulation.}, journal = {Brain communications}, volume = {4}, number = {3}, pages = {fcac115}, pmid = {35755635}, issn = {2632-1297}, abstract = {Early implantable epilepsy therapy devices provided open-loop electrical stimulation without brain sensing, computing, or an interface for synchronized behavioural inputs from patients. Recent epilepsy stimulation devices provide brain sensing but have not yet developed analytics for accurately tracking and quantifying behaviour and seizures. Here we describe a distributed brain co-processor providing an intuitive bi-directional interface between patient, implanted neural stimulation and sensing device, and local and distributed computing resources. Automated analysis of continuous streaming electrophysiology is synchronized with patient reports using a handheld device and integrated with distributed cloud computing resources for quantifying seizures, interictal epileptiform spikes and patient symptoms during therapeutic electrical brain stimulation. The classification algorithms for interictal epileptiform spikes and seizures were developed and parameterized using long-term ambulatory data from nine humans and eight canines with epilepsy, and then implemented prospectively in out-of-sample testing in two pet canines and four humans with drug-resistant epilepsy living in their natural environments. Accurate seizure diaries are needed as the primary clinical outcome measure of epilepsy therapy and to guide brain-stimulation optimization. The brain co-processor system described here enables tracking interictal epileptiform spikes, seizures and correlation with patient behavioural reports. In the future, correlation of spikes and seizures with behaviour will allow more detailed investigation of the clinical impact of spikes and seizures on patients.}, } @article {pmid35751030, year = {2022}, author = {Shaukat, Z and Farooq, QUA and Tu, S and Xiao, C and Ali, S}, title = {A state-of-the-art technique to perform cloud-based semantic segmentation using deep learning 3D U-Net architecture.}, journal = {BMC bioinformatics}, volume = {23}, number = {1}, pages = {251}, pmid = {35751030}, issn = {1471-2105}, mesh = {*Brain Neoplasms/diagnostic imaging/pathology ; Cloud Computing ; *Deep Learning ; *Glioma ; Humans ; Image Processing, Computer-Assisted/methods ; Magnetic Resonance Imaging/methods ; Semantics ; }, abstract = {Glioma is the most aggressive and dangerous primary brain tumor, with a survival time of less than 14 months. Tumor segmentation is a necessary task in glioma image processing and is important for timely diagnosis and the start of treatment.
Semantic segmentation of brain tumor datasets using the 3D U-Net architecture is a core deep learning application. In this paper, we present a unique cloud-based 3D U-Net method to perform brain tumor segmentation using the BRATS dataset. The system was effectively trained using the Adam optimization solver with multiple tuned hyperparameters. We obtained an average Dice score of 95%, which makes our method the first cloud-based method to achieve this level of accuracy. The Dice score is calculated using the Sørensen-Dice similarity coefficient. We also performed an extensive literature review of the brain tumor segmentation methods implemented in the last five years to get a state-of-the-art picture of well-known methodologies with higher Dice scores. In comparison to the already implemented architectures, our method ranks on top in terms of accuracy among cloud-based 3D U-Net frameworks for glioma segmentation.}, } @article {pmid35747132, year = {2022}, author = {Li, W and Guo, Y}, title = {A Secure Private Cloud Storage Platform for English Education Resources Based on IoT Technology.}, journal = {Computational and mathematical methods in medicine}, volume = {2022}, number = {}, pages = {8453470}, pmid = {35747132}, issn = {1748-6718}, mesh = {*Big Data ; *Cloud Computing ; Humans ; Technology ; }, abstract = {The now-ubiquitous knowledge and information resources of the "cloud" network, together with ecological pedagogy theory, have enlarged the perspective of teaching research, widened its innovation space, and created practical options for English classroom reform. Cloud education relies on the Internet of Things, cloud computing, and big data, and it has a huge impact on the English learning process. The key to the integration of English education resources is the storage of huge amounts of English teaching data. Applying cloud storage technology and methods to the integration of English education resources can effectively conserve schools' educational resources, improve the utilization rate of English education resources, and thus enhance the teaching level of English subjects. In this work, we examine the existing state of English education resource building and teaching administration and offer a way of creating a "private cloud" of English education materials. We not only examined the architecture and three-layer modules of cloud computing in depth, but also analyzed the "private cloud" technology and built the cloud structure of English teaching materials on this foundation. We hope that this paper can help and inspire efforts to solve the problems of uneven distribution, irregular management, and difficult sharing in the construction of English education resources.}, } @article {pmid35746414, year = {2022}, author = {Ud Din, MM and Alshammari, N and Alanazi, SA and Ahmad, F and Naseem, S and Khan, MS and Haider, HSI}, title = {InteliRank: A Four-Pronged Agent for the Intelligent Ranking of Cloud Services Based on End-Users' Feedback.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {12}, pages = {}, pmid = {35746414}, issn = {1424-8220}, mesh = {*Cloud Computing ; Data Collection ; Feedback ; Reproducibility of Results ; *Software ; }, abstract = {Cloud Computing (CC) provides a combination of technologies that allows the user to use the most resources in the least amount of time and with the least amount of money.
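For reference on the Shaukat et al. entry above, the Sørensen-Dice similarity coefficient it uses to score segmentations is a simple overlap measure, sketched here for binary masks; the implementation and the epsilon guard are illustrative assumptions, not the authors' code.

```python
# Hedged sketch: Sørensen-Dice similarity for binary segmentation masks,
# Dice = 2*|A n B| / (|A| + |B|); eps guards against empty masks (assumption).
import numpy as np

def dice_coefficient(pred: np.ndarray, target: np.ndarray, eps: float = 1e-7) -> float:
    pred = pred.astype(bool)
    target = target.astype(bool)
    intersection = np.logical_and(pred, target).sum()
    return float((2.0 * intersection + eps) / (pred.sum() + target.sum() + eps))

# Toy usage: two half-overlapping 2D masks
a = np.zeros((4, 4)); a[1:3, 1:3] = 1
b = np.zeros((4, 4)); b[1:3, 2:4] = 1
print(round(dice_coefficient(a, b), 3))  # 0.5 for half-overlapping masks
```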
CC semantics play a critical role in ranking heterogeneous data by using the properties of different cloud services and then achieving the optimal cloud service. Despite the efforts made to enable simple access to this CC innovation, in the presence of various organizations delivering comparable services at varying cost and performance levels, it is far more difficult to identify the ideal cloud service based on the user's requirements. In this research, we propose a Cloud-Services-Ranking Agent (CSRA) for analyzing cloud services, including Platform as a Service (PaaS), Infrastructure as a Service (IaaS), and Software as a Service (SaaS), using end-users' feedback, based on ontology mapping and selecting the optimal service. The proposed CSRA employs Machine-Learning (ML) techniques for ranking cloud services using parameters such as availability, security, reliability, and cost. Here, the Quality of Web Service (QWS) dataset is used, which has seven major cloud service categories, ranked from 0 to 6, to extract the required persuasive features through Sequential Minimal Optimization Regression (SMOreg). The classification outcomes through SMOreg demonstrate an overall accuracy of around 98.71% in identifying optimal cloud services through the identified parameters. The main advantage of SMOreg is that the amount of memory required for SMO is linear. The findings show that our improved model outperforms prevailing techniques such as Multilayer Perceptron (MLP) and Linear Regression (LR) in terms of precision.}, } @article {pmid35746245, year = {2022}, author = {Liu, X and Jin, J and Dong, F}, title = {Edge-Computing-Based Intelligent IoT: Architectures, Algorithms and Applications.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {12}, pages = {}, pmid = {35746245}, issn = {1424-8220}, abstract = {With the rapid growth of the Internet of Things (IoT), 5G networks and beyond, the computing paradigm for intelligent IoT systems is shifting from conventional centralized-cloud computing to distributed edge computing [...].}, } @article {pmid35746169, year = {2022}, author = {Dezfouli, B and Liu, Y}, title = {Editorial: Special Issue "Edge and Fog Computing for Internet of Things Systems".}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {12}, pages = {}, pmid = {35746169}, issn = {1424-8220}, abstract = {Employing edge and fog computing for building IoT systems is essential, especially because of the massive amount of data generated by sensing devices, the delay requirements of IoT applications, the high burden of data processing on cloud platforms, and the need to take immediate action against security threats.}, } @article {pmid35746127, year = {2022}, author = {Lakhan, A and Morten Groenli, T and Majumdar, A and Khuwuthyakorn, P and Hussain Khoso, F and Thinnukool, O}, title = {Potent Blockchain-Enabled Socket RPC Internet of Healthcare Things (IoHT) Framework for Medical Enterprises.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {12}, pages = {}, pmid = {35746127}, issn = {1424-8220}, mesh = {*Blockchain ; Cloud Computing ; Computer Security ; Delivery of Health Care ; Humans ; Internet ; *Internet of Things ; }, abstract = {Present-day intelligent healthcare applications offer digital healthcare services to users in a distributed manner.
The Internet of Healthcare Things (IoHT) is the application of the Internet of Things (IoT) mechanism in different healthcare applications, with devices attached to external fog-cloud networks. Through different mobile applications connecting to cloud computing, the applications of the IoHT include remote healthcare monitoring systems, high blood pressure monitoring, online medical counseling, and others. These applications are designed on a client-server architecture based on various standards, such as the Common Object Request Broker Architecture (CORBA), service-oriented architecture (SOA), remote method invocation (RMI), and others. However, these applications do not directly support the many healthcare nodes and blockchain technology in the current standard. Thus, this study devises a potent blockchain-enabled socket RPC IoHT framework for medical enterprises (e.g., healthcare applications). The goal is to minimize service costs, blockchain security costs, and data storage costs in distributed mobile cloud networks. Simulation results show that the proposed blockchain-enabled socket RPC minimized the service cost by 40%, the blockchain cost by 49%, and the storage cost by 23% for healthcare applications.}, } @article {pmid35745356, year = {2022}, author = {Liu, H and Zhang, R and Liu, Y and He, C}, title = {Unveiling Evolutionary Path of Nanogenerator Technology: A Novel Method Based on Sentence-BERT.}, journal = {Nanomaterials (Basel, Switzerland)}, volume = {12}, number = {12}, pages = {}, pmid = {35745356}, issn = {2079-4991}, abstract = {In recent years, nanogenerator technology has developed rapidly with the rise of cloud computing, artificial intelligence, and other fields. The quick identification of the evolutionary path of nanogenerator technology from a large amount of data has therefore attracted much attention, as it is of great significance for grasping technical trends and analyzing technical areas of interest. However, previous studies have some limitations. On the one hand, previous research on technological evolution has generally utilized bibliometrics, patent analysis, and citations between patents and papers, ignoring the rich semantic information contained therein; on the other hand, the analysis has taken a single perspective, making it difficult to obtain accurate results. Therefore, this paper proposes a new framework based on the methods of Sentence-BERT and phrase mining, using multi-source data, such as papers and patents, to unveil the evolutionary path of nanogenerator technology. Firstly, using text vectorization, clustering algorithms, and the phrase mining method, current technical themes of significant interest to researchers can be obtained. Next, this paper correlates the multi-source fusion themes through semantic similarity calculation and demonstrates the multi-dimensional technology evolutionary path by using the "theme river map".
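The theme-linking step just described (correlating themes through semantic similarity over sentence embeddings) can be sketched as follows. This is a generic illustration, not the paper's pipeline: the model name and the example phrases are assumptions.

```python
# Hedged sketch: linking themes by cosine similarity of Sentence-BERT
# embeddings (illustrative; not the paper's pipeline or thresholds).
from sentence_transformers import SentenceTransformer
from sklearn.metrics.pairwise import cosine_similarity

model = SentenceTransformer("all-MiniLM-L6-v2")  # assumed model choice

paper_themes = ["triboelectric nanogenerator energy harvesting",
                "self-powered wearable sensors"]
patent_themes = ["wearable self-powered sensing device",
                 "piezoelectric thin-film actuator"]

sim = cosine_similarity(model.encode(paper_themes), model.encode(patent_themes))

# Link each paper theme to its most similar patent theme
for i, row in enumerate(sim):
    j = row.argmax()
    print(f"{paper_themes[i]!r} -> {patent_themes[j]!r} (cos={row[j]:.2f})")
```

In practice, a similarity threshold, rather than a plain argmax, would decide whether two themes are linked at all.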
Finally, this paper presents an evolution analysis from the perspectives of frontier research and technology research, so as to discover the development focus of nanogenerators and predict the future application prospects of nanogenerator technology.}, } @article {pmid35742161, year = {2022}, author = {Ashraf, E and Areed, NFF and Salem, H and Abdelhay, EH and Farouk, A}, title = {FIDChain: Federated Intrusion Detection System for Blockchain-Enabled IoT Healthcare Applications.}, journal = {Healthcare (Basel, Switzerland)}, volume = {10}, number = {6}, pages = {}, pmid = {35742161}, issn = {2227-9032}, abstract = {Recently, there has been considerable growth in internet of things (IoT)-based healthcare applications; however, they suffer from a lack of intrusion detection systems (IDS). Leveraging recent technologies, such as machine learning (ML), edge computing, and blockchain, can provide suitable and strong security solutions for preserving the privacy of medical data. In this paper, the FIDChain IDS is proposed, using lightweight artificial neural networks (ANN) in a federated learning (FL) manner to ensure healthcare data privacy preservation, with the advances of blockchain technology providing a distributed ledger for aggregating the local weights and then broadcasting the updated global weights after averaging; this prevents poisoning attacks and provides full transparency and immutability over the distributed system with negligible overhead. Applying the detection model at the edge protects the cloud if an attack happens, as it blocks the data at its gateway, with shorter detection time and lower computing and processing requirements because FL deals with smaller sets of data. The ANN and eXtreme Gradient Boosting (XGBoost) models were evaluated using the BoT-IoT dataset. The results show that ANN models have higher accuracy and better performance with the heterogeneity of data from IoT devices, such as intensive care unit (ICU) devices in healthcare systems. Testing the FIDChain with different datasets (CSE-CIC-IDS2018, Bot Net IoT, and KDD Cup 99) reveals that the BoT-IoT dataset yields the most stable and accurate results for testing IoT applications, such as those used in healthcare systems.}, } @article {pmid35734349, year = {2022}, author = {Aldahwan, NS and Ramzan, MS}, title = {The Descriptive Data Analysis for the Adoption of Community Cloud in Saudi HEI-Based Factor Adoption.}, journal = {BioMed research international}, volume = {2022}, number = {}, pages = {7765204}, pmid = {35734349}, issn = {2314-6141}, mesh = {*Cloud Computing ; *Data Analysis ; Humans ; Reproducibility of Results ; Saudi Arabia ; Surveys and Questionnaires ; }, abstract = {Due to its increased reliability, adaptability, scalability, availability, and processing capacity, cloud computing is rapidly becoming a popular trend around the world. One of the major issues with cloud computing is making informed decisions about the adoption of community cloud (CC) computing (ACCC). To date, there are various technology acceptance theories and models to validate the perspective of ACCC at both organizational and individual levels. However, no experimental studies have been carried out to provide a comprehensive assessment of the factors of ACCC, specifically in the area of Saudi Higher Education Institutions (HEI). Thus, this research was aimed at exploring the factors of ACCC and their relationship to the experiences of employees.
The analysis of the employee context was driven by the success factors of the technological, organizational, environmental, human, security, and advantage contexts for community cloud computing adoption in HEI. Data were collected through a questionnaire-based survey with 106 responses. We present findings based on a descriptive analysis identifying the significant components that contributed to the effective implementation of ACCC. Security concerns are a significant influencing element in the adoption of community cloud technology.}, } @article {pmid35730340, year = {2022}, author = {Cotur, Y and Olenik, S and Asfour, T and Bruyns-Haylett, M and Kasimatis, M and Tanriverdi, U and Gonzalez-Macia, L and Lee, HS and Kozlov, AS and Güder, F}, title = {Bioinspired Stretchable Transducer for Wearable Continuous Monitoring of Respiratory Patterns in Humans and Animals.}, journal = {Advanced materials (Deerfield Beach, Fla.)}, volume = {34}, number = {33}, pages = {e2203310}, doi = {10.1002/adma.202203310}, pmid = {35730340}, issn = {1521-4095}, support = {214234/Z/18/Z/WT_/Wellcome Trust/United Kingdom ; }, mesh = {Animals ; Artificial Intelligence ; Dogs ; Humans ; Monitoring, Physiologic ; Silicones ; Transducers ; *Wearable Electronic Devices ; }, abstract = {A bio-inspired continuous wearable respiration sensor is reported, modeled after the lateral line system that fish use to detect mechanical disturbances in the water. Despite the clinical importance of monitoring respiratory activity in humans and animals, continuous measurements of breathing patterns and rates are rarely performed in or outside of clinics. This is largely because conventional sensors are too inconvenient or expensive for wearable sensing for most individuals and animals. The bio-inspired air-silicone composite transducer (ASiT) is placed on the chest and measures respiratory activity by continuously measuring the force applied to an air channel embedded inside a silicone-based elastomeric material. The force applied on the surface of the transducer during breathing changes the air pressure inside the channel, which is measured using a commercial pressure sensor and mixed-signal wireless electronics. The transducers produced in this work are extensively characterized and tested with humans, dogs, and laboratory rats. The bio-inspired ASiT may enable the early detection of a range of disorders that result in altered patterns of respiration. The technology reported can also be combined with artificial intelligence and cloud computing to algorithmically detect illness in humans and animals remotely, reducing unnecessary visits to clinics.}, } @article {pmid35730064, year = {2022}, author = {Pillen, D and Eckard, M}, title = {The impact of the shift to cloud computing on digital recordkeeping practices at the University of Michigan Bentley historical library.}, journal = {Archival science}, volume = {}, number = {}, pages = {1-16}, pmid = {35730064}, issn = {1573-7500}, abstract = {Cloud-based productivity, collaboration, and storage tools offer increased opportunities for collaboration and potential cost savings over locally hosted solutions and have seen widespread adoption throughout industry, government, and academia over the last decade. While these tools benefit organizations, IT departments, and day-to-day users, they present unique challenges for records managers and archivists.
As a review of the relevant literature demonstrates, issues surrounding cloud computing are not limited to the technology (although the implementation and technological issues are numerous) but also include organizational management, human behavior, regulation, and records management, making the process of archiving digital information in this day and age all the more difficult. This paper explores some of the consequences of this shift and its effect on digital recordkeeping at the Bentley Historical Library, whose mission is to "collect the materials for the University of Michigan." After providing context for this problem by discussing relevant literature, two practicing archivists will explore the impact of the move toward cloud computing as well as various productivity software and collaboration tools in use at U-M throughout the various stages of a standard lifecycle model for managing records.}, } @article {pmid35730008, year = {2022}, author = {Mahanty, C and Kumar, R and Patro, SGK}, title = {Internet of Medical Things-Based COVID-19 Detection in CT Images Fused with Fuzzy Ensemble and Transfer Learning Models.}, journal = {New generation computing}, volume = {40}, number = {4}, pages = {1-17}, pmid = {35730008}, issn = {0288-3635}, abstract = {Accurate COVID-19 detection is one of the most difficult research problems in today's healthcare industry's fight against the coronavirus pandemic. Because of its low infection miss rate and high sensitivity, chest computed tomography (CT) imaging has been recommended as a viable technique for COVID-19 diagnosis in a number of recent clinical investigations. This article presents an Internet of Medical Things (IoMT)-based platform for improving and speeding up COVID-19 identification. Clinical devices are connected to network resources in the suggested IoMT platform using cloud computing. The method enables patients and healthcare experts to work together in real time to diagnose and treat COVID-19, potentially saving time and effort for both patients and physicians. In this paper, we introduce a technique for classifying chest CT scan images into COVID, pneumonia, and normal classes that uses a Sugeno fuzzy integral ensemble across three transfer learning models, namely SqueezeNet, DenseNet-201, and MobileNetV2. The suggested fuzzy ensemble techniques outperform each individual transfer learning methodology as well as trainable ensemble strategies in terms of accuracy. The suggested MobileNetV2 fused with the Sugeno fuzzy integral ensemble model has a 99.15% accuracy rate. In the present research, this framework was utilized to identify COVID-19, but it may also be implemented and used for medical imaging analyses of other disorders.}, } @article {pmid35730007, year = {2022}, author = {Gupta, A and Singh, A}, title = {An Intelligent Healthcare Cyber Physical Framework for Encephalitis Diagnosis Based on Information Fusion and Soft-Computing Techniques.}, journal = {New generation computing}, volume = {40}, number = {4}, pages = {1-31}, pmid = {35730007}, issn = {0288-3635}, abstract = {Viral encephalitis is a contagious, life-threatening disease and is considered one of the major health concerns worldwide. It causes inflammation of the brain and, if left untreated, can have persistent effects on the central nervous system. Accordingly, this paper proposes an intelligent cyber-physical healthcare framework based on the IoT-fog-cloud collaborative network, employing soft-computing technology and information fusion.
The proposed framework uses IoT-based sensors, electronic medical records, and user devices for data acquisition. The fog layer, composed of numerous nodes, processes the most specific encephalitis symptom-related data to classify possible encephalitis cases in real time and to issue an alarm when a significant health emergency occurs. Furthermore, the cloud layer involves a multi-step data processing scheme for in-depth data analysis. First, data obtained across multiple data generation sources are fused to obtain a more consistent, accurate, and reliable feature set. Data preprocessing and feature selection techniques are applied to the fused data for dimensionality reduction over the cloud computing platform. An adaptive neuro-fuzzy inference system is applied in the cloud to determine the risk of a disease and classify the results into one of four categories: no risk, probable risk, low risk, and acute risk. Moreover, alerts are generated and sent to the stakeholders based on the risk factor. Finally, the computed results are stored in the cloud database for future use. For validation purposes, various experiments were performed using real-time datasets. The analyses performed on the fog and cloud layers show higher performance than existing models. Future research will focus on resource allocation in the cloud layer while considering various security aspects to improve the utility of the proposed work.}, } @article {pmid35729139, year = {2022}, author = {Yue, YF and Chen, GP and Wang, L and Yang, J and Yang, KT}, title = {[Dynamic monitoring and evaluation of ecological environment quality in Zhouqu County, Gansu, China based on Google Earth Engine cloud platform].}, journal = {Ying yong sheng tai xue bao = The journal of applied ecology}, volume = {33}, number = {6}, pages = {1608-1614}, doi = {10.13287/j.1001-9332.202206.036}, pmid = {35729139}, issn = {1001-9332}, mesh = {China ; Cloud Computing ; *Ecosystem ; Environmental Monitoring/methods ; *Remote Sensing Technology ; Rivers ; Search Engine ; }, abstract = {Zhouqu County is located in the transition region from the Qinghai-Tibet Plateau to the Qinba Mountains and is an important part of the ecological barrier in the upper reaches of the Yangtze River. In this study, we used the Google Earth Engine cloud processing platform to perform inter-image optimal reconstruction of Landsat surface reflectance images from 1998-2019. We calculated four component indicators of regional wetness, greenness, dryness, and heat. The component indicators were coupled by principal component analysis to construct a remote sensing ecological index (RSEI) and to analyze the spatial and temporal variations of ecological environment quality in Zhouqu County. The results showed that the contribution of the four component indicators to the eigenvalues of the coupled RSEI was above 70%, with an even distribution of the loadings, indicating that the RSEI integrated most of the features of the component indicators. From 1998 to 2019, the RSEI of Zhouqu County ranged from 0.55 to 0.63, showing an increasing trend with a growth rate of 0.04·(10 a)[-1], and the area of the better grade increased by 425.56 km[2]. The area with altitude ≤2200 m was dominated by the medium and lower ecological environment quality grades, while the area of the better ecological environment quality grade increased by 16.5%.
The ecological environment quality of the region from 2200 to 3300 m was dominated by the good grade, increasing to 71.3% in 2019, with the area of the medium and below ecological environment quality grades decreasing year by year. The area with altitude ≥3300 m was dominated by the medium ecological quality grade, and the medium and below ecological quality grades showed a "U"-shaped trend during the study period. The ecological environment quality of Zhouqu County was improving, albeit with fluctuations. It is necessary to continuously strengthen the protection and management of the ecological environment in order to guarantee the continuous improvement of ecological environment quality.}, } @article {pmid35729113, year = {2022}, author = {Erdem, C and Mutsuddy, A and Bensman, EM and Dodd, WB and Saint-Antoine, MM and Bouhaddou, M and Blake, RC and Gross, SM and Heiser, LM and Feltus, FA and Birtwistle, MR}, title = {A scalable, open-source implementation of a large-scale mechanistic model for single cell proliferation and death signaling.}, journal = {Nature communications}, volume = {13}, number = {1}, pages = {3555}, pmid = {35729113}, issn = {2041-1723}, support = {U54 CA209988/CA/NCI NIH HHS/United States ; U54 HG008100/HG/NHGRI NIH HHS/United States ; U54 HG008098/HG/NHGRI NIH HHS/United States ; R01 GM104184/GM/NIGMS NIH HHS/United States ; R35 GM141891/GM/NIGMS NIH HHS/United States ; }, mesh = {Cell Proliferation ; *Cloud Computing ; Computer Simulation ; Signal Transduction ; *Software ; }, abstract = {Mechanistic models of how single cells respond to different perturbations can help integrate disparate big data sets or predict response to varied drug combinations. However, the construction and simulation of such models have proved challenging. Here, we developed a python-based model creation and simulation pipeline that converts a few structured text files into an SBML standard and is high-performance- and cloud-computing ready. We applied this pipeline to our large-scale, mechanistic pan-cancer signaling model (named SPARCED) and demonstrate it by adding an IFNγ pathway submodel. We then investigated whether a putative crosstalk mechanism could be consistent with experimental observations from the LINCS MCF10A Data Cube that IFNγ acts as an anti-proliferative factor. The analyses suggested this observation can be explained by IFNγ-induced SOCS1 sequestering activated EGF receptors. This work forms a foundational recipe for increased mechanistic model-based data integration on a single-cell level, an important building block for clinically-predictive mechanistic models.}, } @article {pmid35725904, year = {2022}, author = {Pradhan, C and Padhee, SK and Bharti, R and Dutta, S}, title = {A process-based recovery indicator for anthropogenically disturbed river system.}, journal = {Scientific reports}, volume = {12}, number = {1}, pages = {10390}, pmid = {35725904}, issn = {2045-2322}, mesh = {Cross-Sectional Studies ; Environmental Monitoring ; *Floods ; India ; *Rivers ; Seasons ; }, abstract = {The present paper utilizes entropy theory and the Google Earth Engine cloud computing platform to investigate the system state and river recovery potential in two large sub-basins of the Mahanadi River, India. The cross-sectional intensity entropy (CIE) is computed for the post-monsoon season (October-March) along the selected reaches. Further, a normalized river recovery indicator (NRRI) is formulated to assess the temporal changes in river health.
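The entropy-based indicators above are not given in closed form in the abstract, but the general pattern (a Shannon entropy over normalized cross-sectional intensities, followed by min-max normalization into an indicator) can be sketched as follows. The formulas and variable names here are illustrative assumptions, not the paper's definitions of CIE or NRRI.

```python
# Hedged sketch: a Shannon-entropy index over cross-sectional intensities and
# a min-max-normalized recovery indicator (illustrative, not the paper's CIE/NRRI).
import numpy as np

def shannon_entropy(intensities: np.ndarray) -> float:
    """Entropy H = -sum(p_i * ln p_i) of intensities normalized to probabilities."""
    p = np.asarray(intensities, dtype=float)
    p = p[p > 0] / p.sum()
    return float(-(p * np.log(p)).sum())

def normalized_indicator(series: np.ndarray) -> np.ndarray:
    """Min-max normalize a yearly entropy series into [0, 1]."""
    s = np.asarray(series, dtype=float)
    return (s - s.min()) / (s.max() - s.min())

# Toy usage: entropy per year from hypothetical cross-section intensity profiles
years = [np.array([0.2, 0.5, 0.9, 0.4]), np.array([0.1, 0.1, 0.95, 0.2]),
         np.array([0.3, 0.4, 0.5, 0.45])]
entropy_series = np.array([shannon_entropy(y) for y in years])
print(normalized_indicator(entropy_series))
```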
Finally, the NRRI is related to a process-based variable, LFE (low flow exceedance), to comprehend the dominant system dynamics and evolutionary adjustments. The results highlight the existence of both threshold-modulated and filter-dominated systems based on CIE and NRRI variabilities. In addition, the gradual decline in CIE and the subsequent stabilization of vegetated landforms can develop an 'event-driven' state, where floods exceeding the low-flow channel have a direct impact on the river recovery trajectory. Finally, this study emphasizes the presence of instream vegetation as an additional degree of freedom, which further controls the hierarchy of energy dissipation and the morphological continuum in the macrochannel settings.}, } @article {pmid35721670, year = {2022}, author = {Bamasag, O and Alsaeedi, A and Munshi, A and Alghazzawi, D and Alshehri, S and Jamjoom, A}, title = {Real-time DDoS flood attack monitoring and detection (RT-AMD) model for cloud computing.}, journal = {PeerJ. Computer science}, volume = {7}, number = {}, pages = {e814}, pmid = {35721670}, issn = {2376-5992}, abstract = {In recent years, the advent of cloud computing has transformed the field of computing and information technology. It has enabled customers to rent virtual resources and take advantage of various on-demand services at the lowest cost. Despite the advantages of cloud computing, it faces several threats; an example is a distributed denial of service (DDoS) attack, which is considered among the most serious. This article presents real-time monitoring and detection of DDoS attacks on the cloud using a machine learning approach. Naïve Bayes, K-nearest neighbor, decision tree, and random forest machine learning classifiers have been selected to build a predictive model named Real-Time DDoS flood Attack Monitoring and Detection (RT-AMD). The DDoS-2020 dataset was constructed with 70,020 records to evaluate RT-AMD's accuracy. The DDoS-2020 dataset covers three network/transport-level protocols: TCP, DNS, and ICMP. This article evaluates the proposed model by comparing its accuracy with that of related works. Our model has shown improved results and achieved real-time attack detection using incremental learning. The model achieved 99.38% accuracy for the random forest in real time in the cloud environment and 99.39% in local testing. The RT-AMD was evaluated on the NSL-KDD dataset as well, on which it achieved 99.30% accuracy in real time in a cloud environment.}, } @article {pmid35721407, year = {2022}, author = {Osmanoglu, M and Demir, S and Tugrul, B}, title = {Privacy-preserving k-NN interpolation over two encrypted databases.}, journal = {PeerJ. Computer science}, volume = {8}, number = {}, pages = {e965}, pmid = {35721407}, issn = {2376-5992}, abstract = {Cloud computing enables users to outsource their databases and computing functionalities to a cloud service provider, avoiding the cost of maintaining private storage and computational resources. It also provides universal access to data, applications, and services without location dependency. While cloud computing provides many benefits, it raises a number of security and privacy concerns. Outsourcing data to a cloud service provider in encrypted form may help to overcome these concerns. However, dealing with encrypted data makes it difficult for cloud service providers to perform some of the operations over the data that are especially required in query processing tasks.
Among the techniques employed in query processing tasks, the k-nearest neighbor method draws attention due to its simplicity and efficiency, particularly on massive data sets. A number of k-nearest neighbor algorithms for query processing tasks on a single encrypted database have been proposed. However, the performance of k-nearest neighbor algorithms on a single database may create accuracy and reliability problems. It is a fact that collaboration among different cloud service providers yields more accurate and more reliable results in query processing. By considering this fact, we focus on the k-nearest neighbor (k-NN) problem over two encrypted databases. We introduce a secure two-party k-NN interpolation protocol that enables a query owner to extract the interpolation of the k-nearest neighbors of a query point from two different databases outsourced to two different cloud service providers. We also show that our protocol protects the confidentiality of the data and the query point, and hides data access patterns. Furthermore, we conducted a number of experiments to demonstrate the efficiency of our protocol. The results show that the running time of our protocol is linearly dependent on both the number of nearest neighbours and the data size.}, } @article {pmid35720928, year = {2022}, author = {Yuan, G and Xie, F and Tan, H}, title = {Construction of Economic Security Early Warning System Based on Cloud Computing and Data Mining.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {2080840}, pmid = {35720928}, issn = {1687-5273}, mesh = {Algorithms ; *Cloud Computing ; Data Mining ; Forecasting ; *Models, Theoretical ; }, abstract = {Economic security is a core theoretical issue in economics. Under modern economic conditions, the ups and downs caused by economic instability in any economic system will affect the stability of the financial market, bring huge losses to the economy, and affect the development of the whole national economy. Therefore, research on the regularities of economic security and economic fluctuations is one of the important elements in ensuring economic stability and scientific development. Accurate monitoring and forecasting of economic security is an indispensable link in economic system regulation and an important reference for any economic organization's decision-making. This article focuses on the construction of an economic security early warning system. It integrates cloud computing and data mining technologies and, supported by a CNN-SVM algorithm, designs an early warning model that can adaptively evaluate and warn of the economic security state. Experiments show that when the CNN in the model uses the ReLU activation function and the SVM uses an RBF kernel, the prediction accuracy reaches 0.98, the best prediction performance. On the verified dataset, the model outputs a comprehensive economic security early warning index for Q province of 0.893 for 2018 and 0.829 for 2019, which is consistent with the actual situation.
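The CNN-SVM pairing named above (a convolutional feature extractor feeding an RBF-kernel SVM) is a common hybrid; a minimal sketch under assumptions follows. The toy input size, the tiny CNN, and the synthetic data are invented for illustration and are not the paper's model.

```python
# Hedged sketch: CNN features feeding an RBF-kernel SVM (illustrative hybrid,
# not the paper's architecture). Synthetic "economic indicator" windows are used.
import numpy as np
import torch
import torch.nn as nn
from sklearn.svm import SVC

class FeatureCNN(nn.Module):
    """Tiny 1D CNN that turns an indicator window into a feature vector."""
    def __init__(self):
        super().__init__()
        self.net = nn.Sequential(
            nn.Conv1d(1, 8, kernel_size=3, padding=1), nn.ReLU(),
            nn.AdaptiveAvgPool1d(4), nn.Flatten(),  # -> 8*4 = 32 features
        )

    def forward(self, x):
        return self.net(x)

rng = np.random.default_rng(0)
X = rng.normal(size=(120, 1, 16)).astype(np.float32)  # 120 windows of 16 steps
y = (X.mean(axis=(1, 2)) > 0).astype(int)             # toy "safe/alert" labels

cnn = FeatureCNN().eval()
with torch.no_grad():
    feats = cnn(torch.from_numpy(X)).numpy()

clf = SVC(kernel="rbf").fit(feats[:100], y[:100])     # RBF-kernel SVM head
print("toy accuracy:", clf.score(feats[100:], y[100:]))
```

In a full system, the CNN would be trained (or fine-tuned) before its features are handed to the SVM; here it is left untrained purely to keep the sketch short.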
The 2019 economic security early-warning index is 0.829, which is consistent with the actual situation.}, } @article {pmid35720893, year = {2022}, author = {Yin, X and He, J}, title = {Construction of Tourism E-Commerce Platform Based on Artificial Intelligence Algorithm.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {5558011}, pmid = {35720893}, issn = {1687-5273}, mesh = {*Artificial Intelligence ; Commerce ; Data Analysis ; Humans ; *Tourism ; Travel ; }, abstract = {In the late twentieth century, with the rapid development of the Internet, e-commerce emerged rapidly and changed the way people travel around the world. The greatest advantages of e-commerce are the free flow of information and data and the freedom it gives travelers to experience different places. Tourism is an important part of the development of e-commerce, but tourism e-commerce itself lags behind. To address this lagging development, this article studies the construction of a tourism e-commerce platform based on an artificial intelligence algorithm. By introducing modern information technology based on a cloud computing platform, big data analysis, K-means, and other key technologies, this article addresses the current state of e-commerce platform development. It also compares the construction methods of traditional and modern cloud platforms and identifies construction methods suitable for artificial-intelligence tourism. At the same time, combined with the actual situation of tourism, this article selects an appropriate networking method, based on an analysis of the advantages, disadvantages, and economics of wired and wireless coverage, to complete the project design. Its purpose is to ensure that the work meets specific construction needs and to build an artificial intelligence-based smart tourism big data analysis model. This promotes the development of the tourism e-commerce industry and saves costs and improves efficiency for travel service providers. Then, according to the actual situation of tourism, it conducts demand analysis from the perspectives of tourists, scenic spots, service providers, tourism administrative agencies, etc. Experiments with the artificial intelligence tourism mobile e-commerce platform designed in this article show that it can meet customers' needs for shopping for tourism commodities. Visitor numbers at attractions increased by 3.54%, and the economies of tourist destinations grew by 4.2%.}, } @article {pmid35720617, year = {2022}, author = {Cheng, W and Lian, W and Tian, J}, title = {Building the hospital intelligent twins for all-scenario intelligence health care.}, journal = {Digital health}, volume = {8}, number = {}, pages = {20552076221107894}, pmid = {35720617}, issn = {2055-2076}, abstract = {The COVID-19 pandemic has accelerated a long-term trend of smart hospital development. However, there is no consistent conceptualization of what a smart hospital entails. Few hospitals have genuinely become "smart," primarily because they fail to bring systems together and consider implications from all perspectives.
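
On the economic-security entry above, which pairs a CNN feature extractor with an RBF-kernel SVM: a minimal sketch of that two-stage idea. The CNN stage is stubbed out with a fixed ReLU random projection so the example stays self-contained; all data and names are invented, not the paper's.

```python
# CNN-SVM pipeline in miniature: a feature extractor feeds an RBF-kernel SVM.
# The "CNN" here is a fixed ReLU random projection; data are synthetic.
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC

rng = np.random.default_rng(1)
raw = rng.normal(size=(2000, 64))        # stand-in for raw economic indicators
W = rng.normal(size=(64, 16))
features = np.maximum(raw @ W, 0.0)      # ReLU projection standing in for CNN features
labels = (features.sum(axis=1) > np.median(features.sum(axis=1))).astype(int)

X_tr, X_te, y_tr, y_te = train_test_split(features, labels, random_state=1)
svm = SVC(kernel="rbf").fit(X_tr, y_tr)  # RBF kernel, as the abstract reports
print("held-out accuracy:", svm.score(X_te, y_te))
```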
Hospital Intelligent Twins is a new technology integration powered by IoT, AI, cloud computing, and 5G, applied to create all-scenario intelligence for health care and hospital management. This communication presents a smart hospital designed for all-scenario intelligence by creating hospital Intelligent Twins. Intelligent Twins are widely involved in medical activities. However, resolving medical-ethics issues, protecting patient privacy, and reducing security risks are significant challenges for all-scenario intelligence applications. Creating hospital Intelligent Twins can be a worthwhile endeavor for assessing how to better inform evidence-based decision-making and enhance patient satisfaction and outcomes.}, } @article {pmid35713563, year = {2022}, author = {Chen, X and Xue, Y and Sun, Y and Shen, J and Song, S and Zhu, M and Song, Z and Cheng, Z and Zhou, P}, title = {Neuromorphic Photonic Memory Devices Using Ultrafast, Non-Volatile Phase-Change Materials.}, journal = {Advanced materials (Deerfield Beach, Fla.)}, volume = {}, number = {}, pages = {e2203909}, doi = {10.1002/adma.202203909}, pmid = {35713563}, issn = {1521-4095}, abstract = {The search for ultrafast photonic memory devices is inspired by the ever-increasing number of cloud-computing, supercomputing, and artificial-intelligence applications, together with the unique advantages of signal processing in the optical domain such as high speed, large bandwidth, and low energy consumption. By embracing silicon photonics with chalcogenide phase-change materials (PCMs), non-volatile integrated photonic memory has been developed with promising potential in photonic integrated circuits and nanophotonic applications. While conventional PCMs suffer from slow crystallization speed, scandium-doped antimony telluride (SST) has recently been developed for ultrafast phase-change random-access memory applications. An ultrafast non-volatile photonic memory based on an SST thin film with a 2 ns write/erase speed is demonstrated, the fastest write/erase speed yet reported in integrated phase-change photonic devices. SST-based photonic memories exhibit multilevel capabilities and good stability at room temperature. By mapping the memory level to the biological synapse weight, an artificial neural network based on photonic memory devices is successfully established for image classification. Additionally, a reflective nanodisplay application using SST with optoelectronic modulation capabilities is demonstrated. Both the optical and electrical changes in SST during the phase transition and the fast switching speed demonstrate its potential for use in photonic computing, neuromorphic computing, nanophotonics, and optoelectronic applications.}, } @article {pmid35712069, year = {2022}, author = {Hassan, J and Shehzad, D and Habib, U and Aftab, MU and Ahmad, M and Kuleev, R and Mazzara, M}, title = {The Rise of Cloud Computing: Data Protection, Privacy, and Open Research Challenges-A Systematic Literature Review (SLR).}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {8303504}, pmid = {35712069}, issn = {1687-5273}, mesh = {*Cloud Computing ; Computer Security ; Confidentiality ; Delivery of Health Care ; *Privacy ; }, abstract = {Cloud computing is a long-standing dream of computing as a utility, where users can store their data remotely in the cloud to enjoy on-demand services and high-quality applications from a shared pool of configurable computing resources.
Thus, the privacy and security of data are of utmost importance to all of its users, regardless of the nature of the data being stored. In cloud computing environments this is especially critical because data are stored in various locations, even around the world, and users do not have any physical access to their sensitive data. Therefore, certain data protection techniques are needed to protect the sensitive data that are outsourced to the cloud. In this paper, we conduct a systematic literature review (SLR) of the data protection techniques that protect sensitive data outsourced to cloud storage. The main objective of this research is to synthesize, classify, and identify important studies in the field. Accordingly, an evidence-based approach is used, and preliminary results are based on answers to four research questions. Out of 493 research articles, 52 studies were selected. These 52 papers use different data protection techniques, which can be divided into two main categories, namely noncryptographic techniques and cryptographic techniques. Noncryptographic techniques consist of data splitting, data anonymization, and steganographic techniques, whereas cryptographic techniques consist of encryption, searchable encryption, homomorphic encryption, and signcryption. In this work, we compare all of these techniques in terms of data protection accuracy, overhead, and operations on masked data. Finally, we discuss the future research challenges facing the implementation of these techniques.}, } @article {pmid35712065, year = {2022}, author = {Chen, M}, title = {Integration and Optimization of British and American Literature Information Resources in the Distributed Cloud Computing Environment.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {4318962}, pmid = {35712065}, issn = {1687-5273}, mesh = {*Algorithms ; *Cloud Computing ; Models, Theoretical ; Publications ; United States ; }, abstract = {One of the most effective approaches to improving resource usage efficiency and the degree of resource collection is to integrate resources. Many studies on the integration of information resources are available, of which search engines are the best known. This article optimizes the integration of British and American literature information resources by employing distributed cloud computing, based on the needs of British and American literature. The research develops a model for the dispersed nature of cloud computing and optimizes the method by fitting a mathematical model of transmission cost and latency. It analyzes the weaknesses of current British and American literature information resource integration and optimizes the integration accordingly. According to this paper's experiments, the Random algorithm has the longest delay (maximum user-weighted distance), and the NPA-PDP and BWF algorithms have longer delays than the Opt algorithm. The percentage decline varies between 0.17 percent and 1.11 percent for different algorithms.
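
To make the simplest cryptographic technique surveyed by the SLR entry above concrete, here is a toy sketch of symmetric encryption before outsourcing, using the `cryptography` package's Fernet recipe. Key handling is deliberately naive and the record content is invented.

```python
# Encrypt-before-outsourcing in miniature: only ciphertext leaves the owner.
# Requires `pip install cryptography`; the record below is invented.
from cryptography.fernet import Fernet

key = Fernet.generate_key()        # kept by the data owner, never uploaded
f = Fernet(key)

record = b"patient-id=42;diagnosis=hypertension"
ciphertext = f.encrypt(record)     # this is what would be sent to the cloud
print("stored in cloud:", ciphertext[:32], b"...")

# The owner (or an authorized key holder) can later decrypt the download.
assert f.decrypt(ciphertext) == record
```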
This demonstrates that the algorithm presented in this work can be used to integrate and make the most of British and American literature information resources.}, } @article {pmid35707200, year = {2022}, author = {Chen, Y and Zhou, W}, title = {Application of Network Information Technology in Physical Education and Training System under the Background of Big Data.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {3081523}, pmid = {35707200}, issn = {1687-5273}, mesh = {Big Data ; Humans ; *Information Technology ; *Physical Education and Training ; Students ; Universities ; }, abstract = {During the last two decades, rapid development in network technology, particularly hardware, has been observed, and the development of software technology has accelerated, resulting in the launch of a variety of novel products with a wide range of applications. Traditional sports training systems, on the other hand, have a single function and complex operation that cannot be fully implemented in colleges and universities, causing China's sports training to stagnate for a long time. The goal of physical education and training is to teach a specific action so as to attain its maximum potential in a variety of ways. As a result, systems should collect scientifically sound and trustworthy data to aid relevant staff in completing their training tasks. Therefore, in the context of big data, network information technology has become the main way to improve physical education systems. By applying cloud computing technology, machine vision technology, and 64-bit machine technology to the physical education training system, the system extracts video data, designs the video teaching process, and constructs a three-dimensional human model to analyze trainees' performance. In this paper, 30 basketball majors at a university are selected as the professional group and 30 computer majors as the control group. The average reaction times, scores, and expert ratings of the two groups are analyzed. The results show that the professional group scores significantly higher than the amateur group. At the same time, feedback from students using the physical education and training system is compared with feedback from normal physical education teaching and training. One week later, the students trained with the system had improved their thinking ability, movement accuracy, and judgment ability, indicating that the system performs well in practice.}, } @article {pmid35700763, year = {2022}, author = {Cheah, CG and Chia, WY and Lai, SF and Chew, KW and Chia, SR and Show, PL}, title = {Innovation designs of industry 4.0 based solid waste management: Machinery and digital circular economy.}, journal = {Environmental research}, volume = {213}, number = {}, pages = {113619}, doi = {10.1016/j.envres.2022.113619}, pmid = {35700763}, issn = {1096-0953}, mesh = {Artificial Intelligence ; Humans ; Industry ; Machine Learning ; *Solid Waste/analysis ; *Waste Management ; }, abstract = {The Industrial Revolution 4.0 (IR 4.0) holds the opportunity to improve the efficiency of managing solid waste through digital and machinery applications, effectively eliminating, recovering, and repurposing waste.
This research aims to discover and review the potential of current technologies encompassing innovative Industry 4.0 designs for solid waste management. Machinery and processes emphasizing the circular economy were summarized and evaluated. The application of IR 4.0 technologies shows promising opportunities for improving the management and efficiency of solid waste handling. Machine learning (ML), artificial intelligence (AI), and image recognition can be used to automate the segregation of waste, reducing the risk of exposing workers to harmful waste. Radio Frequency Identification (RFID) and wireless communications enable traceability of materials, giving a better understanding of the opportunities in the circular economy. Additionally, the interconnectivity of systems and automatic transfer of data enable more complex systems with a larger solution space than was previously possible, such as centralized cloud computing, which reduces cost by eliminating the need for individual computing systems. Through this comprehensive review-based work, innovative Industry 4.0 components of machinery and processes for waste management focused on the circular economy are identified, and the critical ones are evaluated briefly. It was found that current research applies Industry 4.0 technologies to individual waste management systems, which lacks the coherency needed to capitalize on technologies such as cloud computing, interconnectivity, and big data on a larger scale. Therefore, a comprehensive real-world end-to-end integration aimed at optimizing every process within the solid waste management chain should be explored.}, } @article {pmid35693529, year = {2022}, author = {Zhao, Y and Du, D}, title = {Research Orientation and Development of Social Psychology's Concept of Justice in the Era of Cloud Computing.}, journal = {Frontiers in psychology}, volume = {13}, number = {}, pages = {902780}, pmid = {35693529}, issn = {1664-1078}, abstract = {With the maturity and rapid expansion of social psychology, great progress has been made in its integration with other disciplines. From the very beginning, social psychology was destined to have a diversified, multidisciplinary research orientation and disciplinary nature, which also makes it difficult for social psychology to be defined within a single disciplinary field or a single research method. With the rapid development of the Internet, the emergence of cloud computing technology not only facilitates the orientation of psychological research but also promotes the emergence and development of new psychological disciplines. Therefore, the purpose of this paper is to study the orientation of social psychology and its current development in the context of the cloud computing era. This paper collects, organizes, and integrates research data on college students' view of justice from the perspective of social psychology through cloud computing technology, and uses empirical research methods to conduct in-depth research on people's view of justice in social psychology. Collecting college students' reports on social justice issues through cloud computing technology makes the results more accurate. The experimental results show that nearly 70% of college students pay close attention to social justice issues.
These data reflect an optimistic trend in people's attention to justice issues in social psychology.}, } @article {pmid35687631, year = {2022}, author = {Chu, Z and Guo, J and Guo, J}, title = {Up-conversion Luminescence System for Quantitative Detection of IL-6.}, journal = {IEEE transactions on nanobioscience}, volume = {PP}, number = {}, pages = {}, doi = {10.1109/TNB.2022.3178754}, pmid = {35687631}, issn = {1558-2639}, abstract = {Interleukin-6 (IL-6) is a very important cytokine and an early predictor of survival in febrile patients (e.g., patients with COVID-19). With the global outbreak of COVID-19, the significance of medical detection of interleukin-6 has gradually become prominent. A method for point-of-care (POCT) diagnosis and monitoring of IL-6 levels in patients is urgently needed. In this work, an up-conversion luminescence system (ULS) based on upconverting nanoparticles (UCNs) for quantitative detection of IL-6 was designed. The ULS consists of a Micro Controller Unit (MCU), transmission device, laser, image acquisition module, Bluetooth module, etc. Through hardware acquisition and image software algorithm processing, we obtain a limit of detection (LOD) for IL-6 of 1 ng/mL, with a quantitative range from 1 to 200 ng/mL. The system is handheld, has good detection accuracy, and completes detection in 10 minutes. In addition, the system can connect to mobile device terminals (smartphones, personal computers, etc.) or 5G cloud servers via Bluetooth and WIFI. Patients and family members can view medical data through mobile terminals, and the data stored in the 5G cloud server can be used for edge computing and big data analysis. It is suitable for the early diagnosis of infectious diseases such as COVID-19 and has good application prospects.}, } @article {pmid35687417, year = {2022}, author = {Ito, H and Nakamura, Y and Takanari, K and Oishi, M and Matsuo, K and Kanbe, M and Uchibori, T and Ebisawa, K and Kamei, Y}, title = {Development of a Novel Scar Screening System with Machine Learning.}, journal = {Plastic and reconstructive surgery}, volume = {150}, number = {2}, pages = {465e-472e}, doi = {10.1097/PRS.0000000000009312}, pmid = {35687417}, issn = {1529-4242}, mesh = {Algorithms ; *Cicatrix, Hypertrophic/diagnosis/etiology ; Humans ; *Keloid/drug therapy ; Machine Learning ; }, abstract = {BACKGROUND: Hypertrophic scars and keloids tend to cause serious functional and cosmetic impediments to patients. As these scars are not life threatening, many patients do not seek proper treatment. Thus, educating physicians and patients regarding these scars is important. The authors aimed to develop an algorithm for a scar screening system and compare the accuracy of the system with that of physicians. This algorithm was designed to involve health care providers and patients.

METHODS: Digital images were obtained from Google Images (Google LLC, Mountain View, Calif.), open access repositories, and patients in the authors' hospital. After preprocessing, 3768 images were uploaded to the Google Cloud AutoML Vision platform and labeled with one of the four diagnoses: immature scars, mature scars, hypertrophic scars, and keloid. A consensus label for each image was compared with the label provided by physicians.

RESULTS: For all diagnoses, the average precision (positive predictive value) of the algorithm was 80.7 percent, the average recall (sensitivity) was 71 percent, and the area under the curve was 0.846. The algorithm afforded 77 correct diagnoses with an accuracy of 77 percent. By comparison, the average physician accuracy was 68.7 percent. The Cohen kappa coefficient of the algorithm was 0.69, while that of the physicians was 0.59.

CONCLUSIONS: The authors developed a computer vision algorithm that can diagnose four scar types using automated machine learning. Future iterations of this algorithm, with more comprehensive accuracy, can be embedded in telehealth and digital imaging platforms used by patients and primary doctors. The scar screening system with machine learning may be a valuable support tool for physicians and patients.

Diagnostic, II.}, } @article {pmid35684889, year = {2022}, author = {Hanzelik, PP and Kummer, A and Abonyi, J}, title = {Edge-Computing and Machine-Learning-Based Framework for Software Sensor Development.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {11}, pages = {}, pmid = {35684889}, issn = {1424-8220}, mesh = {*Artificial Intelligence ; Cloud Computing ; *Internet of Things ; Machine Learning ; Software ; }, abstract = {This research presents a framework that supports the development and operation of machine-learning (ML) algorithms to develop, maintain, and manage the whole lifecycle of software sensor models related to complex chemical processes. Our motivation is to take advantage of ML and edge computing and offer innovative solutions to the chemical industry for difficult-to-measure laboratory variables. The purpose of software sensor models is to continuously forecast the quality of products to achieve effective quality control, maintain the stable production condition of plants, and support efficient, environmentally friendly, and harmless laboratory work. As the literature review shows, quite a few ML models have been developed in recent years that support the quality assurance of different types of materials. However, the problems of continuous operation, maintenance, and version control of these models have not yet been solved. The method uses ML algorithms and takes advantage of cloud services in an enterprise environment. Industry 4.0 technologies such as the Internet of Things (IoT), edge computing, cloud computing, ML, and artificial intelligence (AI) are the core techniques. The article outlines an information system structure and the related methodology based on data from a quality-assurance laboratory. During development, we encountered several challenges resulting from the continuous development of ML models and the tuning of their parameters. The article discusses the development, version control, validation, lifecycle, and maintenance of ML models, along with a case study. The developed framework can continuously monitor the performance of the models and increase the amount of data that make up the models. As a result, the most accurate, data-driven, and up-to-date models are always available to quality-assurance engineers with this solution.}, } @article {pmid35684844, year = {2022}, author = {Lin, HY and Tsai, TT and Ting, PY and Chen, CC}, title = {An Improved ID-Based Data Storage Scheme for Fog-Enabled IoT Environments.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {11}, pages = {}, pmid = {35684844}, issn = {1424-8220}, abstract = {In a fog-enabled IoT environment, a fog node is regarded as the proxy between end users and cloud servers to reduce the latency of data transmission, so as to fulfill the requirements of more real-time applications. A data storage scheme utilizing fog computing architecture allows a user to share cloud data with other users via the assistance of fog nodes. In particular, a fog node obtaining a re-encryption key from the data owner is able to convert a cloud ciphertext into one that is decryptable by another designated user. In such a scheme, a proxy should not learn any information about the plaintext during the transmission and re-encryption processes. In 2020, an ID-based data storage scheme utilizing anonymous key generation in fog computing was proposed by some researchers.
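
To make the agreement statistic reported in the scar-screening entry above concrete (kappa 0.69 for the algorithm vs. 0.59 for physicians): a minimal example of computing Cohen's kappa with scikit-learn. The label sequences are invented for illustration.

```python
# Cohen's kappa measures agreement beyond chance between two label sequences,
# the statistic the scar-screening study reports. Labels below are invented.
from sklearn.metrics import cohen_kappa_score

truth = ["keloid", "mature", "immature", "hypertrophic", "keloid", "mature"]
preds = ["keloid", "mature", "mature",   "hypertrophic", "keloid", "immature"]
print("kappa:", round(cohen_kappa_score(truth, preds), 3))
```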
Although their protocol is provably secure in the random oracle model, we point out some security flaws inherent in their protocol. On the basis of their work, we further present an improved variant, which not only eliminates their security weaknesses but also preserves the functionalities of anonymous key generation and the user revocation mechanism. Additionally, under the Decisional Bilinear Diffie-Hellman (DBDH) assumption, we demonstrate that our enhanced construction is also provably secure in the security notion of IND-PrID-CPA.}, } @article {pmid35684754, year = {2022}, author = {Bhatia, S and Alsuwailam, RI and Roy, DG and Mashat, A}, title = {Improved Multimedia Object Processing for the Internet of Vehicles.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {11}, pages = {}, pmid = {35684754}, issn = {1424-8220}, mesh = {Algorithms ; Automation ; *Internet of Things ; *Multimedia ; }, abstract = {The combination of edge computing and deep learning helps make intelligent edge devices that can make several conditional decisions using comparatively secure and fast machine learning algorithms. An automated car that acts as the data-source node of an intelligent Internet of Vehicles (IoV) system is one such example. Our motivation is to obtain more accurate and rapid object detection using the intelligent cameras of a smart car. The smart car's supervision camera utilizes multimedia data for real-time automation of threat detection. The corresponding comprehensive network combines cooperative multimedia data processing, Internet of Things (IoT) fact handling, validation, computation, precise detection, and decision making. These actions confront real-time delays during data offloading to the cloud and synchronization with other nodes. The proposed model follows a cooperative machine learning technique, distributes the computational load by slicing real-time object data among analogous intelligent Internet of Things nodes, and parallelizes vision processing between connected edge clusters. As a result, the system increases the computational rate and improves accuracy through responsible resource utilization and active-passive learning. We achieved low latency and higher accuracy for object identification through real-time multimedia data objectification.}, } @article {pmid35684631, year = {2022}, author = {Jiao, Z and Zhou, F and Wang, Q and Sun, J}, title = {RPVC: A Revocable Publicly Verifiable Computation Solution for Edge Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {11}, pages = {}, pmid = {35684631}, issn = {1424-8220}, mesh = {Algorithms ; *Cloud Computing ; *Polyvinyl Chloride ; }, abstract = {With the development of publicly verifiable computation (PVC), users with limited resources prefer to outsource computing tasks to cloud servers. However, existing PVC schemes are mainly proposed for cloud computing scenarios, which imposes bandwidth consumption and network delays on IoT devices in edge computing. In addition, dishonest edge servers may reduce resource utilization by returning unreliable results. Therefore, we propose a revocable publicly verifiable computation (RPVC) scheme for edge computing. On the one hand, RPVC ensures that users can verify correct results at a small cost. On the other hand, it can revoke the computing abilities of dishonest edge servers. First, polynomial commitments are employed to reduce proof length and generation time.
Then, we improve the revocable group signature with knowledge signatures and subset-covering theory, which makes it possible to revoke dishonest edge servers. Finally, theoretical analysis proves that RPVC is correct and secure, and experiments evaluate its efficiency.}, } @article {pmid35677770, year = {2022}, author = {Loo, WK and Hasikin, K and Suhaimi, A and Yee, PL and Teo, K and Xia, K and Qian, P and Jiang, Y and Zhang, Y and Dhanalakshmi, S and Azizan, MM and Lai, KW}, title = {Systematic Review on COVID-19 Readmission and Risk Factors: Future of Machine Learning in COVID-19 Readmission Studies.}, journal = {Frontiers in public health}, volume = {10}, number = {}, pages = {898254}, pmid = {35677770}, issn = {2296-2565}, mesh = {*COVID-19/epidemiology ; Humans ; Logistic Models ; Machine Learning ; *Patient Readmission ; Risk Factors ; United States ; }, abstract = {In this review, current studies on hospital readmission due to COVID-19 infection were discussed, compared, and further evaluated in order to understand the current trends and progress in the mitigation of hospital readmissions due to COVID-19. The Boolean expression ("COVID-19" OR "covid19" OR "covid" OR "coronavirus" OR "Sars-CoV-2") AND ("readmission" OR "re-admission" OR "rehospitalization" OR "rehospitalization") was used in five databases, namely Web of Science, Medline, Science Direct, Google Scholar, and Scopus. From the search, a total of 253 articles were screened down to 26 articles. Overall, most of the research focuses on readmission rates rather than mortality rates. On readmission rates, the lowest is 4.2% by Ramos-Martínez et al. from Spain, and the highest is 19.9% by Donnelly et al. from the United States. Most of the research (n = 13) uses an inferential statistical approach, while only one uses a machine learning approach. The data size ranges from 79 to 126,137. However, there is no specific guide for setting the most suitable data size for a study, and the results cannot be compared in terms of accuracy, as all are regional studies that do not involve multi-region data. Logistic regression is prevalent in the research on risk factors of readmission after COVID-19 admission, although the studies report different outcomes. From the word cloud, age is the most dominant risk factor for readmission, followed by diabetes, long length of stay, COPD, CKD, liver disease, metastatic disease, and CAD. A few future research directions have been proposed, including the utilization of machine learning in statistical analysis, investigation of dominant risk factors, experimental design of interventions to curb dominant risk factors, and increasing the scale of data collection from single-center to multi-center.}, } @article {pmid35677629, year = {2022}, author = {Ghosh, S and Mukherjee, A}, title = {STROVE: spatial data infrastructure enabled cloud-fog-edge computing framework for combating COVID-19 pandemic.}, journal = {Innovations in systems and software engineering}, volume = {}, number = {}, pages = {1-17}, pmid = {35677629}, issn = {1614-5046}, abstract = {The outbreak of the 2019 novel coronavirus (COVID-19) has triggered unprecedented challenges and put the whole world in a parlous condition. The impacts of COVID-19 are a matter of grave concern in terms of fatality rate, socio-economic conditions, and health infrastructure.
It is obvious that pharmaceutical solutions (vaccines) alone cannot eradicate this pandemic completely; effective strategies regarding lockdown measures, restricted mobility, and emergency services to users, in brief a data-driven decision system, are of utmost importance. This necessitates an efficient data analytics framework, a data infrastructure to store and manage pandemic-related information, and a distributed computing platform to support such data-driven operations. In the past few decades, Internet of Things-based devices and applications have emerged significantly in various sectors, including healthcare and time-critical applications. To be specific, health sensors accumulate health-related parameters at different times of day, and movement sensors keep track of users' mobility traces and help assist them in varied conditions. Smartphones are equipped with several such sensors, and the ability of low-cost connected sensors to cover large areas makes them among the most useful components for combating pandemics such as COVID-19. However, analysing and managing the huge amount of data generated by these sensors is a big challenge. In this paper we propose a unified framework with three major components: (i) a Spatial Data Infrastructure (SDI) to manage, store, analyse, and share spatio-temporal information with stakeholders efficiently; (ii) a Cloud-Fog-Edge-based hierarchical architecture to support preliminary diagnosis and to monitor patients' mobility, health parameters, and activities while they are in quarantine or home-based treatment; and (iii) assistance to users in varied emergency situations, leveraging efficient data-driven techniques at low latency and energy consumption. Mobility data analytics, along with the SDI, are required to interpret the movement dynamics of a region and correlate them with COVID-19 hotspots. Further, the Cloud-Fog-Edge-based system architecture is required to provision healthcare services efficiently and in a timely manner. The proposed framework yields encouraging results in taking decisions based on the COVID-19 context and assisting users effectively, enhancing the accuracy of detecting suspected infected people by ∼ 24% and reducing delay by ∼ 55% compared to a cloud-only system.}, } @article {pmid35677197, year = {2022}, author = {Zhang, Y and Zhao, H and Peng, D}, title = {Exploration and Research on Smart Sports Classrooms in Colleges in the Information Age.}, journal = {Applied bionics and biomechanics}, volume = {2022}, number = {}, pages = {2970496}, pmid = {35677197}, issn = {1176-2322}, abstract = {Smart classrooms, made possible by the growing use of Internet information technology in education, are one of the important foundations for the realization of smart education and have become a hot direction in the development of educational information innovation; this study proposes ideas and directions for smart sports teaching research in information age (IA) colleges and universities. The smart classroom is an intelligent and efficient classroom created by the "Internet +" way of thinking and the new generation of information technologies such as big data and cloud computing. This article applies exploratory research methods, such as document retrieval, expert interviews, questionnaire surveys, practical research, and field investigation, to the study of smart sports classrooms in IA colleges and universities.
According to the findings of this study, 96.34 percent of students have a positive attitude toward the smart sports classroom teaching model, which is favorable to the growth of smart sports classroom teaching.}, } @article {pmid35676964, year = {2022}, author = {Nair, R and Zafrullah, SN and Vinayasree, P and Singh, P and Zahra, MMA and Sharma, T and Ahmadi, F}, title = {Blockchain-Based Decentralized Cloud Solutions for Data Transfer.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {8209854}, pmid = {35676964}, issn = {1687-5273}, mesh = {*Blockchain ; Cloud Computing ; Information Storage and Retrieval ; }, abstract = {Cloud computing has increased its service area and user experience above traditional platforms through virtualization and resource integration, resulting in substantial economic and societal advantages. However, cloud computing faces a significant security and trust dilemma, requiring a trust-enabled transaction environment. The typical cloud trust model is centralized, resulting in high maintenance costs, network congestion, and even single-point failure. Also, due to a lack of openness and traceability, trust rating findings are not universally acknowledged. Blockchain is a novel, decentralized computing system; its unique operational principles and record traceability assure the transaction data's integrity, undeniability, and security, making blockchain ideal for building a distributed and decentralized trust infrastructure. This study addresses the difficulty of transferring data and related permission policies from the cloud to distributed file systems (DFS). Our aims include moving the data files from the cloud to the distributed file system and developing a cloud policy. In a DFS, no node is privileged, and storage of all data depends on content addressing. Concretely, the data files are moved from Amazon S3 buckets to the InterPlanetary File System (IPFS).}, } @article {pmid35673063, year = {2022}, author = {Anderson, B and Cameron, J and Jefferson, U and Reeder, B}, title = {Designing a Cloud-Based System for Affordable Cyberinfrastructure to Support Software-Based Research.}, journal = {Studies in health technology and informatics}, volume = {290}, number = {}, pages = {489-493}, doi = {10.3233/SHTI220124}, pmid = {35673063}, issn = {1879-8365}, mesh = {*Cloud Computing ; Research ; *Software ; }, abstract = {Interest in cloud-based cyberinfrastructure among higher-education institutions is growing rapidly, driven by needs to realize cost savings and access enhanced computing resources. Through a nonprofit entity, we have created a platform that provides hosting and software support services enabling researchers to responsibly build on cloud technologies. However, there are technical, logistic, and administrative challenges if this platform is to support all types of research. Software-enhanced research is distinctly different from industry applications, typically characterized by needs for reduced availability, greater flexibility, and fewer resources for upkeep. We describe a swarm environment specifically designed for research in academic settings and our experience developing an operating model for sustainable cyberinfrastructure.
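
The IPFS migration described in the blockchain entry above rests on content addressing: a file's address is derived from a hash of its bytes, so no storage node is privileged. A minimal sketch of the idea follows; real IPFS CIDs add multihash and multibase encoding, which is not shown here.

```python
# Content addressing in miniature: a blob's address is a digest of its bytes.
# Identical content always maps to the same address; changing one byte
# changes the address. Real IPFS CIDs add multihash/multibase encoding.
import hashlib

store = {}

def put(blob: bytes) -> str:
    addr = hashlib.sha256(blob).hexdigest()
    store[addr] = blob
    return addr

addr = put(b"hello, distributed file system")
assert store[addr] == b"hello, distributed file system"
print("content address:", addr)
```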
We also present three case studies illustrating the types of applications supported by the cyberinfrastructure and explore techniques that address specific application needs. Our findings demonstrate safer, faster, cheaper cloud services achieved by recognizing the intrinsic properties of academic research environments.}, } @article {pmid35673000, year = {2022}, author = {Ruokolainen, J and Haladijan, J and Juutinen, M and Puustinen, J and Holm, A and Vehkaoja, A and Nieminen, H}, title = {Mobilemicroservices Architecture for Remote Monitoring of Patients: A Feasibility Study.}, journal = {Studies in health technology and informatics}, volume = {290}, number = {}, pages = {200-204}, doi = {10.3233/SHTI220061}, pmid = {35673000}, issn = {1879-8365}, mesh = {Cloud Computing ; Delivery of Health Care ; Feasibility Studies ; Humans ; Monitoring, Physiologic ; *Telemedicine ; }, abstract = {Recent developments in smart mobile devices (SMDs), wearable sensors, the Internet, mobile networks, and computing power provide new healthcare opportunities that are not restricted geographically. This paper introduces the Mobilemicroservices Architecture (MMA), based on a study of architectures. In MMA, an HTTP-based Mobilemicroservice (MM) is allocated to each SMD's sensor. The key benefits are extendibility, scalability, ease of use for the patient, security, and the possibility of collecting raw data without involving cloud services. Feasibility was investigated in a two-year project in which MMA-based solutions were used to collect motor function data from patients with Parkinson's disease. First, we collected motor function data from 98 patients and healthy controls during their visit to a clinic. Second, we monitored the same subjects in real time for three days in their everyday living environment. These MMA applications represent HTTP-based business-logic computing in which the SMDs' resources are accessible globally.}, } @article {pmid35669983, year = {2022}, author = {Khan, NJ and Ahamad, G and Naseem, M}, title = {An IoT/FOG based framework for sports talent identification in COVID-19 like situations.}, journal = {International journal of information technology : an official journal of Bharati Vidyapeeth's Institute of Computer Applications and Management}, volume = {14}, number = {5}, pages = {2513-2521}, pmid = {35669983}, issn = {2511-2112}, abstract = {COVID-19 crippled all the domains of our society. The inevitable lockdowns and social distancing procedures have hit the process of traditional sports talent identification (TiD) severely. This interrupts athletes' career excellence and will also affect future talent in the years to come. We explore the effect of COVID-19 on sports talent identification and propose an IoT/Fog-based framework for the TiD process during COVID-19 and COVID-like situations. Our proposed novel six-layer model facilitates remote sports talent identification using the latest information and communication technologies, such as IoT, fog, and cloud computing. All stakeholders, such as experts, coaches, players, and institutes, are taken into consideration. The framework is mobile, widely accessible, scalable, cost-effective, secure, platform/location independent, and fast.
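
On the MMA entry above, which allocates one HTTP-based micro-service per sensor: a minimal sketch of such a service using Flask. The endpoint path, payload shape, and sensor stub are illustrative assumptions, not the paper's actual interface.

```python
# Minimal sketch of an HTTP micro-service fronting a single sensor, in the
# spirit of the MMA entry above. Endpoint and payload shape are illustrative.
from datetime import datetime, timezone
import random

from flask import Flask, jsonify

app = Flask(__name__)

def read_accelerometer():
    # Stand-in for a real sensor driver call on the smart mobile device.
    return {axis: random.uniform(-1.0, 1.0) for axis in ("x", "y", "z")}

@app.route("/sensors/accelerometer")
def accelerometer():
    return jsonify(
        reading=read_accelerometer(),
        timestamp=datetime.now(timezone.utc).isoformat(),
    )

if __name__ == "__main__":
    app.run(host="0.0.0.0", port=8080)
```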
A brief case study of cricket talent identification using the proposed framework is also provided.}, } @article {pmid35669659, year = {2022}, author = {Li, K}, title = {Application of Artificial Intelligence System Based on Wireless Sensor Network in Enterprise Management.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {2169521}, pmid = {35669659}, issn = {1687-5273}, mesh = {Artificial Intelligence ; *Computer Communication Networks ; Remote Sensing Technology ; Technology ; *Wireless Technology ; }, abstract = {With the improvement of the ability to acquire natural information, wireless sensor networks also need to transmit the corresponding information they collect. Wireless sensor nodes, as key components of wireless sensors, have great application prospects, and different wireless sensors play a decisive role in the operation of wireless network applications. With the continuous development of wireless sensor networks, existing nodes exhibit limitations and shortcomings such as inflexible structure, low variability, and low versatility. The learning capabilities and neural networks obtained by different artificial intelligence expert systems also differ. On the one hand, such systems can meet users' needs for information systems to a certain extent; on the other hand, they can help accelerate the development of computer science. At present, the new generation information technology industry is listed among the country's seven emerging strategic industries, and cloud computing technology has gradually expanded into important corporate governance capabilities. Intelligent applications of cloud computing technology are replacing traditional enterprise management technology. Efficiency management and risk management can improve the quality and business capabilities of the entire enterprise, improve system applications according to the actual situation of the enterprise, and support the enterprise's healthy, sustainable development, thereby promoting the sustainable development of the computer technology industry.}, } @article {pmid35669657, year = {2022}, author = {Yang, M and Gao, C and Han, J}, title = {Edge Computing Deployment Algorithm and Sports Training Data Mining Based on Software Defined Network.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {8056360}, pmid = {35669657}, issn = {1687-5273}, mesh = {Algorithms ; *Cloud Computing ; *Data Mining ; Software ; Technology ; }, abstract = {The wireless sensor network collects data from various areas through specific network nodes and uploads it to the decision-making layer for analysis and processing. Therefore, it has become a perception network of the Internet of Things and has made great achievements in monitoring and prevention at this stage. At this stage, the main problem is the motive power of sensor nodes, so improving the energy storage and transmission of wireless sensor networks is an urgent problem. Mobile edge computing technology provides a new type of technology for today's edge networks, enabling them to process resource-intensive data blocks and feed results back to managers in time. It is a new starting point relative to traditional cloud computing services.
Its transmission is more efficient, and it will be widely used to serve various industries in the future. Among them, education and related industries urgently need in-depth information, which in turn promotes the rapid development of data mining by sensor networks. This article focuses on data mining technology, mainly expounding its meaning and main mining methods, and conducts data mining on sports training requirements from the aspects of demand collection and analysis, algorithm design and optimization, demand results and realization, etc. The system monitors training status and gives trainers reasonable suggestions. Through processing of the training data mining results and checking the standardized training data in the database, we can formulate a personalized program suitable for sportsmen, reduce sports injuries caused by training without a trainer's guidance, and open new doors for training modes. Therefore, this paper studies sensor network technology, an edge computing deployment algorithm, and sports training data mining.}, } @article {pmid35668959, year = {2022}, author = {Zhong, M and Ali, M and Faqir, K and Begum, S and Haider, B and Shahzad, K and Nosheen, N}, title = {China Pakistan Economic Corridor Digital Transformation.}, journal = {Frontiers in psychology}, volume = {13}, number = {}, pages = {887848}, pmid = {35668959}, issn = {1664-1078}, abstract = {The China-Pakistan Economic Corridor (CPEC) vision and mission are to improve the living standards of the people of Pakistan and China through bilateral investments, trade, cultural exchanges, and economic activities. To achieve this envisioned dream, Pakistan established the China-Pakistan Economic Corridor Authority (CPECA) to further its completion, but Covid-19 slowed it down. This situation compelled the digitalization of CPEC. This article reviews the best practices and success stories of various digitalization and e-governance programs and, in this light, advises the implementation of the Ajman Digital Governance (ADG) model as a theoretical framework for CPEC digitalization. This article concludes that the Pakistani government needs to advance CPEC digitalization by setting up a CPEC Digitalization and Transformation Center (DTC) at the CPECA office to attract more investors and businesses.}, } @article {pmid35668732, year = {2022}, author = {Butt, UA and Amin, R and Aldabbas, H and Mohan, S and Alouffi, B and Ahmadian, A}, title = {Cloud-based email phishing attack using machine and deep learning algorithm.}, journal = {Complex & intelligent systems}, volume = {}, number = {}, pages = {1-28}, pmid = {35668732}, issn = {2198-6053}, abstract = {Cloud computing refers to the on-demand availability of personal computer system assets, specifically data storage and processing power, without the client's input. Emails are commonly used to send and receive data for individuals or groups. Financial data, credit reports, and other sensitive data are often sent via the Internet. Phishing is a fraudster's technique used to get sensitive data from users by seeming to come from trusted sources. A phished email can misdirect recipients into giving up secret data. The main problem addressed here is email phishing attacks that occur while sending and receiving email: the attacker sends spam via email and harvests your data when you open and read it. In recent years, it has been a big problem for everyone.
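
On the phishing entry above, whose description continues below: a generic sketch of email classification with TF-IDF features and a Naive Bayes classifier, one of the model families the study evaluates. The example emails and labels are invented for illustration.

```python
# Generic email phishing classification sketch: TF-IDF features plus a
# Naive Bayes classifier. The four example emails below are invented.
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import make_pipeline

emails = [
    "Your account is locked, verify your password at this link now",
    "Meeting moved to 3pm, agenda attached",
    "You won a prize! Send your bank details to claim",
    "Quarterly report draft for your review",
]
labels = [1, 0, 1, 0]  # 1 = phishing, 0 = legitimate

model = make_pipeline(TfidfVectorizer(), MultinomialNB()).fit(emails, labels)
print(model.predict(["Urgent: confirm your credentials via this link"]))
```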
This paper uses different legitimate and phishing data sizes, detects new emails, and uses different features and algorithms for classification. A modified dataset was created after measuring the existing approaches. We created a feature-extracted comma-separated values (CSV) file and a label file and applied the support vector machine (SVM), Naive Bayes (NB), and long short-term memory (LSTM) algorithms. This experimentation treats the recognition of a phished email as a classification problem. According to the comparison and implementation, SVM, NB, and LSTM perform well and accurately detect email phishing attacks, achieving the highest accuracies of 99.62%, 97%, and 98%, respectively.}, } @article {pmid35665291, year = {2022}, author = {Kumar, RR and Tomar, A and Shameem, M and Alam, MN}, title = {OPTCLOUD: An Optimal Cloud Service Selection Framework Using QoS Correlation Lens.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {2019485}, pmid = {35665291}, issn = {1687-5273}, mesh = {*Algorithms ; *Cloud Computing ; }, abstract = {Cloud computing has grown as a computing paradigm in the last few years. Due to the explosive increase in the number of cloud services, QoS (quality of service) becomes an important factor in service filtering. Moreover, comparing the functionality of cloud services with different performance metrics is a nontrivial problem. Therefore, optimal cloud service selection is quite challenging and extremely important for users. In existing approaches to cloud service selection, the user's preferences are offered by the user in a quantitative form. Owing to fuzziness and subjectivity, it is difficult for users to express clear preferences. Moreover, many QoS attributes are not independent but interrelated; therefore, the existing weighted-summation method cannot accommodate correlations among QoS attributes and produces inaccurate results. To resolve this problem, we propose a cloud service framework that takes the user's preferences and chooses the optimal cloud service based on the user's QoS constraints. We propose a cloud service selection algorithm, based on principal component analysis (PCA) and the best-worst method (BWM), which eliminates the correlations among QoS attributes and provides the best cloud services with the best QoS values for users. In the end, a numerical example is shown to validate the effectiveness and feasibility of the proposed methodology.}, } @article {pmid35655579, year = {2022}, author = {Ma, S and Liu, ZP}, title = {Machine learning potential era of zeolite simulation.}, journal = {Chemical science}, volume = {13}, number = {18}, pages = {5055-5068}, pmid = {35655579}, issn = {2041-6520}, abstract = {Zeolites, owing to their great variety and complexity in structure and wide applications in chemistry, have long been a hot topic in chemical research. This perspective first presents a short retrospect of theoretical investigations on zeolites using tools ranging from classical force fields to quantum mechanics calculations and to the latest machine learning (ML) potential simulations. ML potentials as the next-generation tec