@article {pmid39362158,
year = {2024},
author = {Crivellaro, M and Serrao, L and Bertoldi, W and Bizzi, S and Vitti, A and Hauer, C and Skrame, K and Cekrezi, B and Zolezzi, G},
title = {Multiscale morphological trajectories to support management of free-flowing rivers: the Vjosa in South-East Europe.},
journal = {Journal of environmental management},
volume = {370},
number = {},
pages = {122541},
doi = {10.1016/j.jenvman.2024.122541},
pmid = {39362158},
issn = {1095-8630},
abstract = {Free-flowing rivers (FFRs) are fundamental references for river management, providing the opportunity to investigate river functioning under minimal anthropic disturbance. However, large free-flowing rivers are rare in Europe and worldwide, and knowledge of their dynamics is often scarce due to a lack of data and baseline studies. So far, their characterization is mainly grounded in longitudinal connectivity assessment, with scarce integration of further hydro-morphological aspects, particularly concerning the processes and drivers of changes in their morphology over time scales of management relevance. This work aims to broaden the characterization of FFRs by reconstructing their catchment-scale morphological evolutionary trajectories and understanding their driving causes, to better support their management. This is achieved by integrating freely available global data, including Landsat imagery and climatic reanalysis, with the limited quantitative and qualitative information available locally. The analysis of possible drivers of change at the catchment and reach scale assesses hydrological variability, flow regulation, land use change, sediment mining and bank protection works. We applied this approach to the Vjosa River (Albania), a model ecosystem of European significance and one of the few FFRs in Europe. The Vjosa was recently declared a Wild River National Park. We investigated its catchment-scale morphological changes over 50 years, considering four reaches of the Vjosa and four reaches of its main tributaries. Satellite imagery was analyzed taking advantage of the Google Earth Engine cloud computing platform. The analysis reveals a catchment-scale response to climatic fluctuations, especially in the most natural reaches, with a significant narrowing of the active river corridor following a flood-intense period in the early 1960s. The narrowing rate gradually decreased, from 35% before 1985 to 24% between 1985 and 2000, reaching a new equilibrium from 2000 to 2020. However, the recent trajectories of the lowland reaches have been impacted by human pressures, particularly sediment mining, which intensified after the 1990s, suggesting that these reaches may instead be far from equilibrium and adjusting to such a persistent stressor. Identifying the key drivers of change and building catchment-scale knowledge of geomorphic change can inform the management of riverine protected areas, and the proposed integrated approach is a promising tool to help overcome the data scarcity typical of the limited remaining large FFRs.},
}
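The Landsat workflow summarized above lends itself to a compact Earth Engine sketch. The snippet below is a minimal illustration of that kind of analysis, not the authors' pipeline: the reach geometry, season window, water-index threshold and years are illustrative assumptions.

```python
# Minimal sketch: a wetted/active-channel area proxy from Landsat 5,
# in the spirit of the paper's Google Earth Engine analysis.
# The geometry, dates, and MNDWI threshold are illustrative assumptions.
import ee

ee.Initialize()

vjosa_reach = ee.Geometry.Rectangle([20.0, 40.4, 20.3, 40.6])  # hypothetical reach bbox

def water_area_km2(year):
    """Wetted-channel area (km^2) from one summer median composite."""
    col = (ee.ImageCollection('LANDSAT/LT05/C02/T1_L2')
           .filterBounds(vjosa_reach)
           .filterDate(f'{year}-06-01', f'{year}-09-30'))
    # MNDWI = (green - swir1) / (green + swir1); > 0 is a common water proxy.
    # (Surface-reflectance scale factors omitted for brevity.)
    mndwi = col.median().normalizedDifference(['SR_B2', 'SR_B5'])
    water = mndwi.gt(0.0)
    area = water.multiply(ee.Image.pixelArea()).reduceRegion(
        reducer=ee.Reducer.sum(), geometry=vjosa_reach, scale=30)
    return ee.Number(area.get('nd')).divide(1e6)

# Landsat 5 covers 1984-2012; later epochs would switch to Landsat 8/9 collections.
for y in (1985, 1995, 2005):
    print(y, water_area_km2(y).getInfo())
```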
@article {pmid39348369,
year = {2024},
author = {Alwageed, HS and Keshta, I and Khan, RA and Alzahrani, A and Tariq, MU and Ghani, A},
title = {An empirical study for mitigating sustainable cloud computing challenges using ISM-ANN.},
journal = {PloS one},
volume = {19},
number = {9},
pages = {e0308971},
doi = {10.1371/journal.pone.0308971},
pmid = {39348369},
issn = {1932-6203},
mesh = {*Cloud Computing ; *Computer Security ; *Neural Networks, Computer ; Humans ; Surveys and Questionnaires ; Sustainable Development ; },
abstract = {The significance of cloud computing methods in everyday life is growing as a result of the exponential advancement and refinement of artificial intelligence technology. As cloud computing makes more progress, it will bring with it new opportunities and threats that affect the long-term health of society and the environment. Many questions remain unanswered regarding sustainability, such as: "How will widely available computing systems affect environmental equilibrium?" What will society look like when hundreds of millions of microcomputers are invisible to each other? What does this mean for social sustainability? This paper empirically investigates the ethical challenges and practices of cloud computing with respect to sustainable development. We conducted a systematic literature review followed by a questionnaire survey and identified 11 sustainable cloud computing challenges (SCCCs) and 66 practices for addressing the identified challenges. Interpretive structural modeling (ISM) and Artificial Neural Networks (ANN) were then used to identify and analyze the interrelationships between the SCCCs. Then, based on the results of the ISM, 11 process areas were determined to develop the proposed sustainable cloud computing challenges mitigation model (SCCCMM). The SCCCMM includes four main categories: Requirements specification, Quality of Service (QoS) and Service Level Agreement (SLA), Complexity and Cyber security, and Trust. The model was subsequently tested with a real-world case study that was connected to the environment. The results demonstrate that the proposed SCCCMM aids a sustainable cloud computing organization in estimating its level of mitigation. The participants in the case study also appreciated the suggested SCCCMM for its practicality, user-friendliness, and overall usefulness. We believe that organizations involved in cloud computing can benefit from the suggested SCCCMM when it comes to the sustainability of their software products. Additionally, researchers and industry practitioners can expect the proposed model to provide a strong foundation for developing new sustainable methods and tools for cloud computing.},
}
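The ISM stage described above hinges on turning pairwise influence judgments into a final (transitive) reachability matrix. A minimal sketch of that standard computation follows; the 4x4 example matrix is made up for illustration (the paper's would be 11x11, one row per SCCC).

```python
# Minimal sketch of the ISM reachability step: derive the final
# (transitive) reachability matrix from an initial binary influence
# matrix via boolean Warshall closure.
import numpy as np

def final_reachability(initial: np.ndarray) -> np.ndarray:
    # Include self-reachability, then close transitively.
    r = initial.astype(bool) | np.eye(len(initial), dtype=bool)
    for k in range(len(r)):            # Warshall transitive closure
        r = r | (r[:, [k]] & r[[k], :])
    return r.astype(int)

# Made-up example: challenge 1 drives 2, 2 drives 3, 3 drives 4.
initial = np.array([[0, 1, 0, 0],
                    [0, 0, 1, 0],
                    [0, 0, 0, 1],
                    [0, 0, 0, 0]])
print(final_reachability(initial))
```

Row and column sums of the resulting matrix give the driving and dependence powers used to partition challenges into ISM levels.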
@article {pmid39345166,
year = {2024},
author = {Nakamura, T and Nomura, T and Endo, M and Sakaguchi, A and Ruofan, H and Kashiwazaki, T and Umeki, T and Takase, K and Asavanant, W and Yoshikawa, JI and Furusawa, A},
title = {Long-term stability of squeezed light in a fiber-based system using automated alignment.},
journal = {The Review of scientific instruments},
volume = {95},
number = {9},
pages = {},
doi = {10.1063/5.0203988},
pmid = {39345166},
issn = {1089-7623},
abstract = {Providing a cloud service for optical quantum computing requires stabilizing the optical system for extended periods. It is advantageous to construct a fiber-based system, which does not require spatial alignment. However, fiber-based systems are instead subject to fiber-specific instabilities. For instance, there are phase drifts due to ambient temperature changes and external disturbances, and polarization fluctuations due to the finite polarization extinction ratio of fiber components. Here, we report the successful measurement of squeezed light with a fiber system over 24 h. To do this, we introduce stabilization mechanisms to suppress fluctuations in the fiber system and an integrated controller to automatically align the entire system. The squeezed light at a wavelength of 1545.3 nm is measured every 2 min, and automated alignments are inserted every 30 min. Squeezing levels with an average of -4.42 dB are recorded with an extremely small standard deviation of 0.08 dB over 24 h. With the technologies developed here, we can build complicated optical setups with the fiber-based system and operate them automatically for extended periods, which is promising for cloud services of quantum computation.},
}
@article {pmid39338834,
year = {2024},
author = {López-Baldominos, I and Pospelova, V and Fernández-Sanz, L and Castillo-Martínez, A},
title = {Modeling and Analyzing the Availability of Technical Professional Profiles for the Success of Smart Cities Projects in Europe.},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {18},
pages = {},
pmid = {39338834},
issn = {1424-8220},
support = {101052513//European Commission/ ; },
abstract = {The success of developing and implementing Smart Cities (SC) projects depends on a varied set of factors, where the availability of a qualified technical workforce is a critical one. The combination of ICT requirements, like the effectiveness and quality of solutions merging IoT, cloud computing, sensors, and communications, with the work of many varied disciplines (e.g., civil engineering, architecture, etc.), mixed with aspects of environmental and business sustainability, makes the management of these projects really challenging. Reports forecast a scarcity of qualified candidates, given this complexity and the growth of activity in SC projects. The European project SMACITE has addressed the qualification requirements of the ICT workforce with an analysis of multiple sources of information from the labor market, feedback from involved stakeholders, and the literature. The goal was the development of two occupational ICT profiles as a reference for training and for the supply of candidates for job vacancies. The result is two ICT role profiles for engineers and technicians, mapped to the European skills frameworks ESCO and EN16234. The profiles capture the whole set of requirements, covering not only the core technical areas and soft skills but also additional technical areas and sustainability and managerial skills, based on the analysis of different sources of information. Our work has also determined which existing ESCO occupations are similar to the two reference profiles, so they can be better adapted to SC projects. The training activities of SMACITE have also suggested the amount of training expected for a varied sample of candidates who want to be qualified for SC projects.},
}
@article {pmid39338808,
year = {2024},
author = {Kopras, B and Idzikowski, F and Bogucka, H},
title = {A Survey on Reduction of Energy Consumption in Fog Networks-Communications and Computations.},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {18},
pages = {},
doi = {10.3390/s24186064},
pmid = {39338808},
issn = {1424-8220},
support = {CYBERSECIDENT/487845/IV/NCBR/2021//National Centre for Research and Development/ ; 2023/05/Y/ST7/00002//National Science Center/ ; },
abstract = {Fog networking has become an established architecture addressing various applications with strict latency, jitter, and bandwidth constraints. Fog Nodes (FNs) allow for flexible and effective computation offloading and content distribution. However, the transmission of computational tasks, the processing of these tasks, and finally sending the results back still incur energy costs. We survey the literature on fog computing, focusing on energy consumption. We take a holistic approach and look at the energy consumed by devices located in all network tiers, from the things tier through the fog tier to the cloud tier, including the communication links between the tiers. Furthermore, fog network modeling is analyzed with particular emphasis on application scenarios and the energy consumed for communication and computation. We perform a detailed analysis of model parameterization, which is crucial for the results presented in the surveyed works. Finally, we survey energy-saving methods, putting them into different classification systems and considering the results presented in the surveyed works. Based on our analysis, we present a classification and comparison of the fog algorithmic models with respect to where energy is spent on communication and computation, and where delay is incurred. We also classify the scenarios examined by the surveyed works with respect to the assumed parameters. Moreover, we systematize the methods used to save energy in a fog network. These methods are compared with respect to their scenarios, objectives, constraints, and decision variables. Finally, we discuss future trends in fog networking and how related technologies and economic factors will trade off their continued development against energy consumption.},
}
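As a concrete anchor for the communication-plus-computation accounting the survey analyzes, here is a toy offloading energy model; every constant is an illustrative assumption, not a value from any surveyed work.

```python
# Toy accounting of where energy is spent in a fog offloading decision,
# in the spirit of the models the survey compares. All constants are
# illustrative assumptions.

def local_energy(cycles, power_w=2.0, cps=1e9):
    """Energy (J) to compute a task locally on the thing device."""
    return power_w * cycles / cps

def offload_energy(bits, cycles, tx_power_w=0.5, rate_bps=10e6,
                   fn_power_w=20.0, fn_cps=10e9):
    """Energy (J) to transmit a task to a fog node and compute it there."""
    e_comm = tx_power_w * bits / rate_bps   # radio transmission
    e_comp = fn_power_w * cycles / fn_cps   # fog-node processing
    return e_comm + e_comp

task_bits, task_cycles = 2e6, 5e8
print('local  :', local_energy(task_cycles), 'J')      # 1.0 J
print('offload:', offload_energy(task_bits, task_cycles), 'J')  # 1.1 J
```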
@article {pmid39338747,
year = {2024},
author = {Hyun, G and Oak, J and Kim, D and Kim, K},
title = {The Impact of an Automation System Built with Jenkins on the Efficiency of Container-Based System Deployment.},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {18},
pages = {},
doi = {10.3390/s24186002},
pmid = {39338747},
issn = {1424-8220},
support = {NRF-2021R1A2C2013933//Ministry of Science and ICT/ ; S3224694//Ministry of SMEs and Startups/ ; },
abstract = {This paper evaluated deployment efficiency by comparing manual deployment with automated deployment through a CI/CD pipeline using Jenkins. This study involved moving from a manual deployment process to an automated system using Jenkins and experimenting with both deployment methods in a real-world environment. The results showed that the automated deployment system significantly reduced both deployment time and error rate compared to manual deployment. Manual deployment required human intervention at each step, making it time-consuming and prone to mistakes, while automated deployment using Jenkins automated each step to ensure consistency and maximized time efficiency through parallel processing. Automated testing verified the stability of the code before deployment, minimizing errors. This study demonstrates the effectiveness of adopting a CI/CD pipeline and shows that automated systems can provide high efficiency in real-world production environments. It also highlights the importance of security measures to prevent sensitive information leakage during CI/CD, suggesting the use of secret-management tools and environment variables and limiting access rights. This research will contribute to exploring the applicability of CI/CD pipelines in different environments and, in doing so, validate the universality of automated systems.},
}
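The stage structure of such a pipeline is easy to sketch. Jenkins itself declares stages in a Groovy Jenkinsfile; the Python harness below only mimics the same build/test/deploy sequence and times each stage, which is the quantity the paper compares. The commands are placeholders, not the paper's setup.

```python
# Illustrative timing harness for the manual-vs-automated comparison:
# runs placeholder build/test/deploy stages and reports wall-clock time.
import subprocess, time

STAGES = [
    ('build',  ['docker', 'build', '-t', 'myapp:latest', '.']),
    ('test',   ['pytest', '-q']),
    ('deploy', ['kubectl', 'rollout', 'restart', 'deployment/myapp']),
]

def run_pipeline():
    t0 = time.monotonic()
    for name, cmd in STAGES:
        t = time.monotonic()
        subprocess.run(cmd, check=True)   # fail fast, like a CI stage
        print(f'{name}: {time.monotonic() - t:.1f}s')
    print(f'total: {time.monotonic() - t0:.1f}s')

if __name__ == '__main__':
    run_pipeline()
```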
@article {pmid39338710,
year = {2024},
author = {Marković, D and Stamenković, Z and Đorđević, B and Ranđić, S},
title = {Image Processing for Smart Agriculture Applications Using Cloud-Fog Computing.},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {18},
pages = {},
doi = {10.3390/s24185965},
pmid = {39338710},
issn = {1424-8220},
support = {16DHBKIO20//Brandenburg/Bayern Initiative for Integration of Artificial Intelligence - Hardware Subjects in University Curriculum (BB-KI-Chips)/ ; },
mesh = {*Agriculture/methods ; *Image Processing, Computer-Assisted/methods ; *Cloud Computing ; *Neural Networks, Computer ; Crops, Agricultural ; Algorithms ; Humans ; Deep Learning ; },
abstract = {The widespread use of IoT devices has led to the generation of a huge amount of data and driven the need for analytical solutions in many areas of human activity, such as the field of smart agriculture. Continuous monitoring of crop growth stages enables timely interventions, such as control of weeds and plant diseases, as well as pest control, ensuring optimal development. Decision-making systems in smart agriculture involve image analysis with the potential to increase productivity, efficiency and sustainability. By applying Convolutional Neural Networks (CNNs), state recognition and classification can be performed based on images from specific locations. Thus, we have developed a solution for early problem detection and resource management optimization. The main concept of the proposed solution relies on a direct connection between Cloud and Edge devices, which is achieved through Fog computing. The goal of our work is the creation of a deep learning model for image classification that can be optimized and adapted for implementation on devices with limited hardware resources at the level of Fog computing. This could increase the importance of image processing in the reduction of agricultural operating costs and manual labor. As a result of offloading data processing to Edge and Fog devices, the system responsiveness can be improved, the costs associated with data transmission and storage can be reduced, and the overall system reliability and security can be increased. The proposed solution can choose among classification algorithms to find a trade-off between the size and accuracy of the model optimized for devices with limited hardware resources. After testing our model for tomato disease classification compiled for execution on an FPGA, it was found that the decrease in test accuracy is as small as 0.83% (from 96.29% to 95.46%).},
}
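As an illustration of the size/accuracy trade-off discussed above, a small CNN can be defined and then quantized for constrained Fog/Edge hardware. The sketch below (Keras plus TensorFlow Lite post-training quantization) is a generic example, not the paper's model; the input size, layer widths and 10-class head are assumptions.

```python
# Minimal sketch of a compact CNN of the kind used for crop-disease
# classification, followed by post-training quantization, one common
# route to shrinking a model for constrained devices.
import tensorflow as tf

def tiny_cnn(num_classes=10, input_shape=(96, 96, 3)):
    return tf.keras.Sequential([
        tf.keras.Input(shape=input_shape),
        tf.keras.layers.Conv2D(16, 3, activation='relu'),
        tf.keras.layers.MaxPooling2D(),
        tf.keras.layers.Conv2D(32, 3, activation='relu'),
        tf.keras.layers.MaxPooling2D(),
        tf.keras.layers.GlobalAveragePooling2D(),
        tf.keras.layers.Dense(num_classes, activation='softmax'),
    ])

model = tiny_cnn()
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
# ... model.fit(train_images, train_labels) on the target dataset ...

# Post-training quantization shrinks the model at a small accuracy cost,
# the same kind of trade-off the paper reports for FPGA deployment.
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
tflite_bytes = converter.convert()
```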
@article {pmid39338693,
year = {2024},
author = {Nur, A and Muanenda, Y},
title = {Design and Evaluation of Real-Time Data Storage and Signal Processing in a Long-Range Distributed Acoustic Sensing (DAS) Using Cloud-Based Services.},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {18},
pages = {},
doi = {10.3390/s24185948},
pmid = {39338693},
issn = {1424-8220},
abstract = {In cloud-based Distributed Acoustic Sensing (DAS) sensor data management, we are confronted with two primary challenges. First, the development of efficient storage mechanisms capable of handling the enormous volume of data generated by these sensors poses a challenge. To solve this issue, we design and implement a pipeline system to efficiently send the big data to DynamoDB, in order to fully use the low latency of the DynamoDB data storage system, for a benchmark DAS scheme performing continuous monitoring over a 100 km range at a meter-scale spatial resolution. We employ the DynamoDB functionality of Amazon Web Services (AWS), which allows highly expandable storage capacity with access latency of a few tens of milliseconds. The different stages of DAS data handling are performed in a pipeline, and the scheme is optimized for high overall throughput with reduced latency suitable for concurrent, real-time event extraction as well as minimal storage of raw and intermediate data. In addition, the scalability of the DynamoDB-based data storage scheme is evaluated for linear and nonlinear variations of the number of batches of access and a wide range of data sample sizes corresponding to sensing ranges of 1-110 km. The results show latencies of 40 ms per batch of access with low standard deviations of a few milliseconds, and the latency per sample decreases as the sample size increases, paving the way toward the development of scalable, cloud-based data storage services integrating additional post-processing for more precise feature extraction. The technique greatly simplifies DAS data handling in key application areas requiring continuous, large-scale measurement schemes. In addition, the processing of raw traces in a long-distance DAS for real-time monitoring requires the careful design of computational resources to guarantee the requisite dynamic performance. We therefore also design a system for the performance evaluation of cloud computing systems for diverse computations on DAS data. This system is aimed at unveiling valuable insights into the performance metrics and operational efficiencies of computations on the data in the cloud, providing a deeper understanding of the system's performance, identifying potential bottlenecks, and suggesting areas for improvement. To achieve this, we employ the CloudSim framework. The analysis reveals that more capable virtual machines (VMs) significantly decrease processing time, influenced by the number of Processing Elements (PEs) and Million Instructions Per Second (MIPS). The results also reflect that, although a larger number of computations is required as the fiber length increases, with a corresponding increase in processing time, the overall speed of computation is still suitable for continuous real-time monitoring. We also see that VMs with lower performance in terms of processing speed and number of CPUs have more inconsistent processing times compared to those with higher performance, while not incurring significantly higher prices. Additionally, the impact of VM parameters on computation time is explored, highlighting the importance of resource optimization in DAS system design for efficient performance. The study also observes a notable trend in processing time, showing a significant decrease for every additional 50,000 columns processed as the length of the fiber increases. This finding underscores the efficiency gains achieved with larger computational loads, indicating improved system performance and capacity utilization as the DAS system processes more extensive datasets.},
}
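The batched low-latency write path described above maps naturally onto DynamoDB's batch writer. A minimal boto3 sketch follows; the table name, key schema, item layout and batch size are assumptions, not the paper's configuration.

```python
# Sketch of the batched-write pattern described for DAS samples, using
# boto3's DynamoDB batch writer and timing each batch.
import time
import boto3

table = boto3.resource('dynamodb').Table('das-traces')  # hypothetical table

def write_batch(samples):
    """samples: iterable of dicts keyed by 'channel' (partition key) and 'ts'."""
    t0 = time.monotonic()
    with table.batch_writer() as batch:   # buffers into BatchWriteItem calls
        for item in samples:
            batch.put_item(Item=item)
    return (time.monotonic() - t0) * 1e3  # batch latency in ms

batch = [{'channel': c, 'ts': 1700000000, 'amplitude': 0} for c in range(25)]
print(f'{write_batch(batch):.1f} ms per batch')
```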
@article {pmid39320977,
year = {2024},
author = {Senthilkumar, G and Anandamurugan, S},
title = {Energy and time-aware scheduling in diverse virtualized cloud computing environments using optimized self-attention progressive generative adversarial network.},
journal = {Network (Bristol, England)},
volume = {},
number = {},
pages = {1-20},
doi = {10.1080/0954898X.2024.2391401},
pmid = {39320977},
issn = {1361-6536},
abstract = {The rapid growth of cloud computing has led to the widespread adoption of heterogeneous virtualized environments, offering scalable and flexible resources to meet diverse user demands. However, the increasing complexity and variability in workload characteristics pose significant challenges in optimizing energy consumption, and many scheduling algorithms have been suggested to address this. Therefore, a self-attention-based progressive generative adversarial network optimized with the Dwarf Mongoose algorithm for Energy and Deadline Aware Scheduling in heterogeneous virtualized cloud computing (SAPGAN-DMA-DAS-HVCC) is proposed in this paper. Here, a self-attention-based progressive generative adversarial network (SAPGAN) is proposed to schedule activities in a cloud environment with an objective function of makespan and energy consumption, and the Dwarf Mongoose algorithm is then used to optimize the weight parameters of SAPGAN. The proposed SAPGAN-DMA-DAS-HVCC approach attains 32.77%, 34.83% and 35.76% better makespan and 31.52%, 33.28% and 29.14% lower cost when analysed against the existing models, namely task scheduling in a heterogeneous cloud environment utilizing a mean grey wolf optimization approach, an energy- and performance-efficient task scheduling algorithm in heterogeneous virtualized cloud computing, and energy- and makespan-aware scheduling of deadline-sensitive tasks in the cloud environment, respectively.},
}
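The two objectives named above, makespan and energy consumption, are easy to make concrete for a fixed task-to-VM assignment. A minimal sketch follows; VM speeds, power draws and task lengths are illustrative assumptions.

```python
# Sketch of the two scheduling objectives for a given task-to-VM
# assignment: makespan (latest VM finish time) and energy (busy time
# times per-VM power). All numbers are illustrative.
def makespan_and_energy(assignment, task_len, vm_mips, vm_watts):
    """assignment[i] = VM index for task i; task_len in MI; vm_mips in MIPS."""
    finish = [0.0] * len(vm_mips)
    for i, vm in enumerate(assignment):
        finish[vm] += task_len[i] / vm_mips[vm]   # serial execution per VM
    makespan = max(finish)
    energy = sum(busy * vm_watts[v] for v, busy in enumerate(finish))
    return makespan, energy

# Three tasks on two VMs: makespan 6.0 s, energy 620 J.
print(makespan_and_energy([0, 1, 0], [4000, 8000, 2000], [1000, 2000], [50, 80]))
```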
@article {pmid39315479,
year = {2024},
author = {Ghani, A and Heinrich, H and Brown, T and Schellhorn, K},
title = {Enhancing EEG Data Quality and Precision for Cloud-Based Clinical Applications: An Evaluation of the SLOG Framework.},
journal = {Biomedical physics & engineering express},
volume = {},
number = {},
pages = {},
doi = {10.1088/2057-1976/ad7e2d},
pmid = {39315479},
issn = {2057-1976},
abstract = {Automation is revamping our preprocessing pipelines and accelerating the delivery of personalized digital medicine. It improves efficiency, reduces costs, and allows clinicians to treat patients without significant delays. However, the influx of multimodal data highlights the need to protect sensitive information, such as clinical data, and to safeguard data fidelity. One of the neuroimaging modalities that produces large amounts of time-series data is Electroencephalography (EEG). It captures neural dynamics in a task or resting brain state with high temporal resolution. EEG electrodes placed on the scalp acquire electrical activity from the brain. These electrical potentials attenuate as they cross multiple layers of brain tissue and fluid, yielding signals that are relatively weak compared to noise, i.e., a low signal-to-noise ratio. EEG signals are further distorted by internal physiological artifacts, such as eye movements (EOG) or heartbeat (ECG), and by external noise, such as 50 Hz line noise. EOG artifacts, due to their proximity to the frontal brain regions, are particularly challenging to eliminate. Therefore, a widely used EOG rejection method, independent component analysis (ICA), demands manual inspection of the marked EOG components before they are rejected from the EEG data. We underscore the inaccuracy of automated ICA rejection and provide an auxiliary algorithm, Second Layer Inspection for EOG (SLOG), for the clinical environment. SLOG, based on spatial and temporal patterns of eye movements, re-examines the already marked EOG artifacts and confirms that no EEG-related activity is mistakenly eliminated in this artifact rejection step. SLOG achieved a 99% precision rate on the simulated dataset and 85% precision on the real EEG dataset. One of the primary considerations for cloud-based applications is operational cost, including computing power. Algorithms like SLOG allow us to maintain data fidelity and precision without overloading cloud platforms and maxing out budgets.},
}
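For context, the automated ICA-based EOG rejection that SLOG double-checks is commonly run as follows, sketched here with MNE-Python. This is the standard pipeline, not SLOG itself; the file name, filter band and component count are placeholders, and the recording is assumed to include an EOG channel.

```python
# Sketch of standard automated ICA-based EOG rejection (the step that
# SLOG re-examines before components are actually removed).
import mne
from mne.preprocessing import ICA

raw = mne.io.read_raw_fif('subject01_raw.fif', preload=True)  # hypothetical file
raw.filter(1., 40.)                     # band-pass before ICA fitting

ica = ICA(n_components=20, random_state=42)
ica.fit(raw)

# Automated marking of ocular components via correlation with the EOG
# channel; SLOG would re-inspect these against spatial/temporal
# eye-movement patterns before rejection.
eog_inds, scores = ica.find_bads_eog(raw)
ica.exclude = eog_inds
clean = ica.apply(raw.copy())           # reconstruct signal without EOG ICs
```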
@article {pmid39314732,
year = {2024},
author = {Khan, S and Jiangbin, Z and Ullah, F and Pervez Akhter, M and Khan, S and Awwad, FA and Ismail, EAA},
title = {Hybrid computing framework security in dynamic offloading for IoT-enabled smart home system.},
journal = {PeerJ. Computer science},
volume = {10},
number = {},
pages = {e2211},
pmid = {39314732},
issn = {2376-5992},
abstract = {In the distributed computing era, cloud computing has completely changed organizational operations by facilitating simple access to resources. However, the rapid development of the IoT has led to collaborative computing, which raises scalability and security challenges. To fully realize the potential of the Internet of Things (IoT) in smart home technologies, there is still a need for strong data security solutions, which are essential in dynamic offloading in conjunction with edge, fog, and cloud computing. This research on smart home challenges covers in-depth examinations of data security, privacy, processing speed, storage capacity restrictions, and analytics inside networked IoT devices. We introduce the Trusted IoT Big Data Analytics (TIBDA) framework as a comprehensive solution to reshape smart living. Our primary focus is mitigating pervasive data security and privacy issues. TIBDA incorporates robust trust mechanisms, prioritizing data privacy and reliability for secure processing and user information confidentiality within the smart home environment. We achieve this by employing a hybrid cryptosystem that combines Elliptic Curve Cryptography (ECC), Post Quantum Cryptography (PQC), and Blockchain technology (BCT) to protect user privacy and confidentiality. Additionally, we comprehensively compared four prominent Artificial Intelligence anomaly detection algorithms (Isolation Forest, Local Outlier Factor, One-Class SVM, and Elliptic Envelope). We utilized machine learning classification algorithms (random forest, k-nearest neighbors, support vector machines, linear discriminant analysis, and quadratic discriminant analysis) for detecting malicious and non-malicious activities in smart home systems. Furthermore, as the main part of the research, the TIBDA framework uses a dynamic artificial neural network (ANN) algorithm to design a hybrid computing system that integrates edge, fog, and cloud architecture and efficiently supports numerous users while processing data from IoT devices in real-time. The analysis shows that TIBDA outperforms these systems significantly across various metrics. In terms of response time, TIBDA demonstrated a reduction of 10-20% compared to the other systems under varying user loads, device counts, and transaction volumes. Regarding security, TIBDA's AUC values were consistently higher by 5-15%, indicating superior protection against threats. Additionally, TIBDA exhibited the highest trustworthiness with an uptime percentage 10-12% greater than its competitors. TIBDA's Isolation Forest algorithm achieved an accuracy of 99.30%, and the random forest algorithm achieved an accuracy of 94.70%, outperforming other methods by 8-11%. Furthermore, our ANN-based offloading decision-making model achieved a validation accuracy of 99% and reduced loss to 0.11, demonstrating significant improvements in resource utilization and system performance.},
}
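Of the four anomaly detectors compared, Isolation Forest (the paper's best performer) has a compact scikit-learn form. The sketch below runs it on made-up smart-home telemetry; the feature layout and contamination rate are assumptions.

```python
# Sketch of one of the four anomaly detectors the paper compares
# (Isolation Forest), on made-up smart-home feature vectors.
import numpy as np
from sklearn.ensemble import IsolationForest

rng = np.random.default_rng(0)
normal = rng.normal(0, 1, size=(500, 4))   # routine device telemetry
attacks = rng.normal(6, 1, size=(10, 4))   # injected anomalous activity
X = np.vstack([normal, attacks])

clf = IsolationForest(contamination=0.02, random_state=0).fit(X)
labels = clf.predict(X)                    # -1 = anomaly, 1 = normal
print('flagged:', int((labels == -1).sum()))
```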
@article {pmid39312829,
year = {2024},
author = {Lai, L and Liu, Y and Zhang, Y and Cao, Z and Yin, Y and Chen, X and Jin, J and Wu, S},
title = {Long-term spatiotemporal mapping in lacustrine environment by remote sensing:Review with case study, challenges, and future directions.},
journal = {Water research},
volume = {267},
number = {},
pages = {122457},
doi = {10.1016/j.watres.2024.122457},
pmid = {39312829},
issn = {1879-2448},
abstract = {Satellite remote sensing, unlike traditional ship-based sampling, possesses the advantage of revisit capability and provides over 40 years of data support for observing lake environments at local, regional, and global scales. In recent years, global freshwater and coastal waters have faced adverse environmental issues, including harmful phytoplankton blooms, eutrophication, and extreme temperatures. To comprehensively address the goal of 'reviewing the past, assessing the present, and predicting the future', research increasingly focuses on developing and producing algorithms and products for long-term and large-scale mapping. This paper provides a comprehensive review of related research, evaluating the current status, shortcomings, and future trends of remote sensing datasets, monitoring targets, technical methods, and data processing platforms. The analysis demonstrates that the transition to long-term spatiotemporal dynamic lake monitoring is thriving: (i) evolving from single data sources to satellite collaborative observations to keep a trade-off between temporal and spatial resolutions, (ii) shifting from single research targets to diversified and multidimensional objectives, (iii) progressing from empirical/mechanism models to machine/deep/transfer learning algorithms, (iv) moving from local processing to cloud-based platforms and parallel computing. Future directions include, but are not limited to: (i) establishing a global sampling data-sharing platform, (ii) developing precise atmospheric correction algorithms, (iii) building next-generation ocean color sensors and virtual constellation networks, (iv) introducing Interpretable Machine Learning (IML) and Explainable Artificial Intelligence (XAI) models, (v) integrating cloud computing, big data/models/computing, and Internet of Things (IoT) technologies, (vi) crossing disciplines with earth sciences, hydrology, computer science, and human geography, etc. In summary, this work offers valuable references and insights for academic research and government decision-making, which are crucial for enhancing the long-term tracking of aquatic ecological environments and achieving the Sustainable Development Goals (SDGs).},
}
@article {pmid39312513,
year = {2024},
author = {Shahzad, A and Chen, W and Shaheen, M and Zhang, Y and Ahmad, F},
title = {A robust algorithm for authenticated health data access via blockchain and cloud computing.},
journal = {PloS one},
volume = {19},
number = {9},
pages = {e0307039},
doi = {10.1371/journal.pone.0307039},
pmid = {39312513},
issn = {1932-6203},
abstract = {In modern healthcare, providers increasingly use cloud services to store and share electronic medical records. However, traditional cloud hosting, which depends on intermediaries, poses risks to privacy and security, including inadequate control over access, data auditing, and tracking data origins. Additionally, current schemes face significant limitations such as scalability concerns, high computational overhead, practical implementation challenges, and issues with interoperability and data standardization. Unauthorized data access by cloud providers further exacerbates these concerns. Blockchain technology, known for its secure and decentralized nature, offers a solution by enabling secure data auditing in sharing systems. This research integrates blockchain into healthcare for efficient record management. We proposed a blockchain-based method for secure EHR management and integrated Ciphertext-Policy Attribute-Based Encryption (CP-ABE) for fine-grained access control. The proposed algorithm combines blockchain and smart contracts with a cloud-based healthcare Service Management System (SMS) to ensure secure and accessible EHRs. Smart contracts automate key management, encryption, and decryption processes, enhancing data security and integrity. The blockchain ledger authenticates data transactions, while the cloud provides scalability. The SMS manages access requests, enhancing resource allocation and response times. A dual authentication system confirms patient keys before granting data access, with failed attempts leading to access revocation and incident logging. Our analyses show that this algorithm significantly improves the security and efficiency of health data exchanges. By combining blockchain's decentralized structure with the cloud's scalability, this approach strengthens EHR security protocols in modern healthcare settings.},
}
@article {pmid39311476,
year = {2024},
author = {Watson, A and Wozniak-O'Connor, V},
title = {The promise of artificial intelligence in health: Portrayals of emerging healthcare technologies.},
journal = {Sociology of health & illness},
volume = {},
number = {},
pages = {},
doi = {10.1111/1467-9566.13840},
pmid = {39311476},
issn = {1467-9566},
support = {CE200100005//Australian Research Council/ ; },
abstract = {Emerging technologies of artificial intelligence (AI) and automated decision-making (ADM) promise to advance many industries. Healthcare is a key locus for new developments, where operational improvements are magnified by the bigger-picture promise of improved care and outcomes for patients. Forming the zeitgeist of contemporary sociotechnical innovation in healthcare, media portrayals of these technologies can shape how they are implemented, experienced and understood across healthcare systems. This article identifies current applications of AI and ADM within Australian healthcare contexts and analyses how these technologies are being portrayed within news and industry media. It offers a categorisation of leading applications of AI and ADM: monitoring and tracking, data management and analysis, cloud computing, and robotics. Discussing how AI and ADM are depicted in relation to health and care practices, it examines the sense of promise that is enlivened in these representations. The article concludes by considering the implications of promissory discourses for how technologies are understood and integrated into practices and sites of healthcare.},
}
@article {pmid39311198,
year = {2024},
author = {Vitorino, R},
title = {Transforming Clinical Research: The Power of High-Throughput Omics Integration.},
journal = {Proteomes},
volume = {12},
number = {3},
pages = {},
doi = {10.3390/proteomes12030025},
pmid = {39311198},
issn = {2227-7382},
abstract = {High-throughput omics technologies have dramatically changed biological research, providing unprecedented insights into the complexity of living systems. This review presents a comprehensive examination of the current landscape of high-throughput omics pipelines, covering key technologies, data integration techniques and their diverse applications. It examines advances in next-generation sequencing, mass spectrometry and microarray platforms and highlights their contribution to data volume and precision. In addition, this review considers the critical role of bioinformatics tools and statistical methods in managing the large datasets generated by these technologies. By integrating multi-omics data, researchers can gain a holistic understanding of biological systems, leading to the identification of new biomarkers and therapeutic targets, particularly in complex diseases such as cancer. The review also discusses the integration of omics data into electronic health records (EHRs) and the potential of cloud computing and big data analytics to improve data storage, analysis and sharing. Despite significant advances, challenges remain, such as data complexity, technical limitations and ethical issues. Future directions include the development of more sophisticated computational tools and the application of advanced machine learning techniques, which are critical for addressing the complexity and heterogeneity of omics datasets. This review aims to serve as a valuable resource for researchers and practitioners, highlighting the transformative potential of high-throughput omics technologies in advancing personalized medicine and improving clinical outcomes.},
}
@article {pmid39309907,
year = {2024},
author = {Alruwaili, O and Tanveer, M and Alotaibi, FM and Abdelfattah, W and Armghan, A and Alserhani, FM},
title = {Securing the IoT-enabled smart healthcare system: A PUF-based resource-efficient authentication mechanism.},
journal = {Heliyon},
volume = {10},
number = {18},
pages = {e37577},
pmid = {39309907},
issn = {2405-8440},
abstract = {As the Internet of Things (IoT) continues its rapid expansion, cloud computing has become integral to various smart healthcare applications. However, the proliferation of digital health services raises significant concerns regarding security and data privacy, making the protection of sensitive medical information paramount. To effectively tackle these challenges, it is crucial to establish resilient network infrastructure and data storage systems capable of defending against malicious entities and permitting access exclusively to authorized users. This requires the deployment of a robust authentication mechanism, wherein medical IoT devices, users (such as doctors or nurses), and servers undergo registration with a trusted authority. The process entails users retrieving data from the cloud server, while IoT devices collect patient data. Before granting access to data retrieval or storage, the cloud server verifies the authenticity of both the user and the IoT device, ensuring secure and authorized interactions within the system. With millions of interconnected smart medical IoT devices autonomously gathering and analyzing vital patient data, the importance of robust security measures becomes increasingly evident. Standard security protocols are fundamental in fortifying smart healthcare applications against potential threats. To confront these issues, this paper introduces a secure and resource-efficient cloud-enabled authentication mechanism. Through empirical analysis, it is demonstrated that our authentication mechanism effectively reduces computational and communication overheads, thereby improving overall system efficiency. Furthermore, both informal and formal analyses affirm the mechanism's resilience against potential cyberattacks, highlighting its effectiveness in safeguarding smart healthcare applications.},
}
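The challenge-response authentication the paper builds on can be illustrated with a toy simulation. A real PUF derives its response from device-specific physical variation, which the HMAC below merely stands in for; all keys and message formats here are assumptions, not the paper's protocol.

```python
# Toy simulation of a PUF-style challenge-response flow: the trusted
# authority enrolls a challenge-response pair (CRP) from the device,
# and the server later verifies a fresh response against it. HMAC over
# a random secret stands in for the physical PUF behavior.
import hmac, hashlib, os

device_secret = os.urandom(32)          # stands in for the physical PUF

def puf_response(challenge: bytes) -> bytes:
    return hmac.new(device_secret, challenge, hashlib.sha256).digest()

# Enrollment with the trusted authority: store one CRP.
challenge = os.urandom(16)
stored_response = puf_response(challenge)

# Authentication: the server replays the challenge, the device answers.
assert hmac.compare_digest(puf_response(challenge), stored_response)
print('device authenticated')
```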
@article {pmid39307708,
year = {2024},
author = {Mai, KT and Liu, XT and Lin, XY and Liu, SY and Zhao, CK and Du, JB},
title = {[Progress in application of machine learning in epidemiology].},
journal = {Zhonghua liu xing bing xue za zhi = Zhonghua liuxingbingxue zazhi},
volume = {45},
number = {9},
pages = {1321-1326},
doi = {10.3760/cma.j.cn112338-20240322-00148},
pmid = {39307708},
issn = {0254-6450},
support = {2021YFC2700705//National Key Research and Development Program of China/ ; 202310312014Z//Undergraduate Innovation and Entrepreneurship Training Program/ ; },
mesh = {*Machine Learning ; Humans ; China/epidemiology ; Artificial Intelligence ; Data Mining/methods ; Algorithms ; Big Data ; Epidemiology ; },
abstract = {Population-based health data collection and analysis are important in epidemiological research. In recent years, with the rapid development of big data, the Internet and cloud computing, artificial intelligence has gradually attracted the attention of epidemiological researchers. More and more researchers are trying to use artificial intelligence algorithms for genome sequencing and medical image data mining, and for disease diagnosis, risk prediction and other tasks. In recent years, machine learning, a branch of artificial intelligence, has been widely used in epidemiological research. This paper summarizes the key fields and progress in the application of machine learning in epidemiology, reviews the development history of machine learning, analyzes the classic cases and current challenges in its application in epidemiological research, and introduces the current application scenarios and future development trends of machine learning and artificial intelligence algorithms for the better exploration of the epidemiological research value of massive medical health data in China.},
}
@article {pmid39300104,
year = {2024},
author = {Mangalampalli, S and Karri, GR and Ratnamani, MV and Mohanty, SN and Jabr, BA and Ali, YA and Ali, S and Abdullaeva, BS},
title = {Efficient deep reinforcement learning based task scheduler in multi cloud environment.},
journal = {Scientific reports},
volume = {14},
number = {1},
pages = {21850},
pmid = {39300104},
issn = {2045-2322},
abstract = {The task scheduling problem (TSP) is a huge challenge in the cloud computing paradigm, as the number of tasks arriving at the cloud application platform varies from time to time and the tasks have variable lengths and runtime capacities. These tasks may be generated from various heterogeneous resources, and their arrival at the cloud console directly affects the performance of the cloud paradigm by increasing makespan, energy consumption and resource costs. Traditional task scheduling algorithms cannot handle these types of complex workloads in the cloud paradigm. Many authors have developed task scheduling algorithms using metaheuristic techniques and hybrid approaches, but these algorithms give near-optimal solutions, and the TSP remains a highly challenging and dynamic scenario as it resembles an NP-hard problem. Therefore, to tackle the TSP in the cloud computing paradigm and schedule tasks in an effective way, we formulated an Adaptive Task Scheduler which segments all the tasks arriving at the cloud console into sub-tasks and feeds these to a scheduler modeled by an Improved Asynchronous Advantage Actor Critic Algorithm (IA3C) to generate schedules. This scheduling process is carried out in two stages. In the first stage, all incoming tasks are segmented into sub-tasks. After segmentation, these sub-tasks are grouped together according to their size, execution time and communication time and fed to the (ATSIA3C) scheduler. In the second stage, the scheduler checks the above-mentioned constraints and dispatches the sub-tasks to VMs with suitable processing capacity residing in datacenters. The proposed ATSIA3C is simulated on CloudSim. Extensive simulations are conducted using both fabricated worklogs and real-time supercomputing worklogs. Our proposed mechanism is evaluated against baseline algorithms, i.e. RATS-HM, AINN-BPSO and MOABCQ. The results show that our proposed ATSIA3C outperforms existing task schedulers, improving makespan by 70.49%, resource cost by 77.42% and energy consumption by 74.24% in a multi-cloud environment.},
}
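The advantage-actor-critic machinery behind IA3C can be illustrated compactly. The PyTorch sketch below shows the generic A2C loss computation, not the paper's IA3C; the network sizes, observation encoding and two-action space are placeholders.

```python
# Generic advantage-actor-critic update of the kind A3C-style schedulers
# build on: the critic's value estimate turns a sampled return into an
# advantage that scales the policy gradient.
import torch
import torch.nn as nn

obs_dim, n_actions = 8, 2   # e.g., sub-task features + VM loads; 2 candidate VMs
policy = nn.Sequential(nn.Linear(obs_dim, 32), nn.ReLU(), nn.Linear(32, n_actions))
value = nn.Sequential(nn.Linear(obs_dim, 32), nn.ReLU(), nn.Linear(32, 1))

obs = torch.randn(1, obs_dim)           # one scheduler observation
ret = torch.tensor([[1.5]])             # sampled discounted return

dist = torch.distributions.Categorical(logits=policy(obs))
action = dist.sample()                  # e.g., which VM gets the sub-task
advantage = ret - value(obs)            # how much better than expected

actor_loss = -(dist.log_prob(action) * advantage.detach()).mean()
critic_loss = advantage.pow(2).mean()
loss = actor_loss + 0.5 * critic_loss
loss.backward()                         # gradients for an optimizer step
```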
@article {pmid39296026,
year = {2024},
author = {Azizi, MA and Niknam, T and Dehghani, M and Jokar, H},
title = {Cloud-fog architecture-based control of smart island microgrid in master-slave organization using disturbance observer-based hybrid backstepping sliding mode controller.},
journal = {Heliyon},
volume = {10},
number = {17},
pages = {e37453},
pmid = {39296026},
issn = {2405-8440},
abstract = {Distributed control is an effective method to coordinate a microgrid with various components, and in a smart microgrid, communication graph layouts are essential since changing the topology unexpectedly could disrupt the operation of the distributed controllers and cause an imbalance between production and load. Hence, reducing the data exchanged between units and the system operator is essential in order to reduce the transmitted data volume and computational burden. For this purpose, an islanded multi-agent microgrid using cloud-fog computing is proposed here, in order to reduce the computing burden on the central control unit as well as the data exchange among units. To balance production power and loads in a smart island with stable voltage/frequency, a hybrid backstepping sliding mode controller (BSMC) with a disturbance observer (DO) is suggested to control voltage/frequency and current in the MG-based master-slave organization. Therefore, this paper proposes a DO-driven BSMC for controlling the voltage/frequency and power of energy sources within a master-slave organization; in addition, the study proposes cloud-fog computing for enhancing performance, reducing the transferred data volume, and processing information on time. In extensive simulations, the suggested controller shows a reduction in steady-state error, a fast response, and a total harmonic distortion (THD) below 0.33% for nonlinear and linear loads. The fog layer serves as a local processing level, so it reduces the data exchanged between cloud and fog nodes.},
}
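For reference, a generic form of a sliding-mode voltage-control layer combined with a disturbance observer, written in the usual textbook notation rather than the paper's own symbols:

```latex
% Generic sliding-mode layer of a backstepping SMC voltage controller:
% tracking error, sliding surface, and reaching law in which the
% disturbance observer's estimate \hat{d} cancels the lumped disturbance.
\begin{align}
  e &= v_{\mathrm{ref}} - v, \\
  s &= \dot{e} + \lambda e, \qquad \lambda > 0, \\
  u &= u_{\mathrm{eq}} - \hat{d} - k\,\mathrm{sgn}(s), \qquad k > 0.
\end{align}
```

The observer term reduces the switching gain k needed for robustness, which is one standard route to the low THD and small steady-state error the paper reports.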
@article {pmid39295428,
year = {2024},
author = {Badorrek, S and Franklin, J and McBride, KA and Conway, L and Williams, K},
title = {Primary care practitioner and patient perspectives on care following bariatric surgery: A meta-synthesis of qualitative research.},
journal = {Obesity reviews : an official journal of the International Association for the Study of Obesity},
volume = {},
number = {},
pages = {e13829},
doi = {10.1111/obr.13829},
pmid = {39295428},
issn = {1467-789X},
support = {//University of Sydney/ ; },
abstract = {Primary care is central to ongoing health care following bariatric surgery, and patients indicate a preference for receiving follow-up support from their primary care practitioner (PCP). This meta-synthesis investigates the perspectives of both PCPs and patients on post-bariatric surgery care provided by PCPs. The aim was to synthesize themes from qualitative research to recommend improvements in post-bariatric surgery clinical care in primary care settings. Systematic searches of Scopus, Medline, EMBASE, PsycINFO, the Cochrane Library, and Google Scholar resulted in the inclusion of eight papers in the meta-synthesis. Papers were critiqued using the Critical Appraisal Skills Program (CASP) and thematically coded in Quirkos Cloud. Seven themes were reached by author consensus: stigma and judgment; clinician barriers and facilitators; patient-related support needs; communication considerations; patient context or determinants; health care setting; and adapting to life after surgery. PCPs reported barriers including poor communication and guidance from bariatric surgery centers, limited knowledge and training in bariatric patient care, and patients who may have unrealistic outcome expectations and poor health literacy. Patients seek comprehensive care from their PCP; however, barriers hindering the provision of this care include adverse surgical outcomes, a poor relationship with their PCP, and limited and short-term follow-up care from the PCP. Insights from this meta-synthesis offer actionable recommendations for PCPs and bariatric surgery centers to enhance patient care immediately.},
}
@article {pmid39281853,
year = {2024},
author = {Cruz-Almeida, Y and Mehta, B and Haelterman, NA and Johnson, AJ and Heiting, C and Ernberg, M and Orange, D and Lotz, M and Boccanfuso, J and Smith, SB and Pela, M and Boline, J and Otero, M and Allen, K and Perez, D and Donnelly, C and Almarza, A and Olmer, M and Balkhi, H and Wagenaar, J and Martone, M and , },
title = {Clinical and biobehavioral phenotypic assessments and data harmonization for the RE-JOIN research consortium: Recommendations for common data element selection.},
journal = {Neurobiology of pain (Cambridge, Mass.)},
volume = {16},
number = {},
pages = {100163},
pmid = {39281853},
issn = {2452-073X},
abstract = {BACKGROUND: The Restoring Joint Health and Function to Reduce Pain (RE-JOIN) Consortium is part of the Helping to End Addiction Long-term® (HEAL) Initiative. HEAL is an ambitious, NIH-wide initiative to speed scientific solutions to stem the national opioid public health crisis. The RE-JOIN consortium's over-arching goal is to define how chronic joint pain-mediating neurons innervate different articular and peri-articular tissues, with a focus on the knee and temporomandibular joints (TMJ) across species employing the latest neuroscience approaches. The aim of this manuscript is to elucidate the human data gathered by the RE-JOIN consortium, as well as to expound upon its underlying rationale and the methodologies and protocols for harmonization and standardization that have been instituted by the RE-JOIN Consortium.
METHODS: The consortium-wide human models working subgroup established the RE-JOIN minimal harmonized data elements that will be collected across all human studies and set the stage to develop parallel pre-clinical data collection standards. Data harmonization considerations included requirements from the HEAL program and recommendations from the consortium's researchers and experts on informatics, knowledge management, and data curation.
RESULTS: Multidisciplinary experts, including preclinical and clinical researchers as well as clinician-scientists, developed the RE-JOIN Minimal Human Data Standard with required domains and outcome measures to be collected across projects and institutions. The RE-JOIN minimal data standard will include HEAL Common Data Elements (CDEs) (e.g., standardized demographics, general pain, psychosocial and functional measures) and RE-JOIN common data elements (R-CDE) (i.e., both general and joint-specific standardized and clinically important self-reported pain and function measures, as well as pressure pain thresholds as part of quantitative sensory testing). In addition, discretionary, site-specific measures will be collected by individual institutions (e.g., expanded quantitative sensory testing and gait biomechanical assessments), specific to the knee or TMJ. Research teams will submit datasets of standardized metadata to the RE-JOIN Data Coordinating Center (DCC) via a secure cloud-based central data repository and computing infrastructure for researchers to share and conduct analyses on data collected by or acquired for RE-JOIN. RE-JOIN datasets will have protected health information (PHI) removed and be publicly available on the SPARC portal and accessible through the HEAL Data Ecosystem.
CONCLUSION: Data Harmonization efforts provide the multidisciplinary consortium with an opportunity to effectively collaborate across decentralized research teams, and data standardization sets the framework for efficient future analyses of RE-JOIN data collected by the consortium. The harmonized phenotypic information obtained will significantly enhance our understanding of the neurobiology of the pain-pathology relationships in humans, providing valuable insights for comparison with pre-clinical models.},
}
@article {pmid39278954,
year = {2024},
author = {Manogaran, N and Nandagopal, M and Abi, NE and Seerangan, K and Balusamy, B and Selvarajan, S},
title = {Integrating meta-heuristic with named data networking for secure edge computing in IoT enabled healthcare monitoring system.},
journal = {Scientific reports},
volume = {14},
number = {1},
pages = {21532},
pmid = {39278954},
issn = {2045-2322},
mesh = {*Internet of Things ; *Computer Security ; Humans ; *Cloud Computing ; Heuristics ; Algorithms ; Delivery of Health Care ; Computer Communication Networks ; },
abstract = {Advancing technology with the Internet of Things (IoT) plays a crucial role in accomplishing remote medical care observation, where effective and secure healthcare information retrieval is complex. However, IoT systems have restricted resources; hence it is complex to attain effective and secure healthcare information acquisition. The idea of smart healthcare has developed in diverse regions, where small-scale implementations of medical facilities are evaluated. In IoT-aided medical devices, the security of the IoT systems and related information is highly essential; edge computing, on the other hand, is a significant framework that rectifies their processing and computational issues. Edge computing is inexpensive, and it is a powerful framework for offering low-latency information assistance by enhancing the computation and transmission speed of IoT systems in the medical sector. The main intention of this work is to design a secure framework for edge computing in IoT-enabled healthcare systems using heuristic-based authentication and Named Data Networking (NDN). There are three layers in the proposed model. In the first layer, many IoT devices are connected together, and using cluster head formation, the patients transmit their data to the edge cloud layer. The edge cloud layer is responsible for storage and computing resources for rapidly caching and providing medical data. In the patient layer, a new heuristic-based sanitization algorithm called Revised Position of Cat Swarm Optimization (RPCSO) with NDN is used for hiding the sensitive data that should not be leaked to unauthorized users. This authentication procedure is adopted as a multi-objective function key generation procedure considering constraints like hiding failure rate, information preservation rate, and degree of modification. Further, the data from the edge cloud layer is transferred to the user layer, where optimal key generation with NDN-based restoration is adopted, thus achieving efficient and secure medical data retrieval. The framework is evaluated quantitatively on diverse healthcare datasets from the University of California Irvine (UCI) and Kaggle repositories, and the experimental analysis shows the superior performance of the proposed model in terms of latency and cost when compared to existing solutions. The proposed model is comparatively analyzed against existing algorithms such as Cat Swarm Optimization (CSO), the Osprey Optimization Algorithm (OOA), Mexican Axolotl Optimization (MAO), and the Single Candidate Optimizer (SCO). Similarly, cryptography schemes like Rivest-Shamir-Adleman (RSA), the Advanced Encryption Standard (AES), Elliptic Curve Cryptography (ECC), and Data Sanitization and Restoration (DSR) are applied and compared with RPCSO in the proposed work. The results of the proposed model are compared on the basis of the best, worst, mean, median and standard deviation. The proposed RPCSO outperforms all other models, with values of 0.018069361, 0.50564046, 0.112643119, 0.018069361, 0.156968355 and 0.283597992, 0.467442652, 0.32920734, 0.328581887, 0.063687386 for dataset 1 and dataset 2, respectively.},
}
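The sanitization step above is described as a key-generation search under three constraints. As a concrete illustration, here is one plausible way to fold those constraints into a single fitness value for a metaheuristic like RPCSO; the weights and metric definitions are assumptions, not the paper's formulation.

```python
# Illustrative multi-objective fitness for sanitization key generation:
# a weighted mix of hiding failure rate, information preservation rate,
# and degree of modification. Weights and ranges are assumptions.
def fitness(hiding_failure, info_preservation, modification,
            w=(0.4, 0.4, 0.2)):
    """All inputs in [0, 1]; lower fitness is better."""
    return (w[0] * hiding_failure
            + w[1] * (1.0 - info_preservation)   # reward preservation
            + w[2] * modification)               # penalize heavy edits

print(fitness(hiding_failure=0.05, info_preservation=0.92, modification=0.10))
```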
@article {pmid39275461,
year = {2024},
author = {Alharthi, S and Alshamsi, A and Alseiari, A and Alwarafy, A},
title = {Auto-Scaling Techniques in Cloud Computing: Issues and Research Directions.},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {17},
pages = {},
pmid = {39275461},
issn = {1424-8220},
support = {12T047//United Arab Emirates University/ ; },
abstract = {In the dynamic world of cloud computing, auto-scaling stands as a beacon of efficiency, dynamically aligning resources with fluctuating demands. This paper presents a comprehensive review of auto-scaling techniques, highlighting significant advancements and persisting challenges in the field. First, we overview the fundamental principles and mechanisms of auto-scaling, including its role in improving cost efficiency, performance, and energy consumption in cloud services. We then discuss various strategies employed in auto-scaling, ranging from threshold-based rules and queuing theory to sophisticated machine learning and time series analysis approaches. After that, we explore the critical issues in auto-scaling practices and review several studies that demonstrate how these challenges can be addressed. We then conclude by offering insights into several promising research directions, emphasizing the development of predictive scaling mechanisms and the integration of advanced machine learning techniques to achieve more effective and efficient auto-scaling solutions.},
}
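Of the strategy families reviewed, threshold-based rules are the simplest to make concrete. The sketch below shows a generic reactive autoscaler loop with a cooldown guard against oscillation; the thresholds, polling interval, and the metric/actuator callbacks are assumptions.

```python
# Generic threshold-based autoscaler loop: scale out above an upper CPU
# bound, scale in below a lower bound, with a cooldown between actions.
import time

UPPER, LOWER, COOLDOWN_S = 0.75, 0.30, 120

def autoscale(get_avg_cpu, replicas, set_replicas):
    """get_avg_cpu/set_replicas are caller-supplied metric and actuator hooks."""
    last_action = 0.0
    while True:
        cpu = get_avg_cpu()
        if time.monotonic() - last_action > COOLDOWN_S:
            if cpu > UPPER:
                replicas += 1
                set_replicas(replicas)
                last_action = time.monotonic()
            elif cpu < LOWER and replicas > 1:
                replicas -= 1
                set_replicas(replicas)
                last_action = time.monotonic()
        time.sleep(15)   # polling interval
```

Predictive approaches surveyed in the paper replace the reactive `get_avg_cpu()` reading with a forecast of future load, acting before the thresholds are crossed.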
@article {pmid39269931,
year = {2024},
author = {Coleman, JR and Baker, JN and Ketkar, S and Butler, AM and Williams, L and Hammonds-Odie, L and Atkinson, EG and Murray, DD and Lee, B and Worley, KC},
title = {Development and evaluation of a training curriculum to engage researchers on accessing and analyzing the All of Us data.},
journal = {Journal of the American Medical Informatics Association : JAMIA},
volume = {},
number = {},
pages = {},
doi = {10.1093/jamia/ocae240},
pmid = {39269931},
issn = {1527-974X},
support = {OT2 OD031932/NH/NIH HHS/United States ; 1 OT2 OD026549//Office of the Director: Regional Medical Centers/ ; HHSN 263201600085U//Federally Qualified Health Centers/ ; 5 U2C OD023196//Data and Research Center/ ; 1 U24 OD023163//Participant Technology Systems Center/ ; 3 OT2 OD023205//Communications and Engagement/ ; },
abstract = {OBJECTIVE: The All of Us Evenings with Genetics (EwG) Research Program at Baylor College of Medicine (BCM), funded to engage research scholars to work with the All of Us data, developed a training curriculum for the Researcher Workbench, the platform to access and analyze All of Us data. All of Us EwG developed the curriculum so that it could teach scholars regardless of their skills and background in programming languages and cloud computing. All of Us EwG delivered this curriculum at the first annual All of Us EwG Faculty Summit in May 2022. The curriculum was evaluated both during and after the Faculty Summit so that it could be improved for future training.
MATERIALS AND METHODS: Surveys were administered to assess scholars' familiarity with the programming languages and computational tools required to use the Researcher Workbench. The curriculum was developed using backward design and was informed by the survey results, a review of available resources for training users on the Researcher Workbench, and All of Us EwG members' collective experience training students. The curriculum was evaluated using feedback surveys during the Faculty Summit as well as virtual meetings and emails following the Faculty Summit.
RESULTS: The evaluation results demonstrated the success of the curriculum and identified areas for improvement.
DISCUSSION AND CONCLUSION: The curriculum has been adapted and improved in response to evaluations and in response to changes to the All of Us data and infrastructure to train more researchers through this program and other scholarly programs.},
}
@article {pmid39268148,
year = {2024},
author = {Rorden, C and Webster, M and Drake, C and Jenkinson, M and Clayden, JD and Li, N and Hanayik, T},
title = {niimath and fslmaths: replication as a method to enhance popular neuroimaging tools.},
journal = {Aperture neuro},
volume = {4},
number = {},
pages = {},
pmid = {39268148},
issn = {2957-3963},
abstract = {Neuroimaging involves the acquisition of extensive 3D images and 4D time series data to gain insights into brain structure and function. The analysis of such data necessitates both spatial and temporal processing. In this context, "fslmaths" has established itself as a foundational software tool within our field, facilitating domain-specific image processing. Here, we introduce "niimath," a clone of fslmaths. While the term "clone" often carries negative connotations, we illustrate the merits of replicating widely-used tools, touching on aspects of licensing, performance optimization, and portability. For instance, our work enables the popular functions of fslmaths to be disseminated in various forms, such as a high-performance compiled R package known as "imbibe", a Windows executable, and a WebAssembly plugin compatible with JavaScript. This versatility is demonstrated through our NiiVue live demo web page. This application allows 'edge computing' where image processing can be done with a zero-footprint tool that runs on any web device without requiring private data to be shared to the cloud. Furthermore, our efforts have contributed back to FSL, which has integrated the optimizations that we've developed. This synergy has enhanced the overall transparency, utility and efficiency of tools widely relied upon in the neuroimaging community.},
}
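fslmaths-style tools chain voxelwise operations over 3D volumes. The sketch below mimics the common `-s <sigma> -thr <t> -bin` idiom with plain NumPy on a synthetic volume; it is an illustration of the operations, not code from niimath or fslmaths, and a boxcar filter stands in for fslmaths' Gaussian smoothing.

```python
# fslmaths-style voxelwise pipeline sketched with NumPy on a synthetic volume,
# mimicking the idiom `fslmaths in.nii -s 1 -thr 0.5 -bin out.nii`.
import numpy as np

def thr(vol, t):               # zero voxels below threshold
    out = vol.copy()
    out[out < t] = 0.0
    return out

def binarize(vol):             # nonzero -> 1
    return (vol != 0).astype(np.float32)

def smooth_boxcar(vol, r=1):   # crude 3D mean filter in place of Gaussian -s
    pad = np.pad(vol, r, mode="edge")
    out = np.zeros_like(vol)
    for dx in range(-r, r + 1):
        for dy in range(-r, r + 1):
            for dz in range(-r, r + 1):
                out += pad[r+dx:r+dx+vol.shape[0],
                           r+dy:r+dy+vol.shape[1],
                           r+dz:r+dz+vol.shape[2]]
    return out / (2 * r + 1) ** 3

vol = np.random.default_rng(0).normal(size=(16, 16, 16)).astype(np.float32)
mask = binarize(thr(smooth_boxcar(vol), 0.5))
print(mask.sum(), "voxels survive")
```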
@article {pmid39266450,
year = {2024},
author = {Gnimpieba, EZ and Hartman, TW and Do, T and Zylla, J and Aryal, S and Haas, SJ and Agany, DDM and Gurung, BDS and Doe, V and Yosufzai, Z and Pan, D and Campbell, R and Huber, VC and Sani, R and Gadhamshetty, V and Lushbough, C},
title = {Biofilm marker discovery with cloud-based dockerized metagenomics analysis of microbial communities.},
journal = {Briefings in bioinformatics},
volume = {25},
number = {Supplement_1},
pages = {},
doi = {10.1093/bib/bbae429},
pmid = {39266450},
issn = {1477-4054},
support = {#1849206//National Science Foundation/ ; //Institutional Development Award/ ; /GM/NIGMS NIH HHS/United States ; P20GM103443/NH/NIH HHS/United States ; },
mesh = {*Biofilms/growth & development ; *Metagenomics/methods ; Microbiota/genetics ; Cloud Computing ; Humans ; Computational Biology/methods ; },
abstract = {In an environment, microbes often work in communities to achieve most of their essential functions, including the production of essential nutrients. Microbial biofilms are communities of microbes that attach to a nonliving or living surface by embedding themselves into a self-secreted matrix of extracellular polymeric substances. These communities work together to enhance their colonization of surfaces, produce essential nutrients, and achieve their essential functions for growth and survival. They often consist of diverse microbes including bacteria, viruses, and fungi. Biofilms play a critical role in influencing plant phenotypes and human microbial infections. Understanding how these biofilms impact plant health, human health, and the environment is important for analyzing genotype-phenotype-driven rule-of-life functions. Such fundamental knowledge can be used to precisely control the growth of biofilms on a given surface. Metagenomics is a powerful tool for analyzing biofilm genomes through function-based gene and protein sequence identification (functional metagenomics) and sequence-based function identification (sequence metagenomics). Metagenomic sequencing enables a comprehensive sampling of all genes in all organisms present within a biofilm sample. However, the complexity of biofilm metagenomic study warrants the increasing need to follow the Findability, Accessibility, Interoperability, and Reusable (FAIR) Guiding Principles for scientific data management. This will ensure that scientific findings can be more easily validated by the research community. This study proposes a dockerized, self-learning bioinformatics workflow to increase the community adoption of metagenomics toolkits in a metagenomics and meta-transcriptomics investigation. Our biofilm metagenomics workflow self-learning module includes integrated learning resources with an interactive dockerized workflow. This module will allow learners to analyze resources that are beneficial for aggregating knowledge about biofilm marker genes, proteins, and metabolic pathways as they define the composition of specific microbial communities. Cloud and dockerized technology can allow novice learners-even those with minimal knowledge in computer science-to use complicated bioinformatics tools. Our cloud-based, dockerized workflow splits biofilm microbiome metagenomics analyses into four easy-to-follow submodules. A variety of tools are built into each submodule. As students navigate these submodules, they learn about each tool used to accomplish the task. The downstream analysis is conducted using processed data obtained from online resources or raw data processed via Nextflow pipelines. This analysis takes place within Vertex AI's Jupyter notebook instance with R and Python kernels. Subsequently, results are stored and visualized in Google Cloud storage buckets, alleviating the computational burden on local resources. The result is a comprehensive tutorial that guides bioinformaticians of any skill level through the entire workflow. It enables them to comprehend and implement the necessary processes involved in this integrated workflow from start to finish. This manuscript describes the development of a resource module that is part of a learning platform named "NIGMS Sandbox for Cloud-based Learning" https://github.com/NIGMS/NIGMS-Sandbox. The overall genesis of the Sandbox is described in the editorial NIGMS Sandbox [1] at the beginning of this Supplement. 
This module delivers learning materials on cloud-based, dockerized metagenomics analysis in an interactive format that uses appropriate cloud resources for data access and analyses.},
}
@article {pmid39261726,
year = {2024},
author = {Sharma, D and Rath, SP and Kundu, B and Korkmaz, A and S, H and Thompson, D and Bhat, N and Goswami, S and Williams, RS and Goswami, S},
title = {Linear symmetric self-selecting 14-bit kinetic molecular memristors.},
journal = {Nature},
volume = {},
number = {},
pages = {},
pmid = {39261726},
issn = {1476-4687},
abstract = {Artificial Intelligence (AI) is the domain of large resource-intensive data centres that limit access to a small community of developers[1,2]. Neuromorphic hardware promises greatly improved space and energy efficiency for AI but is presently only capable of low-accuracy operations, such as inferencing in neural networks[3-5]. Core computing tasks of signal processing, neural network training and natural language processing demand far higher computing resolution, beyond that of individual neuromorphic circuit elements[6-8]. Here we introduce an analog molecular memristor based on a Ru-complex of an azo-aromatic ligand with 14-bit resolution. Precise kinetic control over a transition between two thermodynamically stable molecular electronic states facilitates 16,520 distinct analog conductance levels, which can be linearly and symmetrically updated or written individually in one time step, substantially simplifying the weight update procedure over existing neuromorphic platforms[3]. The circuit elements are unidirectional, facilitating a selector-less 64 × 64 crossbar-based dot-product engine that enables vector-matrix multiplication, including Fourier transform, in a single time step. We achieved more than 73 dB signal-to-noise-ratio, four orders of magnitude improvement over the state-of-the-art methods[9-11], while consuming 460× less energy than digital computers[12,13]. Accelerators leveraging these molecular crossbars could transform neuromorphic computing, extending it beyond niche applications and augmenting the core of digital electronics from the cloud to the edge[12,13].},
}
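The core computational claim above is single-step vector-matrix multiplication on a selector-less 64 × 64 crossbar with 14-bit (16,384-level) conductances. The NumPy sketch below illustrates only the arithmetic of such a dot-product engine, with device physics abstracted away; the conductance range is an assumed value, and the level count follows the abstract.

```python
# Sketch of vector-matrix multiplication on a 64x64 memristive crossbar:
# weights are stored as conductances quantized to 14 bits (16,384 levels,
# matching the abstract), and each output line sums currents I = G_ij * V_i
# per Ohm's and Kirchhoff's laws, i.e. y = G^T v in one analog step.
import numpy as np

LEVELS = 2 ** 14                      # 16,384 distinct conductance states
G_MAX = 1e-4                          # assumed max conductance, in siemens

def program_crossbar(weights):
    """Map real-valued weights in [0, 1] onto quantized conductances."""
    codes = np.round(np.clip(weights, 0, 1) * (LEVELS - 1))
    return codes / (LEVELS - 1) * G_MAX

def crossbar_vmm(G, v):
    """Column currents of the crossbar: y_j = sum_i G_ij * v_i."""
    return G.T @ v

rng = np.random.default_rng(1)
W = rng.random((64, 64))              # target weight matrix
G = program_crossbar(W)
v = rng.random(64)                    # input voltages
y = crossbar_vmm(G, v)
exact = (W * G_MAX).T @ v             # unquantized reference
print("max relative error:", np.max(np.abs(y - exact) / np.abs(exact)))
```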
@article {pmid39261468,
year = {2024},
author = {Saad, F and Burnim, J and Carroll, C and Patton, B and Köster, U and A Saurous, R and Hoffman, M},
title = {Scalable spatiotemporal prediction with Bayesian neural fields.},
journal = {Nature communications},
volume = {15},
number = {1},
pages = {7942},
pmid = {39261468},
issn = {2041-1723},
abstract = {Spatiotemporal datasets, which consist of spatially-referenced time series, are ubiquitous in diverse applications, such as air pollution monitoring, disease tracking, and cloud-demand forecasting. As the scale of modern datasets increases, there is a growing need for statistical methods that are flexible enough to capture complex spatiotemporal dynamics and scalable enough to handle many observations. This article introduces the Bayesian Neural Field (BAYESNF), a domain-general statistical model that infers rich spatiotemporal probability distributions for data-analysis tasks including forecasting, interpolation, and variography. BAYESNF integrates a deep neural network architecture for high-capacity function estimation with hierarchical Bayesian inference for robust predictive uncertainty quantification. Evaluations against prominent baselines show that BAYESNF delivers improvements on prediction problems from climate and public health data containing tens to hundreds of thousands of measurements. Accompanying the paper is an open-source software package (https://github.com/google/bayesnf) that runs on GPU and TPU accelerators through the JAX machine learning platform.},
}
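The central idea of a neural field is a deep network that maps spatiotemporal coordinates (plus seasonal features) directly to a field value. The sketch below is a minimal, untrained NumPy forward pass of that idea under assumed feature choices; it is not the bayesnf package API (see https://github.com/google/bayesnf for the real implementation, which adds hierarchical Bayesian inference over the weights).

```python
# Minimal sketch of the neural-field idea behind BayesNF: an MLP maps
# (lat, lon, time) plus seasonal Fourier features of time to a prediction.
# Illustrative only; not the bayesnf API, and the network is untrained.
import numpy as np

def features(lat, lon, t, period=52.0):
    # raw coordinates plus seasonal Fourier features of time
    return np.stack([lat, lon, t,
                     np.sin(2 * np.pi * t / period),
                     np.cos(2 * np.pi * t / period)], axis=-1)

def mlp_forward(x, params):
    h = x
    for W, b in params[:-1]:
        h = np.tanh(h @ W + b)
    W, b = params[-1]
    return (h @ W + b).squeeze(-1)     # predicted field value

rng = np.random.default_rng(0)
sizes = [5, 64, 64, 1]
params = [(rng.normal(0, 0.1, (m, n)), np.zeros(n))
          for m, n in zip(sizes[:-1], sizes[1:])]

x = features(lat=np.array([47.6]), lon=np.array([-122.3]), t=np.array([12.0]))
print(mlp_forward(x, params))          # one (untrained) prediction
```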
@article {pmid39258203,
year = {2024},
author = {Sun, W and Tohirovich Dedahanov, A and Li, WP and Young Shin, H},
title = {Sanctions and opportunities: Factors affecting China's high-tech SMEs adoption of artificial intelligence computing leasing business.},
journal = {Heliyon},
volume = {10},
number = {16},
pages = {e36620},
doi = {10.1016/j.heliyon.2024.e36620},
pmid = {39258203},
issn = {2405-8440},
abstract = {Due to sanctions, more Chinese high-tech SMEs are turning to renting AI computing power from cloud service providers. It is therefore necessary to offer suggestions to help China's high-tech SMEs better develop AI applications through computing power leasing. Because traditional theories struggle to explain this new technology adoption behavior, this research combines and extends the TTF and UTAUT2 theories in an empirical study. A total of 387 questionnaires were received; after incomplete and invalid questionnaires were excluded, 281 valid questionnaires remained. The results indicate that SME innovativeness, perceived risk, performance expectancy, price value, and task-technology fit are all significantly related to usage, whereas task-technology fit significantly moderates the other relationships. The results offer a variety of suggestions for China's high-tech SMEs to better develop AI applications through computing power leasing in the context of sanctions. This study not only suggests ways to increase the competitiveness of SMEs by optimizing leasing services but also gives direction to investors' investment decisions. The findings are also applicable to the future large-scale application of China's domestic AI chips in computing power leasing scenarios.},
}
@article {pmid39253244,
year = {2024},
author = {Wang, H and Zhang, Y and Wang, XA and Yang, X},
title = {An improved identity-based public audit protocol for cloud storage.},
journal = {Heliyon},
volume = {10},
number = {16},
pages = {e36273},
doi = {10.1016/j.heliyon.2024.e36273},
pmid = {39253244},
issn = {2405-8440},
abstract = {With the rapid development of informatization, vast amounts of data are continuously generated and accumulated, leading to the emergence of cloud storage services. However, data stored in the cloud is beyond the control of users, posing various security risks. Cloud data auditing technology enables the inspection of data integrity in the cloud without the necessity of downloading the data. Among such technologies, public auditing schemes have developed rapidly because they avoid additional auditing expenses for users. However, malicious third-party auditors can compromise data privacy. This paper proposes an improved identity-based cloud auditing scheme that can resist malicious auditors. The scheme builds on an existing identity-based public auditing scheme that uses blockchain to prevent malicious auditing; we found that scheme to be insecure because a malicious cloud server can forge authentication tags for outsourced data blocks, whereas our scheme does not have these security flaws. Through security proofs and performance analysis, we further demonstrate that our scheme is secure and efficient. Additionally, our scheme has typical application scenarios.},
}
@article {pmid39253170,
year = {2024},
author = {Mingwei, YU and Feng, LI and Yonggang, GUO and Libin, SU and Deshun, QIN},
title = {Study of the patterns of variations in ice lakes and the factors influencing these changes on the southeastern Tibetan plateau.},
journal = {Heliyon},
volume = {10},
number = {16},
pages = {e36406},
doi = {10.1016/j.heliyon.2024.e36406},
pmid = {39253170},
issn = {2405-8440},
abstract = {The ice lakes in the southeastern Qinghai-Tibet Plateau have exhibited a pronounced expansion against the backdrop of global warming, consequently amplifying the local risk of ice lake outburst disasters. However, surveys of ice lake changes in the entire region have consistently been incomplete due to the prevalent high cloud density. On the basis of Landsat remote sensing images and the Google Earth Engine (GEE) cloud computing platform, in this study a fully convolutional segmentation algorithm is utilized to accurately and comprehensively map the regional distribution of ice lakes in southeastern Tibet at consistent time intervals in 1993, 2008, and 2023. Furthermore, the formation, distribution, and dynamic changes in these ice lakes are investigated. The numbers of ice lakes discovered in 1993, 2008, and 2023 were 2520, 3198, and 3877, respectively. These lakes covered areas of approximately 337.64 ± 36.86 km², 363.92 ± 40.90 km², and 395.74 ± 22.72 km², respectively. These ice lakes are located primarily between altitudes of 4442 m and 4909 m. The total area experienced an annual growth rate of approximately 0.57% from 1993 to 2023. In the present study, the long-term variations in ice lakes in each district and county are examined. These findings indicate that between 1993 and 2023, the expansion of ice lakes was more pronounced in regions with a large number of marine glaciers. Notably, Basu County presented the highest annual growth rate in the number of ice lakes, at 6.23%, followed by Bomi County, at 4.28%, and finally, Zayul County, at 2.94%. The accelerated shrinkage of marine glaciers induced by global warming is the primary driver behind the expansion of ice lakes. The results obtained from this research will enhance our overall understanding of the complex dynamics and mechanisms that govern the formation of ice lakes while also offering valuable perspectives on the potential risks linked to their expansion in this particular area.},
}
@article {pmid39252255,
year = {2024},
author = {Zhao, H and Zhang, Z and Tang, J},
title = {Enhancing rural healthcare through internet-based remote collaborative outpatient services: A comprehensive evaluation in Changzhi, Shanxi Province.},
journal = {Medicine},
volume = {103},
number = {36},
pages = {e39614},
doi = {10.1097/MD.0000000000039614},
pmid = {39252255},
issn = {1536-5964},
support = {HPYJ202202//Heping Hospital Affiliated to Changzhi Medical College Faculty Research Fund/ ; },
mesh = {Humans ; China ; *Rural Health Services/organization & administration ; *Telemedicine ; *Internet ; Male ; Female ; *Patient Satisfaction ; Adult ; Middle Aged ; Health Services Accessibility ; Ambulatory Care/methods/organization & administration ; Rural Population ; Aged ; Young Adult ; Adolescent ; },
abstract = {BACKGROUND: The advancement of digital technology, particularly telemedicine, has become crucial in improving healthcare access in rural areas. By integrating cloud computing and mHealth technologies, Internet-based Collaborative Outpatient Clinics offer a promising solution to overcome the limitations of traditional healthcare delivery in underserved communities.
METHODS: A trial was conducted in 4 counties of Changzhi City in Shanxi Province, China. The system extended to 495 rural communities and served over 5000 rural residents. Deep learning algorithms were employed to analyze medical data patterns to increase the accuracy of diagnoses and the quality of personalized treatment recommendations.
RESULTS: After the implementation of the system, there was a significant improvement in the satisfaction levels of rural residents regarding medical services; the accuracy of medical consultations increased by 30%, and the convenience of medical access improved by 50%. There was also a notable enhancement in overall health management. Satisfaction rates among healthcare professionals and rural inhabitants were over 90% and 85%, respectively, indicating that the system has had a significant positive impact on the quality of healthcare services.
CONCLUSION: The study confirms the feasibility of implementing telemedicine services in rural areas and offers evidence and an operational framework for promoting innovative healthcare models on a large scale.},
}
@article {pmid39242829,
year = {2024},
author = {Tanade, C and Khan, NS and Rakestraw, E and Ladd, WD and Draeger, EW and Randles, A},
title = {Establishing the longitudinal hemodynamic mapping framework for wearable-driven coronary digital twins.},
journal = {NPJ digital medicine},
volume = {7},
number = {1},
pages = {236},
pmid = {39242829},
issn = {2398-6352},
support = {DP1AG082343//U.S. Department of Health & Human Services | National Institutes of Health (NIH)/ ; 164486//National Science Foundation (NSF)/ ; DP1AG082343//U.S. Department of Health & Human Services | National Institutes of Health (NIH)/ ; DP1AG082343//U.S. Department of Health & Human Services | National Institutes of Health (NIH)/ ; },
abstract = {Understanding the evolving nature of coronary hemodynamics is crucial for early disease detection and monitoring progression. We require digital twins that mimic a patient's circulatory system by integrating continuous physiological data and computing hemodynamic patterns over months. Current models match clinical flow measurements but are limited to single heartbeats. To this end, we introduced the longitudinal hemodynamic mapping framework (LHMF), designed to tackle critical challenges: (1) computational intractability of explicit methods; (2) boundary conditions reflecting varying activity states; and (3) accessible computing resources for clinical translation. We show negligible error (0.0002-0.004%) between LHMF and explicit data of 750 heartbeats. We deployed LHMF across traditional and cloud-based platforms, demonstrating high-throughput simulations on heterogeneous systems. Additionally, we established LHMFC, where hemodynamically similar heartbeats are clustered to avoid redundant simulations, accurately reconstructing longitudinal hemodynamic maps (LHMs). This study captured 3D hemodynamics over 4.5 million heartbeats, paving the way for cardiovascular digital twins.},
}
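The LHMFC step above clusters hemodynamically similar heartbeats so that only one representative per cluster needs to be simulated. The sketch below illustrates that idea with k-means over per-beat summary features; the features and cluster count are illustrative assumptions, not the paper's actual similarity metric.

```python
# Sketch of the clustering idea behind LHMFC: group heartbeats with similar
# boundary-condition features, simulate one representative per cluster, and
# reuse its result for the rest. Features and k are illustrative assumptions.
import numpy as np
from sklearn.cluster import KMeans

rng = np.random.default_rng(0)
# per-beat features, e.g. heart rate and a flow-waveform summary per beat
beats = np.column_stack([
    rng.normal(70, 10, 5000),     # heart rate (bpm)
    rng.normal(5.0, 0.8, 5000),   # mean inflow (L/min)
])

kmeans = KMeans(n_clusters=20, n_init=10, random_state=0).fit(beats)
representatives = kmeans.cluster_centers_   # beats actually simulated
labels = kmeans.labels_                     # map every beat to a representative
print(f"simulate {len(representatives)} beats instead of {len(beats)}")
```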
@article {pmid39237930,
year = {2024},
author = {Vesselle, H and Chiramal, JA and Hawes, SE and Schulze, E and Nguyen, T and Ndumia, R and Vinayak, S},
title = {Development of an online authentic radiology viewing and reporting platform to test the skills of radiology trainees in Low- and Middle-Income Countries.},
journal = {BMC medical education},
volume = {24},
number = {1},
pages = {969},
pmid = {39237930},
issn = {1472-6920},
mesh = {Humans ; *Radiology/education ; *Developing Countries ; Kenya ; *Internship and Residency ; *Clinical Competence ; *Radiology Information Systems ; Tomography, X-Ray Computed ; },
abstract = {BACKGROUND: Diagnostic radiology residents in low- and middle-income countries (LMICs) may have to provide significant contributions to the clinical workload before the completion of their residency training. Because of time constraints inherent to the delivery of acute care, some of the most clinically impactful diagnostic radiology errors arise from the use of Computed Tomography (CT) in the management of acutely ill patients. As a result, it is paramount to ensure that radiology trainees reach adequate skill levels prior to assuming independent on-call responsibilities. We partnered with the radiology residency program at the Aga Khan University Hospital, Nairobi (AKUHN), Kenya, to evaluate a novel cloud-based testing method that provides an authentic radiology viewing and interpretation environment. It is based on Lifetrack, a unique Google Chrome-based Picture Archiving and Communication System, which enables a complete viewing environment for any scan and provides a novel report generation tool based on Active Templates, a patented structured reporting method. We applied it to evaluate the skills of AKUHN trainees on entire CT scans representing the spectrum of acute non-trauma abdominal pathology encountered in a typical on-call setting. We aimed to demonstrate the feasibility of remotely testing the authentic practice of radiology and to show that important observations can be made from such a Lifetrack-based testing approach regarding the radiology skills of an individual practitioner or of a cohort of trainees.
METHODS: A total of 13 anonymized trainees with experience from 12 months to over 4 years took part in the study. Individually accessing the Lifetrack tool they were tested on 37 abdominal CT scans (including one normal scan) over six 2-hour sessions on consecutive days. All cases carried the same clinical history of acute abdominal pain. During each session the trainees accessed the corresponding Lifetrack test set using clinical workstations, reviewed the CT scans, and formulated an opinion for the acute diagnosis, any secondary pathology, and incidental findings on the scan. Their scan interpretations were composed using the Lifetrack report generation system based on active templates in which segments of text can be selected to assemble a detailed report. All reports generated by the trainees were scored on four different interpretive components: (a) acute diagnosis, (b) unrelated secondary diagnosis, (c) number of missed incidental findings, and (d) number of overcalls. A 3-score aggregate was defined from the first three interpretive elements. A cumulative score modified the 3-score aggregate for the negative effect of interpretive overcalls.
RESULTS: A total of 436 scan interpretations and scores were available from 13 trainees tested on 37 cases. The acute diagnosis score ranged from 0 to 1 with a mean of 0.68 ± 0.36 and a median of 0.78 (IQR: 0.5-1), across 436 scores. An unrelated secondary diagnosis was present in 11 cases, resulting in 130 secondary diagnosis scores. The unrelated secondary diagnosis score ranged from 0 to 1, with a mean of 0.48 ± 0.46 and a median of 0.5 (IQR: 0-1). There were 32 cases with incidental findings, yielding 390 scores for incidental findings. The number of missed incidental findings ranged from 0 to 5 with a median of 1 (IQR: 1-2). The incidental findings score ranged from 0 to 1 with a mean of 0.4 ± 0.38 and a median of 0.33 (IQR: 0-0.66). The number of overcalls ranged from 0 to 3 with a median of 0 (IQR: 0-1) and a mean of 0.36 ± 0.63. The 3-score aggregate ranged from 0 to 100 with a mean of 65.5 ± 32.5 and a median of 77.3 (IQR: 45.0-92.5). The cumulative score ranged from -30 to 100 with a mean of 61.9 ± 35.5 and a median of 71.4 (IQR: 37.4-92.0). The mean acute diagnosis scores (± SD) by training period were 0.62 ± 0.03, 0.80 ± 0.05, 0.71 ± 0.05, 0.58 ± 0.07, and 0.66 ± 0.05 for trainees with ≤ 12 months, 12-24 months, 24-36 months, 36-48 months, and > 48 months of training, respectively. By ANOVA with Tukey testing, the mean acute diagnosis score for 12-24 months of training was the only one significantly greater than that for ≤ 12 months (p = 0.0002). We found a similar trend in the distribution of 3-score aggregates and cumulative scores. There were no significant associations when the training period was categorized as less than versus more than 2 years. We examined the distribution of the 3-score aggregate versus the number of overcalls by trainee and found that the 3-score aggregate was inversely related to the number of overcalls. Heatmaps and raincloud plots provided an illustrative means to visualize the relative performance of trainees across cases.
CONCLUSION: We demonstrated the feasibility of remotely testing the authentic practice of radiology and showed that important observations can be made from our Lifetrack-based testing approach regarding the radiology skills of an individual or a cohort. From observed weaknesses, targeted teaching can be implemented, and retesting could reveal its impact. This methodology can be customized to different LMIC environments and expanded to board certification examinations.},
}
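The abstract defines a 3-score aggregate from the acute-diagnosis, secondary-diagnosis, and incidental-findings scores, and a cumulative score that penalizes overcalls. The exact weighting and penalty are not stated in the abstract, so the sketch below assumes an unweighted mean scaled to 100 and a fixed 10-point deduction per overcall; both are assumptions for illustration only.

```python
# Hedged sketch of the scoring scheme described above. Each component score
# is in [0, 1]; the equal weighting and the 10-point-per-overcall penalty
# are assumptions, not the paper's stated formula.
import numpy as np

def three_score_aggregate(acute, secondary, incidental):
    # some cases lack a secondary diagnosis, so None components are skipped
    parts = [s for s in (acute, secondary, incidental) if s is not None]
    return 100.0 * np.mean(parts)

def cumulative_score(aggregate, n_overcalls, penalty=10.0):
    # overcalls subtract from the aggregate, which can push it below zero
    return aggregate - penalty * n_overcalls

agg = three_score_aggregate(acute=0.78, secondary=0.5, incidental=0.33)
print(agg, cumulative_score(agg, n_overcalls=2))
```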
@article {pmid39234702,
year = {2024},
author = {Holtz, A and Liebe, JD},
title = {Cloud Readiness of German Hospitals: Development and Application of an Evaluation Scale.},
journal = {Studies in health technology and informatics},
volume = {317},
number = {},
pages = {11-19},
doi = {10.3233/SHTI240832},
pmid = {39234702},
issn = {1879-8365},
mesh = {Germany ; *Cloud Computing ; Hospitals ; Computer Security ; Humans ; Surveys and Questionnaires ; },
abstract = {BACKGROUND: In the context of the telematics infrastructure, new data usage regulations, and the growing potential of artificial intelligence, cloud computing plays a key role in driving the digitalization in the German hospital sector.
METHODS: Against this background, the study aims to develop and validate a scale for assessing the cloud readiness of German hospitals. It uses the TPOM (Technology, People, Organization, Macro-Environment) framework to create a scoring system. A survey involving 110 Chief Information Officers (CIOs) from German hospitals was conducted, followed by an exploratory factor analysis and reliability testing to refine the items, resulting in a final set of 30 items.
RESULTS: The analysis confirmed the statistical robustness of the scale and identified key factors contributing to cloud readiness. These include IT security in the dimension "technology", collaborative research and acceptance of the need to make high-quality data available in the dimension "people", scalability of IT resources in the dimension "organization", and legal aspects in the dimension "macro-environment". The macro-environment dimension emerged as particularly stable, highlighting the critical role of regulatory compliance in the healthcare sector.
CONCLUSION: The findings suggest a certain degree of cloud readiness among German hospitals, with potential for improvement in all four dimensions. Systemically, legal requirements and a challenging political environment are top concerns for CIOs, impacting their cloud readiness.},
}
@article {pmid39232132,
year = {2024},
author = {Said, G and Ghani, A and Ullah, A and Alzahrani, A and Azeem, M and Ahmad, R and Kim, DH},
title = {Fog-assisted de-duplicated data exchange in distributed edge computing networks.},
journal = {Scientific reports},
volume = {14},
number = {1},
pages = {20595},
pmid = {39232132},
issn = {2045-2322},
abstract = {The Internet of Things (IoT) generates substantial data through sensors for diverse applications, such as healthcare services. This article addresses the challenge of efficiently utilizing resources in resource-scarce IoT-enabled sensors to enhance data collection, transmission, and storage. Redundant data transmission from sensors covering overlapping areas incurs additional communication and storage costs. Existing schemes, namely Asymmetric Extremum (AE) and Rapid Asymmetric Maximum (RAM), employ fixed and variable-sized windows during chunking. However, these schemes face issues when selecting the index value that decides the variable window size, which may remain zero or very low, resulting in poor deduplication. This article resolves this issue with the proposed Controlled Cut-point Identification Algorithm (CCIA), designed to restrict the variable-sized window to a certain threshold. The index value deciding the threshold is always larger than half the size of the fixed window. This helps to find more duplicates, but an upper-limit offset is also applied to avoid unnecessarily large windows, which would otherwise cause excessive computation costs. Extensive simulations are performed by deploying Windows Communication Foundation services in the Azure cloud. The results demonstrate the superiority of CCIA in various metrics, including chunk number, average chunk size, minimum and maximum chunk number, variable chunking size, and probability of failure for cut-point identification. In comparison to its competitors, RAM and AE, CCIA exhibits better performance across key parameters. Specifically, CCIA outperforms them in total number of chunks (6.81%, 14.17%), average number of chunks (4.39%, 18.45%), and minimum chunk size (153%, 190%). These results highlight the effectiveness of CCIA in optimizing data transmission and storage within IoT systems, showcasing its potential for improved resource utilization and reduced operational costs.},
}
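The chunking rules described above (a lower threshold of half the fixed window before a cut is allowed, plus an upper offset cap) can be illustrated with a small content-defined chunker. The sketch below borrows the AE/RAM-style "running local maximum" cut rule; the constants and the byte-level rule are illustrative, not the paper's exact algorithm.

```python
# Content-defined chunking in the spirit of CCIA: a cut point is accepted
# only after the variable window exceeds half the fixed window (the lower
# threshold), and an upper offset cap prevents oversized chunks. The local
# maximum rule is borrowed from AE/RAM-style chunkers; constants are
# illustrative.

def chunk(data: bytes, fixed_window: int = 256, max_offset: int = 4096):
    min_offset = fixed_window // 2        # threshold: at least half the window
    cuts, start = [], 0
    while start < len(data):
        local_max, i = -1, start
        cut = min(start + max_offset, len(data))   # upper-limit offset
        while i < cut:
            if data[i] >= local_max:      # track running maximum byte value
                local_max = data[i]
            elif i - start >= min_offset: # past threshold and value dropped: cut
                cut = i
                break
            i += 1
        cuts.append((start, cut))
        start = cut
    return cuts

import os
blob = os.urandom(20000)
chunks = chunk(blob)
sizes = [e - s for s, e in chunks]
print(len(chunks), "chunks; avg size", sum(sizes) / len(sizes))
```

Deduplication then hashes each chunk and transmits only chunks whose hashes the receiver has not seen.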
@article {pmid39232070,
year = {2024},
author = {Jang, H and Koh, H},
title = {A unified web cloud computing platform MiMedSurv for microbiome causal mediation analysis with survival responses.},
journal = {Scientific reports},
volume = {14},
number = {1},
pages = {20650},
pmid = {39232070},
issn = {2045-2322},
support = {2021R1C1C1013861//National Research Foundation of Korea/ ; },
mesh = {Humans ; *Microbiota ; *Cloud Computing ; *Internet ; Software ; Survival Analysis ; },
abstract = {In human microbiome studies, mediation analysis has recently been spotlighted as a practical and powerful analytic tool to survey the causal roles of the microbiome as a mediator that explains the observed relationships between a medical treatment/environmental exposure and a human disease. We also note that, in clinical research, investigators often trace disease progression sequentially in time; as such, time-to-event (e.g., time-to-disease, time-to-cure) responses, known as survival responses, are prevalent as surrogate variables for human health or disease. In this paper, we introduce a web cloud computing platform, named microbiome mediation analysis with survival responses (MiMedSurv), for comprehensive microbiome mediation analysis with survival responses in a user-friendly web environment. MiMedSurv is an extension of our prior web cloud computing platform, named microbiome mediation analysis (MiMed), to survival responses. Its two main distinguishing features are as follows. First, MiMedSurv conducts baseline exploratory non-mediational survival analysis, not involving the microbiome, to survey the disparity in survival response between medical treatments/environmental exposures. Then, MiMedSurv identifies the mediating roles of the microbiome in various aspects: (i) as a microbial ecosystem using ecological indices (e.g., alpha and beta diversity indices) and (ii) as individual microbial taxa in various hierarchies (e.g., phyla, classes, orders, families, genera, species). To illustrate its use, we survey the mediating roles of the gut microbiome between antibiotic treatment and time-to-type 1 diabetes. MiMedSurv is freely available on our web server (http://mimedsurv.micloud.kr).},
}
@article {pmid39220162,
year = {2024},
author = {Blazhynska, M and Lagardère, L and Liu, C and Adjoua, O and Ren, P and Piquemal, JP},
title = {Water-glycan interactions drive the SARS-CoV-2 spike dynamics: insights into glycan-gate control and camouflage mechanisms.},
journal = {Chemical science},
volume = {},
number = {},
pages = {},
pmid = {39220162},
issn = {2041-6520},
abstract = {To develop therapeutic strategies against COVID-19, we introduce a high-resolution all-atom polarizable model capturing many-body effects of protein, glycan, solvent, and membrane components in SARS-CoV-2 spike protein open and closed states. Employing μs-long molecular dynamics simulations powered by high-performance cloud-computing and unsupervised density-driven adaptive sampling, we investigated the differences in bulk-solvent-glycan and protein-solvent-glycan interfaces between these states. We unraveled a sophisticated solvent-glycan polarization interaction network involving the N165/N343 glycan-gate patterns that provide structural support for the open state and identified key water molecules that could potentially be targeted to destabilize this configuration. In the closed state, the reduced solvent polarization diminishes the overall N165/N343 dipoles, yet internal interactions and a reorganized sugar coat stabilize this state. Despite variations, our glycan-solvent accessibility analysis reveals the glycan shield capability to conserve constant interactions with the solvent, effectively camouflaging the virus from immune detection in both states. The presented insights advance our comprehension of viral pathogenesis at an atomic level, offering potential to combat COVID-19.},
}
@article {pmid39217900,
year = {2024},
author = {Cian, F and Delgado Blasco, JM and Ivanescu, C},
title = {Improving rapid flood impact assessment: An enhanced multi-sensor approach including a new flood mapping method based on Sentinel-2 data.},
journal = {Journal of environmental management},
volume = {369},
number = {},
pages = {122326},
doi = {10.1016/j.jenvman.2024.122326},
pmid = {39217900},
issn = {1095-8630},
abstract = {Rapid flood impact assessment methods need complete and accurate flood maps to provide reliable information for disaster risk management, in particular for emergency response and recovery and reconstruction plans. With the aim of improving the rapid assessment of flood impacts, this work presents a new impact assessment method characterized by an enhanced satellite multi-sensor approach for flood mapping, which improves the characterization of the hazard. This includes a novel flood mapping method based on the new multi-temporal Modified Normalized Difference Water Index (MNDWI) that uses multi-temporal statistics computed on time series of Sentinel-2 multi-spectral satellite images. The multi-temporal aspect of the MNDWI improves the characterization of land cover over time and enhances the temporarily flooded areas, which can be extracted through a thresholding technique, allowing the delineation of more precise and complete flood maps. The methodology, if implemented in cloud-based environments such as Google Earth Engine (GEE), is computationally light and robust, allowing the derivation of flood maps in a matter of minutes, also for large areas. The flood mapping and impact assessment method has been applied to the seasonal flood that occurred in South Sudan in 2020, using Sentinel-1, Sentinel-2, and PlanetScope satellite imagery. Flood impacts were assessed considering damages to buildings, roads, and cropland. The multi-sensor approach estimated an impact of 57.4 million USD (considering a middle-bound scenario), higher than the estimates obtained using Sentinel-1 data only or Sentinel-2 data only (24% and 78% of the multi-sensor estimate, respectively). This work highlights the effectiveness and importance of considering multi-source satellite data for flood mapping in the context of disaster risk management, to better inform disaster response, recovery, and reconstruction plans.},
}
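The MNDWI is the normalized difference of the green and SWIR1 bands (Sentinel-2 B3 and B11), and the multi-temporal idea is to compare an event-window composite against a long-term baseline statistic and threshold the increase. The Earth Engine Python API sketch below illustrates that recipe; the dataset ID, dates, cloud filter, and the 0.2 threshold are illustrative choices, not the paper's exact parameters.

```python
# Sketch of a multi-temporal MNDWI flood map in the Earth Engine Python API.
# MNDWI = (Green - SWIR1) / (Green + SWIR1), i.e. Sentinel-2 bands B3 and B11.
# An event-window median is compared against a multi-year baseline median and
# the MNDWI increase is thresholded to highlight temporarily flooded areas.
import ee

ee.Initialize()

aoi = ee.Geometry.Rectangle([30.0, 6.0, 32.0, 8.0])   # example area, South Sudan

def mndwi(img):
    return img.normalizedDifference(["B3", "B11"]).rename("MNDWI")

s2 = (ee.ImageCollection("COPERNICUS/S2_SR_HARMONIZED")
      .filterBounds(aoi)
      .filter(ee.Filter.lt("CLOUDY_PIXEL_PERCENTAGE", 40))
      .map(mndwi))

baseline = s2.filterDate("2018-01-01", "2020-07-01").median()  # multi-temporal statistic
event = s2.filterDate("2020-08-01", "2020-10-31").median()

flooded = event.subtract(baseline).gt(0.2)    # MNDWI increase -> temporary water
print(flooded.getInfo()["bands"][0]["id"])
```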
@article {pmid39205138,
year = {2024},
author = {Kontogiannis, S},
title = {Beehive Smart Detector Device for the Detection of Critical Conditions That Utilize Edge Device Computations and Deep Learning Inferences.},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {16},
pages = {},
pmid = {39205138},
issn = {1424-8220},
abstract = {This paper presents a new edge detection process implemented in an embedded IoT device called the Bee Smart Detection node to detect catastrophic apiary events. Such events include swarming, queen loss, and the detection of Colony Collapse Disorder (CCD) conditions. Two deep learning sub-processes are used for this purpose. The first uses a fuzzy multi-layered neural network of variable depths called fuzzy-stranded-NN to detect CCD conditions based on temperature and humidity measurements inside the beehive. The second utilizes a deep learning CNN model to detect swarming and queen loss cases based on sound recordings. The proposed processes have been implemented in autonomous Bee Smart Detection IoT devices that transmit their measurements and the detection results to the cloud over Wi-Fi. The BeeSD devices have been tested for easy-to-use functionality, autonomous operation, deep learning model inference accuracy, and inference execution speed. The author presents the experimental results of the fuzzy-stranded-NN model for detecting critical conditions and of the deep learning CNN models for detecting swarming and queen loss. In the presented experiments, the stranded-NN achieved accuracy of up to 95%, while the ResNet-50 model achieved accuracy of up to 99% for detecting swarming or queen loss events. The ResNet-18 model is also the fastest-inference replacement for the ResNet-50 model, achieving up to 93% accuracy. Finally, cross-comparison of the deep learning models with machine learning ones shows that deep learning models can provide at least 3-5% better accuracy.},
}
@article {pmid39205076,
year = {2024},
author = {Celik, AE and Rodriguez, I and Ayestaran, RG and Yavuz, SC},
title = {Decentralized System Synchronization among Collaborative Robots via 5G Technology.},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {16},
pages = {},
doi = {10.3390/s24165382},
pmid = {39205076},
issn = {1424-8220},
support = {RYC-2020-030676-I//Ministerio de Ciencia, Innovación y Universidades/ ; },
abstract = {In this article, we propose a distributed synchronization solution to achieve decentralized coordination in a system of collaborative robots. This is done by leveraging cloud-based computing and 5G technology to exchange causal ordering messages between the robots, eliminating the need for centralized control entities or programmable logic controllers in the system. The proposed solution is described, mathematically formulated, implemented in software, and validated over realistic network conditions. Further, the performance of the decentralized solution via 5G technology is compared to that achieved with traditional coordinated/uncoordinated cabled control systems. The results indicate that the proposed decentralized solution leveraging cloud-based 5G wireless is scalable to systems of up to 10 collaborative robots with comparable efficiency to that from standard cabled systems. The proposed solution has direct application in the control of producer-consumer and automated assembly line robotic applications.},
}
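Decentralized coordination of this kind rests on causal ordering of messages. The textbook mechanism is a vector clock: each robot stamps outgoing messages with its clock, and a receiver delivers a message only when it is causally next. The sketch below is that generic mechanism, not the paper's exact protocol.

```python
# Textbook vector-clock causal ordering, the kind of mechanism decentralized
# robot coordination over 5G can rely on instead of a central controller.
# Generic sketch; not the paper's exact protocol.
import numpy as np

class Robot:
    def __init__(self, rid, n):
        self.rid, self.clock = rid, np.zeros(n, dtype=int)

    def send(self):
        self.clock[self.rid] += 1
        return self.rid, self.clock.copy()

    def can_deliver(self, sender, stamp):
        # causally next: sender advanced by exactly one, nothing else unseen
        ok_sender = stamp[sender] == self.clock[sender] + 1
        others = [stamp[k] <= self.clock[k] for k in range(len(stamp)) if k != sender]
        return ok_sender and all(others)

    def deliver(self, sender, stamp):
        assert self.can_deliver(sender, stamp)
        self.clock = np.maximum(self.clock, stamp)

a, b = Robot(0, 2), Robot(1, 2)
m1 = a.send()                 # a's first message
m2 = a.send()                 # a's second message
print(b.can_deliver(*m2))     # False: m1 has not been delivered yet
b.deliver(*m1)
b.deliver(*m2)
print(b.clock)                # [2 0]
```

Messages arriving out of order over the network are simply buffered until `can_deliver` becomes true, which preserves causal order without any central sequencer.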
@article {pmid39205014,
year = {2024},
author = {Dauda, A and Flauzac, O and Nolot, F},
title = {A Survey on IoT Application Architectures.},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {16},
pages = {},
doi = {10.3390/s24165320},
pmid = {39205014},
issn = {1424-8220},
support = {1711/20//Petroleum Technology Development Fund (PTDF) Nigeria/ ; },
abstract = {The proliferation of the IoT has led to the development of diverse application architectures to optimize IoT systems' deployment, operation, and maintenance. This survey provides a comprehensive overview of the existing IoT application architectures, highlighting their key features, strengths, and limitations. The architectures are categorized based on their deployment models, such as cloud, edge, and fog computing approaches, each offering distinct advantages regarding scalability, latency, and resource efficiency. Cloud architectures leverage centralized data processing and storage capabilities to support large-scale IoT applications but often suffer from high latency and bandwidth constraints. Edge architectures mitigate these issues by bringing computation closer to the data source, enhancing real-time processing, and reducing network congestion. Fog architectures combine the strengths of both cloud and edge paradigms, offering a balanced solution for complex IoT environments. This survey also examines emerging trends and technologies in IoT application management, such as the solutions provided by the major IoT service providers like Intel, AWS, Microsoft Azure, and GCP. Through this study, the survey identifies latency, privacy, and deployment difficulties as key areas for future research. It highlights the need to advance IoT Edge architectures to reduce network traffic, improve data privacy, and enhance interoperability by developing multi-application and multi-protocol edge gateways for efficient IoT application management.},
}
@article {pmid39205003,
year = {2024},
author = {Rigas, S and Tzouveli, P and Kollias, S},
title = {An End-to-End Deep Learning Framework for Fault Detection in Marine Machinery.},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {16},
pages = {},
doi = {10.3390/s24165310},
pmid = {39205003},
issn = {1424-8220},
support = {ATHINAIKI RIVIERA - ATTP4-0325990//Greece and European Union: Attica 2014-2020/ ; },
abstract = {The Industrial Internet of Things has enabled the integration and analysis of vast volumes of data across various industries, with the maritime sector being no exception. Advances in cloud computing and deep learning (DL) are continuously reshaping the industry, particularly in optimizing maritime operations such as Predictive Maintenance (PdM). In this study, we propose a novel DL-based framework focusing on the fault detection task of PdM in marine operations, leveraging time-series data from sensors installed on shipboard machinery. The framework is designed as a scalable and cost-efficient software solution, encompassing all stages from data collection and pre-processing at the edge to the deployment and lifecycle management of DL models. The proposed DL architecture utilizes Graph Attention Networks (GATs) to extract spatio-temporal information from the time-series data and provides explainable predictions through a feature-wise scoring mechanism. Additionally, a custom evaluation metric with real-world applicability is employed, prioritizing both prediction accuracy and the timeliness of fault identification. To demonstrate the effectiveness of our framework, we conduct experiments on three types of open-source datasets relevant to PdM: electrical data, bearing datasets, and data from water circulation experiments.},
}
@article {pmid39204979,
year = {2024},
author = {Adame, T and Amri, E and Antonopoulos, G and Azaiez, S and Berne, A and Camargo, JS and Kakoulidis, H and Kleisarchaki, S and Llamedo, A and Prasinos, M and Psara, K and Shumaiev, K},
title = {Presenting the COGNIFOG Framework: Architecture, Building Blocks and Road toward Cognitive Connectivity.},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {16},
pages = {},
doi = {10.3390/s24165283},
pmid = {39204979},
issn = {1424-8220},
support = {101092968//European Union/ ; },
abstract = {In the era of ubiquitous computing, the challenges imposed by the increasing demand for real-time data processing, security, and energy efficiency call for innovative solutions. The emergence of fog computing has provided a promising paradigm to address these challenges by bringing computational resources closer to data sources. Despite its advantages, the fog computing characteristics pose challenges in heterogeneous environments in terms of resource allocation and management, provisioning, security, and connectivity, among others. This paper introduces COGNIFOG, a novel cognitive fog framework currently under development, which was designed to leverage intelligent, decentralized decision-making processes, machine learning algorithms, and distributed computing principles to enable the autonomous operation, adaptability, and scalability across the IoT-edge-cloud continuum. By integrating cognitive capabilities, COGNIFOG is expected to increase the efficiency and reliability of next-generation computing environments, potentially providing a seamless bridge between the physical and digital worlds. Preliminary experimental results with a limited set of connectivity-related COGNIFOG building blocks show promising improvements in network resource utilization in a real-world-based IoT scenario. Overall, this work paves the way for further developments on the framework, which are aimed at making it more intelligent, resilient, and aligned with the ever-evolving demands of next-generation computing environments.},
}
@article {pmid39204967,
year = {2024},
author = {Krishnamurthy, B and Shiva, SG},
title = {Integral-Valued Pythagorean Fuzzy-Set-Based Dyna Q+ Framework for Task Scheduling in Cloud Computing.},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {16},
pages = {},
doi = {10.3390/s24165272},
pmid = {39204967},
issn = {1424-8220},
abstract = {Task scheduling is a critical challenge in cloud computing systems, greatly impacting their performance. Task scheduling is a nondeterministic polynomial-time hard (NP-hard) problem, which complicates the search for nearly optimal solutions. Five major uncertainty parameters, i.e., security, traffic, workload, availability, and price, influence task scheduling decisions. The primary rationale for selecting these uncertainty parameters lies in the challenge of accurately measuring their values, as empirical estimations often diverge from the actual values. The integral-valued Pythagorean fuzzy set (IVPFS) is a promising mathematical framework for dealing with parametric uncertainties. The Dyna Q+ algorithm is the updated form of the Dyna Q agent, designed specifically for dynamic computing environments by providing bonus rewards to non-exploited states. In this paper, the Dyna Q+ agent is enriched with the IVPFS mathematical framework to make intelligent task scheduling decisions. The performance of the proposed IVPFS Dyna Q+ task scheduler is tested using the CloudSim 3.3 simulator. The execution time is reduced by 90%, the makespan time is also reduced by 90%, the operation cost is below 50%, and the resource utilization rate is improved by 95%, with all of these parameters meeting the desired standards or expectations. The results are further validated using an expected value analysis methodology that confirms the good performance of the task scheduler. A better balance between exploration and exploitation is achieved by the Dyna Q+ agent through rigorous action-based learning.},
}
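Dyna-Q+ itself is the standard Sutton-and-Barto agent: direct Q-learning updates plus planning steps replayed from a learned model, where long-untried transitions earn an exploration bonus proportional to the square root of the time since they were last tried. The sketch below shows that generic agent on a toy chain task; the paper's IVPFS uncertainty weighting is not reproduced here.

```python
# Minimal Dyna-Q+ sketch (Sutton & Barto): Q-learning plus planning from a
# learned model, with a bonus reward kappa*sqrt(tau) for transitions not
# tried for tau steps -- the "bonus to non-exploited states" the abstract
# mentions. Toy chain task; the IVPFS weighting is not reproduced.
import random
import math

N_STATES, ACTIONS = 5, [0, 1]          # chain: action 1 moves right, 0 left
alpha, gamma, kappa, eps = 0.1, 0.95, 1e-3, 0.1

Q = {(s, a): 0.0 for s in range(N_STATES) for a in ACTIONS}
model, last_tried, t = {}, {}, 0

def step(s, a):
    s2 = max(0, min(N_STATES - 1, s + (1 if a else -1)))
    return s2, (1.0 if s2 == N_STATES - 1 else 0.0)   # reward at right end

s = 0
for _ in range(2000):
    t += 1
    a = random.choice(ACTIONS) if random.random() < eps else max(ACTIONS, key=lambda a: Q[(s, a)])
    s2, r = step(s, a)
    best = max(Q[(s2, b)] for b in ACTIONS)
    Q[(s, a)] += alpha * (r + gamma * best - Q[(s, a)])   # direct RL update
    model[(s, a)] = (s2, r)
    last_tried[(s, a)] = t
    for _ in range(10):                                   # planning with bonus
        (ps, pa), (ps2, pr) = random.choice(list(model.items()))
        bonus = kappa * math.sqrt(t - last_tried[(ps, pa)])
        pbest = max(Q[(ps2, b)] for b in ACTIONS)
        Q[(ps, pa)] += alpha * (pr + bonus + gamma * pbest - Q[(ps, pa)])
    s = 0 if s2 == N_STATES - 1 else s2

print({s: max(ACTIONS, key=lambda a: Q[(s, a)]) for s in range(N_STATES)})
```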
@article {pmid39190508,
year = {2024},
author = {Wang, J and Lu, X and Wang, M and Hou, F and He, Y},
title = {Learning Implicit Fields for Point Cloud Filtering.},
journal = {IEEE transactions on visualization and computer graphics},
volume = {PP},
number = {},
pages = {},
doi = {10.1109/TVCG.2024.3450699},
pmid = {39190508},
issn = {1941-0506},
abstract = {Since point clouds acquired by scanners inevitably contain noise, recovering a clean version from a noisy point cloud is essential for further 3D geometry processing applications. Several data-driven approaches have been recently introduced to overcome the drawbacks of traditional filtering algorithms, such as less robust preservation of sharp features and tedious tuning for multiple parameters. Most of these methods achieve filtering by directly regressing the position/displacement of each point, which may blur detailed features and is prone to uneven distribution. In this paper, we propose a novel data-driven method that explores the implicit fields. Our assumption is that the given noisy points implicitly define a surface, and we attempt to obtain a point's movement direction and distance separately based on the predicted signed distance fields (SDFs). Taking a noisy point cloud as input, we first obtain a consistent alignment by incorporating the global points into local patches. We then feed them into an encoder-decoder structure and predict a 7D vector consisting of SDFs. Subsequently, the distance can be obtained directly from the first element in the vector, and the movement direction can be obtained by computing the gradient descent from the last six elements (i.e., six surrounding SDFs). We finally obtain the filtered results by moving each point with its predicted distance along its movement direction. Our method can produce feature-preserving results without requiring explicit normals. Experiments demonstrate that our method visually outperforms state-of-the-art methods and generally produces better quantitative results than position-based methods (both learning and non-learning).},
}
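The filtering step described above moves each point by a predicted distance along a direction recovered from six surrounding SDF samples (±x, ±y, ±z) via central differences. The sketch below reproduces just that geometric step, with an analytic sphere SDF standing in for the network's 7D prediction; in the paper both the distance and the six SDF values come from the learned model.

```python
# Sketch of SDF-driven point filtering: each point moves a predicted distance
# along the (negative) gradient estimated from six surrounding signed-distance
# samples by central differences. An analytic sphere SDF stands in for the
# learned predictions.
import numpy as np

def sphere_sdf(p, radius=1.0):
    return np.linalg.norm(p, axis=-1) - radius

def filter_points(points, sdf, h=1e-3):
    offsets = np.array([[ h, 0, 0], [-h, 0, 0],
                        [0,  h, 0], [0, -h, 0],
                        [0, 0,  h], [0, 0, -h]])
    samples = sdf(points[:, None, :] + offsets[None, :, :])   # (N, 6)
    grad = np.stack([samples[:, 0] - samples[:, 1],
                     samples[:, 2] - samples[:, 3],
                     samples[:, 4] - samples[:, 5]], axis=-1) / (2 * h)
    direction = grad / np.linalg.norm(grad, axis=-1, keepdims=True)
    distance = sdf(points)[:, None]        # how far the point is off-surface
    return points - distance * direction   # descend back onto the surface

rng = np.random.default_rng(0)
noisy = rng.normal(size=(1000, 3))
noisy /= np.linalg.norm(noisy, axis=1, keepdims=True)
noisy += rng.normal(scale=0.05, size=noisy.shape)             # noisy unit sphere
clean = filter_points(noisy, sphere_sdf)
print(np.abs(np.linalg.norm(clean, axis=1) - 1).max())        # ~0: back on sphere
```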
@article {pmid39189264,
year = {2024},
author = {Radu, MC and Armean, MS and Pop-Tudose, M and Medar, C and Manolescu, LSC},
title = {Exploring Factors Influencing Pregnant Women's Perceptions and Attitudes Towards Midwifery Care in Romania: Implications for Maternal Health Education Strategies.},
journal = {Nursing reports (Pavia, Italy)},
volume = {14},
number = {3},
pages = {1807-1818},
doi = {10.3390/nursrep14030134},
pmid = {39189264},
issn = {2039-4403},
abstract = {BACKGROUND: Midwives are strong advocates for vaginal births. However, their visibility and accessibility are poorly perceived by women in Romania. Consequently, the women's options are limited to a single direction when pregnancy occurs, involving the family doctor, the obstetrician, and often an interventional technical approach at the time of birth. The aim of this research is to identify specific variables that affect the perceptions and attitudes of pregnant women towards the care provided by midwives. This knowledge could contribute to the development of more effective education and information strategies within maternal health services.
METHODS: A cross-sectional observational analytical survey was conducted in Romania among pregnant women from the general population. Data were collected through a self-administered questionnaire, with informed consent obtained from each participating pregnant woman. The questionnaire was administered online using the cloud-based Google Forms platform and was available on the internet for seven months, from January to July 2023. The questionnaire was distributed through various media channels, both individually and in communication groups, in the form of a link. All questions were mandatory, and the questionnaire could only be submitted after answering all questions.
RESULTS: A total of 1301 individual responses were collected. The analysis of the socio-demographic and obstetrical profile of the pregnant women revealed that approximately half, 689 (52.95%), of the participants were aged between 18-29 years, and 1060 (81.47%) of the participants were married. Among our group of 1301 pregnant women, 973 (74.78%) had higher education, and 987 (75.86%) had a regular job. A majority of the survey participants, 936 (71.94%), lived in an urban geographic area, while 476 (36.58%) had attended childbirth education courses, and 791 (60.79%) were in the third trimester of pregnancy. A total of 298 (22.9%) respondents did not want to give birth in a hospital, and one-third, 347 (26.67%), did not place significant importance on control over the childbirth process.
CONCLUSIONS: The main factors influencing women's decisions regarding perinatal care and the importance of midwives as a component of the maternal-infant care team are modifiable, and thorough educational and psychological preparation would reduce the increasing predominance of preference for cesarean section, thereby promoting healthier and more woman- and child-centered perinatal care.},
}
@article {pmid39187555,
year = {2024},
author = {Farooq, O and Shahid, M and Arshad, S and Altaf, A and Iqbal, F and Vera, YAM and Flores, MAL and Ashraf, I},
title = {An enhanced approach for predicting air pollution using quantum support vector machine.},
journal = {Scientific reports},
volume = {14},
number = {1},
pages = {19521},
pmid = {39187555},
issn = {2045-2322},
abstract = {The essence of quantum machine learning is to optimize problem-solving by executing machine learning algorithms on quantum computers and exploiting potent laws such as superposition and entanglement. Support vector machine (SVM) is widely recognized as one of the most effective classification machine learning techniques currently available. However, in conventional systems the SVM kernel technique tends to slow down and even fail as datasets become increasingly complex or jumbled. To compare the execution time and accuracy of conventional SVM classification with quantum SVM classification, appropriate quantum feature maps need to be selected. As a dataset grows more complex, selecting a feature map that outperforms, or performs as well as, classical classification becomes increasingly important. This paper utilizes conventional SVM to select an optimal feature map and benchmark dataset for predicting air quality. Experimental evidence demonstrates that the precision of quantum SVM surpasses that of classical SVM for air quality assessment. Using quantum labs from IBM's quantum computer cloud, conventional and quantum computing have been compared. When applied to the same two datasets, the conventional SVM achieved accuracies of 91% and 87%, respectively, whereas the quantum SVM demonstrated accuracies of 97% and 94%, respectively, for air quality prediction. The study introduces the use of quantum support vector machines (SVMs) for predicting air quality and emphasizes the novel method of choosing the best quantum feature maps. Through quantum-enhanced feature mapping, our objective is to exceed the constraints of classical SVM and achieve unparalleled levels of precision and effectiveness. We conduct precise experiments utilizing IBM's state-of-the-art quantum computer cloud to compare the performance of conventional and quantum SVM algorithms on a shared dataset.},
}
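The classical-versus-quantum comparison above hinges entirely on the kernel. scikit-learn's SVC accepts any Gram matrix via kernel="precomputed", which is exactly where a quantum fidelity kernel (for example one evaluated with Qiskit on IBM's cloud) would be plugged in. The sketch below uses a classical polynomial kernel as a stand-in for the quantum feature map; the swap point is the gram() function.

```python
# SVM with a swappable kernel: a classical polynomial Gram matrix stands in
# for a quantum fidelity kernel. Replacing gram() with a quantum kernel's
# fidelity matrix is the only change needed for the quantum variant.
import numpy as np
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC

X, y = make_classification(n_samples=300, n_features=6, random_state=0)
Xtr, Xte, ytr, yte = train_test_split(X, y, random_state=0)

def gram(A, B):
    # stand-in Mercer kernel; a quantum kernel would return state fidelities
    return (1.0 + A @ B.T / A.shape[1]) ** 3

clf = SVC(kernel="precomputed").fit(gram(Xtr, Xtr), ytr)
print("custom kernel accuracy:", clf.score(gram(Xte, Xtr), yte))
print("RBF baseline accuracy:", SVC(kernel="rbf").fit(Xtr, ytr).score(Xte, yte))
```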
@article {pmid39184128,
year = {2024},
author = {Nagi, SC and Ashraf, F and Miles, A and Donnelly, MJ},
title = {AnoPrimer: Primer Design in malaria vectors informed by range-wide genomic variation.},
journal = {Wellcome open research},
volume = {9},
number = {},
pages = {255},
pmid = {39184128},
issn = {2398-502X},
abstract = {The major malaria mosquitoes, Anopheles gambiae s.l and Anopheles funestus, are some of the most studied organisms in medical research and also some of the most genetically diverse. When designing polymerase chain reaction (PCR) or hybridisation-based molecular assays, reliable primer and probe design is crucial. However, single nucleotide polymorphisms (SNPs) in primer binding sites can prevent primer binding, leading to null alleles, or bind suboptimally, leading to preferential amplification of specific alleles. Given the extreme genetic diversity of Anopheles mosquitoes, researchers need to consider this genetic variation when designing primers and probes to avoid amplification problems. In this note, we present a Python package, AnoPrimer, which exploits the Ag1000G and Af1000 datasets and allows users to rapidly design primers in An. gambiae or An. funestus, whilst summarising genetic variation in the primer binding sites and visualising the position of primer pairs. AnoPrimer allows the design of both genomic DNA and cDNA primers and hybridisation probes. By coupling this Python package with Google Colaboratory, AnoPrimer is an open and accessible platform for primer and probe design, hosted in the cloud for free. AnoPrimer is available here https://github.com/sanjaynagi/AnoPrimer and we hope it will be a useful resource for the community to design probe and primer sets that can be reliably deployed across the An. gambiae and funestus species ranges.},
}
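The core check AnoPrimer automates can be pictured with a toy sketch: given known SNP positions, flag any candidate primer whose binding site overlaps one. The positions below are invented; the real package queries the Ag1000G/Af1000 variation data.

```python
# Toy sketch: flag primers whose binding sites overlap known SNPs.
# SNP positions and primer coordinates are invented for illustration.
snps = {2_358_114, 2_358_120, 2_358_201}

def snps_in_binding_site(start: int, length: int) -> set[int]:
    """Return the SNP positions that fall inside a primer binding site."""
    return snps.intersection(range(start, start + length))

for start, length in [(2_358_100, 20), (2_358_400, 20)]:
    hits = snps_in_binding_site(start, length)
    print(f"primer @ {start}:",
          f"WARNING, SNPs at {sorted(hits)}" if hits else "clear")
```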
@article {pmid39183823,
year = {2024},
author = {Chen, M and Qi, P and Chu, Y and Wang, B and Wang, F and Cao, J},
title = {Genetic algorithm with skew mutation for heterogeneous resource-aware task offloading in edge-cloud computing.},
journal = {Heliyon},
volume = {10},
number = {12},
pages = {e32399},
doi = {10.1016/j.heliyon.2024.e32399},
pmid = {39183823},
issn = {2405-8440},
abstract = {In recent years, edge-cloud computing has attracted growing attention due to the benefits of combining edge and cloud computing. Task scheduling remains one of the major challenges for improving service quality and resource efficiency in edge-clouds. Although several studies have addressed the scheduling problem, issues remain for practical application, e.g., ignoring resource heterogeneity or focusing on only one kind of request. Therefore, in this paper, we aim to provide a heterogeneity-aware task scheduling algorithm to improve task completion rate and resource utilization for edge-clouds with deadline constraints. Due to the NP-hardness of the scheduling problem, we exploit a genetic algorithm (GA), one of the most representative and widely used meta-heuristic algorithms, to solve the problem, considering task completion rate and resource utilization as the major and minor optimization objectives, respectively. In our GA-based scheduling algorithm, each gene indicates the resource on which its corresponding task is processed. To improve the performance of the GA, we propose a skew mutation operator in which genes are associated with resource heterogeneity during population evolution. We conduct extensive experiments to evaluate the performance of our algorithm, and the results verify its superiority in task completion rate compared with thirteen other classical and up-to-date scheduling algorithms.},
}
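A minimal sketch of what a skew mutation operator can look like, assuming the skew biases mutated genes toward higher-capacity resources; the paper's exact operator and parameters may differ.

```python
# Sketch: mutate task-to-resource genes with a draw skewed by resource
# capacity, instead of a uniform draw. Capacities and rate are illustrative.
import random

capacities = [8.0, 4.0, 2.0, 1.0]        # heterogeneous resources

def skew_mutate(chromosome: list[int], rate: float = 0.1) -> list[int]:
    total = sum(capacities)
    weights = [c / total for c in capacities]
    return [
        random.choices(range(len(capacities)), weights=weights)[0]
        if random.random() < rate else gene
        for gene in chromosome
    ]

parent = [random.randrange(len(capacities)) for _ in range(12)]
print(parent, "->", skew_mutate(parent))
```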
@article {pmid39179595,
year = {2024},
author = {Huang, Y and Lu, Y and Li, W and Xu, X and Jiang, X and Ma, R and Chen, L and Ruan, N and Wu, Q and Xu, J},
title = {Giant Kerr nonlinearity of terahertz waves mediated by stimulated phonon polaritons in a microcavity chip.},
journal = {Light, science & applications},
volume = {13},
number = {1},
pages = {212},
pmid = {39179595},
issn = {2047-7538},
support = {11974192//National Natural Science Foundation of China (National Science Foundation of China)/ ; 62205158//National Natural Science Foundation of China (National Science Foundation of China)/ ; },
abstract = {The optical Kerr effect, in which the input light intensity linearly alters the refractive index, has enabled the generation of optical solitons, supercontinuum spectra, and frequency combs, playing vital roles in on-chip devices, fiber communications, and quantum manipulation. The terahertz Kerr effect in particular, despite fascinating prospects for future high-rate computing, artificial intelligence, and cloud-based technologies, faces a great challenge due to the rather low power density and feeble Kerr response available at terahertz frequencies. Here, we demonstrate a giant terahertz-frequency Kerr nonlinearity mediated by stimulated phonon polaritons. Under the influence of this giant Kerr nonlinearity, the power-dependent refractive index change results in a frequency shift of the microcavity resonance, which was experimentally demonstrated by measuring the resonant mode of a chip-scale lithium niobate Fabry-Pérot microcavity. Owing to the stimulated phonon polaritons, the nonlinear coefficient extracted from the frequency shifts is orders of magnitude larger than that of visible and infrared light, which is also demonstrated theoretically by the nonlinear Huang equations. This work opens an avenue for rich and fruitful terahertz-Kerr-effect-based physical, chemical, and biological systems that have terahertz fingerprints.},
}
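For readers outside nonlinear optics, the standard relations behind this measurement are the intensity-dependent Kerr index and the resulting fractional shift of a cavity resonance (a textbook approximation, not the paper's full phonon-polariton model):

```latex
% Kerr index and the resulting Fabry-Perot resonance shift
\[
  n(I) = n_0 + n_2 I, \qquad
  \frac{\Delta f}{f_0} \approx -\frac{\Delta n}{n_0} = -\frac{n_2 I}{n_0}
\]
```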
@article {pmid39176842,
year = {2024},
author = {Mammas, CS and Mamma, AS},
title = {Remote Monitoring, AI, Machine Learning and Mobile Ultrasound Integration upon 5G Internet in the Prehospital Care to Support the Golden Hour Principle and Optimize Outcomes in Severe Trauma and Emergency Surgery.},
journal = {Studies in health technology and informatics},
volume = {316},
number = {},
pages = {1807-1811},
doi = {10.3233/SHTI240782},
pmid = {39176842},
issn = {1879-8365},
mesh = {Humans ; *Machine Learning ; *Ultrasonography ; *Emergency Medical Services ; *Wounds and Injuries/diagnostic imaging/therapy ; Telemedicine ; Artificial Intelligence ; Internet ; Feasibility Studies ; Reproducibility of Results ; },
abstract = {AIM: Feasibility and reliability evaluation of 5G internet networks (5G IN) with Artificial Intelligence (AI)/Machine Learning (ML), telemonitoring, and mobile ultrasound (m u/s) in an ambulance car (AC), integrated in the pre-hospital setting (PS), to support the Golden Hour Principle (GHP) and optimize outcomes in severe trauma (TRS).
MATERIAL AND METHODS: (PS) organization and care upon (5G IN) high-bandwidth (10 GB/s) mobile telecommunication (mTC) were studied using the experimental Cobot PROMETHEUS III, pn:100016, by simulation of six severe trauma clinical cases by ten (N1=10) experts: four professional rescuers (n1=4), three trauma surgeons (n2=3), a radiologist (n3=1), and two information technology specialists (n4=2), to evaluate feasibility, reliability, and clinical usability for instant risk, prognosis and triage computation, decision support and treatment planning by (AI)/(ML) computations in the (PS) of (TRS), as well as by performing (PS) (m u/s).
RESULTS: A. Instant computation of trauma severity scales by the Cobot PROMETHEUS III (pn:100016), based on complex AI and ML algorithms and cloud computing, together with telemonitoring, showed very high feasibility and reliability upon (5G IN) under specific technological, training and ergonomic prerequisites. B. Measured bi-directional (m u/s) image data sharing between (AC) and (ED/TC) showed very high feasibility and reliability upon (5G IN) under specific technological and ergonomic conditions in (TRS).
CONCLUSION: Integration of (PS) tele-monitoring with (AI)/(ML) and (PS) (m u/s) upon (5G IN) via the Cobot PROMETHEUS III (pn:100016) in severe (TRS/ES) seems feasible and, under specific prerequisites, reliable to support the (GHP) and optimize outcomes in adult and pediatric (TRS/ES).},
}
@article {pmid39163538,
year = {2024},
author = {Komarasamy, D and Ramaganthan, SM and Kandaswamy, DM and Mony, G},
title = {Deep learning and optimization enabled multi-objective for task scheduling in cloud computing.},
journal = {Network (Bristol, England)},
volume = {},
number = {},
pages = {1-30},
doi = {10.1080/0954898X.2024.2391395},
pmid = {39163538},
issn = {1361-6536},
abstract = {In cloud computing (CC), task scheduling allocates each task to the most suitable resource for execution. This article proposes a model for task scheduling utilizing multi-objective optimization and a deep learning (DL) model. Initially, multi-objective task scheduling is carried out for incoming user requests utilizing the proposed hybrid fractional flamingo beetle optimization (FFBO), which is formed by integrating dung beetle optimization (DBO), the flamingo search algorithm (FSA), and fractional calculus (FC). Here, the fitness function depends on reliability, cost, predicted energy, and makespan; the predicted energy is forecasted by a deep residual network (DRN). Thereafter, task scheduling is accomplished based on DL using the proposed deep feedforward neural network fused long short-term memory (DFNN-LSTM), which is the combination of DFNN and LSTM. Moreover, when scheduling the workflow, the task parameters and the virtual machine's (VM) live parameters are taken into consideration. Task parameters are earliest finish time (EFT), earliest start time (EST), task length, task priority, and actual task running time, whereas VM parameters include memory utilization, bandwidth utilization, capacity, and central processing unit (CPU). The proposed DFNN-LSTM+FFBO model achieved superior makespan, energy, and resource utilization of 0.188, 0.950 J, and 0.238, respectively.},
}
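A hedged sketch of what a fourfold scheduling fitness can look like: weighted aggregation of makespan, cost, predicted energy, and reliability. The weights, normalization, and sign conventions are illustrative, not the authors' exact formulation.

```python
# Sketch: fourfold fitness for a scheduling candidate (lower is better).
# Inputs are assumed pre-normalized to [0, 1]; weights are illustrative.
def fitness(makespan: float, cost: float, energy: float,
            reliability: float, w=(0.4, 0.2, 0.2, 0.2)) -> float:
    # Reliability enters inverted so that higher reliability lowers the score.
    return (w[0] * makespan + w[1] * cost
            + w[2] * energy + w[3] * (1.0 - reliability))

print(fitness(makespan=0.188, cost=0.30, energy=0.95, reliability=0.99))
```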
@article {pmid39160778,
year = {2024},
author = {Jat, AS and Grønli, TM and Ghinea, G and Assres, G},
title = {Evolving Software Architecture Design in Telemedicine: A PRISMA-based Systematic Review.},
journal = {Healthcare informatics research},
volume = {30},
number = {3},
pages = {184-193},
doi = {10.4258/hir.2024.30.3.184},
pmid = {39160778},
issn = {2093-3681},
support = {//Kristiania University College/ ; },
abstract = {OBJECTIVES: This article presents a systematic review of recent advancements in telemedicine architectures for continuous monitoring, providing a comprehensive overview of the evolving software engineering practices underpinning these systems. The review aims to illuminate the critical role of telemedicine in delivering healthcare services, especially during global health crises, and to emphasize the importance of effectiveness, security, interoperability, and scalability in these systems.
METHODS: A systematic review methodology was employed, adhering to the Preferred Reporting Items for Systematic Reviews and Meta-Analyses framework. As the primary research method, the PubMed, IEEE Xplore, and Scopus databases were searched to identify articles relevant to telemedicine architectures for continuous monitoring. Seventeen articles were selected for analysis, and a methodical approach was employed to investigate and synthesize the findings.
RESULTS: The review identified a notable trend towards the integration of emerging technologies into telemedicine architectures. Key areas of focus include interoperability, security, and scalability. Innovations such as cognitive radio technology, behavior-based control architectures, Health Level Seven International (HL7) Fast Healthcare Interoperability Resources (FHIR) standards, cloud computing, decentralized systems, and blockchain technology are addressing challenges in remote healthcare delivery and continuous monitoring.
CONCLUSIONS: This review highlights major advancements in telemedicine architectures, emphasizing the integration of advanced technologies to improve interoperability, security, and scalability. The findings underscore the successful application of cognitive radio technology, behavior-based control, HL7 FHIR standards, cloud computing, decentralized systems, and blockchain in advancing remote healthcare delivery.},
}
@article {pmid39156807,
year = {2024},
author = {Cosic, K and Kopilas, V and Jovanovic, T},
title = {War, emotions, mental health, and artificial intelligence.},
journal = {Frontiers in psychology},
volume = {15},
number = {},
pages = {1394045},
pmid = {39156807},
issn = {1664-1078},
abstract = {During wartime, dysregulation of negative emotions such as fear, anger, hatred, frustration, sadness, humiliation, and hopelessness can overrule normal societal values and culture, and endanger global peace and security as well as mental health in affected societies. It is therefore understandable that the range and power of negative emotions may play important roles in the consideration of human behavior in any armed conflict. The estimation and assessment of dominant negative emotions during wartime are crucial but are challenged by the complexity of the neuro-psycho-physiology of emotions. Currently available natural language processing (NLP) tools offer comprehensive computational methods to analyze and understand the emotional content of related textual data in war-inflicted societies. Innovative AI-driven technologies incorporating machine learning, neuro-linguistic programming, cloud infrastructure, and novel digital therapeutic tools and applications present an immense potential to enhance mental health care worldwide. This advancement could make mental health services more cost-effective and readily accessible. Given the inadequate number of psychiatrists and limited psychiatric resources for coping with the mental health consequences of war and trauma, new digital therapeutic wearable devices supported by AI tools might be a promising approach in the psychiatry of the future. Transformation of dominant negative emotional maps might be undertaken through the simultaneous combination of online cognitive behavioral therapy (CBT) at the individual level and emotionally based strategic communications (EBSC) at the public level. The proposed positive emotional transformation by means of CBT and EBSC may provide important leverage in efforts to protect the mental health of civil populations in war-inflicted societies. AI-based tools that can be applied in the design of EBSC stimuli, such as OpenAI's ChatGPT or Google's Gemini, may have great potential to significantly enhance emotionally based strategic communications through more comprehensive semantic and linguistic analysis of available text datasets from war-traumatized societies. A human in the loop, augmented by ChatGPT and Gemini, can aid in the design and development of emotionally annotated messages that resonate with the targeted population, amplifying the impact of strategic communications in reshaping dominant emotional maps toward more positive states through CBT and EBSC.},
}
@article {pmid39151500,
year = {2024},
author = {Ouyang, T and Yang, J and Gu, Z and Zhang, L and Wang, D and Wang, Y and Yang, Y},
title = {Research on privacy protection in the context of healthcare data based on knowledge map.},
journal = {Medicine},
volume = {103},
number = {33},
pages = {e39370},
doi = {10.1097/MD.0000000000039370},
pmid = {39151500},
issn = {1536-5964},
support = {Grant No.2023Ah040102//Major Scientific Research Project of Anhui Provincial Department of Education/ ; Grant No.2022Ah010038 and No.2023sdxx027//Anhui Province quality projects/ ; Grant no.2021rwzd12//Key humanities projects of Anhui University of Traditional Chinese Medicine/ ; Grant No.JNFX2023020//Middle-aged Young Teacher Training Action Project of Anhui Provincial Department of Education/ ; Grant No.2023jyxm0370//General Project of Teaching Research in Anhui Province/ ; },
mesh = {Humans ; *Big Data ; *Computer Security ; *Privacy ; *Confidentiality ; Bibliometrics ; },
abstract = {With the rapid development of emerging information technologies such as artificial intelligence, cloud computing, and the Internet of Things, the world has entered the era of big data. In the face of growing medical big data, research on the privacy protection of personal information has attracted more and more attention, but few studies have analyzed and forecasted the research hotspots and future development trends of privacy protection. To systematically and comprehensively summarize the relevant privacy protection literature in the context of big healthcare data, a bibliometric analysis was conducted to clarify the spatial and temporal distribution and research hotspots of privacy protection using the information visualization software CiteSpace. Papers related to privacy protection were collected from the Web of Science for 2012 to 2023. Through analysis of the temporal, author, and country distributions of the relevant publications, we found that after 2013 research on privacy protection received increasing attention, that the core institutions of privacy protection research are universities, and that cooperation among countries remains weak. Additionally, keywords like privacy, big data, internet, challenge, care, and information have high centrality and frequency, indicating the research hotspots and trends in the field of privacy protection. These findings provide a comprehensive knowledge structure of privacy protection research for scholars working in the context of health big data, helping them quickly grasp the research hotspots and choose future research projects.},
}
@article {pmid39150579,
year = {2024},
author = {Gehlhaar, DK and Mermelstein, DJ},
title = {FitScore: a fast machine learning-based score for 3D virtual screening enrichment.},
journal = {Journal of computer-aided molecular design},
volume = {38},
number = {1},
pages = {29},
pmid = {39150579},
issn = {1573-4951},
mesh = {*Machine Learning ; Ligands ; *Molecular Docking Simulation ; Binding Sites ; Humans ; Protein Binding ; Proteins/chemistry/metabolism ; Software ; Drug Evaluation, Preclinical/methods ; Drug Discovery/methods ; },
abstract = {Enhancing virtual screening enrichment has become an urgent problem in computational chemistry, driven by increasingly large databases of commercially available compounds, without a commensurate drop in in vitro screening costs. Docking these large databases is possible with cloud-scale computing. However, rapid docking necessitates compromises in scoring, often leading to poor enrichment and an abundance of false positives in docking results. This work describes a new scoring function composed of two parts: a knowledge-based component that predicts the probability of a particular atom type being in a particular receptor environment, and a tunable weight matrix that converts the probability predictions into a dimensionless score suitable for virtual screening enrichment. This score, the FitScore, represents the compatibility between the ligand and the binding site and is capable of a high degree of enrichment across standardized docking test sets.},
}
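The two-part construction can be sketched as follows: per-atom environment probabilities from some predictor are combined through a tunable weight matrix into one dimensionless pose score. All arrays below are random stand-ins for the trained components, not the published FitScore parameters.

```python
# Sketch: knowledge-based probabilities + tunable weight matrix -> score.
import numpy as np

rng = np.random.default_rng(0)
n_types, n_envs = 5, 4
W = rng.uniform(0.5, 1.5, size=(n_types, n_envs))       # tunable weights
probs = rng.dirichlet(np.ones(n_envs), size=n_types)    # stand-in predictor

# One pose: (atom_type, observed_environment) pairs for each ligand atom.
atoms = [(0, 1), (2, 3), (4, 0)]
score = sum(W[t, e] * np.log(probs[t, e]) for t, e in atoms)
print(f"pose score: {score:.3f}")
```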
@article {pmid39149018,
year = {2024},
author = {Kim, C and Lee, J},
title = {Discovering patterns and trends in customer service technologies patents using large language model.},
journal = {Heliyon},
volume = {10},
number = {14},
pages = {e34701},
doi = {10.1016/j.heliyon.2024.e34701},
pmid = {39149018},
issn = {2405-8440},
abstract = {The definition of service has evolved from a focus on material value in manufacturing before the 2000s to a customer-centric value based on the significant growth of the service industry. Digital transformation has become essential for companies in the service industry due to the incorporation of digital technology through the Fourth Industrial Revolution and COVID-19. This study utilised Bidirectional Encoder Representations from Transformers (BERT) to analyse 3029 international patents related to the customer service industry and digital transformation registered between 2000 and 2022. Through topic modelling, this study identified 10 major topics in the customer service industry and analysed their yearly trends. Our findings show that as of 2022, the trend with the highest frequency is user-centric network service design, while cloud computing has experienced the steepest increase over the last five years. User-centric network services have been steadily developing since the inception of the Internet. Cloud computing is one of the key technologies being developed intensively in 2023 for the digital transformation of customer service. This study identifies time-series trends in customer service industry patents and suggests the effectiveness of using BERTopic to predict future trends in technology.},
}
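A minimal sketch of the topic-modelling workflow described, using the public BERTopic API; `load_patent_abstracts` is a placeholder for however the 3029 abstracts are loaded.

```python
# Sketch: fit BERTopic on patent abstracts and inspect the major topics.
from bertopic import BERTopic

docs = load_patent_abstracts()        # placeholder loader, returns list[str]
topic_model = BERTopic(language="english", nr_topics=10)
topics, probs = topic_model.fit_transform(docs)
print(topic_model.get_topic_info().head(10))   # topic sizes and keywords
```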
@article {pmid39147188,
year = {2024},
author = {Miguel, S and Ruiz-Benito, P and Rebollo, P and Viana-Soto, A and Mihai, MC and García-Martín, A and Tanase, M},
title = {Forest disturbance regimes and trends in continental Spain (1985-2023) using dense Landsat time series.},
journal = {Environmental research},
volume = {},
number = {},
pages = {119802},
doi = {10.1016/j.envres.2024.119802},
pmid = {39147188},
issn = {1096-0953},
abstract = {Forest disturbance regimes across biomes are being altered by interactive effects of global change. Establishing baselines for assessing change requires detailed quantitative data on past disturbance events, but such data are scarce and difficult to obtain over large spatial and temporal scales. The integration of remote sensing with dense time series analysis and cloud computing platforms is enhancing the ability to monitor historical disturbances, especially non-stand-replacing events along climatic gradients. Since the integration of such tools is still scarce in Mediterranean regions, here we combine dense Landsat time series and the Continuous Change Detection and Classification - Spectral Mixture Analysis (CCDC-SMA) method to monitor forest disturbance in continental Spain from 1985 to 2023. We adapted the CCDC-SMA method for improved disturbance detection, creating new spectral libraries representative of the study region, and quantified the year, month, severity, return interval, and type of disturbance (stand-replacing, non-stand-replacing) at a 30 m resolution. In addition, we characterised forest disturbance regimes and trends (patch size and severity, and frequency of events) of events larger than 0.5 ha at the national scale by biome (Mediterranean and temperate) and forest type (broadleaf, needleleaf and mixed). We quantified more than 2.9 million patches of disturbed forest, covering 4.6 Mha over the region and period studied. Forest disturbances were on average larger but less severe in the Mediterranean than in the temperate biome, and significantly larger and more severe in needleleaf than in mixed and broadleaf forests. Since the late 1980s, forest disturbances have decreased in size and severity while increasing in frequency across all biomes and forest types. These results have important implications as they confirm that disturbance regimes in continental Spain are changing and should therefore be considered in forest strategic planning for policy development and implementation.},
}
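Assembling a dense Landsat archive of this sort is concise in the Earth Engine Python API; the sketch below merges Landsat 5/7/8 Collection 2 surface reflectance and filters by date and cloud cover. The area of interest is a placeholder, and the CCDC-SMA analysis itself is not shown.

```python
# Sketch: dense Landsat C02 surface-reflectance time series in GEE.
import ee

ee.Initialize()                        # assumes prior Earth Engine auth
region = ee.Geometry.Rectangle([-4.0, 40.0, -3.0, 41.0])  # placeholder AOI

landsat = (
    ee.ImageCollection("LANDSAT/LT05/C02/T1_L2")
    .merge(ee.ImageCollection("LANDSAT/LE07/C02/T1_L2"))
    .merge(ee.ImageCollection("LANDSAT/LC08/C02/T1_L2"))
    .filterBounds(region)
    .filterDate("1985-01-01", "2023-12-31")
    .filter(ee.Filter.lt("CLOUD_COVER", 60))
)
print("scenes:", landsat.size().getInfo())
```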
@article {pmid39146286,
year = {2024},
author = {Zohora, MF and Farhin, F and Kaiser, MS},
title = {An enhanced round robin using dynamic time quantum for real-time asymmetric burst length processes in cloud computing environment.},
journal = {PloS one},
volume = {19},
number = {8},
pages = {e0304517},
doi = {10.1371/journal.pone.0304517},
pmid = {39146286},
issn = {1932-6203},
mesh = {*Cloud Computing ; *Algorithms ; Time Factors ; },
abstract = {Cloud computing is a popular, flexible, scalable, and cost-effective technology in the modern world that provides on-demand services dynamically. The dynamic execution of user requests and resource-sharing facilities require proper task scheduling among the available virtual machines, which is a significant issue and plays a crucial role in developing an optimal cloud computing environment. Round Robin is a prevalent scheduling algorithm for fair distribution of resources with a balanced contribution in minimized response time and turnaround time. This paper introduces a new enhanced round-robin approach for task scheduling in cloud computing systems. The proposed algorithm generates and keeps updating a dynamic quantum time for process execution, considering the number of processes in the system and their burst lengths. Since our method schedules processes dynamically, it is appropriate for a real-time environment like cloud computing. The notable part of this approach is its capability of scheduling tasks with an asymmetric distribution of burst times while avoiding the convoy effect. The experimental results indicate that the proposed algorithm outperforms existing improved round-robin task scheduling approaches in terms of minimized average waiting time, average turnaround time, and number of context switches. Compared against five other enhanced round robin approaches, it reduced average waiting time by 15.77% and context switching by 20.68% on average. Based on the experiments and comparative study, we conclude that the proposed enhanced round-robin scheduling algorithm is optimal, acceptable, and relatively better suited for cloud computing environments.},
}
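A toy sketch of a round robin whose time quantum is recomputed each cycle from the remaining burst lengths (here, their mean); the paper's exact update rule may differ.

```python
# Sketch: round robin with a dynamic quantum derived from remaining bursts.
from statistics import mean

def dynamic_rr(bursts: list[int]) -> list[int]:
    remaining = bursts[:]
    completion, clock = [0] * len(bursts), 0
    while any(r > 0 for r in remaining):
        # Recompute the quantum from the still-unfinished processes.
        quantum = max(1, round(mean(r for r in remaining if r > 0)))
        for i, r in enumerate(remaining):
            if r > 0:
                run = min(r, quantum)
                clock += run
                remaining[i] -= run
                if remaining[i] == 0:
                    completion[i] = clock
    return completion

print(dynamic_rr([24, 3, 3]))     # completion time of each process
```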
@article {pmid39140427,
year = {2024},
author = {Cao, Y and Zhang, Z and Qin, BW and Sang, W and Li, H and Wang, T and Tan, F and Gan, Y and Zhang, X and Liu, T and Xiang, D and Lin, W and Liu, Q},
title = {Physical Reservoir Computing Using van der Waals Ferroelectrics for Acoustic Keyword Spotting.},
journal = {ACS nano},
volume = {},
number = {},
pages = {},
doi = {10.1021/acsnano.4c06144},
pmid = {39140427},
issn = {1936-086X},
abstract = {Acoustic keyword spotting (KWS) plays a pivotal role in the voice-activated systems of artificial intelligence (AI), allowing hands-free interaction between humans and smart devices through information retrieval from voice commands. Cloud computing technology integrated with artificial neural networks has been employed to execute KWS tasks, but it suffers from propagation delay and the risk of privacy breaches. Here, we report a single-node reservoir computing (RC) system based on a CuInP2S6 (CIPS)/graphene heterostructure planar device for implementing KWS tasks at low computation cost. By deliberately tuning the Schottky barrier height at the ferroelectric CIPS interfaces for thermionic injection and transport of electrons, a typical nonlinear current response and fading-memory characteristics are achieved in the device. Additionally, the device exhibits diverse synaptic plasticity with an excellent capability to separate temporal information. We construct an RC system employing the ferroelectric device as the physical node to spot acoustic keywords, i.e., the natural numbers from 1 to 9, in simulation, and the system demonstrates outstanding performance with a high accuracy rate (>94.6%) and recall rate (>92.0%). Our work establishes single-node physical RC as a prospective computing platform for processing acoustic keywords, promoting its applications in artificial auditory systems at the edge.},
}
@article {pmid39138951,
year = {2024},
author = {Guide, A and Garbett, S and Feng, X and Mapes, BM and Cook, J and Sulieman, L and Cronin, RM and Chen, Q},
title = {Balancing efficacy and computational burden: weighted mean, multiple imputation, and inverse probability weighting methods for item non-response in reliable scales.},
journal = {Journal of the American Medical Informatics Association : JAMIA},
volume = {},
number = {},
pages = {},
doi = {10.1093/jamia/ocae217},
pmid = {39138951},
issn = {1527-974X},
support = {3OT2OD035404/NH/NIH HHS/United States ; },
abstract = {IMPORTANCE: Scales often arise from multi-item questionnaires, yet commonly face item non-response. Traditional solutions use weighted mean (WMean) from available responses, but potentially overlook missing data intricacies. Advanced methods like multiple imputation (MI) address broader missing data, but demand increased computational resources. Researchers frequently use survey data in the All of Us Research Program (All of Us), and it is imperative to determine if the increased computational burden of employing MI to handle non-response is justifiable.
OBJECTIVES: Using the 5-item Physical Activity Neighborhood Environment Scale (PANES) in All of Us, this study assessed the tradeoff between efficacy and computational demands of WMean, MI, and inverse probability weighting (IPW) when dealing with item non-response.
MATERIALS AND METHODS: Synthetic missingness, allowing 1 or more item non-response, was introduced into PANES across 3 missing mechanisms and various missing percentages (10%-50%). Each scenario compared WMean of complete questions, MI, and IPW on bias, variability, coverage probability, and computation time.
RESULTS: All methods showed minimal biases (all <5.5%) when internal consistency was good, with WMean suffering most under poor consistency. IPW showed considerable variability as the missing percentage increased. MI required significantly more computational resources, taking >8000 and >100 times longer than WMean and IPW, respectively, in the full data analysis.
DISCUSSION AND CONCLUSION: The marginal performance advantages of MI for item non-response in highly reliable scales do not warrant its escalated cloud computational burden in All of Us, particularly when coupled with computationally demanding post-imputation analyses. Researchers using survey scales with low missingness could utilize WMean to reduce computing burden.},
}
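For concreteness, the WMean approach amounts to prorating the answered items, as in this small sketch with invented responses on a 5-item scale.

```python
# Sketch: weighted-mean (prorated) scale total under item non-response.
import numpy as np

responses = np.array([4.0, np.nan, 3.0, 5.0, np.nan])   # invented answers
wmean_total = np.nanmean(responses) * responses.size     # prorated total
print(f"WMean scale total: {wmean_total:.1f}")
```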
@article {pmid39138215,
year = {2024},
author = {Bontempi, D and Nuernberg, L and Pai, S and Krishnaswamy, D and Thiriveedhi, V and Hosny, A and Mak, RH and Farahani, K and Kikinis, R and Fedorov, A and Aerts, HJWL},
title = {End-to-end reproducible AI pipelines in radiology using the cloud.},
journal = {Nature communications},
volume = {15},
number = {1},
pages = {6931},
pmid = {39138215},
issn = {2041-1723},
support = {866504//EC | EU Framework Programme for Research and Innovation H2020 | H2020 Priority Excellent Science | H2020 European Research Council (H2020 Excellent Science - European Research Council)/ ; HHSN261201500003l//Foundation for the National Institutes of Health (Foundation for the National Institutes of Health, Inc.)/ ; },
mesh = {*Cloud Computing ; Humans ; *Artificial Intelligence ; Reproducibility of Results ; Deep Learning ; Radiology/methods/standards ; Algorithms ; Neoplasms/diagnostic imaging ; Image Processing, Computer-Assisted/methods ; },
abstract = {Artificial intelligence (AI) algorithms hold the potential to revolutionize radiology. However, a significant portion of the published literature lacks transparency and reproducibility, which hampers sustained progress toward clinical translation. Although several reporting guidelines have been proposed, identifying practical means to address these issues remains challenging. Here, we show the potential of cloud-based infrastructure for implementing and sharing transparent and reproducible AI-based radiology pipelines. We demonstrate end-to-end reproducibility from retrieving cloud-hosted data, through data pre-processing, deep learning inference, and post-processing, to the analysis and reporting of the final results. We successfully implement two distinct use cases, starting from recent literature on AI-based biomarkers for cancer imaging. Using cloud-hosted data and computing, we confirm the findings of these studies and extend the validation to previously unseen data for one of the use cases. Furthermore, we provide the community with transparent and easy-to-extend examples of pipelines impactful for the broader oncology field. Our approach demonstrates the potential of cloud resources for implementing, sharing, and using reproducible and transparent AI pipelines, which can accelerate the translation into clinical solutions.},
}
@article {pmid39129832,
year = {2024},
author = {Ju, D and Kim, S},
title = {Volatile tin oxide memristor for neuromorphic computing.},
journal = {iScience},
volume = {27},
number = {8},
pages = {110479},
pmid = {39129832},
issn = {2589-0042},
abstract = {The rise of neuromorphic systems has addressed the shortcomings of current computing architectures, especially regarding energy efficiency and scalability. These systems use cutting-edge technologies such as Pt/SnOx/TiN memristors, which efficiently mimic synaptic behavior and provide potential solutions to modern computing challenges. Moreover, their unipolar resistive switching ability enables precise modulation of the synaptic weights, facilitating energy-efficient parallel processing that is similar to biological synapses. Additionally, memristors' spike-rate-dependent plasticity enhances the adaptability of neural circuits, offering promising applications in intelligent computing. Integrating memristors into edge computing architectures further highlights their importance in tackling the security and efficiency issues associated with conventional cloud computing models.},
}
@article {pmid39124116,
year = {2024},
author = {Pazhanivel, DB and Velu, AN and Palaniappan, BS},
title = {Design and Enhancement of a Fog-Enabled Air Quality Monitoring and Prediction System: An Optimized Lightweight Deep Learning Model for a Smart Fog Environmental Gateway.},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {15},
pages = {},
pmid = {39124116},
issn = {1424-8220},
abstract = {Effective air quality monitoring and forecasting are essential for safeguarding public health, protecting the environment, and promoting sustainable development in smart cities. Conventional systems are cloud-based, incur high costs, lack accurate Deep Learning (DL) models for multi-step forecasting, and fail to optimize DL models for fog nodes. To address these challenges, this paper proposes a Fog-enabled Air Quality Monitoring and Prediction (FAQMP) system by integrating the Internet of Things (IoT), Fog Computing (FC), Low-Power Wide-Area Networks (LPWANs), and Deep Learning (DL) for improved accuracy and efficiency in monitoring and forecasting air quality levels. The three-layered FAQMP system includes a low-cost Air Quality Monitoring (AQM) node transmitting data via LoRa to the Fog Computing layer and then the cloud layer for complex processing. The Smart Fog Environmental Gateway (SFEG) in the FC layer introduces efficient Fog Intelligence by employing an optimized lightweight DL-based Sequence-to-Sequence (Seq2Seq) Gated Recurrent Unit (GRU) attention model, enabling real-time processing, accurate forecasting, and timely warnings of dangerous AQI levels while optimizing fog resource usage. Initially, the Seq2Seq GRU attention model, validated for multi-step forecasting, outperformed the state-of-the-art DL methods with an average RMSE of 5.5576, MAE of 3.4975, MAPE of 19.1991%, R² of 0.6926, and Theil's U1 of 0.1325. This model is then made lightweight and optimized using post-training quantization (PTQ), specifically dynamic range quantization, which reduced the model size to less than a quarter of the original and improved execution time by 81.53% while maintaining forecast accuracy. This optimization enables efficient deployment on resource-constrained fog nodes like SFEG by balancing performance and computational efficiency, thereby enhancing the effectiveness of the FAQMP system through efficient Fog Intelligence. The FAQMP system, supported by the EnviroWeb application, provides real-time AQI updates, forecasts, and alerts, aiding the government in proactively addressing pollution concerns, maintaining air quality standards, and fostering a healthier and more sustainable environment.},
}
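The optimization named above, post-training dynamic range quantization, is a few lines in TensorFlow Lite; `model` is assumed to be the trained Keras forecaster, and the output filename is arbitrary.

```python
# Sketch: post-training dynamic range quantization with TF Lite.
import tensorflow as tf

converter = tf.lite.TFLiteConverter.from_keras_model(model)  # trained model
converter.optimizations = [tf.lite.Optimize.DEFAULT]   # dynamic range quant
tflite_model = converter.convert()

with open("seq2seq_gru_quant.tflite", "wb") as f:
    f.write(tflite_model)
```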
@article {pmid39123976,
year = {2024},
author = {Villar, E and Martín Toral, I and Calvo, I and Barambones, O and Fernández-Bustamante, P},
title = {Architectures for Industrial AIoT Applications.},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {15},
pages = {},
doi = {10.3390/s24154929},
pmid = {39123976},
issn = {1424-8220},
abstract = {Industry 4.0 introduced new concepts, technologies, and paradigms, such as Cyber Physical Systems (CPSs), Industrial Internet of Things (IIoT) and, more recently, Artificial Intelligence of Things (AIoT). These paradigms ease the creation of complex systems by integrating heterogeneous devices. As a result, the structure of the production systems is changing completely. In this scenario, the adoption of reference architectures based on standards may guide designers and developers to create complex AIoT applications. This article surveys the main reference architectures available for industrial AIoT applications, analyzing their key characteristics, objectives, and benefits; it also presents some use cases that may help designers create new applications. The main goal of this review is to help engineers identify the alternative that best suits every application. The authors conclude that existing reference architectures are a necessary tool for standardizing AIoT applications, since they may guide developers in the process of developing new applications. However, the use of reference architectures in real AIoT industrial applications is still incipient, so more development effort is needed in order for it to be widely adopted.},
}
@article {pmid39116433,
year = {2024},
author = {Sibanda, K and Ndayizigamiye, P and Twinomurinzi, H},
title = {Industry 4.0 Technologies in Maternal Health Care: Bibliometric Analysis and Research Agenda.},
journal = {JMIR pediatrics and parenting},
volume = {7},
number = {},
pages = {e47848},
doi = {10.2196/47848},
pmid = {39116433},
issn = {2561-6722},
abstract = {BACKGROUND: Industry 4.0 (I4.0) technologies have improved operations in health care facilities by optimizing processes, leading to efficient systems and tools to assist health care personnel and patients.
OBJECTIVE: This study investigates the current implementation and impact of I4.0 technologies within maternal health care, explicitly focusing on transforming care processes, treatment methods, and automated pregnancy monitoring. Additionally, it conducts a thematic landscape mapping, offering a nuanced understanding of this emerging field. Building on this analysis, a future research agenda is proposed, highlighting critical areas for future investigations.
METHODS: A bibliometric analysis of publications retrieved from the Scopus database was conducted to examine how the research into I4.0 technologies in maternal health care evolved from 1985 to 2022. A search strategy was used to screen the eligible publications using the abstract and full-text reading. The most productive and influential journals; authors', institutions', and countries' influence on maternal health care; and current trends and thematic evolution were computed using the Bibliometrix R package (R Core Team).
RESULTS: A total of 1003 unique papers in English were retrieved using the search string, and 136 papers were retained after the inclusion and exclusion criteria were applied, covering 37 years from 1985 to 2022. The annual growth rate of publications was 9.53%, with 88.9% (n=121) of the publications appearing in 2016-2022. In the thematic analysis, 4 clusters were identified: artificial neural networks, data mining, machine learning, and the Internet of Things. Artificial intelligence, deep learning, risk prediction, digital health, telemedicine, wearable devices, mobile health care, and cloud computing remained the dominant research themes in 2016-2022.
CONCLUSIONS: This bibliometric analysis reviews the state of the art in the evolution and structure of I4.0 technologies in maternal health care and how they may be used to optimize operational processes. A conceptual framework with 4 performance factors (risk prediction, hospital care, health record management, and self-care) is suggested for process improvement. A research agenda is also proposed for governance, adoption, infrastructure, privacy, and security.},
}
@article {pmid39111449,
year = {2024},
author = {Wan, L and Kendall, AD and Rapp, J and Hyndman, DW},
title = {Mapping agricultural tile drainage in the US Midwest using explainable random forest machine learning and satellite imagery.},
journal = {The Science of the total environment},
volume = {},
number = {},
pages = {175283},
doi = {10.1016/j.scitotenv.2024.175283},
pmid = {39111449},
issn = {1879-1026},
abstract = {There has been an increase in tile-drained area across the US Midwest and other regions worldwide due to agricultural expansion, intensification, and climate variability. Despite this growth, spatially explicit tile drainage maps remain scarce, which limits the accuracy of hydrologic modeling and implementation of nutrient reduction strategies. Here, we developed a machine-learning model to provide a Spatially Explicit Estimate of Tile Drainage (SEETileDrain) across the US Midwest in 2017 at a 30-m resolution. This model used 31 satellite-derived and environmental features after removing less important and highly correlated features. It was trained with 60,938 tile and non-tile ground truth points within the Google Earth Engine cloud-computing platform. We also used multiple feature importance metrics and Accumulated Local Effects to interpret the machine learning model. The results show that our model achieved good accuracy, with 96% of points classified correctly and an F1 score of 0.90. When tile drainage area is aggregated to the county scale, it agreed well (r² = 0.69) with the reported area from the Ag Census. We found that Land Surface Temperature (LST) along with climate- and soil-related features were the most important factors for classification. The top-ranked feature is the median summer nighttime LST, followed by median summer soil moisture percent. This study demonstrates the potential of applying satellite remote sensing to map spatially explicit agricultural tile drainage across large regions. The results should be useful for land use change monitoring and hydrologic and nutrient models, including those designed to achieve cost-effective agricultural water and nutrient management strategies. The algorithms developed here should also be applicable for other remote sensing mapping applications.},
}
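A compact sketch of the explainable random-forest pattern used here: fit the classifier, then rank features by permutation importance. Synthetic data stands in for the 31 satellite-derived and environmental features.

```python
# Sketch: random forest + permutation importance on synthetic stand-in data.
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.inspection import permutation_importance
from sklearn.model_selection import train_test_split

X, y = make_classification(n_samples=2000, n_features=31, random_state=0)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, random_state=0)

rf = RandomForestClassifier(n_estimators=300, random_state=0).fit(X_tr, y_tr)
imp = permutation_importance(rf, X_te, y_te, n_repeats=10, random_state=0)
top = imp.importances_mean.argsort()[::-1][:5]
print("top features:", top, imp.importances_mean[top].round(3))
```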
@article {pmid39107423,
year = {2024},
author = {Ramdani, F and Setiani, P and Sianturi, R},
title = {Towards understanding climate change impacts: monitoring the vegetation dynamics of terrestrial national parks in Indonesia.},
journal = {Scientific reports},
volume = {14},
number = {1},
pages = {18257},
pmid = {39107423},
issn = {2045-2322},
mesh = {*Climate Change ; Indonesia ; *Parks, Recreational ; *Conservation of Natural Resources ; Seasons ; Environmental Monitoring/methods ; Ecosystem ; Plants ; },
abstract = {Monitoring vegetation dynamics in terrestrial national parks (TNPs) is crucial for ensuring sustainable environmental management, mitigating the potential negative impacts of short- and long-term disturbances, and understanding the effects of climate change within natural and protected areas. This study aims to monitor the vegetation dynamics of TNPs in Indonesia by first categorizing them into the regions of Sumatra, Jawa, Kalimantan, Sulawesi, and Eastern Indonesia and then applying ready-to-use MODIS EVI time-series imagery (MOD13Q1) from 2000 to 2022 on the GEE cloud-computing platform. Specifically, this research investigates greening and browning fraction trends using Sen's slope, considers seasonality by analyzing the maximum and minimum EVI values, and assesses anomalous years by comparing the annual time series against the long-term median EVI value. The findings reveal significantly increasing greening trends in most TNPs, except Danau Sentarum, from 2000 to 2022. The seasonality analysis shows that most TNPs exhibit peak and trough greenness at the end of the rainy and dry seasons, respectively, as vegetation responds to increasing and decreasing precipitation. Anomalies in seasonality attributable to climate change were detected in all regions. To increase TNP resilience, suggested measures include active reforestation, implementation of Assisted Natural Regeneration, stronger enforcement of fundamental managerial tasks, and forest fire management.},
}
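The Sen's-slope trend test used above is available directly in SciPy; this sketch classifies a synthetic annual EVI series as greening or browning from the slope's confidence bounds.

```python
# Sketch: Sen's slope (Theil-Sen) greening/browning test on synthetic EVI.
import numpy as np
from scipy.stats import theilslopes

years = np.arange(2000, 2023)
evi = (0.35 + 0.002 * (years - 2000)
       + np.random.default_rng(1).normal(0, 0.01, years.size))

slope, intercept, lo, hi = theilslopes(evi, years)
trend = "greening" if lo > 0 else "browning" if hi < 0 else "no clear trend"
print(f"Sen's slope: {slope:.4f} EVI/yr (95% CI {lo:.4f}..{hi:.4f}) -> {trend}")
```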
@article {pmid39101486,
year = {2024},
author = {Ruprecht, NA and Kennedy, JD and Bansal, B and Singhal, S and Sens, D and Maggio, A and Doe, V and Hawkins, D and Campbel, R and O'Connell, K and Gill, JS and Schaefer, K and Singhal, SK},
title = {Transcriptomics and epigenetic data integration learning module on Google Cloud.},
journal = {Briefings in bioinformatics},
volume = {25},
number = {Supplement_1},
pages = {},
pmid = {39101486},
issn = {1477-4054},
support = {P20GM103442//National Institute of General Medical Sciences of the National Institutes of Health/ ; },
mesh = {Humans ; *Cloud Computing ; *Epigenomics/methods ; Epigenesis, Genetic ; Transcriptome ; Computational Biology/methods ; Gene Expression Profiling/methods ; Software ; Data Mining/methods ; },
abstract = {Multi-omics (genomics, transcriptomics, epigenomics, proteomics, metabolomics, etc.) research approaches are vital for understanding the hierarchical complexity of human biology and have proven to be extremely valuable in cancer research and precision medicine. Emerging scientific advances in recent years have made high-throughput genome-wide sequencing a central focus in molecular research by allowing for the collective analysis of various kinds of molecular biological data from different types of specimens in a single tissue or even at the level of a single cell. Additionally, with the help of improved computational resources and data mining, researchers are able to integrate data from different multi-omics regimes to identify new prognostic, diagnostic, or predictive biomarkers, uncover novel therapeutic targets, and develop more personalized treatment protocols for patients. For the research community to parse the scientifically and clinically meaningful information out of all the biological data being generated each day more efficiently with less wasted resources, being familiar with and comfortable using advanced analytical tools, such as Google Cloud Platform, becomes imperative. This project is an interdisciplinary, cross-organizational effort to provide a guided learning module for integrating transcriptomics and epigenetics data analysis protocols into a comprehensive analysis pipeline for users to implement in their own work, utilizing the cloud computing infrastructure on Google Cloud. The learning module consists of three submodules that guide the user through tutorial examples that illustrate the analysis of RNA-sequence and Reduced-Representation Bisulfite Sequencing data. The examples are in the form of breast cancer case studies, and the data sets were procured from the public repository Gene Expression Omnibus. The first submodule is devoted to transcriptomics analysis with the RNA sequencing data, the second submodule focuses on epigenetics analysis using the DNA methylation data, and the third submodule integrates the two methods for a deeper biological understanding. The modules begin with data collection and preprocessing, with further downstream analysis performed in a Vertex AI Jupyter notebook instance with an R kernel. Analysis results are returned to Google Cloud buckets for storage and visualization, removing the computational strain from local resources. The final product is a start-to-finish tutorial for researchers with limited experience in multi-omics to integrate transcriptomics and epigenetics data analysis into a comprehensive pipeline to perform their own biological research. This manuscript describes the development of a resource module that is part of a learning platform named "NIGMS Sandbox for Cloud-based Learning" (https://github.com/NIGMS/NIGMS-Sandbox). The overall genesis of the Sandbox is described in the editorial NIGMS Sandbox [16] at the beginning of this Supplement. This module delivers learning materials on the analysis of bulk and single-cell ATAC-seq data in an interactive format that uses appropriate cloud resources for data access and analyses.},
}
@article {pmid39098886,
year = {2024},
author = {John, J and John Singh, K},
title = {Trust value evaluation of cloud service providers using fuzzy inference based analytical process.},
journal = {Scientific reports},
volume = {14},
number = {1},
pages = {18028},
pmid = {39098886},
issn = {2045-2322},
abstract = {Cloud computing is a novel computing model in which users purchase virtualized computer resources on demand, offering numerous advantages over traditional methods for the IT and healthcare industries. However, a lack of trust between cloud service users (CSUs) and cloud service providers (CSPs) is hindering the widespread adoption of cloud computing across industries. Since cloud computing offers a wide range of trust models and strategies, it is essential to analyze each service using a detailed methodology in order to choose the appropriate cloud service for different user types. Achieving this requires identifying a comprehensive set of elements that are both necessary and sufficient for evaluating any cloud service. As a result, this study proposes an accurate, fuzzy-logic-based trust evaluation model for assessing the trustworthiness of a cloud service provider. Here, we examine how fuzzy logic improves the efficiency of trust evaluation. Trust is assessed using Quality of Service (QoS) characteristics such as security, privacy, dynamicity, data integrity, and performance. The outcomes of a MATLAB simulation demonstrate the viability of the suggested strategy in a cloud setting.},
}
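A minimal sketch of fuzzy-inference trust scoring, assuming the scikit-fuzzy control API and reducing the five QoS inputs to two antecedents and three rules for brevity.

```python
# Sketch: tiny fuzzy inference system for a trust score (0-10 scale).
import numpy as np
from skfuzzy import control as ctrl

security = ctrl.Antecedent(np.arange(0, 11, 1), "security")
performance = ctrl.Antecedent(np.arange(0, 11, 1), "performance")
trust = ctrl.Consequent(np.arange(0, 11, 1), "trust")
for var in (security, performance, trust):
    var.automf(3)            # default labels: poor, average, good

rules = [
    ctrl.Rule(security["good"] & performance["good"], trust["good"]),
    ctrl.Rule(security["average"] | performance["average"], trust["average"]),
    ctrl.Rule(security["poor"], trust["poor"]),
]
sim = ctrl.ControlSystemSimulation(ctrl.ControlSystem(rules))
sim.input["security"], sim.input["performance"] = 8, 6
sim.compute()
print(f"trust score: {sim.output['trust']:.2f}")
```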
@article {pmid39097607,
year = {2024},
author = {Zhang, H and Li, J and Yang, H},
title = {Cloud computing load prediction method based on CNN-BiLSTM model under low-carbon background.},
journal = {Scientific reports},
volume = {14},
number = {1},
pages = {18004},
pmid = {39097607},
issn = {2045-2322},
support = {XJ2023004301//Basic scientific research business fee of central colleges and universities/ ; },
abstract = {With the establishment of the "double carbon" goal, various industries are actively exploring ways to reduce carbon emissions. Cloud data centers, represented by cloud computing, often suffer from a mismatch between load requests and resource supply, resulting in excessive carbon emissions. On this basis, this paper proposes a complete method for predicting cloud computing carbon emissions. Firstly, a combined model of a convolutional neural network and a bidirectional long short-term memory network (CNN-BiLSTM) is used to predict the cloud computing load. Real-time power estimates are obtained from the real-time load predictions, and carbon emission predictions are then derived from the power calculation. A dynamic server carbon emission prediction model is developed so that server carbon emissions vary with CPU utilization, thereby supporting low-carbon emission reduction. In this paper, Google cluster data are used to predict the load. The experimental results show that the CNN-BiLSTM combined model has a good prediction effect. Compared with the multi-layer feedforward neural network (BP), long short-term memory (LSTM), bidirectional long short-term memory (BiLSTM), and modal decomposition and convolution long time-series neural network (CEEMDAN-ConvLSTM) models, the MSE index decreased by 52%, 50%, 34%, and 45%, respectively.},
}
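A minimal Keras sketch of a CNN-BiLSTM load forecaster of the kind combined here: a 1D convolution extracts local patterns and a bidirectional LSTM models temporal dependencies. Window length, layer sizes, and the single-step output are illustrative.

```python
# Sketch: CNN-BiLSTM next-step load forecaster (illustrative sizes).
import tensorflow as tf
from tensorflow.keras import layers

model = tf.keras.Sequential([
    layers.Input(shape=(48, 1)),            # 48-step load window
    layers.Conv1D(32, kernel_size=3, activation="relu"),
    layers.MaxPooling1D(2),
    layers.Bidirectional(layers.LSTM(64)),
    layers.Dense(1),                        # next-step load
])
model.compile(optimizer="adam", loss="mse")
model.summary()
```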
@article {pmid39092509,
year = {2024},
author = {Okoniewski, MJ and Wiegand, A and Schmid, DC and Bolliger, C and Bovino, C and Belluco, M and Wüst, T and Byrde, O and Maffioletti, S and Rinn, B},
title = {Leonhard Med, a trusted research environment for processing sensitive research data.},
journal = {Journal of integrative bioinformatics},
volume = {},
number = {},
pages = {},
pmid = {39092509},
issn = {1613-4516},
abstract = {This paper provides an overview of the development and operation of the Leonhard Med Trusted Research Environment (TRE) at ETH Zurich. Leonhard Med gives scientific researchers the ability to securely work on sensitive research data. We give an overview of the user perspective, the legal framework for processing sensitive data, design history, current status, and operations. Leonhard Med is an efficient, highly secure Trusted Research Environment for data processing, hosted at ETH Zurich and operated by the Scientific IT Services (SIS) of ETH. It provides a full stack of security controls that allow researchers to store, access, manage, and process sensitive data according to Swiss legislation and ETH Zurich Data Protection policies. In addition, Leonhard Med fulfills the BioMedIT Information Security Policies and is compatible with international data protection laws, and can therefore be utilized within the scope of national and international collaborative research projects. Initially designed as a "bare-metal" High-Performance Computing (HPC) platform to achieve maximum performance, Leonhard Med was later re-designed as a virtualized, private cloud platform to offer more flexibility to its customers. Sensitive data can be analyzed in secure, segregated spaces called tenants. Technical and Organizational Measures (TOMs) are in place to assure the confidentiality, integrity, and availability of sensitive data. At the same time, Leonhard Med ensures broad access to cutting-edge research software, especially for the analysis of human-omics data and other personalized health applications.},
}
@article {pmid39088558,
year = {2024},
author = {Tawfik, M},
title = {Optimized intrusion detection in IoT and fog computing using ensemble learning and advanced feature selection.},
journal = {PloS one},
volume = {19},
number = {8},
pages = {e0304082},
pmid = {39088558},
issn = {1932-6203},
mesh = {*Cloud Computing ; *Internet of Things ; Computer Security ; Neural Networks, Computer ; Algorithms ; Machine Learning ; },
abstract = {The proliferation of Internet of Things (IoT) devices and fog computing architectures has introduced major security and cyber threats. Intrusion detection systems have become effective in monitoring network traffic and activities to identify anomalies that are indicative of attacks. However, constraints such as limited computing resources at fog nodes render conventional intrusion detection techniques impractical. This paper proposes a novel framework that integrates stacked autoencoders, CatBoost, and an optimised transformer-CNN-LSTM ensemble tailored for intrusion detection in fog and IoT networks. Autoencoders extract robust features from high-dimensional traffic data while reducing dimensionality for efficiency at fog nodes. CatBoost refines the features through predictive selection. The ensemble model combines self-attention, convolutions, and recurrence for comprehensive traffic analysis in the cloud. Evaluations on the NSL-KDD, UNSW-NB15, and AWID benchmarks demonstrate an accuracy of over 99% in detecting threats across traditional, hybrid-enterprise, and wireless environments. Integrated edge preprocessing and cloud-based ensemble learning pipelines enable efficient and accurate anomaly detection. The results highlight the viability of securing real-world fog and IoT infrastructure against continuously evolving cyber-attacks.},
}
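The autoencoder stage can be sketched in a few Keras lines: compress traffic features to a bottleneck whose encoder is then reused for fog-node feature extraction. Layer sizes are illustrative; the CatBoost selection and the transformer-CNN-LSTM ensemble are not shown.

```python
# Sketch: stacked autoencoder for compact traffic features (illustrative).
import tensorflow as tf
from tensorflow.keras import layers

n_features = 122                            # e.g., one-hot NSL-KDD width
inputs = tf.keras.Input(shape=(n_features,))
encoded = layers.Dense(64, activation="relu")(inputs)
encoded = layers.Dense(32, activation="relu")(encoded)      # bottleneck
decoded = layers.Dense(64, activation="relu")(encoded)
decoded = layers.Dense(n_features, activation="sigmoid")(decoded)

autoencoder = tf.keras.Model(inputs, decoded)
encoder = tf.keras.Model(inputs, encoded)   # reused at fog nodes
autoencoder.compile(optimizer="adam", loss="mse")
```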
@article {pmid39081193,
year = {2024},
author = {Mer, P and Limbachiya, C},
title = {Electron-driven molecular processes for cyanopolyacetylenes HC2n+1N (n = 3, 4, and 5).},
journal = {Physical chemistry chemical physics : PCCP},
volume = {},
number = {},
pages = {},
doi = {10.1039/d4cp02665a},
pmid = {39081193},
issn = {1463-9084},
abstract = {Linear carbon-series cyanopolyacetylenes (HC2n+1N) (n = 3, 4, and 5) are astromolecules found in the atmosphere of Titan and in interstellar media such as TMC-1 (Taurus molecular cloud-1). All these compounds have also been detected in IRC +10216. In the present work, we comprehensively investigate electron interaction with important cyanopolyacetylene compounds, viz. HC7N (cyano-tri-acetylene), HC9N (cyano-tetra-acetylene), and HC11N (cyano-penta-acetylene). The study covers incident electron energies ranging from the ionization threshold to 5 keV. Various electron-driven molecular processes are quantified in terms of total cross-sections. The quantum spherical complex optical potential (SCOP) formalism is used to determine elastic (Qel) and inelastic (Qinel) cross-sections. Ionization is the most important inelastic effect that opens various chemical pathways for the generation of different molecular species; we computed the ionization cross-section (Qion) and the discrete electronic excitation cross-section (ΣQexc) using the complex scattering potential-ionization contribution (CSP-ic) method. The cyanopolyacetylene compounds are difficult to handle experimentally owing to the health risks involved, so no prior experimental data are available for these molecules; only Qion has been reported theoretically. Thus, the present work is the first report computing Qel, Qinel, ΣQexc, and QT. To provide an alternative approach and further validation of the present work, we employed our recently developed two-parameter semi-empirical method (2p-SEM) to compute Qel and QT. Additionally, we predict the polarizability of the HC11N molecule, which has not been reported in the existing literature. This prediction is based on a correlation study of polarizabilities of molecules with Qion values from the same series of molecules.},
}
@article {pmid39078761,
year = {2024},
author = {Lee, SY and Ku, MY and Tseng, WC and Chen, JY},
title = {AI Accelerator with Ultralightweight Time-Period CNN-Based Model for Arrhythmia Classification.},
journal = {IEEE transactions on biomedical circuits and systems},
volume = {PP},
number = {},
pages = {},
doi = {10.1109/TBCAS.2024.3435718},
pmid = {39078761},
issn = {1940-9990},
abstract = {This work proposes a classification system for arrhythmias, aiming to enhance the efficiency of the diagnostic process for cardiologists. The proposed algorithm includes a naive preprocessing procedure for electrocardiography (ECG) data applicable to various ECG databases. Additionally, this work proposes an ultralightweight model for arrhythmia classification based on a convolutional neural network and incorporating R-peak interval features to represent long-term rhythm information, thereby improving the model's classification performance. The proposed model is trained and tested by using the MIT-BIH and NCKU-CBIC databases in accordance with the classification standards of the Association for the Advancement of Medical Instrumentation (AAMI), achieving high accuracies of 98.32% and 97.1%. This work applies the arrhythmia classification algorithm to a web-based system, thus providing a graphical interface. The cloud-based execution of automated artificial intelligence (AI) classification allows cardiologists and patients to view ECG wave conditions instantly, thereby remarkably enhancing the quality of medical examination. This work also designs a customized integrated circuit for the hardware implementation of an AI accelerator. The accelerator utilizes a parallelized processing element array architecture to perform convolution and fully connected layer operations. It introduces proposed hybrid stationary techniques, combining input and weight stationary modes to increase data reuse drastically and reduce hardware execution cycles and power consumption, ultimately achieving high-performance computing. This accelerator is implemented in the form of a chip by using the TSMC 180 nm CMOS process. It exhibits a power consumption of 122 μW, a classification latency of 6.8 ms, and an energy efficiency of 0.83 μJ/classification.},
}
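The entry above pairs a beat-level CNN with R-peak (RR) interval features to capture long-term rhythm. As a rough illustration of that two-branch idea, here is a minimal Keras sketch; the window length, layer sizes, RR feature count, and the five AAMI classes are illustrative assumptions, not the paper's published configuration.

import tensorflow as tf
from tensorflow.keras import layers, models

beat = layers.Input(shape=(250, 1), name="ecg_beat")   # one beat window (length assumed)
rr = layers.Input(shape=(4,), name="rr_features")      # e.g. pre/post RR, local/global means

x = layers.Conv1D(8, 5, activation="relu")(beat)       # ultralight conv stack
x = layers.MaxPooling1D(4)(x)
x = layers.Conv1D(16, 5, activation="relu")(x)
x = layers.GlobalAveragePooling1D()(x)
x = layers.Concatenate()([x, rr])                      # inject long-term rhythm info here
out = layers.Dense(5, activation="softmax")(x)         # AAMI classes N, S, V, F, Q

model = models.Model([beat, rr], out)
model.compile(optimizer="adam",
              loss="sparse_categorical_crossentropy",
              metrics=["accuracy"])
model.summary()

The Concatenate layer is where rhythm information joins the morphological features, which is the design choice the abstract credits for improved classification performance.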
@article {pmid39071997,
year = {2024},
author = {Rehman, SU and Sadek, I and Huang, B and Manickam, S and Mahmoud, LN},
title = {IoT-based emergency cardiac death risk rescue alert system.},
journal = {MethodsX},
volume = {13},
number = {},
pages = {102834},
pmid = {39071997},
issn = {2215-0161},
abstract = {The use of technology in healthcare is one of the most critical application areas today. With the development of medical applications, people's quality of life has improved. However, it is impractical and unnecessary for medium-risk people to receive specialized daily monitoring in hospital; at the same time, given their health status, leaving them entirely unmonitored exposes them to a high risk of severe or even life-threatening deterioration. Therefore, remote, real-time, low-cost, wearable, and effective monitoring is ideal for this problem. Many studies have noted that electrocardiogram (ECG) detection can be used to discover emergencies; however, how to respond to the emergencies so discovered in everyday household life remains a research gap in this field.•This paper proposes real-time monitoring of ECG signals, which are sent to the cloud for Sudden Cardiac Death (SCD) prediction.•Unlike previous studies, the proposed system has an additional emergency response mechanism that alerts nearby community healthcare workers when SCD is predicted to occur.},
}
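The research gap the entry above targets is the response path from cloud-side prediction to nearby community health workers. A minimal sketch of such an alert hop, assuming an MQTT broker and a placeholder prediction stub; the broker address, topic, and predict_scd() rule are inventions for illustration, not the paper's interface.

import json
import time
import paho.mqtt.client as mqtt

def predict_scd(ecg_window):
    """Stub for the cloud-side SCD prediction model (placeholder rule only)."""
    return max(ecg_window) > 1.8

client = mqtt.Client()  # paho-mqtt 1.x constructor; v2 also expects a CallbackAPIVersion
client.connect("broker.example.org", 1883)

ecg_window = [0.1, 0.4, 2.0]  # one buffered ECG segment (toy values)
if predict_scd(ecg_window):
    alert = {"patient_id": "demo-001", "event": "SCD_RISK", "ts": time.time()}
    client.publish("healthcare/alerts/community", json.dumps(alert), qos=1)
client.disconnect()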
@article {pmid39056390,
year = {2024},
author = {Bigi, F and Pozdnyakov, SN and Ceriotti, M},
title = {Wigner kernels: Body-ordered equivariant machine learning without a basis.},
journal = {The Journal of chemical physics},
volume = {161},
number = {4},
pages = {},
doi = {10.1063/5.0208746},
pmid = {39056390},
issn = {1089-7690},
abstract = {Machine-learning models based on a point-cloud representation of a physical object are ubiquitous in scientific applications and particularly well-suited to the atomic-scale description of molecules and materials. Among the many different approaches that have been pursued, the description of local atomic environments in terms of their discretized neighbor densities has been used widely and very successfully. We propose a novel density-based method, which involves computing "Wigner kernels." These are fully equivariant and body-ordered kernels that can be computed iteratively at a cost that is independent of the basis used to discretize the density and grows only linearly with the maximum body-order considered. Wigner kernels represent the infinite-width limit of feature-space models, whose dimensionality and computational cost instead scale exponentially with the increasing order of correlations. We present several examples of the accuracy of models based on Wigner kernels in chemical applications, for both scalar and tensorial targets, reaching an accuracy that is competitive with state-of-the-art deep-learning architectures. We discuss the broader relevance of these findings to equivariant geometric machine-learning.},
}
@article {pmid39054942,
year = {2024},
author = {Sharma, S and Tyagi, S},
title = {A fourfold-objective-based cloud privacy preservation model with proposed association rule hiding and deep learning assisted optimal key generation.},
journal = {Network (Bristol, England)},
volume = {},
number = {},
pages = {1-36},
doi = {10.1080/0954898X.2024.2378836},
pmid = {39054942},
issn = {1361-6536},
abstract = {Numerous studies have been conducted in an attempt to preserve cloud privacy, yet the majority of cutting-edge solutions fall short when it comes to handling sensitive data. This research proposes a "privacy preservation model in the cloud environment". The recommended security preservation methodology has four stages: "identification of sensitive data, generation of an optimal tuned key, suggested data sanitization, and data restoration". Initially, the owner's data enters the sensitive-data identification process, where the sensitive information in the input (owner's data) is identified via an Augmented Dynamic Itemset Counting (ADIC)-based associative rule mining model. Subsequently, the identified sensitive data are sanitized with a newly created tuned key. The tuned key is generated by a deep learning approach driven by a new fourfold-objective hybrid optimization scheme: an LSTM produces the optimally tuned key on the basis of the four objectives and the new hybrid MUAOA algorithm, and the created keys, together with the generated sensitive rules, are fed into the deep learning model. The MUAOA technique is a conceptual blend of the standard AOA and CMBO. As a result, unauthorized parties are unable to access the information. Finally, a comparative evaluation is undertaken, in which the proposed LSTM+MUAOA achieves a higher privacy value, of about 5.21, compared with other existing models.},
}
@article {pmid39049325,
year = {2024},
author = {Chen, SY and Tu, MH},
title = {Use Mobile Apps to Link to Google Forms to Conduct Online Surveys.},
journal = {Studies in health technology and informatics},
volume = {315},
number = {},
pages = {567-568},
doi = {10.3233/SHTI240219},
pmid = {39049325},
issn = {1879-8365},
mesh = {Taiwan ; Humans ; *Mobile Applications ; Surveys and Questionnaires ; Coronary Artery Disease ; Anxiety ; Male ; Female ; Middle Aged ; Internet ; },
abstract = {The study aimed to evaluate changes in anxiety levels in patients with coronary artery disease before and after cardiac catheterization. The LINE mobile application was used to link to Google Forms and collect online data. A total of 188 patients participated in the study conducted at a regional teaching hospital in eastern Taiwan, and 51 of them completed the questionnaire twice, with a response rate of 27.1%. Although the second survey round suffered from incomplete data and a low response rate, this study shows that online research methodology can still be improved and that using electronic questionnaires for data collection and statistical analysis reduces the risk of errors in online research and saves documentation time. It is recommended to provide clear and detailed instructions when conducting online surveys and to review responses carefully upon completion to ensure the completeness of the data collected.},
}
@article {pmid39041916,
year = {2024},
author = {Nguyen, H and Pham, VD and Nguyen, H and Tran, B and Petereit, J and Nguyen, T},
title = {CCPA: cloud-based, self-learning modules for consensus pathway analysis using GO, KEGG and Reactome.},
journal = {Briefings in bioinformatics},
volume = {25},
number = {Supplement_1},
pages = {},
doi = {10.1093/bib/bbae222},
pmid = {39041916},
issn = {1477-4054},
support = {2343019 and 2203236//National Science Foundation/ ; 80NSSC22M0255/NASA/NASA/United States ; GM103440 and 1R44GM152152-01/GM/NIGMS NIH HHS/United States ; 1U01CA274573-01A1/CA/NCI NIH HHS/United States ; },
mesh = {*Cloud Computing ; *Software ; Humans ; Computational Biology/methods/education ; Animals ; Gene Ontology ; },
abstract = {This manuscript describes the development of a resource module that is part of a learning platform named 'NIGMS Sandbox for Cloud-based Learning' (https://github.com/NIGMS/NIGMS-Sandbox). The module delivers learning materials on Cloud-based Consensus Pathway Analysis in an interactive format that uses appropriate cloud resources for data access and analyses. Pathway analysis is important because it allows us to gain insights into biological mechanisms underlying conditions. But the availability of many pathway analysis methods, the requirement of coding skills, and the focus of current tools on only a few species all make it very difficult for biomedical researchers to self-learn and perform pathway analysis efficiently. Furthermore, there is a lack of tools that allow researchers to compare analysis results obtained from different experiments and different analysis methods to find consensus results. To address these challenges, we have designed a cloud-based, self-learning module that provides consensus results among established, state-of-the-art pathway analysis techniques to provide students and researchers with necessary training and example materials. The training module consists of five Jupyter Notebooks that provide complete tutorials for the following tasks: (i) process expression data, (ii) perform differential analysis, visualize and compare the results obtained from four differential analysis methods (limma, t-test, edgeR, DESeq2), (iii) process three pathway databases (GO, KEGG and Reactome), (iv) perform pathway analysis using eight methods (ORA, CAMERA, KS test, Wilcoxon test, FGSEA, GSA, SAFE and PADOG) and (v) combine results of multiple analyses. We also provide examples, source code, explanations and instructional videos for trainees to complete each Jupyter Notebook. The module supports the analysis for many model (e.g. human, mouse, fruit fly, zebra fish) and non-model species. The module is publicly available at https://github.com/NIGMS/Consensus-Pathway-Analysis-in-the-Cloud.},
}
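Two of the notebook tasks listed above, over-representation analysis and combining results across analyses, reduce to a few lines and are worth seeing concretely. A hedged Python sketch (the module itself teaches these steps in Jupyter notebooks; the gene sets and companion p-values below are toy placeholders):

from scipy.stats import combine_pvalues, hypergeom

def ora_pvalue(pathway_genes, de_genes, universe_size):
    """P(overlap >= observed) under the hypergeometric null (classic ORA)."""
    overlap = len(pathway_genes & de_genes)
    return hypergeom.sf(overlap - 1, universe_size,
                        len(pathway_genes), len(de_genes))

pathway = {"TP53", "MDM2", "CDKN1A", "BAX"}          # toy gene set
de_genes = {"TP53", "BAX", "EGFR", "MYC"}            # toy differential list
p_ora = ora_pvalue(pathway, de_genes, universe_size=20000)

# Naive consensus across methods via Fisher's method (other p-values invented):
stat, p_consensus = combine_pvalues([p_ora, 0.03, 0.008], method="fisher")
print(p_ora, p_consensus)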
@article {pmid39041915,
year = {2024},
author = {Woessner, AE and Anjum, U and Salman, H and Lear, J and Turner, JT and Campbell, R and Beaudry, L and Zhan, J and Cornett, LE and Gauch, S and Quinn, KP},
title = {Identifying and training deep learning neural networks on biomedical-related datasets.},
journal = {Briefings in bioinformatics},
volume = {25},
number = {Supplement_1},
pages = {},
doi = {10.1093/bib/bbae232},
pmid = {39041915},
issn = {1477-4054},
support = {R01EB031032/NH/NIH HHS/United States ; NIH P20GM139768//Arkansas Integrative Metabolic Research Center/ ; 3P20GM103429-21S2//National Institutes of General Medical Sciences (NIGMS)/ ; },
mesh = {*Deep Learning ; *Neural Networks, Computer ; Humans ; Biomedical Research ; Algorithms ; Cloud Computing ; },
abstract = {This manuscript describes the development of a resource module that is part of a learning platform named 'NIGMS Sandbox for Cloud-based Learning' https://github.com/NIGMS/NIGMS-Sandbox. The overall genesis of the Sandbox is described in the editorial NIGMS Sandbox at the beginning of this Supplement. This module delivers learning materials on implementing deep learning algorithms for biomedical image data in an interactive format that uses appropriate cloud resources for data access and analyses. Biomedical-related datasets are widely used in both research and clinical settings, but interpreting them becomes increasingly difficult for professionally trained clinicians and researchers as the size and breadth of these datasets grow. Artificial intelligence, and specifically deep learning neural networks, have recently become an important tool in novel biomedical research. However, their use is limited by their computational requirements and by confusion regarding different neural network architectures. The goal of this learning module is to introduce types of deep learning neural networks and cover practices that are commonly used in biomedical research. This module is subdivided into four submodules that cover classification, augmentation, segmentation and regression. Each complementary submodule was written on the Google Cloud Platform and contains detailed code and explanations, as well as quizzes and challenges to facilitate user training. Overall, the goal of this learning module is to enable users to identify and integrate the correct type of neural network with their data while highlighting the ease-of-use of cloud computing for implementing neural networks.},
}
@article {pmid39041914,
year = {2024},
author = {O'Connell, KA and Kopchick, B and Carlson, T and Belardo, D and Byrum, SD},
title = {Understanding proteome quantification in an interactive learning module on Google Cloud Platform.},
journal = {Briefings in bioinformatics},
volume = {25},
number = {Supplement_1},
pages = {},
doi = {10.1093/bib/bbae235},
pmid = {39041914},
issn = {1477-4054},
support = {//UAMS Winthrop P. Rockefeller Cancer Institute/ ; OIA-1946391//National Science Foundation Award/ ; R24GM137786//National Institutes of Health National Institute of General Medical Sciences (NIH/NIGMS)/ ; },
mesh = {*Cloud Computing ; *Proteome/metabolism ; *Proteomics/methods ; *Software ; Mass Spectrometry ; Humans ; },
abstract = {This manuscript describes the development of a resource module that is part of a learning platform named 'NIGMS Sandbox for Cloud-based Learning' https://github.com/NIGMS/NIGMS-Sandbox. The overall genesis of the Sandbox is described in the editorial NIGMS Sandbox at the beginning of this Supplement. This module delivers learning materials on protein quantification in an interactive format that uses appropriate cloud resources for data access and analyses. Quantitative proteomics is a rapidly growing discipline due to the cutting-edge technologies of high resolution mass spectrometry. There are many data types to consider for proteome quantification, including data dependent acquisition, data independent acquisition, multiplexing with Tandem Mass Tag reporter ions, spectral counts, and more. As part of the NIH NIGMS Sandbox effort, we developed a learning module to introduce students to mass spectrometry terminology, normalization methods, statistical designs, and basics of R programming. By utilizing the Google Cloud environment, the learning module is easily accessible without the need for complex installation procedures. The proteome quantification module demonstrates the analysis of a provided TMT10plex data set using MS3 reporter ion intensity quantitative values in a Jupyter notebook with an R kernel. The learning module begins with the raw intensities, performs normalization and differential abundance analysis using limma models, and is designed for researchers with a basic understanding of mass spectrometry and the R programming language. Learners walk away with a better understanding of how to navigate the Google Cloud Platform for proteomic research, and with the basics of mass spectrometry data analysis at the command line.},
}
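The core analysis taught above is median normalization of log2 reporter-ion intensities followed by limma-based differential abundance in R. The following Python analogue sketches the same two steps on simulated data, with a plain t-test standing in for limma's moderated statistics and a 5-vs-5 channel design assumed:

import numpy as np
from scipy.stats import ttest_ind

rng = np.random.default_rng(0)
raw = rng.lognormal(mean=10, sigma=1, size=(100, 10))  # 100 proteins x 10 TMT channels
log2int = np.log2(raw)

# Median-center each channel so channel loadings are comparable.
norm = log2int - np.median(log2int, axis=0, keepdims=True)

groups = np.array([0] * 5 + [1] * 5)                   # assumed experimental design
t, p = ttest_ind(norm[:, groups == 0], norm[:, groups == 1], axis=1)
print("proteins with p < 0.05:", int((p < 0.05).sum()))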
@article {pmid39041913,
year = {2024},
author = {Qin, Y and Maggio, A and Hawkins, D and Beaudry, L and Kim, A and Pan, D and Gong, T and Fu, Y and Yang, H and Deng, Y},
title = {Whole-genome bisulfite sequencing data analysis learning module on Google Cloud Platform.},
journal = {Briefings in bioinformatics},
volume = {25},
number = {Supplement_1},
pages = {},
doi = {10.1093/bib/bbae236},
pmid = {39041913},
issn = {1477-4054},
support = {P20GM103466/NH/NIH HHS/United States ; },
mesh = {*Cloud Computing ; *DNA Methylation ; *Whole Genome Sequencing/methods ; *Software ; Sulfites/chemistry ; Humans ; Epigenesis, Genetic ; Computational Biology/methods ; },
abstract = {This study describes the development of a resource module that is part of a learning platform named 'NIGMS Sandbox for Cloud-based Learning' https://github.com/NIGMS/NIGMS-Sandbox. The overall genesis of the Sandbox is described in the editorial NIGMS Sandbox at the beginning of this Supplement. This module is designed to facilitate interactive learning of whole-genome bisulfite sequencing (WGBS) data analysis utilizing cloud-based tools in Google Cloud Platform, such as Cloud Storage, Vertex AI notebooks and Google Batch. WGBS is a powerful technique that can provide comprehensive insights into DNA methylation patterns at single cytosine resolution, essential for understanding epigenetic regulation across the genome. The designed learning module first provides step-by-step tutorials that guide learners through the two main stages of WGBS data analysis, preprocessing and the identification of differentially methylated regions. It then provides a streamlined workflow and demonstrates how to use it effectively for large datasets, given the power of cloud infrastructure. The integration of these interconnected submodules progressively deepens the user's understanding of the WGBS analysis process along with the use of cloud resources. Through this module, we can enhance the accessibility and adoption of cloud computing in epigenomic research, speeding up advancements in the field and beyond.},
}
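The second stage the module teaches, identifying differential methylation from bisulfite counts, can be illustrated at a single CpG. A toy Python sketch with invented counts; real WGBS pipelines aggregate and smooth over regions rather than testing lone cytosines:

from scipy.stats import fisher_exact

# (methylated, unmethylated) read counts at one CpG in two conditions (toy values)
control = [18, 2]
treated = [7, 13]
odds, p = fisher_exact([control, treated])
print(f"odds ratio {odds:.2f}, p = {p:.4f}")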
@article {pmid39041912,
year = {2024},
author = {Hemme, CL and Beaudry, L and Yosufzai, Z and Kim, A and Pan, D and Campbell, R and Price, M and Cho, BP},
title = {A cloud-based learning module for biomarker discovery.},
journal = {Briefings in bioinformatics},
volume = {25},
number = {Supplement_1},
pages = {},
doi = {10.1093/bib/bbae126},
pmid = {39041912},
issn = {1477-4054},
support = {P20GM103430/NH/NIH HHS/United States ; },
mesh = {*Cloud Computing ; *Biomarkers/metabolism ; Animals ; Software ; Humans ; Rats ; Machine Learning ; Computational Biology/methods ; },
abstract = {This manuscript describes the development of a resource module that is part of a learning platform named "NIGMS Sandbox for Cloud-based Learning" https://github.com/NIGMS/NIGMS-Sandbox. The overall genesis of the Sandbox is described in the editorial NIGMS Sandbox at the beginning of this Supplement. This module delivers learning materials on basic principles in biomarker discovery in an interactive format that uses appropriate cloud resources for data access and analyses. In collaboration with Google Cloud, Deloitte Consulting and NIGMS, the Rhode Island INBRE Molecular Informatics Core developed a cloud-based training module for biomarker discovery. The module consists of nine submodules covering various topics on biomarker discovery and assessment and is deployed on the Google Cloud Platform and available for public use through the NIGMS Sandbox. The submodules are written as a series of Jupyter Notebooks utilizing R and Bioconductor for biomarker and omics data analysis. The submodules cover the following topics: 1) introduction to biomarkers; 2) introduction to R data structures; 3) introduction to linear models; 4) introduction to exploratory analysis; 5) rat renal ischemia-reperfusion injury case study; 6) linear and logistic regression for comparison of quantitative biomarkers; 7) exploratory analysis of proteomics IRI data; 8) identification of IRI biomarkers from proteomic data; and 9) machine learning methods for biomarker discovery. Each notebook includes an in-line quiz for self-assessment on the submodule topic and an overview video is available on YouTube (https://www.youtube.com/watch?v=2-Q9Ax8EW84).},
}
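Submodule 6 above compares quantitative biomarkers using linear and logistic regression. A minimal Python sketch of that idea on synthetic data, scoring a candidate marker by ROC AUC (the module's own notebooks use R and Bioconductor, so this is an analogue, not the module's code):

import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score

rng = np.random.default_rng(1)
n = 200
injury = rng.integers(0, 2, n)                 # outcome, e.g. IRI vs sham (synthetic)
marker = injury * 1.5 + rng.normal(size=n)     # an informative candidate biomarker
X = marker.reshape(-1, 1)

clf = LogisticRegression().fit(X, injury)
auc = roc_auc_score(injury, clf.predict_proba(X)[:, 1])
print(f"in-sample AUC = {auc:.2f}")            # cross-validate in real analyses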
@article {pmid39041911,
year = {2024},
author = {Wilkins, OM and Campbell, R and Yosufzai, Z and Doe, V and Soucy, SM},
title = {Cloud-based introduction to BASH programming for biologists.},
journal = {Briefings in bioinformatics},
volume = {25},
number = {Supplement_1},
pages = {},
doi = {10.1093/bib/bbae244},
pmid = {39041911},
issn = {1477-4054},
support = {P20GM130454//National Institutes of General Medical Science/ ; },
mesh = {*Cloud Computing ; *Software ; *Computational Biology/methods ; Programming Languages ; High-Throughput Nucleotide Sequencing/methods ; Genomics/methods ; Humans ; },
abstract = {This manuscript describes the development of a resource module that is part of a learning platform named 'NIGMS Sandbox for Cloud-based Learning', https://github.com/NIGMS/NIGMS-Sandbox. The overall genesis of the Sandbox is described in the editorial authored by National Institute of General Medical Sciences: NIGMS Sandbox: A Learning Platform toward Democratizing Cloud Computing for Biomedical Research at the beginning of this supplement. This module delivers learning materials introducing the utility of the BASH (Bourne Again Shell) programming language for genomic data analysis in an interactive format that uses appropriate cloud resources for data access and analyses. The next-generation sequencing revolution has generated massive amounts of novel biological data from a multitude of platforms that survey an ever-growing list of genomic modalities. These data require significant downstream computational and statistical analyses to glean meaningful biological insights. However, the skill sets required to generate these data are vastly different from the skills required to analyze them. Bench scientists who generate next-generation data often lack the training required to analyze these datasets and require support from bioinformatics specialists. Dedicated computational training is required to empower biologists in the area of genomic data analysis; however, learning to efficiently use a command line interface is a significant barrier to learning common analytical tools. Cloud platforms have the potential to democratize access to the technical tools and computational resources necessary to work with modern sequencing data, providing an effective framework for bioinformatics education. This module aims to provide an interactive platform that gradually builds the technical skills and knowledge needed to interact with genomics data on the command line in the Cloud. The sandbox format of this module enables users to move through the material at their own pace and test their grasp of the material with knowledge self-checks before building on that material in the next sub-module.},
}
@article {pmid39041910,
year = {2024},
author = {Veerappa, AM and Rowley, MJ and Maggio, A and Beaudry, L and Hawkins, D and Kim, A and Sethi, S and Sorgen, PL and Guda, C},
title = {CloudATAC: a cloud-based framework for ATAC-Seq data analysis.},
journal = {Briefings in bioinformatics},
volume = {25},
number = {Supplement_1},
pages = {},
doi = {10.1093/bib/bbae090},
pmid = {39041910},
issn = {1477-4054},
support = {NIH/NIGMS P20 GM103427//NOSI supplement to the parent IDeA Networks of Biomedical Research Excellence (INBRE) Program/ ; },
mesh = {*Cloud Computing ; *Software ; *High-Throughput Nucleotide Sequencing/methods ; Humans ; Computational Biology/methods ; Chromatin Immunoprecipitation Sequencing/methods ; Single-Cell Analysis/methods ; Chromatin/genetics/metabolism ; },
abstract = {Assay for transposase-accessible chromatin with high-throughput sequencing (ATAC-seq) generates genome-wide chromatin accessibility profiles, providing valuable insights into epigenetic gene regulation at both pooled-cell and single-cell population levels. Comprehensive analysis of ATAC-seq data involves the use of various interdependent programs. Learning the correct sequence of steps needed to process the data can represent a major hurdle. Selecting appropriate parameters at each stage, including pre-analysis, core analysis, and advanced downstream analysis, is important to ensure accurate analysis and interpretation of ATAC-seq data. Additionally, obtaining and working within a limited computational environment presents a significant challenge to non-bioinformatic researchers. Therefore, we present CloudATAC, an open-source, cloud-based interactive framework offering scalable, flexible, and streamlined analysis based on best-practice approaches for pooled-cell and single-cell ATAC-seq data. The framework uses the on-demand computational power and memory, scalability, and secure and compliant environment provided by the Google Cloud. Additionally, we leverage Jupyter Notebook's interactive computing platform, which combines live code, tutorials, narrative text, flashcards, quizzes, and custom visualizations to enhance learning and analysis. Further, leveraging GPU instances has significantly improved the run-time of the single-cell framework. The source codes and data are publicly available through NIH Cloud Lab https://github.com/NIGMS/ATAC-Seq-and-Single-Cell-ATAC-Seq-Analysis.},
}
@article {pmid39040324,
year = {2024},
author = {Almalawi, A and Zafar, A and Unhelkar, B and Hassan, S and Alqurashi, F and Khan, AI and Fahad, A and Alam, MM},
title = {Enhancing security in smart healthcare systems: Using intelligent edge computing with a novel Salp Swarm Optimization and radial basis neural network algorithm.},
journal = {Heliyon},
volume = {10},
number = {13},
pages = {e33792},
pmid = {39040324},
issn = {2405-8440},
abstract = {A smart healthcare system (SHS) is a health service system that employs advanced technologies such as wearable devices, the Internet of Things (IoT), and mobile internet to dynamically access information and connect people and institutions related to healthcare, thereby actively managing and responding to medical ecosystem needs. Edge computing (EC) plays a significant role in SHS as it enables real-time data processing and analysis at the data source, which reduces latency and improves medical intervention speed. However, the integration of patient information, including electronic health records (EHRs), into the SHS framework induces security and privacy concerns. To address these issues, an intelligent EC framework was proposed in this study. The objective of this study is to accurately identify security threats and ensure secure data transmission in the SHS environment. The proposed EC framework leverages the effectiveness of Salp Swarm Optimization and Radial Basis Functional Neural Network (SS-RBFN) for enhancing security and data privacy. The proposed methodology commences with the collection of healthcare information, which is then pre-processed to ensure the consistency and quality of the database for further analysis. Subsequently, the SS-RBFN algorithm was trained using the pre-processed database to distinguish between normal and malicious data streams accurately, offering continuous monitoring in the SHS environment. Additionally, a Rivest-Shamir-Adleman (RSA) approach was applied to safeguard data against security threats during transmission to cloud storage. The proposed model was trained and validated using the IoT-based healthcare database available at Kaggle, and the experimental results demonstrated that it achieved 99.87% accuracy, 99.76% precision, 99.49% F-measure, 98.99% recall, 97.37% throughput, and 1.2 s latency. Furthermore, the results achieved by the proposed model were compared with the existing models to validate its effectiveness in enhancing security.},
}
@article {pmid39038028,
year = {2024},
author = {Pulido-Gaytan, B and Tchernykh, A},
title = {Self-learning activation functions to increase accuracy of privacy-preserving Convolutional Neural Networks with homomorphic encryption.},
journal = {PloS one},
volume = {19},
number = {7},
pages = {e0306420},
doi = {10.1371/journal.pone.0306420},
pmid = {39038028},
issn = {1932-6203},
mesh = {*Neural Networks, Computer ; *Computer Security ; *Privacy ; Humans ; Algorithms ; Cloud Computing ; },
abstract = {The widespread adoption of cloud computing necessitates privacy-preserving techniques that allow information to be processed without disclosure. This paper proposes a method to increase the accuracy and performance of privacy-preserving Convolutional Neural Networks with Homomorphic Encryption (CNN-HE) by Self-Learning Activation Functions (SLAF). SLAFs are polynomials with trainable coefficients updated during training, together with synaptic weights, for each polynomial independently, to learn task-specific and CNN-specific features. We theoretically prove the feasibility of approximating any continuous activation function to the desired error as a function of the SLAF degree. Two CNN-HE models are proposed: CNN-HE-SLAF and CNN-HE-SLAF-R. In the first model, all activation functions are replaced by SLAFs, and the CNN is trained to find weights and coefficients. In the second one, the CNN is trained with the original activation, then the weights are fixed, the activation is substituted by SLAF, and the CNN is briefly re-trained to adapt the SLAF coefficients. We show that such self-learning can achieve the same accuracy (99.38%) as a non-polynomial ReLU in non-homomorphic CNNs and lead to an increase in accuracy (99.21%) and higher performance (6.26 times faster) compared with the state-of-the-art CNN-HE CryptoNets on the MNIST optical character recognition benchmark dataset.},
}
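The central object above, a polynomial activation with coefficients trained alongside the weights, is easy to sketch. A minimal PyTorch version assuming degree 3 and an identity-like initialization; the paper's exact degree, initialization, and re-training schedule are not reproduced here:

import torch
import torch.nn as nn

class SLAF(nn.Module):
    """Self-learning activation: f(x) = sum_k c_k * x^k with trainable c_k."""
    def __init__(self, degree: int = 3):
        super().__init__()
        self.coeffs = nn.Parameter(torch.zeros(degree + 1))
        with torch.no_grad():
            self.coeffs[1] = 1.0   # start near f(x) = x

    def forward(self, x):
        powers = torch.stack([x ** k for k in range(len(self.coeffs))], dim=-1)
        return (powers * self.coeffs).sum(dim=-1)

net = nn.Sequential(nn.Linear(4, 8), SLAF(), nn.Linear(8, 2))
print(net(torch.randn(5, 4)).shape)  # torch.Size([5, 2])

Because the activation is purely polynomial, the trained network stays evaluable under leveled homomorphic encryption schemes, which is the point of replacing ReLU.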
@article {pmid39028603,
year = {2024},
author = {Luo, W and Huang, K and Liang, X and Ren, H and Zhou, N and Zhang, C and Yang, C and Gui, W},
title = {Process Manufacturing Intelligence Empowered by Industrial Metaverse: A Survey.},
journal = {IEEE transactions on cybernetics},
volume = {PP},
number = {},
pages = {},
doi = {10.1109/TCYB.2024.3420958},
pmid = {39028603},
issn = {2168-2275},
abstract = {The intelligent goal of process manufacturing is to achieve high efficiency and greening of the entire production process. However, the information systems it uses are functionally independent, resulting in knowledge gaps between levels, and decision-making still requires substantial manual work by knowledge workers. The industrial metaverse is a necessary means of bridging these knowledge gaps through sharing and collaborative decision-making. Considering the safety and stability requirements of process manufacturing, this article conducts a thorough survey of process manufacturing intelligence empowered by the industrial metaverse. First, it analyzes the current status and challenges of process manufacturing intelligence, and then summarizes the latest developments in key enabling technologies of the industrial metaverse, such as interconnection technologies, artificial intelligence, cloud-edge computing, digital twin (DT), immersive interaction, and blockchain technology. On this basis, taking into account the characteristics of process manufacturing, a construction approach and architecture for the process industrial metaverse is proposed: a virtual-real fused industrial metaverse construction method that combines DTs with physical avatars, which can effectively ensure the safety of the metaverse's application in industrial scenarios. Finally, we conduct a preliminary exploration to demonstrate the feasibility of the proposed method.},
}
@article {pmid39024163,
year = {2024},
author = {McCoy, ES and Park, SK and Patel, RP and Ryan, DF and Mullen, ZJ and Nesbitt, JJ and Lopez, JE and Taylor-Blake, B and Vanden, KA and Krantz, JL and Hu, W and Garris, RL and Snyder, MG and Lima, LV and Sotocinal, SG and Austin, JS and Kashlan, AD and Shah, S and Trocinski, AK and Pudipeddi, SS and Major, RM and Bazick, HO and Klein, MR and Mogil, JS and Wu, G and Zylka, MJ},
title = {Development of PainFace software to simplify, standardize, and scale up mouse grimace analyses.},
journal = {Pain},
volume = {165},
number = {8},
pages = {1793-1805},
doi = {10.1097/j.pain.0000000000003187},
pmid = {39024163},
issn = {1872-6623},
support = {R01NS114259//National Institute of Neurological Disorders and Stroke, National Science Foundation/ ; },
mesh = {Animals ; Mice ; *Facial Expression ; Female ; *Software/standards ; *Mice, Inbred C57BL ; *Pain Measurement/methods/standards ; Male ; Pain/diagnosis ; },
abstract = {Facial grimacing is used to quantify spontaneous pain in mice and other mammals, but scoring relies on humans with different levels of proficiency. Here, we developed a cloud-based software platform called PainFace (http://painface.net) that uses machine learning to detect 4 facial action units of the mouse grimace scale (orbitals, nose, ears, whiskers) and score facial grimaces of black-coated C57BL/6 male and female mice on a 0 to 8 scale. Platform accuracy was validated in 2 different laboratories, with 3 conditions that evoke grimacing-laparotomy surgery, bilateral hindpaw injection of carrageenan, and intraplantar injection of formalin. PainFace can generate up to 1 grimace score per second from a standard 30 frames/s video, making it possible to quantify facial grimacing over time, and operates at a speed that scales with computing power. By analyzing the frequency distribution of grimace scores, we found that mice spent 7x more time in a "high grimace" state following laparotomy surgery relative to sham surgery controls. Our study shows that PainFace reproducibly quantifies facial grimaces indicative of nonevoked spontaneous pain and enables laboratories to standardize and scale-up facial grimace analyses.},
}
@article {pmid39022436,
year = {2024},
author = {Malakhov, KS},
title = {Innovative Hybrid Cloud Solutions for Physical Medicine and Telerehabilitation Research.},
journal = {International journal of telerehabilitation},
volume = {16},
number = {1},
pages = {e6635},
pmid = {39022436},
issn = {1945-2020},
abstract = {PURPOSE: The primary objective of this study was to develop and implement a Hybrid Cloud Environment for Telerehabilitation (HCET) to enhance patient care and research in the Physical Medicine and Rehabilitation (PM&R) domain. This environment aims to integrate advanced information and communication technologies to support both traditional in-person therapy and digital health solutions.
BACKGROUND: Telerehabilitation is emerging as a core component of modern healthcare, especially within the PM&R field. By applying digital health technologies, telerehabilitation provides continuous, comprehensive support for patient rehabilitation, bridging the gap between traditional therapy and remote healthcare delivery. This study focuses on the design and implementation of a hybrid HCET system tailored for the PM&R domain.
METHODS: The study involved the development of a comprehensive architectural and structural organization for the HCET, including a three-layer model (infrastructure, platform, service layers). Core components of the HCET were designed and implemented, such as the Hospital Information System (HIS) for PM&R, the MedRehabBot system, and the MedLocalGPT project. These components were integrated using advanced technologies like large language models (LLMs), word embeddings, and ontology-related approaches, along with APIs for enhanced functionality and interaction.
FINDINGS: The HCET system was successfully implemented and is operational, providing a robust platform for telerehabilitation. Key features include the MVP of the HIS for PM&R, supporting patient profile management and rehabilitation goal tracking; the MedRehabBot and WhiteBookBot systems; and the MedLocalGPT project, which offers sophisticated querying capabilities and access to extensive domain-specific knowledge. The system supports both Ukrainian and English languages, ensuring broad accessibility and usability.
INTERPRETATION: The practical implementation and operation of the HCET system demonstrate its potential to transform telerehabilitation within the PM&R domain. By integrating advanced technologies and providing comprehensive digital health solutions, the HCET enhances patient care, supports ongoing rehabilitation, and facilitates advanced research. Future work will focus on optimizing services and expanding language support to further improve the system's functionality and impact.},
}
@article {pmid39016361,
year = {2024},
author = {Idalino, FD and Rosa, KKD and Hillebrand, FL and Arigony-Neto, J and Mendes, CW and Simões, JC},
title = {Variability in wet and dry snow radar zones in the North of the Antarctic Peninsula using a cloud computing environment.},
journal = {Anais da Academia Brasileira de Ciencias},
volume = {96},
number = {suppl 2},
pages = {e20230704},
doi = {10.1590/0001-3765202420230704},
pmid = {39016361},
issn = {1678-2690},
mesh = {Antarctic Regions ; *Snow ; *Radar ; *Cloud Computing ; Seasons ; Environmental Monitoring/methods ; Temperature ; },
abstract = {This work investigated the annual variations in the dry snow (DSRZ) and wet snow (WSRZ) radar zones in the north of the Antarctic Peninsula between 2015 and 2023. A specific code for snow zone detection on Sentinel-1 images was created in Google Earth Engine by combining the CryoSat-2 digital elevation model and air temperature data from ERA5. Regions with backscatter coefficient (σ0) values exceeding -6.5 dB were considered the extent of surface melt occurrence, and the dry snow line was taken to coincide with the -11 °C isotherm of the average annual air temperature. The annual variation in WSRZ exhibited moderate correlations with annual average air temperature, total precipitation, and the sum of annual degree-days. However, statistical tests indicated low determination coefficients and no significant trends relating DSRZ behavior to atmospheric variables. The reduction in DSRZ area in 2019/2020 and 2020/2021 compared to 2018/2019 indicates an upward shift of the dry snow line in this region of the AP. The methodology demonstrated its efficacy for both quantitative and qualitative analyses of data obtained in digital processing environments, allowing large-scale monitoring of spatial and temporal variations and improving understanding of changes in glacier mass loss.},
}
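The detection rule above (wet snow where Sentinel-1 backscatter exceeds -6.5 dB) maps directly onto a few Earth Engine calls. A sketch using the Earth Engine Python API; the area of interest, date window, and HH polarization choice are illustrative assumptions, not the paper's exact configuration:

import ee

ee.Initialize()

aoi = ee.Geometry.Rectangle([-60.0, -64.5, -57.0, -63.0])  # assumed AOI, N Antarctic Peninsula
s1 = (ee.ImageCollection("COPERNICUS/S1_GRD")
        .filterBounds(aoi)
        .filterDate("2020-01-01", "2020-02-01")
        .filter(ee.Filter.listContains("transmitterReceiverPolarisation", "HH"))
        .select("HH"))

melt = s1.mean().gt(-6.5).selfMask().rename("wet_snow")    # sigma0 > -6.5 dB threshold
area = melt.multiply(ee.Image.pixelArea()).reduceRegion(
    reducer=ee.Reducer.sum(), geometry=aoi, scale=100, maxPixels=1e10)
print(area.getInfo())  # wet-snow area in m^2 over the AOI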
@article {pmid39008420,
year = {2024},
author = {Lee, G and Connor, CW},
title = {"Alexa, Cycle The Blood Pressure": A Voice Control Interface Method for Anesthesia Monitoring.},
journal = {Anesthesia and analgesia},
volume = {},
number = {},
pages = {},
doi = {10.1213/ANE.0000000000007003},
pmid = {39008420},
issn = {1526-7598},
abstract = {BACKGROUND: Anesthesia monitors and devices are usually controlled with some combination of dials, keypads, a keyboard, or a touch screen. Thus, anesthesiologists can operate their monitors only when they are physically close to them, and not otherwise task-loaded with sterile procedures such as line or block placement. Voice recognition technology has become commonplace and may offer advantages in anesthesia practice such as reducing surface contamination rates and allowing anesthesiologists to effect changes in monitoring and therapy when they would otherwise presently be unable to do so. We hypothesized that this technology is practicable and that anesthesiologists would consider it useful.
METHODS: A novel voice-driven prototype controller was designed for the GE Solar 8000M anesthesia patient monitor. The apparatus was implemented using a Raspberry Pi 4 single-board computer, an external conference audio device, a Google Cloud Speech-to-Text platform, and a modified Solar controller to effect commands. Fifty anesthesia providers tested the prototype. Evaluations and surveys were completed in a nonclinical environment to avoid any ethical or safety concerns regarding the use of the device in direct patient care. All anesthesiologists sampled were fluent English speakers; many with inflections from their first language or national origin, reflecting diversity in the population of practicing anesthesiologists.
RESULTS: The prototype was uniformly well-received by anesthesiologists. Ease-of-use, usefulness, and effectiveness were assessed on a Likert scale with means of 9.96, 7.22, and 8.48 of 10, respectively. No population cofactors were associated with these results. Advancing level of training (eg, nonattending versus attending) was not correlated with any preference. Accent of country or region was not correlated with any preference. Vocal pitch register did not correlate with any preference. Statistical analyses were performed with analysis of variance and the unpaired t-test.
CONCLUSIONS: The use of voice recognition to control operating room monitors was well-received by anesthesia providers. Additional commands are easily implemented on the prototype controller. No adverse relationship was found between acceptability and level of anesthesia experience, pitch of voice, or presence of accent. Voice recognition is a promising method of controlling anesthesia monitors and devices that could potentially increase usability and situational awareness in circumstances where the anesthesiologist is otherwise out-of-position or task-loaded.},
}
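A rough sketch of the pipeline the prototype implements: transcribe a short utterance with Google Cloud Speech-to-Text, then map keywords to a monitor command. The keyword table and the NIBP_START opcode are hypothetical stand-ins, since the modified Solar controller's command set is not described in the abstract:

from google.cloud import speech

def transcribe(wav_bytes: bytes) -> str:
    client = speech.SpeechClient()
    audio = speech.RecognitionAudio(content=wav_bytes)
    config = speech.RecognitionConfig(
        encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16,
        sample_rate_hertz=16000,
        language_code="en-US",
    )
    response = client.recognize(config=config, audio=audio)
    return " ".join(r.alternatives[0].transcript for r in response.results)

def to_command(text: str):
    """Map recognized speech to a hypothetical monitor opcode."""
    if "blood pressure" in text.lower():
        return "NIBP_START"
    return None

# Usage: print(to_command(transcribe(open("utterance.wav", "rb").read())))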
@article {pmid39007702,
year = {2024},
author = {Hsu, WT and Shirts, MR},
title = {Replica Exchange of Expanded Ensembles: A Generalized Ensemble Approach with Enhanced Flexibility and Parallelizability.},
journal = {Journal of chemical theory and computation},
volume = {},
number = {},
pages = {},
doi = {10.1021/acs.jctc.4c00484},
pmid = {39007702},
issn = {1549-9626},
abstract = {Generalized ensemble methods such as Hamiltonian replica exchange (HREX) and expanded ensemble (EE) have been shown effective in free energy calculations for various contexts, given their ability to circumvent free energy barriers via nonphysical pathways defined by states with different modified Hamiltonians. However, both HREX and EE methods come with drawbacks, such as limited flexibility in parameter specification or the lack of parallelizability for more complicated applications. To address this challenge, we present the method of replica exchange of expanded ensembles (REXEE), which integrates the principles of HREX and EE methods by periodically exchanging coordinates of EE replicas sampling different yet overlapping sets of alchemical states. With the solvation free energy calculation of anthracene and binding free energy calculation of the CB7-10 binding complex, we show that the REXEE method achieves the same level of accuracy in free energy calculations as the HREX and EE methods, while offering enhanced flexibility and parallelizability. Additionally, we examined REXEE simulations with various setups to understand how different exchange frequencies and replica configurations influence the sampling efficiency in the fixed-weight phase and the weight convergence in the weight-updating phase. The REXEE approach can be further extended to support asynchronous parallelization schemes, allowing looser communications between larger numbers of loosely coupled processors such as cloud computing and therefore promising much more scalable and adaptive executions of alchemical free energy calculations. All algorithms for the REXEE method are available in the Python package ensemble_md, which offers an interface for REXEE simulation management without modifying the source code in GROMACS.},
}
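For reference, the coordinate swaps REXEE performs are accepted with the usual generalized-ensemble Metropolis criterion; the form below is the standard same-temperature expression (stated here as background, not quoted from the paper), for replicas currently at alchemical states m and n with configurations x_i and x_j:

P_{\mathrm{acc}} = \min\left\{ 1,\ \exp\!\left[ -\beta \left( U_m(x_j) + U_n(x_i) - U_m(x_i) - U_n(x_j) \right) \right] \right\},

where U_m is the potential energy of alchemical state m and \beta = 1/(k_B T). The REXEE twist is that m and n are drawn from overlapping state sets owned by different expanded-ensemble replicas, which is what restores parallelizability without giving up EE's flexibility.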
@article {pmid39003319,
year = {2024},
author = {Nyangaresi, VO and Abduljabbar, ZA and Mutlaq, KA and Bulbul, SS and Ma, J and Aldarwish, AJY and Honi, DG and Al Sibahee, MA and Neamah, HA},
title = {Smart city energy efficient data privacy preservation protocol based on biometrics and fuzzy commitment scheme.},
journal = {Scientific reports},
volume = {14},
number = {1},
pages = {16223},
pmid = {39003319},
issn = {2045-2322},
support = {GDRC202132//Natural Science Foundation of Top Talent of SZTU/ ; },
abstract = {Advancements in cloud computing, flying ad-hoc networks, wireless sensor networks, artificial intelligence, big data, 5th generation mobile network and internet of things have led to the development of smart cities. Owing to their massive interconnectedness, high volumes of data are collected and exchanged over the public internet. Therefore, the exchanged messages are susceptible to numerous security and privacy threats across these open public channels. Although many security techniques have been designed to address this issue, most of them are still vulnerable to attacks while some deploy computationally extensive cryptographic operations such as bilinear pairings and blockchain. In this paper, we leverage biometrics, error correction codes and fuzzy commitment schemes to develop a secure and energy efficient authentication scheme for the smart cities. This is informed by the fact that biometric data is cumbersome to reproduce and hence attacks such as side-channeling are thwarted. We formally analyze the security of our protocol using the Burrows-Abadi-Needham (BAN) logic, which shows that our scheme achieves strong mutual authentication among the communicating entities. The semantic analysis of our protocol shows that it mitigates attacks such as de-synchronization, eavesdropping, session hijacking, forgery and side-channeling. In addition, its formal security analysis demonstrates that it is secure under the Canetti and Krawczyk attack model. In terms of performance, our scheme is shown to reduce the computation overheads by 20.7% and hence is the most efficient among the state-of-the-art protocols.},
}
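The fuzzy commitment construction named above is small enough to sketch end to end: commit to a random key k by storing (hash(k), w XOR ECC(k)) for biometric template w, then recover k from a noisy re-reading. A toy Python version with a repetition code standing in for a real error-correcting code; bit lengths and primitives are illustrative, not the paper's parameters:

import hashlib
import secrets

REP = 3  # repetition factor of the toy ECC

def ecc_encode(bits):
    return [b for b in bits for _ in range(REP)]

def ecc_decode(bits):
    return [int(sum(bits[i:i + REP]) > REP // 2) for i in range(0, len(bits), REP)]

def xor(a, b):
    return [x ^ y for x, y in zip(a, b)]

key = [secrets.randbelow(2) for _ in range(8)]        # committed key bits
template = [secrets.randbelow(2) for _ in range(24)]  # enrolled biometric bits

commitment = (hashlib.sha256(bytes(key)).hexdigest(), xor(template, ecc_encode(key)))

noisy = template[:]
noisy[0] ^= 1  # one flipped bit simulates biometric noise
recovered = ecc_decode(xor(noisy, commitment[1]))
print(recovered == key,
      hashlib.sha256(bytes(recovered)).hexdigest() == commitment[0])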
@article {pmid39001087,
year = {2024},
author = {Alwakeel, AM and Alnaim, AK},
title = {Trust Management and Resource Optimization in Edge and Fog Computing Using the CyberGuard Framework.},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {13},
pages = {},
doi = {10.3390/s24134308},
pmid = {39001087},
issn = {1424-8220},
support = {XXXXXX//King Faisal University/ ; },
abstract = {The growing importance of edge and fog computing in the modern IT infrastructure is driven by the rise of decentralized applications. However, resource allocation within these frameworks is challenging due to varying device capabilities and dynamic network conditions. Conventional approaches often result in poor resource use and slowed advancements. This study presents a novel strategy for enhancing resource allocation in edge and fog computing by integrating machine learning with the blockchain for reliable trust management. Our proposed framework, called CyberGuard, leverages the blockchain's inherent immutability and decentralization to establish a trustworthy and transparent network for monitoring and verifying edge and fog computing transactions. CyberGuard combines the Trust2Vec model with conventional machine-learning models like SVM, KNN, and random forests, creating a robust mechanism for assessing trust and security risks. Through detailed optimization and case studies, CyberGuard demonstrates significant improvements in resource allocation efficiency and overall system performance in real-world scenarios. Our results highlight CyberGuard's effectiveness, evidenced by a remarkable accuracy, precision, recall, and F1-score of 98.18%, showcasing the transformative potential of our comprehensive approach in edge and fog computing environments.},
}
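CyberGuard's classifier layer combines SVM, KNN, and random forests. A minimal scikit-learn sketch of such an ensemble on synthetic transaction features; the Trust2Vec embedding and the blockchain layer are out of scope for this snippet:

from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier, VotingClassifier
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC

X, y = make_classification(n_samples=500, n_features=8, random_state=0)
Xtr, Xte, ytr, yte = train_test_split(X, y, random_state=0)

ensemble = VotingClassifier(
    estimators=[("svm", SVC(probability=True)),
                ("knn", KNeighborsClassifier()),
                ("rf", RandomForestClassifier(random_state=0))],
    voting="soft")                     # average predicted probabilities
ensemble.fit(Xtr, ytr)
print("held-out accuracy:", ensemble.score(Xte, yte))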
@article {pmid39001032,
year = {2024},
author = {Alwakeel, AM and Alnaim, AK},
title = {Network Slicing in 6G: A Strategic Framework for IoT in Smart Cities.},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {13},
pages = {},
doi = {10.3390/s24134254},
pmid = {39001032},
issn = {1424-8220},
support = {000000//King Faisal University/ ; },
abstract = {The emergence of 6G communication technologies brings both opportunities and challenges for the Internet of Things (IoT) in smart cities. In this paper, we introduce an advanced network slicing framework designed to meet the complex demands of 6G smart cities' IoT deployments. The framework development follows a detailed methodology that encompasses requirement analysis, metric formulation, constraint specification, objective setting, mathematical modeling, configuration optimization, performance evaluation, parameter tuning, and validation of the final design. Our evaluations demonstrate the framework's high efficiency, evidenced by low round-trip time (RTT), minimal packet loss, increased availability, and enhanced throughput. Notably, the framework scales effectively, managing multiple connections simultaneously without compromising resource efficiency. Enhanced security is achieved through robust features such as 256-bit encryption and a high rate of authentication success. The discussion elaborates on these findings, underscoring the framework's impressive performance, scalability, and security capabilities.},
}
@article {pmid39000973,
year = {2024},
author = {Shahid, U and Ahmed, G and Siddiqui, S and Shuja, J and Balogun, AO},
title = {Latency-Sensitive Function Placement among Heterogeneous Nodes in Serverless Computing.},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {13},
pages = {},
doi = {10.3390/s24134195},
pmid = {39000973},
issn = {1424-8220},
support = {015LA0-049//Universiti Teknologi Petronas/ ; },
abstract = {Function as a Service (FaaS) is highly beneficial to smart city infrastructure due to its flexibility, efficiency, and adaptability, specifically for integration in the digital landscape. FaaS has a serverless setup, which means that an organization no longer has to worry about specific infrastructure management tasks; the developers can focus on how to deploy and create code efficiently. Since FaaS aligns well with the IoT, it easily integrates with IoT devices, thereby making it possible to perform event-based actions and real-time computations. In our research, we offer a likelihood-based adaptive machine learning model for identifying the right placement for a function. We employ the XGBoost regressor to estimate the execution time of each function and utilize a decision tree regressor to predict network latency. By encompassing factors such as network delay, computation arrival, and resource emphasis, the machine learning model eases the selection of a placement. In our replication, we use Docker containers, focusing on serverless node type, serverless node variety, function location, deadlines, and edge-cloud topology. Thus, the primary objectives are to meet deadlines and enhance resource utilization, and we observe that effective utilization of resources leads to enhanced deadline compliance.},
}
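The placement rule described above can be prototyped in a few lines: one regressor per cost term, then pick the node minimizing the predicted total. A sketch with invented features and training data, pairing xgboost's XGBRegressor (execution time) with a scikit-learn decision tree (latency) as the abstract does:

import numpy as np
from sklearn.tree import DecisionTreeRegressor
from xgboost import XGBRegressor

rng = np.random.default_rng(0)
X = rng.random((300, 3))                       # [cpu_load, mem_free, input_size] (assumed)
exec_time = 2 * X[:, 0] + 0.5 * X[:, 2] + rng.normal(0, 0.05, 300)
latency = 1.5 * X[:, 1] + rng.normal(0, 0.05, 300)

exec_model = XGBRegressor(n_estimators=50).fit(X, exec_time)
lat_model = DecisionTreeRegressor(max_depth=4).fit(X, latency)

candidates = rng.random((5, 3))                # current state of 5 candidate nodes
total = exec_model.predict(candidates) + lat_model.predict(candidates)
print("place function on node", int(np.argmin(total)))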
@article {pmid39000960,
year = {2024},
author = {Liu, X and Dong, X and Jia, N and Zhao, W},
title = {Federated Learning-Oriented Edge Computing Framework for the IIoT.},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {13},
pages = {},
doi = {10.3390/s24134182},
pmid = {39000960},
issn = {1424-8220},
support = {2022YFB3305700//The National Key Research and Development Program of China/ ; },
abstract = {With the maturity of artificial intelligence (AI) technology, applications of AI in edge computing will greatly promote the development of industrial technology. However, the existing studies on the edge computing framework for the Industrial Internet of Things (IIoT) still face several challenges, such as deep hardware and software coupling, diverse protocols, difficult deployment of AI models, insufficient computing capabilities of edge devices, and sensitivity to delay and energy consumption. To solve the above problems, this paper proposes a software-defined AI-oriented three-layer IIoT edge computing framework and presents the design and implementation of an AI-oriented edge computing system, aiming to support device access, enable the acceptance and deployment of AI models from the cloud, and allow the whole process from data acquisition to model training to be completed at the edge. In addition, this paper proposes a time series-based method for device selection and computation offloading in the federated learning process, which selectively offloads the tasks of inefficient nodes to the edge computing center to reduce the training delay and energy consumption. Finally, experiments carried out to verify the feasibility and effectiveness of the proposed method are reported. The model training time with the proposed method is generally 30% to 50% less than that with the random device selection method, and the training energy consumption under the proposed method is generally 35% to 55% less.},
}
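A minimal sketch of the aggregation step with a naive deadline-based selection rule in the spirit of the paper's approach; the real method models per-device time series, whereas a single last-round time stands in here, and all numbers are invented:

import numpy as np

def fedavg(weight_sets, sizes):
    """Sample-size-weighted average of per-device model weights (FedAvg-style)."""
    total = sum(sizes)
    return sum(w * (n / total) for w, n in zip(weight_sets, sizes))

devices = [
    {"w": np.array([0.9, 1.1]), "n": 100, "round_time_s": 4.2},
    {"w": np.array([1.2, 0.8]), "n": 300, "round_time_s": 19.5},  # inefficient node
    {"w": np.array([1.0, 1.0]), "n": 200, "round_time_s": 5.1},
]
DEADLINE = 10.0

local = [d for d in devices if d["round_time_s"] <= DEADLINE]
offloaded = [d for d in devices if d["round_time_s"] > DEADLINE]  # train at edge center

selected = local + offloaded  # offloaded devices still contribute their updates
global_w = fedavg([d["w"] for d in selected], [d["n"] for d in selected])
print(global_w)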
@article {pmid38998801,
year = {2024},
author = {Zuo, G and Wang, R and Wan, C and Zhang, Z and Zhang, S and Yang, W},
title = {Unveiling the Evolution of Virtual Reality in Medicine: A Bibliometric Analysis of Research Hotspots and Trends over the Past 12 Years.},
journal = {Healthcare (Basel, Switzerland)},
volume = {12},
number = {13},
pages = {},
doi = {10.3390/healthcare12131266},
pmid = {38998801},
issn = {2227-9032},
support = {SZSM202311012//Sanming Project of Medicine in Shenzen Municipality/ ; },
abstract = {BACKGROUND: Virtual reality (VR), widely used in the medical field, may affect future medical training and treatment. Therefore, this study examined VR's potential uses and research directions in medicine.
METHODS: Citation data were downloaded from the Web of Science Core Collection database (WoSCC) to evaluate VR in medicine in articles published between 1 January 2012 and 31 December 2023. These data were analyzed using CiteSpace 6.2.R2 software. Present limitations and future opportunities were summarized based on the data.
RESULTS: A total of 2143 related publications from 86 countries and regions were analyzed. The country with the highest number of publications is the USA, with 461 articles. The University of London has the most publications among institutions, with 43 articles. The burst keywords represent the research frontier from 2020 to 2023, such as "task analysis", "deep learning", and "machine learning".
CONCLUSION: The number of publications on VR applications in the medical field has been increasing steadily year by year. The USA is the leading country in this area, while the University of London stands out as the most published and most influential institution. Currently, there is a strong focus on integrating VR and AI to address complex issues such as medical education and training, rehabilitation, and surgical navigation. Looking ahead, the future trend involves integrating VR, augmented reality (AR), and mixed reality (MR) with the Internet of Things (IoT), wireless sensor networks (WSNs), big data analysis (BDA), and cloud computing (CC) technologies to develop intelligent healthcare systems within hospitals or medical centers.},
}
@article {pmid38997128,
year = {2024},
author = {Allers, S and O'Connell, KA and Carlson, T and Belardo, D and King, BL},
title = {Reusable tutorials for using cloud-based computing environments for the analysis of bacterial gene expression data from bulk RNA sequencing.},
journal = {Briefings in bioinformatics},
volume = {25},
number = {4},
pages = {},
doi = {10.1093/bib/bbae301},
pmid = {38997128},
issn = {1477-4054},
support = {P20GM103423//National Institute of General Medical Sciences of the National Institutes of Health to the Maine INBRE Program/ ; },
mesh = {*Cloud Computing ; *Computational Biology/methods ; *Sequence Analysis, RNA/methods ; *Software ; Gene Expression Regulation, Bacterial ; },
abstract = {This manuscript describes the development of a resource module that is part of a learning platform named "NIGMS Sandbox for Cloud-based Learning" https://github.com/NIGMS/NIGMS-Sandbox. The overall genesis of the Sandbox is described in the editorial NIGMS Sandbox at the beginning of this Supplement. This module delivers learning materials on RNA sequencing (RNAseq) data analysis in an interactive format that uses appropriate cloud resources for data access and analyses. Biomedical research is increasingly data-driven, and dependent upon data management and analysis methods that facilitate rigorous, robust, and reproducible research. Cloud-based computing resources provide opportunities to broaden the application of bioinformatics and data science in research. Two obstacles for researchers, particularly those at small institutions, are: (i) access to bioinformatics analysis environments tailored to their research; and (ii) training in how to use Cloud-based computing resources. We developed five reusable tutorials for bulk RNAseq data analysis to address these obstacles. Using Jupyter notebooks run on the Google Cloud Platform, the tutorials guide the user through a workflow featuring an RNAseq dataset from a study of prophage-altered drug resistance in Mycobacterium chelonae. The first tutorial uses a subset of the data so users can learn analysis steps rapidly, and the second uses the entire dataset. Next, a tutorial demonstrates how to analyze the read count data to generate lists of differentially expressed genes using R/DESeq2. Additional tutorials generate read counts using the Snakemake workflow manager and Nextflow with Google Batch. All tutorials are open-source and can be used as templates for other analyses.},
}
@article {pmid38992079,
year = {2024},
author = {Kaur, R and Vaithiyanathan, R},
title = {Hybrid YSGOA and neural networks based software failure prediction in cloud systems.},
journal = {Scientific reports},
volume = {14},
number = {1},
pages = {16035},
pmid = {38992079},
issn = {2045-2322},
abstract = {In the realm of cloud computing, ensuring the dependability and robustness of software systems is paramount. The intricate and evolving nature of cloud infrastructures, however, presents substantial obstacles in the pre-emptive identification and rectification of software anomalies. This study introduces an innovative methodology that amalgamates hybrid optimization algorithms with Neural Networks (NN) to refine the prediction of software malfunctions. The core objective is to augment the purity metric of our method across diverse operational conditions. This is accomplished through the utilization of two distinct optimization algorithms: the Yellow Saddle Goat Fish Algorithm (YSGA), which is instrumental in the discernment of pivotal features linked to software failures, and the Grasshopper Optimization Algorithm (GOA), which further polishes the feature compilation. These features are then processed by Neural Networks (NN), capitalizing on their proficiency in deciphering intricate data patterns and interconnections. The NNs are integral to the classification of instances predicated on the ascertained features. Our evaluation, conducted using the Failure-Dataset-OpenStack database and MATLAB Software, demonstrates that the hybrid optimization strategy employed for feature selection significantly curtails complexity and expedites processing.},
}
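The abstract above describes a two-stage pipeline: metaheuristic feature selection followed by a neural network classifier. The YSGA and GOA optimizers are not reproduced here; a generic univariate selector stands in for them, so the sketch below only illustrates the pipeline's shape on synthetic data.

```python
# Structural sketch of "select features, then classify with a neural network".
from sklearn.datasets import make_classification
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.pipeline import make_pipeline

X, y = make_classification(n_samples=1000, n_features=40, n_informative=8,
                           random_state=0)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, random_state=0)

clf = make_pipeline(
    SelectKBest(f_classif, k=12),  # stand-in for the YSGA+GOA feature refinement
    MLPClassifier(hidden_layer_sizes=(32,), max_iter=500, random_state=0),
)
clf.fit(X_tr, y_tr)
print("held-out accuracy:", clf.score(X_te, y_te))
```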
@article {pmid38988330,
year = {2024},
author = {Martinez, C and Etxaniz, I and Molinuevo, A and Alonso, J},
title = {MEDINA Catalogue of Cloud Security controls and metrics: Towards Continuous Cloud Security compliance.},
journal = {Open research Europe},
volume = {4},
number = {},
pages = {90},
pmid = {38988330},
issn = {2732-5121},
abstract = {In order to address current challenges on security certification of European ICT products, processes and services, the European Commission, through ENISA (European Union Agency for Cybersecurity), has developed the European Cybersecurity Certification Scheme for Cloud Services (EUCS). This paper presents an overview of the H2020 MEDINA project approach and tools to support the adoption of EUCS and offers a detailed description of one of the core components of the framework, the MEDINA Catalogue of Controls and Metrics. The main objective of the MEDINA Catalogue is to provide automated functionalities for CSPs' compliance managers and auditors to ease the certification process towards EUCS, through the provision of all information and guidance related to the scheme, namely categories, controls, security requirements, assurance levels, etc. The tool has been enhanced with all the research and implementation works performed in MEDINA, such as the definition of compliance metrics, suggestion of related implementation guidelines, alignment of similar controls in other schemes, and a set of self-assessment questionnaires, which are presented and discussed in this paper.},
}
@article {pmid38983206,
year = {2024},
author = {Alsadie, D},
title = {Advancements in heuristic task scheduling for IoT applications in fog-cloud computing: challenges and prospects.},
journal = {PeerJ. Computer science},
volume = {10},
number = {},
pages = {e2128},
pmid = {38983206},
issn = {2376-5992},
abstract = {Fog computing has emerged as a prospective paradigm to address the computational requirements of IoT applications, extending the capabilities of cloud computing to the network edge. Task scheduling is pivotal in enhancing energy efficiency, optimizing resource utilization and ensuring the timely execution of tasks within fog computing environments. This article presents a comprehensive review of the advancements in task scheduling methodologies for fog computing systems, covering priority-based, greedy heuristics, metaheuristics, learning-based, hybrid heuristics, and nature-inspired heuristic approaches. Through a systematic analysis of relevant literature, we highlight the strengths and limitations of each approach and identify key challenges facing fog computing task scheduling, including dynamic environments, heterogeneity, scalability, resource constraints, security concerns, and algorithm transparency. Furthermore, we propose future research directions to address these challenges, including the integration of machine learning techniques for real-time adaptation, leveraging federated learning for collaborative scheduling, developing resource-aware and energy-efficient algorithms, incorporating security-aware techniques, and advancing explainable AI methodologies. By addressing these challenges and pursuing these research directions, we aim to facilitate the development of more robust, adaptable, and efficient task-scheduling solutions for fog computing environments, ultimately fostering trust, security, and sustainability in fog computing systems and facilitating their widespread adoption across diverse applications and domains.},
}
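Of the heuristic families surveyed above, greedy heuristics are the simplest to illustrate. The sketch below implements a generic minimum-completion-time rule (largest tasks first, each assigned to the node that would finish it soonest); node speeds and task sizes are hypothetical.

```python
# Greedy minimum-completion-time scheduling across heterogeneous nodes.
def greedy_schedule(task_sizes, node_speeds):
    free_at = [0.0] * len(node_speeds)          # time at which each node frees up
    plan = []
    for size in sorted(task_sizes, reverse=True):   # place largest tasks first
        # Pick the node that would finish this task earliest.
        finish, node = min(
            (free_at[n] + size / s, n) for n, s in enumerate(node_speeds)
        )
        free_at[node] = finish
        plan.append((size, node, round(finish, 2)))
    return plan, max(free_at)                   # assignment and makespan

plan, makespan = greedy_schedule([4, 8, 2, 6, 5], node_speeds=[1.0, 2.0, 0.5])
print(plan, "makespan:", makespan)
```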
@article {pmid38980280,
year = {2024},
author = {Chen, C and Nguyen, DT and Lee, SJ and Baker, NA and Karakoti, AS and Lauw, L and Owen, C and Mueller, KT and Bilodeau, BA and Murugesan, V and Troyer, M},
title = {Accelerating Computational Materials Discovery with Machine Learning and Cloud High-Performance Computing: from Large-Scale Screening to Experimental Validation.},
journal = {Journal of the American Chemical Society},
volume = {},
number = {},
pages = {},
doi = {10.1021/jacs.4c03849},
pmid = {38980280},
issn = {1520-5126},
abstract = {High-throughput computational materials discovery has promised significant acceleration of the design and discovery of new materials for many years. Despite a surge in interest and activity, the constraints imposed by large-scale computational resources present a significant bottleneck. Furthermore, examples of very large-scale computational discovery carried through to experimental validation remain scarce, especially for materials with product applicability. Here, we demonstrate how this vision became reality by combining state-of-the-art machine learning (ML) models and traditional physics-based models on cloud high-performance computing (HPC) resources to quickly navigate through more than 32 million candidates and predict around half a million potentially stable materials. By focusing on solid-state electrolytes for battery applications, our discovery pipeline further identified 18 promising candidates with new compositions and rediscovered a decade's worth of collective knowledge in the field as a byproduct. We then synthesized and experimentally characterized the structures and conductivities of our top candidates, the NaxLi3-xYCl6 (0 ≤ x ≤ 3) series, demonstrating the potential of these compounds to serve as solid electrolytes. Additional candidate materials that are currently under experimental investigation could offer more examples of the computational discovery of new phases of Li- and Na-conducting solid electrolytes. The showcased screening of millions of materials candidates highlights the transformative potential of advanced ML and HPC methodologies, propelling materials discovery into a new era of efficiency and innovation.},
}
@article {pmid38975754,
year = {2024},
author = {Kumar, A and Verma, G},
title = {Multi-level authentication for security in cloud using improved quantum key distribution.},
journal = {Network (Bristol, England)},
volume = {},
number = {},
pages = {1-21},
doi = {10.1080/0954898X.2024.2367480},
pmid = {38975754},
issn = {1361-6536},
abstract = {Cloud computing is an on-demand virtual-based technology to develop, configure, and modify applications online through the internet. It enables users to handle various operations such as storage, back-up, and recovery of data, data analysis, delivery of software applications, implementation of new services and applications, hosting of websites and blogs, and streaming of audio and video files. While it provides many benefits, it is held back by cloud security problems such as data leakage, data loss, and cyber attacks. To address these security concerns, researchers have developed a variety of authentication mechanisms; the authentication procedure used in the proposed method is multi-levelled. Accordingly, an improved quantum key distribution (QKD) method is offered to strengthen cloud security against different types of security risks. Key generation for the improved QKD is based on the attribute-based encryption (ABE) public key cryptography approach; specifically, ciphertext-policy ABE (CP-ABE) is used in the improved QKD. The improved QKD scored a reduced KCA attack rating of 0.3193, superior to CMMLA (0.7915), CPABE (0.8916), AES (0.5277), Blowfish (0.6144), and ECC (0.4287). Finally, this multi-level authentication using an improved QKD approach is analysed under various measures and validates the enhancement over the state-of-the-art models.},
}
@article {pmid38975165,
year = {2024},
author = {Yan, L and Wang, G and Feng, H and Liu, P and Gao, H and Zhang, W and Hu, H and Pan, F},
title = {Efficient and accountable anti-leakage attribute-based encryption scheme for cloud storage.},
journal = {Heliyon},
volume = {10},
number = {12},
pages = {e32404},
doi = {10.1016/j.heliyon.2024.e32404},
pmid = {38975165},
issn = {2405-8440},
abstract = {To ensure secure and flexible data sharing in cloud storage, attribute-based encryption (ABE) is introduced to meet the requirements of fine-grained access control and secure one-to-many data sharing. However, the computational burden imposed by attribute encryption renders it unsuitable for resource-constrained environments such as the Internet of Things (IoT) and edge computing. Furthermore, the issue of accountability for illegal keys is crucial, as authorized users may actively disclose or sell authorization keys for personal gain, and keys may also passively leak due to management negligence or hacking incidents. Additionally, since all authorization keys are generated by the attribute authorization center, there is a potential risk of unauthorized key forgery. In response to these challenges, this paper proposes an efficient and accountable leakage-resistant scheme based on attribute encryption. The scheme adopts more secure online/offline encryption mechanisms and cloud server-assisted decryption to alleviate the computational burden on resource-constrained devices. For illegal keys, the scheme supports accountability for both users and the authorization center, allowing the revocation of decryption privileges for malicious users. In the case of passively leaked keys, timely key updates and revocation of decryption capabilities for leaked keys are implemented. Finally, the paper provides selective security and accountability proofs for the scheme under standard models. Efficiency analysis and experimental results demonstrate that the proposed scheme enhances encryption/decryption efficiency, and the storage overhead for accountability is also extremely low.},
}
@article {pmid38965311,
year = {2024},
author = {Khazali, M},
title = {Universal terminal for cloud quantum computing.},
journal = {Scientific reports},
volume = {14},
number = {1},
pages = {15412},
pmid = {38965311},
issn = {2045-2322},
abstract = {To bring quantum computing capacities to personal edge devices, the optimum approach is to have simple non-error-corrected personal devices that offload the computational tasks to scalable quantum computers via edge servers with cryogenic components and fault-tolerant schemes. Hence the network elements deploy different encoding protocols. This article proposes quantum terminals that are compatible with different encoding protocols, paving the way for realizing mobile edge-quantum computing. By accommodating the atomic lattice processor inside a cavity, the entangling mechanism is provided by the Rydberg cavity-QED technology. The auxiliary atom, responsible for photon emission, senses the logical qubit state via the long-range Rydberg interaction. In other words, the state of the logical qubit determines the interaction-induced level shift at the central atom and hence drives the system into distinct eigenstates, featuring photon emission at early or late times controlled by quantum interference. Applying an entanglement-swapping gate on two emitted photons would make the far-separated logical qubits entangled regardless of their encoding protocols. The proposed scheme provides a universal photonic interface for clustering the processors and connecting them with quantum memories and the quantum cloud, compatible with different encoding formats.},
}
@article {pmid38965235,
year = {2024},
author = {Edfeldt, K and Edwards, AM and Engkvist, O and Günther, J and Hartley, M and Hulcoop, DG and Leach, AR and Marsden, BD and Menge, A and Misquitta, L and Müller, S and Owen, DR and Schütt, KT and Skelton, N and Steffen, A and Tropsha, A and Vernet, E and Wang, Y and Wellnitz, J and Willson, TM and Clevert, DA and Haibe-Kains, B and Schiavone, LH and Schapira, M},
title = {A data science roadmap for open science organizations engaged in early-stage drug discovery.},
journal = {Nature communications},
volume = {15},
number = {1},
pages = {5640},
pmid = {38965235},
issn = {2041-1723},
support = {RGPIN-2019-04416//Canadian Network for Research and Innovation in Machining Technology, Natural Sciences and Engineering Research Council of Canada (NSERC Canadian Network for Research and Innovation in Machining Technology)/ ; },
mesh = {*Drug Discovery/methods ; *Machine Learning ; *Data Science/methods ; Humans ; Artificial Intelligence ; Information Dissemination/methods ; Data Mining/methods ; Cloud Computing ; Databases, Factual ; },
abstract = {The Structural Genomics Consortium is an international open science research organization with a focus on accelerating early-stage drug discovery, namely hit discovery and optimization. We, as many others, believe that artificial intelligence (AI) is poised to be a main accelerator in the field. The question is then how to best benefit from recent advances in AI and how to generate, format and disseminate data to enable future breakthroughs in AI-guided drug discovery. We present here the recommendations of a working group composed of experts from both the public and private sectors. Robust data management requires precise ontologies and standardized vocabulary while a centralized database architecture across laboratories facilitates data integration into high-value datasets. Lab automation and opening electronic lab notebooks to data mining push the boundaries of data sharing and data modeling. Important considerations for building robust machine-learning models include transparent and reproducible data processing, choosing the most relevant data representation, defining the right training and test sets, and estimating prediction uncertainty. Beyond data-sharing, cloud-based computing can be harnessed to build and disseminate machine-learning models. Important vectors of acceleration for hit and chemical probe discovery will be (1) the real-time integration of experimental data generation and modeling workflows within design-make-test-analyze (DMTA) cycles openly, and at scale and (2) the adoption of a mindset where data scientists and experimentalists work as a unified team, and where data science is incorporated into the experimental design.},
}
@article {pmid38962905,
year = {2024},
author = {Li, F and Lv, K and Liu, X and Zhou, Y and Liu, K},
title = {Accurately Computing the Interacted Volume of Molecules over Their 3D Mesh Models.},
journal = {Journal of chemical information and modeling},
volume = {},
number = {},
pages = {},
doi = {10.1021/acs.jcim.4c00641},
pmid = {38962905},
issn = {1549-960X},
abstract = {For quickly predicting the rational arrangement of catalysts and substrates, we previously proposed a method to calculate the interacted volumes of molecules over their 3D point cloud models. However, the nonuniform density in molecular point clouds may lead to incomplete contours in some slices, reducing the accuracy of the previous method. In this paper, we propose a two-step method for more accurately computing molecular interacted volumes. First, by employing a prematched mesh slicing method, we layer the 3D triangular mesh models of the electrostatic potential isosurfaces of two molecules globally, transforming the volume calculation into finding the intersecting areas in each layer. Next, by subdividing polygonal edges, we accurately identify intersecting parts within each layer, ensuring precise calculation of interacted volumes. In addition, we present a concise overview for computing intersecting areas in cases of multiple contour intersections and for improving computational efficiency by incorporating bounding boxes at three stages. Experimental results demonstrate that our method maintains high accuracy in different experimental data sets, with an average relative error of 0.16%. On the same experimental setup, our average relative error is 0.07%, which is lower than the previous algorithm's 1.73%, improving the accuracy and stability in calculating interacted volumes.},
}
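The layer-wise idea in the entry above (slice both surfaces, intersect the cross-sections, integrate the areas) can be sketched with off-the-shelf polygon clipping. The toy below uses shapely and replaces real mesh slices with circular contours of two unit spheres, so it illustrates the slice-and-intersect principle only, not the paper's pre-matched mesh slicing or edge-subdivision steps.

```python
# Slice-and-intersect volume estimate; toy circles stand in for mesh slice contours.
import numpy as np
from shapely.geometry import Polygon

def circle(cx, cy, r, n=64):
    t = np.linspace(0, 2 * np.pi, n, endpoint=False)
    return Polygon(list(zip(cx + r * np.cos(t), cy + r * np.sin(t))))

def radius(z):
    # Cross-section radius of a unit sphere at height z.
    return np.sqrt(max(1.0 - z * z, 1e-12))

def intersected_volume(slices_a, slices_b, dz):
    # Riemann sum of per-layer intersection areas times slice thickness.
    return sum(a.intersection(b).area for a, b in zip(slices_a, slices_b)) * dz

dz = 0.02
zs = np.arange(-1 + dz / 2, 1, dz)                    # slice midpoints
slices_a = [circle(0.0, 0.0, radius(z)) for z in zs]
slices_b = [circle(1.0, 0.0, radius(z)) for z in zs]  # second sphere shifted by 1
# Analytic lens volume for two unit spheres 1 apart is 5*pi/12, about 1.309.
print("estimated overlap volume:", round(intersected_volume(slices_a, slices_b, dz), 3))
```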
@article {pmid38941113,
year = {2024},
author = {Seaman, RP and Campbell, R and Doe, V and Yosufzai, Z and Graber, JH},
title = {A cloud-based training module for efficient de novo transcriptome assembly using Nextflow and Google cloud.},
journal = {Briefings in bioinformatics},
volume = {25},
number = {4},
pages = {},
doi = {10.1093/bib/bbae313},
pmid = {38941113},
issn = {1477-4054},
support = {//Administrative Supplement to the Maine INBRE/ ; //Institutional Development Award/ ; P20GM103423//National Institute of General Medical Sciences of the National Institutes of Health/ ; },
mesh = {*Cloud Computing ; *Transcriptome ; Computational Biology/methods/education ; Software ; Humans ; Gene Expression Profiling/methods ; Internet ; },
abstract = {This study describes the development of a resource module that is part of a learning platform named "NIGMS Sandbox for Cloud-based Learning" (https://github.com/NIGMS/NIGMS-Sandbox). The overall genesis of the Sandbox is described in the editorial NIGMS Sandbox at the beginning of this Supplement. This module delivers learning materials on de novo transcriptome assembly using Nextflow in an interactive format that uses appropriate cloud resources for data access and analysis. Cloud computing is a powerful new means by which biomedical researchers can access resources and capacity that were previously either unattainable or prohibitively expensive. To take advantage of these resources, however, the biomedical research community needs new skills and knowledge. We present here a cloud-based training module, developed in conjunction with Google Cloud, Deloitte Consulting, and the NIH STRIDES Program, that uses the biological problem of de novo transcriptome assembly to demonstrate and teach the concepts of computational workflows (using Nextflow) and cost- and resource-efficient use of Cloud services (using Google Cloud Platform). Our work highlights the reduced necessity of on-site computing resources and the accessibility of cloud-based infrastructure for bioinformatics applications.},
}
@article {pmid38939612,
year = {2023},
author = {Tanade, C and Rakestraw, E and Ladd, W and Draeger, E and Randles, A},
title = {Cloud Computing to Enable Wearable-Driven Longitudinal Hemodynamic Maps.},
journal = {International Conference for High Performance Computing, Networking, Storage and Analysis : [proceedings]. SC (Conference : Supercomputing)},
volume = {2023},
number = {},
pages = {},
doi = {10.1145/3581784.3607101},
pmid = {38939612},
issn = {2167-4337},
abstract = {Tracking hemodynamic responses to treatment and stimuli over long periods remains a grand challenge. Moving from established single-heartbeat technology to longitudinal profiles would require continuous data describing how the patient's state evolves, new methods to extend the temporal domain over which flow is sampled, and high-throughput computing resources. While personalized digital twins can accurately measure 3D hemodynamics over several heartbeats, state-of-the-art methods would require hundreds of years of wallclock time on leadership scale systems to simulate one day of activity. To address these challenges, we propose a cloud-based, parallel-in-time framework leveraging continuous data from wearable devices to capture the first 3D patient-specific, longitudinal hemodynamic maps. We demonstrate the validity of our method by establishing ground truth data for 750 beats and comparing the results. Our cloud-based framework is based on an initial fixed set of simulations to enable the wearable-informed creation of personalized longitudinal hemodynamic maps.},
}
@article {pmid38934441,
year = {2024},
author = {Siruvoru, V and Aparna, S},
title = {Hybrid deep learning and optimized clustering mechanism for load balancing and fault tolerance in cloud computing.},
journal = {Network (Bristol, England)},
volume = {},
number = {},
pages = {1-22},
doi = {10.1080/0954898X.2024.2369137},
pmid = {38934441},
issn = {1361-6536},
abstract = {Cloud services are one of the most quickly developing technologies. Furthermore, load balancing is recognized as a fundamental challenge for achieving energy efficiency. The primary function of load balancing is to deliver optimal services by releasing the load over multiple resources. Fault tolerance is used to improve the reliability and accessibility of the network. In this paper, a hybrid Deep Learning-based load balancing algorithm is developed. Initially, tasks are allocated to all VMs in a round-robin method. Furthermore, the Deep Embedding Cluster (DEC) utilizes the Central Processing Unit (CPU), bandwidth, memory, processing elements, and frequency scaling factors to determine whether a VM is overloaded or underloaded. Tasks on an overloaded VM are evaluated and reassigned to an underloaded VM for cloud load balancing. In addition, the Deep Q Recurrent Neural Network (DQRNN) is proposed to balance the load based on numerous factors such as supply, demand, capacity, load, resource utilization, and fault tolerance. Furthermore, the effectiveness of this model is assessed by load, capacity, resource consumption, and success rate, achieving ideal values of 0.147, 0.726, 0.527, and 0.895, respectively.},
}
@article {pmid38931731,
year = {2024},
author = {Francini, S and Marcelli, A and Chirici, G and Di Biase, RM and Fattorini, L and Corona, P},
title = {Per-Pixel Forest Attribute Mapping and Error Estimation: The Google Earth Engine and R dataDriven Tool.},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {12},
pages = {},
doi = {10.3390/s24123947},
pmid = {38931731},
issn = {1424-8220},
abstract = {Remote sensing products are typically assessed using a single accuracy estimate for the entire map, despite significant variations in accuracy across different map areas or classes. Estimating per-pixel uncertainty is a major challenge for enhancing the usability and potential of remote sensing products. This paper introduces the dataDriven open access tool, a novel statistical design-based approach that specifically addresses this issue by estimating per-pixel uncertainty through a bootstrap resampling procedure. Leveraging Sentinel-2 remote sensing data as auxiliary information, the capabilities of the Google Earth Engine cloud computing platform, and the R programming language, dataDriven can be applied in any world region and to any variables of interest. In this study, the dataDriven tool was tested in the Rincine forest estate study area in eastern Tuscany, Italy, focusing on volume density as the variable of interest. The average volume density was 0.042, corresponding to 420 m³ per hectare. The estimated pixel errors ranged between 93 m³ and 979 m³ per hectare and were 285 m³ per hectare on average. The ability to produce error estimates for each pixel in the map is a novel aspect in the context of the current advances in remote sensing and forest monitoring and assessment. It constitutes a significant support in forest management applications and also a powerful communication tool, since it informs users about areas where map estimates are unreliable, at the same time highlighting the areas where the information provided via the map is more trustworthy. In light of this, the dataDriven tool aims to support researchers and practitioners in the spatially exhaustive use of remote sensing-derived products and map validation.},
}
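The per-pixel bootstrap idea can be sketched generically: refit the model on resampled field plots and read the spread of the per-pixel predictions as the uncertainty map. The code below is a stand-in with synthetic plots and a linear model, not the dataDriven tool's actual estimator.

```python
# Bootstrap per-pixel uncertainty: refit on resampled plots, read prediction spread.
import numpy as np
from sklearn.linear_model import LinearRegression

rng = np.random.default_rng(42)
X_plots = rng.normal(size=(200, 4))                 # auxiliary bands at field plots
beta = np.array([3.0, 1.0, 0.5, 0.0])
y_plots = X_plots @ beta + rng.normal(scale=1.0, size=200)
X_pixels = rng.normal(size=(1000, 4))               # auxiliary bands at map pixels

preds = []
for _ in range(200):                                # bootstrap resamples of plots
    idx = rng.integers(0, len(y_plots), len(y_plots))
    model = LinearRegression().fit(X_plots[idx], y_plots[idx])
    preds.append(model.predict(X_pixels))

preds = np.stack(preds)
pixel_estimate = preds.mean(axis=0)
pixel_std_error = preds.std(axis=0, ddof=1)         # per-pixel uncertainty map
print(pixel_estimate[:3].round(2), pixel_std_error[:3].round(3))
```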
@article {pmid38931559,
year = {2024},
author = {Hong, S and Kim, Y and Nam, J and Kim, S},
title = {On the Analysis of Inter-Relationship between Auto-Scaling Policy and QoS of FaaS Workloads.},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {12},
pages = {},
doi = {10.3390/s24123774},
pmid = {38931559},
issn = {1424-8220},
support = {2021R1G1A1006326//National Research Foundation of Korea/ ; },
abstract = {A recent development in cloud computing has introduced serverless technology, enabling the convenient and flexible management of cloud-native applications. Typically, Function-as-a-Service (FaaS) solutions rely on serverless backend solutions, such as Kubernetes (K8s) and Knative, to leverage the advantages of resource management for underlying containerized contexts, including auto-scaling and pod scheduling. To take advantage of this, recent cloud service providers also deploy self-hosted serverless services on their on-premise FaaS platforms rather than relying on commercial public cloud offerings. However, the lack of standardized guidelines on K8s abstractions for fairly scheduling and allocating resources, and on auto-scaling configuration options for such on-premise hosting environments in serverless computing, poses challenges in meeting the service level objectives (SLOs) of diverse workloads. This study fills this gap by exploring the relationship between auto-scaling behavior and the performance of FaaS workloads, depending on scaling-related configurations in K8s. Based on comprehensive measurement studies, we derive guidance on which scaling configurations, such as the base metric and threshold, should be applied to which workloads to maximize latency SLO attainment and the number of responses. Additionally, we propose a methodology to assess the scaling efficiency of the related K8s configurations with regard to the quality of service (QoS) of FaaS workloads.},
}
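For readers unfamiliar with the K8s scaling mechanics discussed above, the Horizontal Pod Autoscaler's core rule, per the Kubernetes documentation, is desired = ceil(currentReplicas * currentMetric / targetMetric). The sketch below applies that rule to show how the configured threshold alone changes the replica count for the same observed load; the replica bounds are illustrative.

```python
# Kubernetes HPA scaling rule: desired = ceil(current * metric / target).
import math

def desired_replicas(current_replicas: int, current_metric: float,
                     target_metric: float, min_replicas: int = 1,
                     max_replicas: int = 20) -> int:
    desired = math.ceil(current_replicas * current_metric / target_metric)
    return max(min_replicas, min(max_replicas, desired))

# Same observed load (80% average CPU across 4 pods), two different thresholds:
print(desired_replicas(4, 80.0, 50.0))  # aggressive threshold -> 7 replicas
print(desired_replicas(4, 80.0, 75.0))  # relaxed threshold    -> 5 replicas
```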
@article {pmid38918484,
year = {2024},
author = {Hernández Olcina, J and Anquela Julián, AB and Martín Furones, ÁE},
title = {Navigating latency hurdles: an in-depth examination of a cloud-powered GNSS real-time positioning application on mobile devices.},
journal = {Scientific reports},
volume = {14},
number = {1},
pages = {14668},
pmid = {38918484},
issn = {2045-2322},
abstract = {A growing dependence on real-time positioning apps for navigation, safety, and location-based services necessitates a deep understanding of latency challenges within cloud-based Global Navigation Satellite System (GNSS) solutions. This study analyses a GNSS real-time positioning app on smartphones that utilizes cloud computing for positioning data delivery. The study investigates and quantifies diverse latency contributors throughout the system architecture, including GNSS signal acquisition, data transmission, cloud processing, and result dissemination. Controlled experiments and real-world scenarios are employed to assess the influence of network conditions, device capabilities, and cloud server load on overall positioning latency. Findings highlight system bottlenecks and their relative contributions to latency. Additionally, practical recommendations are presented for developers and cloud service providers to mitigate these challenges and guarantee an optimal user experience for real-time positioning applications. This study not only elucidates the complex interplay of factors affecting GNSS app latency, but also paves the way for future advancements in cloud-based positioning solutions, ensuring the accuracy and timeliness critical for safety-critical and emerging applications.},
}
@article {pmid38916063,
year = {2024},
author = {Ćosić, K and Popović, S and Wiederhold, BK},
title = {Enhancing Aviation Safety through AI-Driven Mental Health Management for Pilots and Air Traffic Controllers.},
journal = {Cyberpsychology, behavior and social networking},
volume = {},
number = {},
pages = {},
doi = {10.1089/cyber.2023.0737},
pmid = {38916063},
issn = {2152-2723},
abstract = {This article provides an overview of the mental health challenges faced by pilots and air traffic controllers (ATCs), whose stressful professional lives may negatively impact global flight safety and security. The adverse effects of mental health disorders on their flight performance pose a particular safety risk, especially in sudden unexpected startle situations. Therefore, the early detection, prediction and prevention of mental health deterioration in pilots and ATCs, particularly among those at high risk, are crucial to minimize potential air crash incidents caused by human factors. Recent research in artificial intelligence (AI) demonstrates the potential of machine and deep learning, edge and cloud computing, virtual reality and wearable multimodal physiological sensors for monitoring and predicting mental health disorders. Longitudinal monitoring and analysis of pilots' and ATCs' physiological, cognitive and behavioral states could help predict individuals at risk of undisclosed or emerging mental health disorders. Utilizing AI tools and methodologies to identify and select these individuals for preventive mental health training and interventions could be a promising and effective approach to preventing potential air crash accidents attributed to human factors and related mental health problems. Based on these insights, the article advocates for the design of a multidisciplinary mental healthcare ecosystem in modern aviation using AI tools and technologies, to foster more efficient and effective mental health management, thereby enhancing flight safety and security standards. This proposed ecosystem requires the collaboration of multidisciplinary experts, including psychologists, neuroscientists, physiologists, psychiatrists, etc., to address these challenges in modern aviation.},
}
@article {pmid38915693,
year = {2024},
author = {Czech, E and Millar, TR and White, T and Jeffery, B and Miles, A and Tallman, S and Wojdyla, R and Zabad, S and Hammerbacher, J and Kelleher, J},
title = {Analysis-ready VCF at Biobank scale using Zarr.},
journal = {bioRxiv : the preprint server for biology},
volume = {},
number = {},
pages = {},
doi = {10.1101/2024.06.11.598241},
pmid = {38915693},
abstract = {BACKGROUND: Variant Call Format (VCF) is the standard file format for interchanging genetic variation data and associated quality control metrics. The usual row-wise encoding of the VCF data model (either as text or packed binary) emphasises efficient retrieval of all data for a given variant, but accessing data on a field or sample basis is inefficient. Biobank scale datasets currently available consist of hundreds of thousands of whole genomes and hundreds of terabytes of compressed VCF. Row-wise data storage is fundamentally unsuitable and a more scalable approach is needed.
RESULTS: We present the VCF Zarr specification, an encoding of the VCF data model using Zarr which makes retrieving subsets of the data much more efficient. Zarr is a cloud-native format for storing multi-dimensional data, widely used in scientific computing. We show how this format is far more efficient than standard VCF based approaches, and competitive with specialised methods for storing genotype data in terms of compression ratios and calculation performance. We demonstrate the VCF Zarr format (and the vcf2zarr conversion utility) on a subset of the Genomics England aggV2 dataset comprising 78,195 samples and 59,880,903 variants, with a 5X reduction in storage and greater than 300X reduction in CPU usage in some representative benchmarks.
CONCLUSIONS: Large row-encoded VCF files are a major bottleneck for current research, and storing and processing these files incurs a substantial cost. The VCF Zarr specification, building on widely used, open-source technologies, has the potential to greatly reduce these costs and may enable a diverse ecosystem of next-generation tools for analysing genetic variation data directly from cloud-based object stores.},
}
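The practical benefit claimed above is columnar access: reading one field without parsing whole VCF records. A minimal access sketch follows; the store path is hypothetical and the array names (variant_position, call_genotype) follow the VCF Zarr draft convention, so verify them against an actual store before use.

```python
# Columnar access to a VCF-Zarr store (hypothetical path and field names).
import numpy as np
import zarr

store = zarr.open("aggV2_subset.vcf.zarr", mode="r")

pos = store["variant_position"][:]          # one small column, not the whole VCF
lo, hi = np.searchsorted(pos, [1_000_000, 2_000_000])

# call_genotype is (variants, samples, ploidy); slicing reads only needed chunks.
g = store["call_genotype"][lo:hi]
print("non-reference allele calls in window:", int((g > 0).sum()))
```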
@article {pmid38912450,
year = {2024},
author = {Yang, Y and Ren, K and Song, J},
title = {Enhancing Earth data analysis in 5G satellite networks: A novel lightweight approach integrating improved deep learning.},
journal = {Heliyon},
volume = {10},
number = {11},
pages = {e32071},
pmid = {38912450},
issn = {2405-8440},
abstract = {Efficiently handling huge data amounts and enabling processing-intensive applications to run in faraway areas simultaneously is the ultimate objective of 5G networks. Currently, in order to distribute computing tasks, ongoing studies are exploring the incorporation of fog-cloud servers onto satellites, presenting a promising solution to enhance connectivity in remote areas. Nevertheless, analyzing the copious amounts of data produced by scattered sensors remains a challenging endeavor. The conventional strategy of transmitting this data to a central server for analysis can be costly. In contrast to centralized learning methods, distributed machine learning (ML) provides an alternative approach, albeit with notable drawbacks. This paper addresses the comparative learning expenses of centralized and distributed learning systems to tackle these challenges directly. It proposes the creation of an integrated system that harmoniously merges cloud servers with satellite network structures, leveraging the strengths of each system. This integration could represent a major breakthrough in satellite-based networking technology by streamlining data processing from remote nodes and cutting down on expenses. The core of this approach lies in the adaptive tailoring of learning techniques for individual entities based on their specific contextual nuances. The experimental findings underscore the prowess of the innovative lightweight strategy, LMAED2L (Enhanced Deep Learning for Earth Data Analysis), across a spectrum of machine learning assignments, showcasing remarkable and consistent performance under diverse operational conditions. Through a strategic fusion of centralized and distributed learning frameworks, the LMAED2L method emerges as a dynamic and effective remedy for the intricate data analysis challenges encountered within satellite networks interfaced with cloud servers. The empirical findings reveal a significant performance boost of our novel approach over traditional methods, with an average increase in reward (4.1 %), task completion rate (3.9 %), and delivered packets (3.4 %). This report suggests that these advancements will catalyze the integration of cutting-edge machine learning algorithms within future networks, elevating responsiveness, efficiency, and resource utilization to new heights.},
}
@article {pmid38909109,
year = {2024},
author = {Qu, L and Xie, HQ and Pei, JL and Li, YG and Wu, JM and Feng, G and Xiao, ML},
title = {Cloud inversion analysis of surrounding rock parameters for underground powerhouse based on PSO-BP optimized neural network and web technology.},
journal = {Scientific reports},
volume = {14},
number = {1},
pages = {14399},
pmid = {38909109},
issn = {2045-2322},
support = {No. 52109135//National Natural Science Foundation of China/ ; No. 2022-03//Science and Technology Innovation Program from Water Resources of Guangdong Province/ ; },
abstract = {Aiming at the shortcomings of the BP neural network in practical applications, such as the tendency to fall into local extrema and slow convergence, we optimized the initial weights and thresholds of the BP neural network using particle swarm optimization (PSO). Additionally, cloud computing services, web technology, a cloud database and numerical simulation were integrated to construct an intelligent feedback analysis cloud program for underground engineering safety monitoring based on the PSO-BP algorithm. The program could conveniently, quickly, and intelligently carry out numerical analysis of underground engineering and dynamic feedback analysis of surrounding rock parameters. The program was applied to the cloud inversion analysis of the surrounding rock parameters for the underground powerhouse of the Shuangjiangkou Hydropower Station. The calculated displacement simulated with the back-analyzed parameters matches the measured displacement very well. The posterior variance evaluation shows that the posterior error ratio is 0.045 and the small error probability is 0.999. The evaluation results indicate that the intelligent feedback analysis cloud program has high accuracy and can be applied to engineering practice.},
}
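The PSO-BP idea, using a particle swarm to choose initial network weights that gradient training then refines, can be shown in miniature. The sketch below runs standard PSO directly over the flattened weight vector of a one-hidden-layer network on toy data; the subsequent backpropagation fine-tuning stage and the paper's geomechanical model are omitted.

```python
# Standard PSO over a tiny network's weight vector (PSO-BP initialization stage).
import numpy as np

rng = np.random.default_rng(0)
X = np.linspace(-2, 2, 64)[:, None]          # toy 1-D inputs
y = np.tanh(2 * X).ravel()                   # toy targets

H = 6                                        # hidden units
DIM = 3 * H + 1                              # w1(H) + b1(H) + w2(H) + b2(1)

def mse(w):
    w1, b1, w2, b2 = w[:H], w[H:2*H], w[2*H:3*H], w[3*H]
    hidden = np.tanh(X * w1 + b1)            # (64, H) via broadcasting
    return (((hidden @ w2 + b2) - y) ** 2).mean()

# PSO update: v <- inertia*v + c1*r1*(pbest - x) + c2*r2*(gbest - x)
N, ITERS, INERTIA, C1, C2 = 30, 200, 0.7, 1.5, 1.5
swarm = rng.normal(size=(N, DIM))
vel = np.zeros_like(swarm)
pbest = swarm.copy()
pbest_f = np.array([mse(p) for p in swarm])
gbest = pbest[pbest_f.argmin()].copy()

for _ in range(ITERS):
    r1, r2 = rng.random((N, 1)), rng.random((N, 1))
    vel = INERTIA * vel + C1 * r1 * (pbest - swarm) + C2 * r2 * (gbest - swarm)
    swarm = swarm + vel
    f = np.array([mse(p) for p in swarm])
    improved = f < pbest_f
    pbest[improved], pbest_f[improved] = swarm[improved], f[improved]
    gbest = pbest[pbest_f.argmin()].copy()

print("best initial-weight MSE found by PSO:", float(pbest_f.min()))
```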
@article {pmid38907377,
year = {2021},
author = {Tonti, S and Marzolini, B and Bulgheroni, M},
title = {Smartphone-Based Passive Sensing for Behavioral and Physical Monitoring in Free-Life Conditions: Technical Usability Study.},
journal = {JMIR biomedical engineering},
volume = {6},
number = {2},
pages = {e15417},
doi = {10.2196/15417},
pmid = {38907377},
issn = {2561-3278},
abstract = {BACKGROUND: Smartphone use is widely spreading in society. Their embedded functions and sensors may play an important role in therapy monitoring and planning. However, the use of smartphones for intrapersonal behavioral and physical monitoring is not yet fully supported by adequate studies addressing technical reliability and acceptance.
OBJECTIVE: The objective of this paper is to identify and discuss technical issues that may impact on the wide use of smartphones as clinical monitoring tools. The focus is on the quality of the data and transparency of the acquisition process.
METHODS: QuantifyMyPerson is a platform for continuous monitoring of smartphone use and embedded sensors data. The platform consists of an app for data acquisition, a backend cloud server for data storage and processing, and a web-based dashboard for data management and visualization. The data processing aims to extract meaningful features for the description of daily life such as phone status, calls, app use, GPS, and accelerometer data. Healthy subjects installed the app on their smartphones and ran it for 7 months. The acquired data were analyzed to assess impact on smartphone performance (ie, battery consumption and anomalies in functioning) and data integrity. Relevance of the selected features in describing changes in daily life was assessed through the computation of a k-nearest neighbors global anomaly score to detect days that differ from others.
RESULTS: The effectiveness of smartphone-based monitoring depends on the acceptability and interoperability of the system, as user retention and data integrity are key aspects. Acceptability was confirmed by the full transparency of the app and the absence of any conflicts with daily smartphone use. The only perceived issue was battery consumption, even though the trend of battery drain with and without the app running was comparable. Regarding interoperability, the app was successfully installed and run on several Android brands. The study shows that some smartphone manufacturers implement power-saving policies that prevent continuous sensor data acquisition, impacting data integrity. Data integrity was 96% on smartphones whose power-saving policies do not impact the embedded sensor management and 84% overall.
CONCLUSIONS: The main technological barriers to continuous behavioral and physical monitoring (ie, battery consumption and power-saving policies of manufacturers) may be overcome. The increase in battery consumption is mainly due to GPS triangulation and may be limited, while data missed because of power-saving policies relate only to periods of nonuse of the phone, since the embedded sensors are reactivated by any smartphone event. Overall, smartphone-based passive sensing is fully feasible and scalable despite the Android market fragmentation.},
}
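The k-nearest-neighbors global anomaly score mentioned in the methods has a compact standard form: score each day by its mean distance to its k closest days in feature space. The sketch below assumes hypothetical daily feature rows; k=5 is an arbitrary choice.

```python
# kNN global anomaly score: mean distance to the k nearest other days.
import numpy as np
from sklearn.neighbors import NearestNeighbors

rng = np.random.default_rng(7)
days = rng.normal(size=(210, 5))          # ~7 months of days, 5 daily features
days[100] += 4.0                          # inject one atypical day

k = 5
nn = NearestNeighbors(n_neighbors=k + 1).fit(days)  # +1: each day matches itself
dist, _ = nn.kneighbors(days)
score = dist[:, 1:].mean(axis=1)          # drop the self-distance column
print("most anomalous day index:", int(score.argmax()))
```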
@article {pmid38904211,
year = {2024},
author = {Navaneethakrishnan, M and Robinson Joel, M and Kalavai Palani, S and Gnanaprakasam, GJ},
title = {EfficientNet-deep quantum neural network-based economic denial of sustainability attack detection to enhance network security in cloud.},
journal = {Network (Bristol, England)},
volume = {},
number = {},
pages = {1-25},
doi = {10.1080/0954898X.2024.2361093},
pmid = {38904211},
issn = {1361-6536},
abstract = {Cloud computing (CC) is a future revolution in the Information Technology (IT) and communication field. Security and internet connectivity are the major factors slowing the proliferation of CC. Recently, a new kind of distributed denial of service (DDoS) attack, known as the Economic Denial of Sustainability (EDoS) attack, has been emerging. Though EDoS attacks are small at the moment, they can be expected to grow in the near future in tandem with the progression of cloud usage. Here, an EfficientNet-B3-Attn-2 fused Deep Quantum Neural Network (EfficientNet-DQNN) is presented for EDoS detection. Initially, the cloud is simulated and, thereafter, the considered input log file is fed to data pre-processing. Z-Score Normalization (ZSN) is employed to carry out the pre-processing of data. Afterwards, feature fusion (FF) is accomplished based on a Deep Neural Network (DNN) with Kulczynski similarity. Then, data augmentation (DA) is executed by oversampling based upon the Synthetic Minority Over-sampling Technique (SMOTE). At last, attack detection is conducted utilizing EfficientNet-DQNN. Furthermore, EfficientNet-DQNN is formed by the incorporation of EfficientNet-B3-Attn-2 with DQNN. EfficientNet-DQNN attained an F1-score of 89.8%, accuracy of 90.4%, precision of 91.1% and recall of 91.2% on the BOT-IOT dataset with 9-fold cross-validation.},
}
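Two of the named preprocessing steps, Z-Score Normalization and SMOTE oversampling, are standard and can be sketched directly; the detector itself (EfficientNet-DQNN) is not reproduced. The sketch requires scikit-learn and imbalanced-learn, and the class imbalance below is synthetic.

```python
# Z-score normalization followed by SMOTE oversampling of a rare attack class.
import numpy as np
from sklearn.preprocessing import StandardScaler
from imblearn.over_sampling import SMOTE

rng = np.random.default_rng(3)
X = np.vstack([rng.normal(0, 1, (950, 10)), rng.normal(2, 1, (50, 10))])
y = np.array([0] * 950 + [1] * 50)        # rare EDoS-like positive class

X_std = StandardScaler().fit_transform(X)           # z-score normalization
X_bal, y_bal = SMOTE(random_state=0).fit_resample(X_std, y)
print("class counts after SMOTE:", np.bincount(y_bal))
```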
@article {pmid38902499,
year = {2024},
author = {Dai, S},
title = {On the quantum circuit implementation of modus ponens.},
journal = {Scientific reports},
volume = {14},
number = {1},
pages = {14245},
pmid = {38902499},
issn = {2045-2322},
support = {62006168//National Natural Science Foundation of China/ ; LQ21A010001//Natural Science Foundation of Zhejiang Province/ ; },
abstract = {The process of inference reflects the structure of propositions with assigned truth values, either true or false. Modus ponens is a fundamental form of inference that involves affirming the antecedent to affirm the consequent. Inspired by quantum computers, the superposition of true and false is used for parallel processing. In this work, we propose a quantum version of modus ponens. Additionally, we introduce two generalizations of quantum modus ponens: the quantum modus ponens inference chain and multidimensional quantum modus ponens. Finally, a simple implementation of quantum modus ponens on the OriginQ quantum computing cloud platform is demonstrated.},
}
@article {pmid38894434,
year = {2024},
author = {Gazis, A and Katsiri, E},
title = {Streamline Intelligent Crowd Monitoring with IoT Cloud Computing Middleware.},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {11},
pages = {},
pmid = {38894434},
issn = {1424-8220},
abstract = {This article introduces a novel middleware that utilizes cost-effective, low-power computing devices like Raspberry Pi to analyze data from wireless sensor networks (WSNs). It is designed for indoor settings like historical buildings and museums, tracking visitors and identifying points of interest. It serves as an evacuation aid by monitoring occupancy and gauging the popularity of specific areas, subjects, or art exhibitions. The middleware employs a basic form of the MapReduce algorithm to gather WSN data and distribute it across available computer nodes. Data collected by RFID sensors on visitor badges is stored on mini-computers placed in exhibition rooms and then transmitted to a remote database after a preset time frame. Utilizing MapReduce for data analysis and a leader election algorithm for fault tolerance, this middleware demonstrates its viability through metrics, supporting applications like swift prototyping and accurate validation of findings. Despite using simpler hardware, its performance matches that of resource-intensive methods involving audiovisual and AI techniques. This design's innovation lies in its fault-tolerant, distributed setup using budget-friendly, low-power devices rather than resource-heavy hardware or methods. Successfully tested at a historical building in Greece (M. Hatzidakis' residence), it is tailored for indoor spaces. This paper compares its algorithmic application layer with other implementations, highlighting its technical strengths and advantages. Particularly relevant in the wake of the COVID-19 pandemic, and as general monitoring middleware for indoor locations, this middleware holds promise for tracking visitor counts and overall building occupancy.},
}
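The "basic form of the MapReduce algorithm" mentioned above reduces, for occupancy counting, to emit, group, and sum. The toy below shows that map/shuffle/reduce shape over hypothetical RFID badge readings; the real middleware distributes these phases across Raspberry Pi nodes.

```python
# Toy map/shuffle/reduce for room occupancy over hypothetical RFID readings.
from collections import defaultdict

readings = [
    ("badge1", "room_A", 1000), ("badge2", "room_A", 1001),
    ("badge1", "room_B", 1050), ("badge3", "room_A", 1060),
]

def map_phase(record):
    _badge, room, _ts = record
    yield room, 1                              # emit one count per reading

def shuffle(pairs):
    groups = defaultdict(list)                 # group emitted values by key
    for key, value in pairs:
        groups[key].append(value)
    return groups

def reduce_phase(groups):
    return {room: sum(counts) for room, counts in groups.items()}

pairs = [kv for rec in readings for kv in map_phase(rec)]
print(reduce_phase(shuffle(pairs)))            # {'room_A': 3, 'room_B': 1}
```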
@article {pmid38894431,
year = {2024},
author = {López-Ortiz, EJ and Perea-Trigo, M and Soria-Morillo, LM and Álvarez-García, JA and Vegas-Olmos, JJ},
title = {Energy-Efficient Edge and Cloud Image Classification with Multi-Reservoir Echo State Network and Data Processing Units.},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {11},
pages = {},
pmid = {38894431},
issn = {1424-8220},
abstract = {In an era dominated by Internet of Things (IoT) devices, software-as-a-service (SaaS) platforms, and rapid advances in cloud and edge computing, the demand for efficient and lightweight models suitable for resource-constrained devices such as data processing units (DPUs) has surged. Traditional deep learning models, such as convolutional neural networks (CNNs), pose significant computational and memory challenges, limiting their use in resource-constrained environments. Echo State Networks (ESNs), based on reservoir computing principles, offer a promising alternative with reduced computational complexity and shorter training times. This study explores the applicability of ESN-based architectures in image classification and weather forecasting tasks, using benchmarks such as the MNIST, Fashion-MNIST, and CloudCast datasets. Through comprehensive evaluations, the Multi-Reservoir ESN (MRESN) architecture emerges as a standout performer, demonstrating its potential for deployment on DPUs or home stations. Because MRESN adapts dynamically to changing input signals, such as weather forecasts, continuous on-device training becomes feasible, eliminating the need for static pre-trained models. Our results highlight the importance of lightweight models such as MRESN in cloud and edge computing applications where efficiency and sustainability are paramount. This study contributes to the advancement of efficient computing practices by providing novel insights into the performance and versatility of MRESN architectures. By facilitating the adoption of lightweight models in resource-constrained environments, our research provides a viable alternative for improved efficiency and scalability in modern computing paradigms.},
}
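The reservoir computing principle behind ESNs is compact enough to sketch: a fixed random recurrent state update, with only a linear readout trained. The single-reservoir toy below (the paper's multi-reservoir variant and DPU deployment are out of scope) uses arbitrary scaling constants; the spectral-radius rescaling enforces the echo-state property.

```python
# Minimal echo state network: fixed random reservoir, trained ridge readout.
import numpy as np

rng = np.random.default_rng(0)
N_IN, N_RES = 1, 200
W_in = rng.uniform(-0.5, 0.5, (N_RES, N_IN))
W = rng.normal(size=(N_RES, N_RES))
W *= 0.9 / np.max(np.abs(np.linalg.eigvals(W)))   # spectral radius < 1

def run_reservoir(u_seq, leak=0.3):
    x = np.zeros(N_RES)
    states = []
    for u in u_seq:
        # Leaky-integrator state update with fixed, untrained weights.
        x = (1 - leak) * x + leak * np.tanh(W_in @ np.atleast_1d(u) + W @ x)
        states.append(x.copy())
    return np.array(states)

# One-step-ahead prediction of a sine wave via a ridge-regression readout.
t = np.linspace(0, 20 * np.pi, 2000)
u, target = np.sin(t[:-1]), np.sin(t[1:])
S = run_reservoir(u)
ridge = 1e-6
W_out = np.linalg.solve(S.T @ S + ridge * np.eye(N_RES), S.T @ target)
print("train MSE:", ((S @ W_out - target) ** 2).mean())
```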
@article {pmid38876087,
year = {2024},
author = {Bayerlein, R and Swarnakar, V and Selfridge, A and Spencer, BA and Nardo, L and Badawi, RD},
title = {Cloud-based serverless computing enables accelerated monte carlo simulations for nuclear medicine imaging.},
journal = {Biomedical physics & engineering express},
volume = {},
number = {},
pages = {},
doi = {10.1088/2057-1976/ad5847},
pmid = {38876087},
issn = {2057-1976},
abstract = {This study investigates the potential of cloud-based serverless computing to accelerate Monte Carlo (MC) simulations for nuclear medicine imaging tasks. MC simulations can pose a high computational burden - even when executed on modern multi-core computing servers. Cloud computing allows simulation tasks to be highly parallelized and considerably accelerated. We investigate the computational performance of a cloud-based serverless MC simulation of radioactive decays for positron emission tomography imaging using Amazon Web Service (AWS) Lambda serverless computing platform for the first time in scientific literature. We provide a comparison of the computational performance of AWS to a modern on-premises multi-thread reconstruction server by measuring the execution times of the processes using between 10^5 and 2∙10^10 simulated decays. We deployed two popular MC simulation frameworks - SimSET and GATE - within the AWS computing environment. Containerized application images were used as a basis for an AWS Lambda function, and local (non-cloud) scripts were used to orchestrate the deployment of simulations. The task was broken down into smaller parallel runs, and launched on concurrently running AWS Lambda instances, and the results were postprocessed and downloaded via the Simple Storage Service. Our implementation of cloud-based MC simulations with SimSET outperforms local server-based computations by more than an order of magnitude. However, the GATE implementation creates more and larger output file sizes and reveals that the internet connection speed can become the primary bottleneck for data transfers. Simulating 10^9 decays using SimSET is possible within 5 min and accrues computation costs of about $10 on AWS, whereas GATE would have to run in batches for more than 100 min at considerably higher costs. Adopting cloud-based serverless computing architecture in medical imaging research facilities can considerably improve processing times and overall workflow efficiency, with future research exploring additional enhancements through optimized configurations and computational methods.},
}
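The orchestration pattern described above (local scripts splitting the task into parallel Lambda runs) looks roughly like the sketch below. The function name, payload schema, and chunk size are hypothetical, and running it requires deployed AWS resources and credentials; boto3's invoke call with InvocationType='Event' is the real asynchronous API.

```python
# Fan-out sketch: one asynchronous Lambda invocation per simulation chunk.
import json
import boto3

lambda_client = boto3.client("lambda", region_name="us-east-1")

TOTAL_DECAYS = 10**9
CHUNK = 10**7                                   # per-invocation workload (assumed)

for i, start in enumerate(range(0, TOTAL_DECAYS, CHUNK)):
    payload = {"job_id": i, "n_decays": CHUNK, "seed": 1234 + i}
    lambda_client.invoke(
        FunctionName="mc-simset-worker",        # hypothetical deployed function
        InvocationType="Event",                 # asynchronous fire-and-forget
        Payload=json.dumps(payload),
    )
print("launched", TOTAL_DECAYS // CHUNK, "concurrent simulation chunks")
```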
@article {pmid38875671,
year = {2024},
author = {Guo, Y and Ganti, S and Wu, Y},
title = {Enhancing Energy Efficiency in Telehealth Internet of Things Systems Through Fog and Cloud Computing Integration: Simulation Study.},
journal = {JMIR biomedical engineering},
volume = {9},
number = {},
pages = {e50175},
doi = {10.2196/50175},
pmid = {38875671},
issn = {2561-3278},
abstract = {BACKGROUND: The increasing adoption of telehealth Internet of Things (IoT) devices in health care informatics has led to concerns about energy use and data processing efficiency.
OBJECTIVE: This paper introduces an innovative model that integrates telehealth IoT devices with a fog and cloud computing-based platform, aiming to enhance energy efficiency in telehealth IoT systems.
METHODS: The proposed model incorporates adaptive energy-saving strategies, localized fog nodes, and a hybrid cloud infrastructure. Simulation analyses were conducted to assess the model's effectiveness in reducing energy consumption and enhancing data processing efficiency.
RESULTS: Simulation results demonstrated significant energy savings, with a 2% reduction in energy consumption achieved through adaptive energy-saving strategies. The sample size for the simulation was 10-40, providing statistical robustness to the findings.
CONCLUSIONS: The proposed model successfully addresses energy and data processing challenges in telehealth IoT scenarios. By integrating fog computing for local processing and a hybrid cloud infrastructure, substantial energy savings are achieved. Ongoing research will focus on refining the energy conservation model and exploring additional functional enhancements for broader applicability in health care and industrial contexts.},
}
@article {pmid38875568,
year = {2023},
author = {Chan, NB and Li, W and Aung, T and Bazuaye, E and Montero, RM},
title = {Machine Learning-Based Time in Patterns for Blood Glucose Fluctuation Pattern Recognition in Type 1 Diabetes Management: Development and Validation Study.},
journal = {JMIR AI},
volume = {2},
number = {},
pages = {e45450},
doi = {10.2196/45450},
pmid = {38875568},
issn = {2817-1705},
abstract = {BACKGROUND: Continuous glucose monitoring (CGM) for diabetes combines noninvasive glucose biosensors, continuous monitoring, cloud computing, and analytics to connect and simulate a hospital setting in a person's home. CGM systems inspired analytics methods to measure glycemic variability (GV), but existing GV analytics methods disregard glucose trends and patterns; hence, they fail to capture entire temporal patterns and do not provide granular insights about glucose fluctuations.
OBJECTIVE: This study aimed to propose a machine learning-based framework for blood glucose fluctuation pattern recognition, which enables a more comprehensive representation of GV profiles that could present detailed fluctuation information, be easily understood by clinicians, and provide insights about patient groups based on time in blood fluctuation patterns.
METHODS: Overall, 1.5 million measurements from 126 patients in the United Kingdom with type 1 diabetes mellitus (T1DM) were collected, and prevalent blood fluctuation patterns were extracted using dynamic time warping. The patterns were further validated in 225 patients in the United States with T1DM. Hierarchical clustering was then applied on time in patterns to form 4 clusters of patients. Patient groups were compared using statistical analysis.
RESULTS: In total, 6 patterns depicting distinctive glucose levels and trends were identified and validated, based on which 4 GV profiles of patients with T1DM were found. They were significantly different in terms of glycemic statuses such as diabetes duration (P=.04), glycated hemoglobin level (P<.001), and time in range (P<.001) and thus had different management needs.
CONCLUSIONS: The proposed method can analytically extract existing blood fluctuation patterns from CGM data. Thus, time in patterns can capture a rich view of patients' GV profile. Its conceptual resemblance with time in range, along with rich blood fluctuation details, makes it more scalable, accessible, and informative to clinicians.},
}
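Dynamic time warping, the pattern-extraction core named in the methods above, is a small dynamic program: align two sequences along the cheapest monotone warping path. A plain O(nm) implementation over toy glucose traces follows; real CGM windows and the paper's clustering are omitted.

```python
# Plain dynamic time warping distance between two 1-D sequences.
import numpy as np

def dtw_distance(a: np.ndarray, b: np.ndarray) -> float:
    n, m = len(a), len(b)
    D = np.full((n + 1, m + 1), np.inf)
    D[0, 0] = 0.0
    for i in range(1, n + 1):
        for j in range(1, m + 1):
            cost = abs(a[i - 1] - b[j - 1])
            # Cheapest way to extend a monotone alignment to (i, j).
            D[i, j] = cost + min(D[i - 1, j], D[i, j - 1], D[i - 1, j - 1])
    return float(D[n, m])

rising = np.array([5.0, 5.5, 6.2, 7.0, 8.1])   # hypothetical CGM window (mmol/L)
falling = rising[::-1]
print(dtw_distance(rising, rising.copy()), dtw_distance(rising, falling))
```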
@article {pmid38870489,
year = {2024},
author = {Danning, Z and Jia, Q and Yinni, M and Linjia, L},
title = {Establishment and Verification of a Skin Cancer Diagnosis Model Based on Image Convolutional Neural Network Analysis and Artificial Intelligence Algorithms.},
journal = {Alternative therapies in health and medicine},
volume = {},
number = {},
pages = {},
pmid = {38870489},
issn = {1078-6791},
abstract = {Skin cancer is a serious public health problem that causes many deaths each year. Early detection and aggressive, effective treatment of the primary lesion offer the best outcomes, improving patients' prognosis and reducing mortality. However, assessing skin tumors with the naked eye alone is highly subjective, and diagnoses can vary greatly even among professionally trained physicians. Clinically, dermoscopy is a commonly used method for early diagnosis, but manual examination is time-consuming, laborious, and highly dependent on the dermatologist's clinical experience. With the rapid development of information technology, the volume of data is growing at a geometric rate, and new technologies such as cloud computing, distributed computing, data mining, and metaheuristics are emerging. In this paper, we design and build a computer-aided diagnosis system for dermatoscopic images, applying metaheuristic algorithms to image enhancement and image segmentation to improve image quality, thereby speeding up diagnosis and enabling earlier detection and treatment.},
}
@article {pmid38869158,
year = {2024},
author = {Hu, Y and Schnaubelt, M and Chen, L and Zhang, B and Hoang, T and Lih, TM and Zhang, Z and Zhang, H},
title = {MS-PyCloud: A Cloud Computing-Based Pipeline for Proteomic and Glycoproteomic Data Analyses.},
journal = {Analytical chemistry},
volume = {},
number = {},
pages = {},
doi = {10.1021/acs.analchem.3c01497},
pmid = {38869158},
issn = {1520-6882},
abstract = {Rapid development and wide adoption of mass spectrometry-based glycoproteomic technologies have empowered scientists to study proteins and protein glycosylation in complex samples on a large scale. This progress has also created unprecedented challenges for individual laboratories to store, manage, and analyze proteomic and glycoproteomic data, both in the cost for proprietary software and high-performance computing and in the long processing time that discourages on-the-fly changes of data processing settings required in explorative and discovery analysis. We developed an open-source, cloud computing-based pipeline, MS-PyCloud, with graphical user interface (GUI), for proteomic and glycoproteomic data analysis. The major components of this pipeline include data file integrity validation, MS/MS database search for spectral assignments to peptide sequences, false discovery rate estimation, protein inference, quantitation of global protein levels, and specific glycan-modified glycopeptides as well as other modification-specific peptides such as phosphorylation, acetylation, and ubiquitination. To ensure the transparency and reproducibility of data analysis, MS-PyCloud includes open-source software tools with comprehensive testing and versioning for spectrum assignments. Leveraging public cloud computing infrastructure via Amazon Web Services (AWS), MS-PyCloud scales seamlessly based on analysis demand to achieve fast and efficient performance. Application of the pipeline to the analysis of large-scale LC-MS/MS data sets demonstrated the effectiveness and high performance of MS-PyCloud. The software can be downloaded at https://github.com/huizhanglab-jhu/ms-pycloud.},
}
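Of the pipeline stages listed above, false discovery rate estimation is compact enough to show. Below is the standard target-decoy FDR filter for peptide-spectrum matches, as a generic sketch; MS-PyCloud's actual implementation may differ.

```python
# Generic target-decoy FDR filter for peptide-spectrum matches (PSMs):
# walk PSMs from best to worst score and stop once the decoy-based FDR
# estimate exceeds the threshold.

def fdr_filter(psms, threshold=0.01):
    """psms: iterable of (score, is_decoy). Returns target scores kept."""
    accepted, targets, decoys = [], 0, 0
    for score, is_decoy in sorted(psms, key=lambda p: p[0], reverse=True):
        targets, decoys = targets + (not is_decoy), decoys + is_decoy
        # Decoy hits estimate how many accepted targets are false.
        if decoys / max(targets, 1) > threshold:
            break
        if not is_decoy:
            accepted.append(score)
    return accepted

print(fdr_filter([(9.1, False), (8.7, False), (8.2, True), (7.9, False)]))
```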
@article {pmid38868668,
year = {2024},
author = {Sochat, V and Culquicondor, A and Ojea, A and Milroy, D},
title = {The Flux Operator.},
journal = {F1000Research},
volume = {13},
number = {},
pages = {203},
pmid = {38868668},
issn = {2046-1402},
mesh = {*Cloud Computing ; Workload ; Workflow ; },
abstract = {Converged computing is an emerging area of computing that brings together the best of both worlds for high performance computing (HPC) and cloud-native communities. The economic influence of cloud computing and the need for workflow portability, flexibility, and manageability are driving this emergence. Navigating the uncharted territory and building an effective space for both HPC and cloud require collaborative technological development and research. In this work, we focus on developing components for the converged workload manager, the central component of batch workflows running in any environment. From the cloud we base our work on Kubernetes, the de facto standard batch workload orchestrator. From HPC the orchestrator counterpart is Flux Framework, a fully hierarchical resource management and graph-based scheduler with a modular architecture that supports sophisticated scheduling and job management. Bringing these managers together consists of implementing Flux inside of Kubernetes, enabling hierarchical resource management and scheduling that scales without burdening the Kubernetes scheduler. This paper introduces the Flux Operator - an on-demand HPC workload manager deployed in Kubernetes. Our work describes design decisions, mapping components between environments, and experimental features. We perform experiments that compare application performance when deployed by the Flux Operator and the MPI Operator and present the results. Finally, we review remaining challenges and describe our vision of the future for improved technological innovation and collaboration through converged computing.},
}
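As a rough client-side illustration of "an on-demand HPC workload manager deployed in Kubernetes", the sketch below submits a MiniCluster custom resource with the official Kubernetes Python client. The CRD group/version, field names, and image are assumptions made for illustration; consult the Flux Operator documentation for the real schema.

```python
# Hypothetical sketch: create a Flux MiniCluster via the Kubernetes API.
# The apiVersion, spec fields, and image below are assumed, not verified
# against the Flux Operator's actual CRD schema.
from kubernetes import client, config

config.load_kube_config()   # or load_incluster_config() inside a pod

minicluster = {
    "apiVersion": "flux-framework.org/v1alpha1",   # assumed CRD version
    "kind": "MiniCluster",
    "metadata": {"name": "demo", "namespace": "default"},
    "spec": {
        "size": 4,                                 # assumed: broker pod count
        "containers": [{"image": "rockylinux:9",   # assumed app image
                        "command": "hostname"}],
    },
}

client.CustomObjectsApi().create_namespaced_custom_object(
    group="flux-framework.org", version="v1alpha1",
    namespace="default", plural="miniclusters", body=minicluster)
```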
@article {pmid38862616,
year = {2024},
author = {Salcedo, A and Tarabichi, M and Buchanan, A and Espiritu, SMG and Zhang, H and Zhu, K and Ou Yang, TH and Leshchiner, I and Anastassiou, D and Guan, Y and Jang, GH and Mootor, MFE and Haase, K and Deshwar, AG and Zou, W and Umar, I and Dentro, S and Wintersinger, JA and Chiotti, K and Demeulemeester, J and Jolly, C and Sycza, L and Ko, M and Wedge, DC and Morris, QD and Ellrott, K and Van Loo, P and Boutros, PC},
title = {Crowd-sourced benchmarking of single-sample tumor subclonal reconstruction.},
journal = {Nature biotechnology},
volume = {},
number = {},
pages = {},
pmid = {38862616},
issn = {1546-1696},
abstract = {Subclonal reconstruction algorithms use bulk DNA sequencing data to quantify parameters of tumor evolution, allowing an assessment of how cancers initiate, progress and respond to selective pressures. We launched the ICGC-TCGA (International Cancer Genome Consortium-The Cancer Genome Atlas) DREAM Somatic Mutation Calling Tumor Heterogeneity and Evolution Challenge to benchmark existing subclonal reconstruction algorithms. This 7-year community effort used cloud computing to benchmark 31 subclonal reconstruction algorithms on 51 simulated tumors. Algorithms were scored on seven independent tasks, leading to 12,061 total runs. Algorithm choice influenced performance substantially more than tumor features but purity-adjusted read depth, copy-number state and read mappability were associated with the performance of most algorithms on most tasks. No single algorithm was a top performer for all seven tasks and existing ensemble strategies were unable to outperform the best individual methods, highlighting a key research need. All containerized methods, evaluation code and datasets are available to support further assessment of the determinants of subclonal reconstruction accuracy and development of improved methods to understand tumor evolution.},
}
@article {pmid38862433,
year = {2024},
author = {Ko, G and Lee, JH and Sim, YM and Song, W and Yoon, BH and Byeon, I and Lee, BH and Kim, SO and Choi, J and Jang, I and Kim, H and Yang, JO and Jang, K and Kim, S and Kim, JH and Jeon, J and Jung, J and Hwang, S and Park, JH and Kim, PG and Kim, SY and Lee, B},
title = {KoNA: Korean Nucleotide Archive as A New Data Repository for Nucleotide Sequence Data.},
journal = {Genomics, proteomics & bioinformatics},
volume = {22},
number = {1},
pages = {},
doi = {10.1093/gpbjnl/qzae017},
pmid = {38862433},
issn = {2210-3244},
mesh = {Republic of Korea ; *Databases, Nucleic Acid ; Humans ; High-Throughput Nucleotide Sequencing/methods ; },
abstract = {During the last decade, the generation and accumulation of petabase-scale high-throughput sequencing data have resulted in great challenges, including access to human data, as well as transfer, storage, and sharing of enormous amounts of data. To promote data-driven biological research, the Korean government announced that all biological data generated from government-funded research projects should be deposited at the Korea BioData Station (K-BDS), which consists of multiple databases for individual data types. Here, we introduce the Korean Nucleotide Archive (KoNA), a repository of nucleotide sequence data. As of July 2022, the Korean Read Archive in KoNA has collected over 477 TB of raw next-generation sequencing data from national genome projects. To ensure data quality and prepare for international alignment, a standard operating procedure was adopted, which is similar to that of the International Nucleotide Sequence Database Collaboration. The standard operating procedure includes quality control processes for submitted data and metadata using an automated pipeline, followed by manual examination. To ensure fast and stable data transfer, a high-speed transmission system called GBox is used in KoNA. Furthermore, the data uploaded to or downloaded from KoNA through GBox can be readily processed using a cloud computing service called Bio-Express. This seamless coupling of KoNA, GBox, and Bio-Express enhances the data experience, including submission, access, and analysis of raw nucleotide sequences. KoNA not only satisfies the unmet needs for a national sequence repository in Korea but also provides datasets to researchers globally and contributes to advances in genomics. The KoNA is available at https://www.kobic.re.kr/kona/.},
}
@article {pmid38860521,
year = {2024},
author = {McMurry, AJ and Gottlieb, DI and Miller, TA and Jones, JR and Atreja, A and Crago, J and Desai, PM and Dixon, BE and Garber, M and Ignatov, V and Kirchner, LA and Payne, PRO and Saldanha, AJ and Shankar, PRV and Solad, YV and Sprouse, EA and Terry, M and Wilcox, AB and Mandl, KD},
title = {Cumulus: a federated electronic health record-based learning system powered by Fast Healthcare Interoperability Resources and artificial intelligence.},
journal = {Journal of the American Medical Informatics Association : JAMIA},
volume = {},
number = {},
pages = {},
doi = {10.1093/jamia/ocae130},
pmid = {38860521},
issn = {1527-974X},
support = {90AX0031/01-00/OC/ONCHIT HHS/United States ; /CC/CDC HHS/United States ; U01TR002623/NH/NIH HHS/United States ; NU38OT000286/CC/CDC HHS/United States ; U18DP006500/CC/CDC HHS/United States ; NU58IP000004/CC/CDC HHS/United States ; },
abstract = {OBJECTIVE: To address challenges in large-scale electronic health record (EHR) data exchange, we sought to develop, deploy, and test an open source, cloud-hosted app "listener" that accesses standardized data across the SMART/HL7 Bulk FHIR Access application programming interface (API).
METHODS: We advance a model for scalable, federated, data sharing and learning. Cumulus software is designed to address key technology and policy desiderata including local utility, control, and administrative simplicity as well as privacy preservation during robust data sharing, and artificial intelligence (AI) for processing unstructured text.
RESULTS: Cumulus relies on containerized, cloud-hosted software, installed within a healthcare organization's security envelope. Cumulus accesses EHR data via the Bulk FHIR interface and streamlines automated processing and sharing. The modular design enables use of the latest AI and natural language processing tools and supports provider autonomy and administrative simplicity. In an initial test, Cumulus was deployed across 5 healthcare systems, each partnered with public health. Cumulus outputs patient counts, which were aggregated into a table stratified by variables of interest to enable population health studies. All code is available open source. A policy stipulating that only aggregate data leave the institution greatly facilitated data sharing agreements.
DISCUSSION AND CONCLUSION: Cumulus addresses barriers to data sharing based on (1) federally required support for standard APIs, (2) increasing use of cloud computing, and (3) advances in AI. There is potential for scalability to support learning across myriad network configurations and use cases.},
}
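The design rule that only aggregate data leave the institution is easy to make concrete: count patients by a stratifying variable from a Bulk FHIR NDJSON export and share only the resulting table. The file name and field choice below are illustrative, not Cumulus's actual schema.

```python
# Aggregate-only sharing, minimally: count Patient resources by gender
# from a hypothetical Bulk FHIR NDJSON export and emit just the table.
import json
from collections import Counter

counts = Counter()
with open("Patient.ndjson") as f:            # hypothetical export file
    for line in f:
        patient = json.loads(line)
        counts[patient.get("gender", "unknown")] += 1

# Only this aggregate table would leave the institution, never row-level data.
for gender, n in sorted(counts.items()):
    print(f"{gender}\t{n}")
```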
@article {pmid38859877,
year = {2024},
author = {Yang, T and Du, Y and Sun, M and Meng, J and Li, Y},
title = {Risk Management for Whole-Process Safe Disposal of Medical Waste: Progress and Challenges.},
journal = {Risk management and healthcare policy},
volume = {17},
number = {},
pages = {1503-1522},
pmid = {38859877},
issn = {1179-1594},
abstract = {Over the past decade, the global outbreaks of SARS, influenza A (H1N1), COVID-19, and other major infectious diseases have exposed the insufficient capacity for emergency disposal of medical waste in numerous countries and regions. Particularly during epidemics of major infectious diseases, medical waste exhibits new characteristics such as accelerated growth rate, heightened risk level, and more stringent disposal requirements. Consequently, there is an urgent need for advanced theoretical approaches that can perceive, predict, evaluate, and control risks associated with safe disposal throughout the entire process in a timely, accurate, efficient, and comprehensive manner. This article provides a systematic review of relevant research on collection, storage, transportation, and disposal of medical waste throughout its entirety to illustrate the current state of safe disposal practices. Building upon this foundation and leveraging emerging information technologies like Internet of Things (IoT), cloud computing, big data analytics, and artificial intelligence (AI), we deeply contemplate future research directions with an aim to minimize risks across all stages of medical waste disposal while offering valuable references and decision support to further advance safe disposal practices.},
}
@article {pmid38855254,
year = {2024},
author = {Ullah, S and Ou, J and Xie, Y and Tian, W},
title = {Facial expression recognition (FER) survey: a vision, architectural elements, and future directions.},
journal = {PeerJ. Computer science},
volume = {10},
number = {},
pages = {e2024},
pmid = {38855254},
issn = {2376-5992},
abstract = {With cutting-edge advancements in computer vision, facial expression recognition (FER) is an active research area due to its broad practical applications, and it has been utilized in various fields including education, advertising and marketing, entertainment and gaming, health, and transportation. FER-based systems are rapidly evolving in response to new challenges, and significant research has been conducted on both basic and compound facial expressions of emotions; however, measuring emotions remains challenging. Motivated by these recent advances and challenges, this article uses the Preferred Reporting Items for Systematic Reviews and Meta-Analyses (PRISMA) method to discuss the basics of FER and its architectural elements, FER applications and use cases, leading global FER companies, and the interconnections between FER, the Internet of Things (IoT), and cloud computing, and to summarize in depth the open challenges facing FER technologies and future research directions. The article closes with conclusions and future thoughts. By addressing the identified challenges and pursuing the outlined directions, researchers can advance the discipline of facial expression recognition.},
}
@article {pmid38854703,
year = {2023},
author = {Aman, SS and N'guessan, BG and Agbo, DDA and Kone, T},
title = {Search engine Performance optimization: methods and techniques.},
journal = {F1000Research},
volume = {12},
number = {},
pages = {1317},
pmid = {38854703},
issn = {2046-1402},
mesh = {*Search Engine/methods ; Humans ; },
abstract = {BACKGROUND: With the rapid advancement of information technology, search engine optimisation (SEO) has become crucial for enhancing the visibility and relevance of online content. In this context, the use of cloud platforms like Microsoft Azure is being explored to bolster SEO capabilities.
METHODS: This scientific article offers an in-depth study of search engine optimisation. It explores the different methods and techniques used to improve the performance and efficiency of a search engine, focusing on key aspects such as result relevance, search speed and user experience. The article also presents case studies and concrete examples to illustrate the practical application of optimisation techniques.
RESULTS: The results demonstrate the importance of optimisation in delivering high quality search results and meeting the increasing demands of users.
CONCLUSIONS: The article addresses the enhancement of search engines through the Microsoft Azure infrastructure and its associated components. It highlights methods such as indexing, semantic analysis, parallel searches, and caching to strengthen the relevance of results, speed up searches, and optimise the user experience. Following the application of these methods, a marked improvement was observed in these areas, thereby showcasing the capability of Microsoft Azure in enhancing search engines. The study sheds light on the implementation and analysis of these Azure-focused techniques, introduces a methodology for assessing their efficacy, and details the specific benefits of each method. Looking forward, the article suggests integrating artificial intelligence to elevate the relevance of results, venturing into other cloud infrastructures to boost performance, and evaluating these methods in specific scenarios, such as multimedia information search. In summary, with Microsoft Azure, the enhancement of search engines appears promising, with increased relevance and a heightened user experience in a rapidly evolving sector.},
}
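Two of the techniques named in the conclusions, indexing and caching, can be sketched generically; the toy below is plain Python and is not tied to Azure or any particular search service.

```python
# Toy inverted index plus query caching, generic illustration only.
from collections import defaultdict
from functools import lru_cache

DOCS = {1: "cloud search engine", 2: "search optimisation methods",
        3: "cloud caching layers"}

index = defaultdict(set)                  # term -> ids of docs containing it
for doc_id, text in DOCS.items():
    for term in text.split():
        index[term].add(doc_id)

@lru_cache(maxsize=1024)                  # caching: repeat queries skip lookup
def search(query):
    postings = [index.get(term, set()) for term in query.split()]
    return sorted(set.intersection(*postings)) if postings else []

print(search("cloud search"))             # -> [1]
print(search("cloud search"))             # answered from the cache
```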
@article {pmid38844552,
year = {2024},
author = {Hie, BL and Kim, S and Rando, TA and Bryson, B and Berger, B},
title = {Scanorama: integrating large and diverse single-cell transcriptomic datasets.},
journal = {Nature protocols},
volume = {},
number = {},
pages = {},
pmid = {38844552},
issn = {1750-2799},
abstract = {Merging diverse single-cell RNA sequencing (scRNA-seq) data from numerous experiments, laboratories and technologies can uncover important biological insights. Nonetheless, integrating scRNA-seq data encounters special challenges when the datasets are composed of diverse cell type compositions. Scanorama offers a robust solution for improving the quality and interpretation of heterogeneous scRNA-seq data by effectively merging information from diverse sources. Scanorama is designed to address the technical variation introduced by differences in sample preparation, sequencing depth and experimental batches that can confound the analysis of multiple scRNA-seq datasets. Here we provide a detailed protocol for using Scanorama within a Scanpy-based single-cell analysis workflow coupled with Google Colaboratory, a cloud-based free Jupyter notebook environment service. The protocol involves Scanorama integration, a process that typically spans 0.5-3 h. Scanorama integration requires a basic understanding of cellular biology, transcriptomic technologies and bioinformatics. Our protocol and new Scanorama-Colaboratory resource should make scRNA-seq integration more widely accessible to researchers.},
}
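A minimal sketch of the protocol's central step, Scanorama integration inside a Scanpy workflow, follows. Function and key names follow the Scanorama and Scanpy documentation as commonly used, but the batch files are placeholders and the protocol's recommended preprocessing may differ in detail.

```python
# Minimal Scanorama-in-Scanpy sketch; batch files are hypothetical.
import anndata as ad
import scanpy as sc
import scanorama

adatas = [sc.read_h5ad(p) for p in ("batch1.h5ad", "batch2.h5ad")]
for adata in adatas:
    sc.pp.normalize_total(adata, target_sum=1e4)
    sc.pp.log1p(adata)

# Integrates in place; embeddings are stored in .obsm['X_scanorama'].
scanorama.integrate_scanpy(adatas, dimred=50)

merged = ad.concat(adatas)                      # obsm keys shared by all are kept
sc.pp.neighbors(merged, use_rep="X_scanorama")  # neighbors in integrated space
sc.tl.umap(merged)
```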
@article {pmid38839812,
year = {2024},
author = {Zheng, P and Yang, J and Lou, J and Wang, B},
title = {Design and application of virtual simulation teaching platform for intelligent manufacturing.},
journal = {Scientific reports},
volume = {14},
number = {1},
pages = {12895},
pmid = {38839812},
issn = {2045-2322},
support = {ZJXF2022126//the Special Project of Scientific Research and Development Center of Higher Education Institutions, Ministry of Education of the People's Republic of China/ ; },
abstract = {Practical teaching in intelligent manufacturing majors faces shortages of equipment and teaching staff, along with the "three highs and three difficulties": high equipment investment, high material loss, and high teaching risk, plus internships that are difficult to implement, production that is difficult to observe, and results that are difficult to reproduce. Taking the electrical automation technology, mechatronics technology, and industrial robotics technology majors as examples, we design and establish a virtual simulation teaching platform for intelligent manufacturing majors that combines a cloud computing platform, edge computing technology, and terminal equipment. The platform includes six virtual simulation modules: electrician electronics and PLC control, a virtual-real combination of a typical intelligent manufacturing production line, a dual-axis collaborative robotics workstation, digital twin simulation, virtual disassembly of industrial robots, and virtual simulation of a magnetic yoke axis flexible production line. The platform covers virtual simulation teaching content for basic principle experiments, advanced application experiments, and advanced integration experiments in intelligent manufacturing majors. To test the platform's effectiveness for practical engineering teaching, we organized a teaching practice activity involving 246 students from two parallel classes of three different majors. Through a one-year teaching application, we analyzed grades in the 7 core courses of the three majors over one academic year, the proportion of students participating in competitions and innovation activities, the number of awards and professional qualification certificates, and subjective questionnaires from the testers. The analysis shows that learners using the proposed virtual simulation teaching platform outperformed learners under traditional teaching in academic performance, participation in competitions and innovation activities, and awards and certifications, by more than 13%, 37%, 36%, 27%, and 22%, respectively. The platform therefore shows clear advantages in addressing the "three highs and three difficulties" of practical engineering teaching, and questionnaire feedback indicates that it can effectively alleviate the shortage of practical training equipment, stimulate interest in learning, and help broaden and improve learners' knowledge systems.},
}
@article {pmid38838394,
year = {2024},
author = {Lai, Q and Guo, S},
title = {Heterogeneous coexisting attractors, large-scale amplitude control and finite-time synchronization of central cyclic memristive neural networks.},
journal = {Neural networks : the official journal of the International Neural Network Society},
volume = {178},
number = {},
pages = {106412},
doi = {10.1016/j.neunet.2024.106412},
pmid = {38838394},
issn = {1879-2782},
abstract = {Memristors are of great theoretical and practical significance for research on the chaotic dynamics of brain-like neural networks, owing to excellent physical properties such as synapse-like memory and nonlinearity; these properties are especially relevant to the advancement of large AI models, cloud computing, and intelligent systems. In this paper, we introduce memristors as self-connecting synapses into a four-dimensional Hopfield neural network, constructing a central cyclic memristive neural network (CCMNN) and achieving its effective control. The model adopts a central loop topology and exhibits a variety of complex dynamic behaviors such as chaos, bifurcation, and homogeneous and heterogeneous coexisting attractors. The complex dynamics of the CCMNN are investigated numerically in depth through equilibrium-point stability analysis as well as phase trajectory maps, bifurcation maps, time-domain maps, and Lyapunov exponents (LEs). It is found that, as the internal parameters of the memristor vary, asymmetric heterogeneous attractor coexistence appears under different initial conditions, including multi-stable coexistence of periodic-periodic, periodic-stable point, periodic-chaotic, and stable point-chaotic behaviors. In addition, by adjusting the structural parameters, a wide range of amplitude control can be realized without changing the chaotic state of the system. Finally, based on the CCMNN model, an adaptive synchronization controller is designed to achieve finite-time synchronization control, and its application prospects in simple secure communication are discussed. A microcontroller-based hardware circuit and NIST tests verify the correctness of the numerical results and theoretical analysis.},
}
@article {pmid38837943,
year = {2024},
author = {Oliva, A and Kaphle, A and Reguant, R and Sng, LMF and Twine, NA and Malakar, Y and Wickramarachchi, A and Keller, M and Ranbaduge, T and Chan, EKF and Breen, J and Buckberry, S and Guennewig, B and Haas, M and Brown, A and Cowley, MJ and Thorne, N and Jain, Y and Bauer, DC},
title = {Future-proofing genomic data and consent management: a comprehensive review of technology innovations.},
journal = {GigaScience},
volume = {13},
number = {},
pages = {},
doi = {10.1093/gigascience/giae021},
pmid = {38837943},
issn = {2047-217X},
mesh = {Humans ; *Genomics/methods/ethics ; Computer Security ; Cloud Computing ; Informed Consent ; },
abstract = {Genomic information is increasingly used to inform medical treatments and manage future disease risks. However, any personal and societal gains must be carefully balanced against the risk to individuals contributing their genomic data. Expanding our understanding of actionable genomic insights requires researchers to access large global datasets to capture the complexity of genomic contribution to diseases. Similarly, clinicians need efficient access to a patient's genome as well as population-representative historical records for evidence-based decisions. Both researchers and clinicians hence rely on participants to consent to the use of their genomic data, which in turn requires trust in the professional and ethical handling of this information. Here, we review existing and emerging solutions for secure and effective genomic information management, including storage, encryption, consent, and authorization that are needed to build participant trust. We discuss recent innovations in cloud computing, quantum-computing-proof encryption, and self-sovereign identity. These innovations can augment key developments from within the genomics community, notably GA4GH Passports and the Crypt4GH file container standard. We also explore how decentralized storage as well as the digital consenting process can offer culturally acceptable processes to encourage data contributions from ethnic minorities. We conclude that the individual and their right for self-determination needs to be put at the center of any genomics framework, because only on an individual level can the received benefits be accurately balanced against the risk of exposing private information.},
}
@article {pmid38834903,
year = {2024},
author = {Peter, R and Moreira, S and Tagliabue, E and Hillenbrand, M and Nunes, RG and Mathis-Ullrich, F},
title = {Stereo reconstruction from microscopic images for computer-assisted ophthalmic surgery.},
journal = {International journal of computer assisted radiology and surgery},
volume = {},
number = {},
pages = {},
pmid = {38834903},
issn = {1861-6429},
abstract = {PURPOSE: This work presents a novel platform for stereo reconstruction in anterior segment ophthalmic surgery to enable enhanced scene understanding, especially depth perception, for advanced computer-assisted eye surgery by effectively addressing the lack of texture and the corneal distortion artifacts in the surgical scene.
METHODS: The proposed platform for stereo reconstruction uses a two-step approach: generating a sparse 3D point cloud from microscopic images, deriving a dense 3D representation by fitting surfaces onto the point cloud, and considering geometrical priors of the eye anatomy. We incorporate a pre-processing step to rectify distortion artifacts induced by the cornea's high refractive power, achieved by aligning a 3D phenotypical cornea geometry model to the images and computing a distortion map using ray tracing.
RESULTS: The accuracy of 3D reconstruction is evaluated on stereo microscopic images of ex vivo porcine eyes, rigid phantom eyes, and synthetic photo-realistic images. The results demonstrate the potential of the proposed platform to enhance scene understanding via an accurate 3D representation of the eye and enable the estimation of instrument-to-layer distances in porcine eyes with a mean average error of 190 μm, comparable to the scale of surgeons' hand tremor.
CONCLUSION: This work marks a significant advancement in stereo reconstruction for ophthalmic surgery by addressing corneal distortions, a previously often overlooked aspect in such surgical scenarios. This could improve surgical outcomes by allowing for intra-operative computer assistance, e.g., in the form of virtual distance sensors.},
}
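The notion of "fitting surfaces onto the point cloud, considering geometrical priors of the eye anatomy" can be illustrated with a much simpler prior than the paper's: a least-squares sphere fit to 3D points. This is a stand-in for illustration, not the authors' reconstruction method.

```python
# Least-squares sphere fit: a toy example of fitting a geometric prior
# to a 3D point cloud.
import numpy as np

def fit_sphere(points):
    """points: (N, 3) array. Returns (center, radius) of the best-fit sphere."""
    # ||p||^2 = 2 p.c + (r^2 - ||c||^2) is linear in the unknowns
    # (c, r^2 - ||c||^2), so it can be solved with lstsq.
    A = np.hstack([2 * points, np.ones((len(points), 1))])
    b = (points ** 2).sum(axis=1)
    x, *_ = np.linalg.lstsq(A, b, rcond=None)
    center = x[:3]
    radius = float(np.sqrt(x[3] + center @ center))
    return center, radius

rng = np.random.default_rng(1)
pts = rng.normal(size=(500, 3))
pts = 12.0 * pts / np.linalg.norm(pts, axis=1, keepdims=True) + [1.0, 2.0, 3.0]
print(fit_sphere(pts))   # ~ center (1, 2, 3), radius 12
```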
@article {pmid38833448,
year = {2024},
author = {H S, M and Gupta, P},
title = {Federated learning inspired Antlion based orchestration for Edge computing environment.},
journal = {PloS one},
volume = {19},
number = {6},
pages = {e0304067},
doi = {10.1371/journal.pone.0304067},
pmid = {38833448},
issn = {1932-6203},
mesh = {*Neural Networks, Computer ; *Algorithms ; Fuzzy Logic ; Internet of Things ; Cloud Computing ; },
abstract = {Edge computing is a scalable, modern, distributed computing architecture that brings computational workloads closer to smart gateways or Edge devices. This computing model delivers IoT (Internet of Things) computations and processes IoT requests at the Edge of the network. In a diverse and independent environment like Fog-Edge, resource management is a critical issue, so scheduling is vital for allocating resources to tasks efficiently. This manuscript proposes an Artificial Neural Network (ANN)-inspired Antlion algorithm for task orchestration in Edge environments, aiming to enhance resource utilization and reduce energy consumption. Comparative analysis shows that the proposed algorithm balances load on the Edge layer, which lowers the load on the cloud and improves power consumption, CPU utilization, network utilization, and average request waiting time. The model is tested on a healthcare application in an Edge computing environment and evaluated on power consumption, CPU utilization, network utilization, and average request waiting time, where it outperforms existing fuzzy logic and round-robin algorithms. The proposed technique achieves an average cloud energy consumption improvement of 95.94%, an average Edge energy consumption improvement of 16.79%, improvements of 19.85% in average CPU utilization in the Edge environment, 10.64% in average CPU utilization in the cloud environment, and 23.33% in average network utilization, while average waiting time decreases by 96% compared to fuzzy logic and 1.4% compared to round-robin.},
}
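As a greatly simplified stand-in for the paper's ANN-inspired Antlion orchestration, the sketch below uses plain random-restart search (not the actual metaheuristic) to assign tasks to edge nodes while minimizing a toy energy-plus-imbalance objective. Every number in it is made up.

```python
# Random-restart task-to-node assignment with a toy cost model; a simplified
# stand-in for metaheuristic orchestration, not the paper's algorithm.
import random

TASKS = [3, 1, 4, 1, 5, 9, 2, 6]    # task CPU demands (arbitrary units)
NODES = 3                           # number of edge nodes

def cost(assignment):
    loads = [0.0] * NODES
    for demand, node in zip(TASKS, assignment):
        loads[node] += demand
    energy = sum(0.5 * l + 0.01 * l * l for l in loads)   # toy energy model
    imbalance = max(loads) - min(loads)                   # balance penalty
    return energy + imbalance

random.seed(0)
best = min((tuple(random.randrange(NODES) for _ in TASKS)
            for _ in range(5000)), key=cost)
print(best, round(cost(best), 2))
```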
@article {pmid38832828,
year = {2024},
author = {Herre, C and Ho, A and Eisenbraun, B and Vincent, J and Nicholson, T and Boutsioukis, G and Meyer, PA and Ottaviano, M and Krause, KL and Key, J and Sliz, P},
title = {Introduction of the Capsules environment to support further growth of the SBGrid structural biology software collection.},
journal = {Acta crystallographica. Section D, Structural biology},
volume = {},
number = {},
pages = {},
doi = {10.1107/S2059798324004881},
pmid = {38832828},
issn = {2059-7983},
support = {1R25GM151273//National Institutes of Health, National Institute of General Medical Sciences/ ; 21-UOO-003-CSG//Royal Society Te Apārangi/ ; },
abstract = {The expansive scientific software ecosystem, characterized by millions of titles across various platforms and formats, poses significant challenges in maintaining reproducibility and provenance in scientific research. The diversity of independently developed applications, evolving versions and heterogeneous components highlights the need for rigorous methodologies to navigate these complexities. In response to these challenges, the SBGrid team builds, installs and configures over 530 specialized software applications for use in the on-premises and cloud-based computing environments of SBGrid Consortium members. To address the intricacies of supporting this diverse application collection, the team has developed the Capsule Software Execution Environment, generally referred to as Capsules. Capsules rely on a collection of programmatically generated bash scripts that work together to isolate the runtime environment of one application from all other applications, thereby providing a transparent cross-platform solution without requiring specialized tools or elevated account privileges for researchers. Capsules facilitate modular, secure software distribution while maintaining a centralized, conflict-free environment. The SBGrid platform, which combines Capsules with the SBGrid collection of structural biology applications, aligns with FAIR goals by enhancing the findability, accessibility, interoperability and reusability of scientific software, ensuring seamless functionality across diverse computing environments. Its adaptability enables application beyond structural biology into other scientific fields.},
}
@article {pmid38829364,
year = {2024},
author = {Rathinam, R and Sivakumar, P and Sigamani, S and Kothandaraman, I},
title = {SJFO: Sail Jelly Fish Optimization enabled VM migration with DRNN-based prediction for load balancing in cloud computing.},
journal = {Network (Bristol, England)},
volume = {},
number = {},
pages = {1-26},
doi = {10.1080/0954898X.2024.2359609},
pmid = {38829364},
issn = {1361-6536},
abstract = {Load balancing distributes a dynamic workload evenly across nodes such as hosts or Virtual Machines (VMs); in the cloud it is also known as Load Balancing as a Service (LBaaS). In this research work, load is balanced through VM migration driven by the proposed Sail Jelly Fish Optimization (SJFO), formed by combining the Sail Fish Optimizer (SFO) and the Jellyfish Search (JS) optimizer. In the Cloud model, there are many Physical Machines (PMs), each hosting many VMs. Each VM runs many tasks, and these tasks depend on parameters such as Central Processing Unit (CPU), memory, Million Instructions per Second (MIPS), capacity, the total number of processing entities, and bandwidth. Load is predicted by a Deep Recurrent Neural Network (DRNN) and compared with a threshold value, and VM migration is performed based on the predicted values. The performance of SJFO-VM is analyzed using the metrics of capacity, load, and resource utilization. The proposed method performs well, with a higher capacity of 0.598, a lower load of 0.089, and a lower resource utilization of 0.257.},
}
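The migration trigger described above, predicted load checked against a threshold, is simple to sketch. The moving-average "predictor" below is a placeholder for the paper's DRNN, and the threshold and load histories are illustrative.

```python
# Threshold-triggered VM migration planning on predicted load. The moving
# average stands in for the DRNN predictor used in the paper.

def predict_load(history, window=3):
    """Placeholder predictor: moving average of recent utilization samples."""
    recent = history[-window:]
    return sum(recent) / len(recent)

def plan_migrations(hosts, threshold=0.8):
    """hosts: {pm_name: [per-VM load histories]} -> [(pm_name, vm_index)]."""
    plans = []
    for host, vm_histories in hosts.items():
        predicted = [predict_load(h) for h in vm_histories]
        if sum(predicted) > threshold:
            # Migrate the VM with the highest predicted load first.
            victim = max(range(len(predicted)), key=predicted.__getitem__)
            plans.append((host, victim))
    return plans

hosts = {"pm1": [[0.5, 0.6, 0.7], [0.2, 0.3, 0.3]], "pm2": [[0.1, 0.1, 0.2]]}
print(plan_migrations(hosts))   # pm1 predicted ~0.87 > 0.8 -> migrate VM 0
```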
@article {pmid38828387,
year = {2024},
author = {McCormick, I and Butcher, R and Ramke, J and Bolster, NM and Limburg, H and Chroston, H and Bastawrous, A and Burton, MJ and Mactaggart, I},
title = {The Rapid Assessment of Avoidable Blindness survey: Review of the methodology and protocol for the seventh version (RAAB7).},
journal = {Wellcome open research},
volume = {9},
number = {},
pages = {133},
pmid = {38828387},
issn = {2398-502X},
abstract = {The Rapid Assessment of Avoidable Blindness (RAAB) is a population-based cross-sectional survey methodology used to collect data on the prevalence of vision impairment and its causes and eye care service indicators among the population 50 years and older. RAAB has been used for over 20 years with modifications to the protocol over time reflected in changing version numbers; this paper describes the latest version of the methodology-RAAB7. RAAB7 is a collaborative project between the International Centre for Eye Health and Peek Vision with guidance from a steering group of global eye health stakeholders. We have fully digitised RAAB, allowing for fast, accurate and secure data collection. A bespoke Android mobile application automatically synchronises data to a secure Amazon Web Services virtual private cloud when devices are online so users can monitor data collection in real-time. Vision is screened using Peek Vision's digital visual acuity test for mobile devices and uncorrected, corrected and pinhole visual acuity are collected. An optional module on Disability is available. We have rebuilt the RAAB data repository as the end point of RAAB7's digital data workflow, including a front-end website to access the past 20 years of RAAB surveys worldwide. This website (https://www.raab.world) hosts open access RAAB data to support the advocacy and research efforts of the global eye health community. Active research sub-projects are finalising three new components in 2024-2025: 1) Near vision screening to address data gaps on near vision impairment and effective refractive error coverage; 2) an optional Health Economics module to assess the affordability of eye care services and productivity losses associated with vision impairment; 3) an optional Health Systems data collection module to support RAAB's primary aim to inform eye health service planning by supporting users to integrate eye care facility data with population data.},
}
@article {pmid38828338,
year = {2024},
author = {Zhu, X and Peng, X},
title = {Strategic assessment model of smart stadiums based on genetic algorithms and literature visualization analysis: A case study from Chengdu, China.},
journal = {Heliyon},
volume = {10},
number = {11},
pages = {e31759},
pmid = {38828338},
issn = {2405-8440},
abstract = {This paper leverages CiteSpace and VOSviewer software to perform a comprehensive bibliometric analysis of a corpus of 384 references related to smart sports venues spanning 1998 to 2022. The analysis encompasses author networks, institutional networks, temporal mapping, keyword clustering, and co-citation networks. The paper also constructs a smart stadiums strategic assessment model (SSSAM), based on genetic algorithms (GA), to counter the confusion and aimlessness in current strategic planning. Our findings indicate exponential year-over-year growth in publications on smart sports venues. Arizona State University emerges as the institution with the most collaborative publications, Energy and Buildings is the journal with the most documents, and Wang X stands out as the scholar with the most substantial contribution to the field. Scrutiny of betweenness centrality indicators reveals a paradigm shift in research hotspots: from intelligent software to the Internet of Things (IoT), intelligent services, and artificial intelligence (AI). The SSSAM, based on artificial neural networks (ANN) and GA, reached similar conclusions through a case study of the International University Sports Federation (FISU); Building Information Modeling (BIM), cloud computing, and the artificial intelligence Internet of Things (AIoT) are expected to develop further in the future. Three key themes developed over time. Finally, a comprehensive knowledge system with common references and future hotspots is proposed.},
}
@article {pmid38827487,
year = {2024},
author = {Nisanova, A and Yavary, A and Deaner, J and Ali, FS and Gogte, P and Kaplan, R and Chen, KC and Nudleman, E and Grewal, D and Gupta, M and Wolfe, J and Klufas, M and Yiu, G and Soltani, I and Emami-Naeini, P},
title = {Performance of Automated Machine Learning in Predicting Outcomes of Pneumatic Retinopexy.},
journal = {Ophthalmology science},
volume = {4},
number = {5},
pages = {100470},
pmid = {38827487},
issn = {2666-9145},
abstract = {PURPOSE: Automated machine learning (AutoML) has emerged as a novel tool for medical professionals lacking coding experience, enabling them to develop predictive models for treatment outcomes. This study evaluated the performance of AutoML tools in developing models predicting the success of pneumatic retinopexy (PR) in treatment of rhegmatogenous retinal detachment (RRD). These models were then compared with custom models created by machine learning (ML) experts.
DESIGN: Retrospective multicenter study.
PARTICIPANTS: Five hundred and thirty-nine consecutive patients with primary RRD who underwent PR performed by a vitreoretinal fellow at 6 training hospitals between 2002 and 2022.
METHODS: We used 2 AutoML platforms: MATLAB Classification Learner and Google Cloud AutoML. Additional models were developed by computer scientists. We included patient demographics and baseline characteristics, including lens and macula status, RRD size, number and location of breaks, presence of vitreous hemorrhage and lattice degeneration, and physicians' experience. The dataset was split into a training (n = 483) and test set (n = 56). The training set, with a 2:1 success-to-failure ratio, was used to train the MATLAB models. Because Google Cloud AutoML requires a minimum of 1000 samples, the training set was tripled to create a new set with 1449 datapoints. Additionally, balanced datasets with a 1:1 success-to-failure ratio were created using Python.
MAIN OUTCOME MEASURES: Single-procedure anatomic success rate, as predicted by the ML models. F2 scores and area under the receiver operating curve (AUROC) were used as primary metrics to compare models.
RESULTS: The best-performing AutoML model (F2 score: 0.85; AUROC: 0.90; MATLAB) showed performance comparable to the custom model (F2 score: 0.92; AUROC: 0.86) when trained on the balanced datasets. However, training the AutoML model on imbalanced data yielded a misleadingly high AUROC (0.81) despite a low F2 score (0.2) and sensitivity (0.17).
CONCLUSIONS: We demonstrated the feasibility of using AutoML as an accessible tool for medical professionals to develop models from clinical data. Such models can ultimately aid in the clinical decision-making, contributing to better patient outcomes. However, outcomes can be misleading or unreliable if used naively. Limitations exist, particularly if datasets contain missing variables or are highly imbalanced. Proper model selection and data preprocessing can improve the reliability of AutoML tools.
FINANCIAL DISCLOSURES: Proprietary or commercial disclosure may be found in the Footnotes and Disclosures at the end of this article.},
}
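The abstract's caution that an imbalanced training set can yield a misleadingly high AUROC deserves a concrete illustration. In the fabricated example below, the model ranks cases perfectly (AUROC 1.0) yet finds few failures at its operating threshold, which the recall-weighted F2 score exposes.

```python
# Why F2 is paired with AUROC above: ranking quality and threshold behavior
# can diverge badly on imbalanced data. Labels and scores are fabricated.
from sklearn.metrics import fbeta_score, roc_auc_score

y_true = [1] * 10 + [0] * 90                   # 10% failures: imbalanced
y_score = [0.6] * 2 + [0.4] * 8 + [0.3] * 90   # failures outrank successes
y_pred = [int(s >= 0.5) for s in y_score]      # but only 2 cross the threshold

print("F2   :", round(fbeta_score(y_true, y_pred, beta=2), 2))  # ~0.24
print("AUROC:", round(roc_auc_score(y_true, y_score), 2))       # 1.0
```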
@article {pmid38826407,
year = {2024},
author = {Rodriguez, A and Kim, Y and Nandi, TN and Keat, K and Kumar, R and Bhukar, R and Conery, M and Liu, M and Hessington, J and Maheshwari, K and Schmidt, D and Begoli, E and Tourassi, G and Muralidhar, S and Natarajan, P and Voight, BF and Cho, K and Gaziano, JM and Damrauer, SM and Liao, KP and Zhou, W and Huffman, JE and Verma, A and Madduri, RK},
title = {Accelerating Genome- and Phenome-Wide Association Studies using GPUs - A case study using data from the Million Veteran Program.},
journal = {bioRxiv : the preprint server for biology},
volume = {},
number = {},
pages = {},
doi = {10.1101/2024.05.17.594583},
pmid = {38826407},
abstract = {The expansion of biobanks has significantly propelled genomic discoveries yet the sheer scale of data within these repositories poses formidable computational hurdles, particularly in handling extensive matrix operations required by prevailing statistical frameworks. In this work, we introduce computational optimizations to the SAIGE (Scalable and Accurate Implementation of Generalized Mixed Model) algorithm, notably employing a GPU-based distributed computing approach to tackle these challenges. We applied these optimizations to conduct a large-scale genome-wide association study (GWAS) across 2,068 phenotypes derived from electronic health records of 635,969 diverse participants from the Veterans Affairs (VA) Million Veteran Program (MVP). Our strategies enabled scaling up the analysis to over 6,000 nodes on the Department of Energy (DOE) Oak Ridge Leadership Computing Facility (OLCF) Summit High-Performance Computer (HPC), resulting in a 20-fold acceleration compared to the baseline model. We also provide a Docker container with our optimizations that was successfully used on multiple cloud infrastructures on UK Biobank and All of Us datasets where we showed significant time and cost benefits over the baseline SAIGE model.},
}
@article {pmid38826171,
year = {2024},
author = {Lowndes, JS and Holder, AM and Markowitz, EH and Clatterbuck, C and Bradford, AL and Doering, K and Stevens, MH and Butland, S and Burke, D and Kross, S and Hollister, JW and Stawitz, C and Siple, MC and Rios, A and Welch, JN and Li, B and Nojavan, F and Davis, A and Steiner, E and London, JM and Fenwick, I and Hunzinger, A and Verstaen, J and Holmes, E and Virdi, M and Barrett, AP and Robinson, E},
title = {Shifting institutional culture to develop climate solutions with Open Science.},
journal = {Ecology and evolution},
volume = {14},
number = {6},
pages = {e11341},
pmid = {38826171},
issn = {2045-7758},
abstract = {To address our climate emergency, "we must rapidly, radically reshape society"-Johnson & Wilkinson, All We Can Save. In science, reshaping requires formidable technical (cloud, coding, reproducibility) and cultural shifts (mindsets, hybrid collaboration, inclusion). We are a group of cross-government and academic scientists that are exploring better ways of working and not being too entrenched in our bureaucracies to do better science, support colleagues, and change the culture at our organizations. We share much-needed success stories and action for what we can all do to reshape science as part of the Open Science movement and 2023 Year of Open Science.},
}
@article {pmid38813089,
year = {2024},
author = {Mimar, S and Paul, AS and Lucarelli, N and Border, S and Naglah, A and Barisoni, L and Hodgin, J and Rosenberg, AZ and Clapp, W and Sarder, P},
title = {ComPRePS: An Automated Cloud-based Image Analysis tool to democratize AI in Digital Pathology.},
journal = {Proceedings of SPIE--the International Society for Optical Engineering},
volume = {12933},
number = {},
pages = {},
pmid = {38813089},
issn = {0277-786X},
abstract = {Artificial intelligence (AI) has extensive applications in a wide range of disciplines including healthcare and clinical practice. Advances in high-resolution whole-slide brightfield microscopy allow for the digitization of histologically stained tissue sections, producing gigapixel-scale whole-slide images (WSI). The significant improvement in computing and revolution of deep neural network (DNN)-based AI technologies over the last decade allow us to integrate massively parallelized computational power, cutting-edge AI algorithms, and big data storage, management, and processing. Applied to WSIs, AI has created opportunities for improved disease diagnostics and prognostics with the ultimate goal of enhancing precision medicine and resulting patient care. The National Institutes of Health (NIH) has recognized the importance of developing standardized principles for data management and discovery for the advancement of science and proposed the Findable, Accessible, Interoperable, Reusable, (FAIR) Data Principles[1] with the goal of building a modernized biomedical data resource ecosystem to establish collaborative research communities. In line with this mission and to democratize AI-based image analysis in digital pathology, we propose ComPRePS: an end-to-end automated Computational Renal Pathology Suite which combines massive scalability, on-demand cloud computing, and an easy-to-use web-based user interface for data upload, storage, management, slide-level visualization, and domain expert interaction. Moreover, our platform is equipped with both in-house and collaborator developed sophisticated AI algorithms in the back-end server for image analysis to identify clinically relevant micro-anatomic functional tissue units (FTU) and to extract image features.},
}
@article {pmid38810758,
year = {2024},
author = {Yu, J and Nie, S and Liu, W and Zhu, X and Sun, Z and Li, J and Wang, C and Xi, X and Fan, H},
title = {Mapping global mangrove canopy height by integrating Ice, Cloud, and Land Elevation Satellite-2 photon-counting LiDAR data with multi-source images.},
journal = {The Science of the total environment},
volume = {},
number = {},
pages = {173487},
doi = {10.1016/j.scitotenv.2024.173487},
pmid = {38810758},
issn = {1879-1026},
abstract = {Large-scale and precise measurement of mangrove canopy height is crucial for understanding and evaluating wetland ecosystems' condition, health, and productivity. This study generates a global mangrove canopy height map with a 30 m resolution by integrating Ice, Cloud, and Land Elevation Satellite-2 (ICESat-2) photon-counting light detection and ranging (LiDAR) data with multi-source imagery. Initially, high-quality mangrove canopy height samples were extracted using meticulous processing and filtering of ICESat-2 data. Subsequently, mangrove canopy height models were established using the random forest (RF) algorithm, incorporating ICESat-2 canopy height samples, Sentinel-2 data, TanDEM-X DEM data and WorldClim data. Furthermore, a global 30 m mangrove canopy height map was generated utilizing the Google Earth Engine platform. Finally, the global map's accuracy was evaluated by comparing it with reference canopy heights derived from both space-borne and airborne LiDAR data. Results indicate that the global 30 m resolution mangrove height map is consistent with canopy heights obtained from space-borne (r = 0.88, Bias = -0.07 m, RMSE = 3.66 m, RMSE% = 29.86 %) and airborne LiDAR (r = 0.52, Bias = -1.08 m, RMSE = 3.39 m, RMSE% = 39.05 %). Additionally, our findings reveal that mangroves worldwide exhibit an average height of 12.65 m, with the tallest mangrove reaching a height of 44.94 m. These results demonstrate the feasibility and effectiveness of using ICESat-2 data integrated with multi-source imagery to generate a global mangrove canopy height map. This dataset offers reliable information that can significantly support government and organizational efforts to protect and conserve mangrove ecosystems.},
}
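The regression step, relating per-pixel predictors to ICESat-2 canopy-height samples with a random forest, can be sketched with scikit-learn on synthetic data. The study ran its version on Google Earth Engine; the predictor count and the response formula below are placeholders.

```python
# Random forest canopy-height regression on synthetic stand-ins for
# Sentinel-2 bands, DEM height, and climate predictors.
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split

rng = np.random.default_rng(42)
X = rng.uniform(size=(5000, 8))        # 8 placeholder predictor bands
height = 10 + 20 * X[:, 0] + 5 * X[:, 1] + rng.normal(0, 2, 5000)

X_tr, X_te, y_tr, y_te = train_test_split(X, height, test_size=0.2,
                                          random_state=0)
rf = RandomForestRegressor(n_estimators=200, random_state=0).fit(X_tr, y_tr)
rmse = mean_squared_error(y_te, rf.predict(X_te)) ** 0.5
print(f"RMSE: {rmse:.2f} m")           # same error metric the study reports
```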
@article {pmid38798429,
year = {2024},
author = {Oh, S and Gravel-Pucillo, K and Ramos, M and Davis, S and Carey, V and Morgan, M and Waldron, L},
title = {AnVILWorkflow: A runnable workflow package for Cloud-implemented bioinformatics analysis pipelines.},
journal = {Research square},
volume = {},
number = {},
pages = {},
doi = {10.21203/rs.3.rs-4370115/v1},
pmid = {38798429},
abstract = {Advancements in sequencing technologies and the development of new data collection methods produce large volumes of biological data. The Genomic Data Science Analysis, Visualization, and Informatics Lab-space (AnVIL) provides a cloud-based platform for democratizing access to large-scale genomics data and analysis tools. However, utilizing the full capabilities of AnVIL can be challenging for researchers without extensive bioinformatics expertise, especially for executing complex workflows. Here we present the AnVILWorkflow R package, which enables the convenient execution of bioinformatics workflows hosted on AnVIL directly from an R environment. AnVILWorkflow simplifies the setup of the cloud computing environment, input data formatting, workflow submission, and retrieval of results through intuitive functions. We demonstrate the utility of AnVILWorkflow for three use cases: bulk RNA-seq analysis with Salmon, metagenomics analysis with bioBakery, and digital pathology image processing with PathML. The key features of AnVILWorkflow include user-friendly browsing of available data and workflows, seamless integration of R and non-R tools within a reproducible analysis pipeline, and accessibility to scalable computing resources without direct management overhead. While some limitations exist around workflow customization, AnVILWorkflow lowers the barrier to taking advantage of AnVIL's resources, especially for exploratory analyses or bulk processing with established workflows. This empowers a broader community of researchers to leverage the latest genomics tools and datasets using familiar R syntax. This package is distributed through the Bioconductor project (https://bioconductor.org/packages/AnVILWorkflow), and the source code is available through GitHub (https://github.com/shbrief/AnVILWorkflow).},
}
@article {pmid38797827,
year = {2024},
author = {Alrashdi, I},
title = {Fog-based deep learning framework for real-time pandemic screening in smart cities from multi-site tomographies.},
journal = {BMC medical imaging},
volume = {24},
number = {1},
pages = {123},
pmid = {38797827},
issn = {1471-2342},
support = {DGSSR-2023-02-02058//Graduate Studies and Scientific Research at Jouf University/ ; },
mesh = {Humans ; *Deep Learning ; *COVID-19 ; *Tomography, X-Ray Computed/methods ; *Pandemics ; SARS-CoV-2 ; Cities ; Internet of Things ; },
abstract = {The rapid proliferation of pandemic diseases has placed serious strain on international health infrastructure. To combat pandemic diseases in smart cities, Artificial Intelligence of Things (AIoT) technology, based on the integration of artificial intelligence (AI) with the Internet of Things (IoT), is commonly used to promote efficient control and diagnosis during the outbreak, thereby minimizing possible losses. However, the presence of multi-source institutional data remains one of the major challenges hindering the practical usage of AIoT solutions for pandemic disease diagnosis. This paper presents a novel framework that utilizes multi-site data fusion to boost the accuracy of pandemic disease diagnosis. In particular, we focus on a case study of COVID-19 lesion segmentation, a crucial task for understanding disease progression and optimizing treatment strategies. In this study, we propose a novel multi-decoder segmentation network for efficient segmentation of infections from cross-domain CT scans in smart cities. The multi-decoder segmentation network leverages data from heterogeneous domains and utilizes strong learning representations to accurately segment infections. Performance evaluation of the multi-decoder segmentation network was conducted on three publicly accessible datasets, demonstrating robust results with an average dice score of 89.9% and an average surface dice of 86.87%. To address scalability and latency issues associated with centralized cloud systems, fog computing (FC) emerges as a viable solution. FC brings resources closer to the operator, offering low latency and energy-efficient data management and processing. In this context, we propose a unique FC technique called PANDFOG to deploy the multi-decoder segmentation network on edge nodes for practical and clinical applications of automated COVID-19 pneumonia analysis. The results of this study highlight the efficacy of the multi-decoder segmentation network in accurately segmenting infections from cross-domain CT scans. Moreover, the proposed PANDFOG system demonstrates the practical deployment of the multi-decoder segmentation network on edge nodes, providing real-time access to COVID-19 segmentation findings for improved patient monitoring and clinical decision-making.},
}
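For reference, the headline metric above, the Dice score between a predicted and a reference segmentation mask, reduces to a few lines of NumPy:

```python
# Dice score between two binary segmentation masks.
import numpy as np

def dice(pred, truth, eps=1e-8):
    """pred, truth: same-shape 0/1 or boolean arrays. Returns Dice in [0, 1]."""
    pred, truth = pred.astype(bool), truth.astype(bool)
    intersection = np.logical_and(pred, truth).sum()
    return 2.0 * intersection / (pred.sum() + truth.sum() + eps)

a = np.zeros((64, 64)); a[20:40, 20:40] = 1
b = np.zeros((64, 64)); b[25:45, 25:45] = 1
print(round(dice(a, b), 4))   # overlapping squares -> 0.5625
```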
@article {pmid38794890,
year = {2024},
author = {de Azevedo Soares Dos Santos, HC and Rodrigues Cintra Armellini, B and Naves, GL and Bueris, V and Moreno, ACR and de Cássia Café Ferreira, R},
title = {Using "adopt a bacterium" as an E-learning tool for simultaneously teaching microbiology to different health-related university courses.},
journal = {FEMS microbiology letters},
volume = {},
number = {},
pages = {},
doi = {10.1093/femsle/fnae033},
pmid = {38794890},
issn = {1574-6968},
abstract = {The COVID-19 pandemic has posed challenges for education, particularly in undergraduate teaching. In this study, we report on how a private university successfully addressed this challenge through an active methodology applied to a microbiology discipline offered remotely to students from various health-related courses (veterinary, physiotherapy, nursing, biomedicine, and nutrition). Remote teaching was combined with the 'Adopt a Bacterium' methodology, implemented for the first time on Google Sites. The distance learning activity notably improved student participation in microbiology discussions, as shown both by word cloud analysis and by the richness of discourse measured by the Shannon index. Furthermore, feedback from students about the e-learning approach was highly positive, indicating its effectiveness in motivating and involving students in the learning process. The results also demonstrate that, despite being offered simultaneously to students from different courses, the methodology allowed for the acquisition of specialized knowledge within each course and sparked student interest in various aspects of microbiology. In conclusion, the remote 'Adopt a Bacterium' methodology facilitated knowledge sharing among undergraduate students from different health-related courses and represented a valuable resource in distance microbiology education.},
}
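The Shannon index used above to quantify richness of discourse is the usual diversity measure applied to word frequencies. A minimal sketch, assuming discussion posts are tokenized into words (the tokens below are invented):

```python
from collections import Counter
from math import log

def shannon_index(tokens):
    """Shannon diversity H' = -sum(p_i * ln p_i) over token frequencies."""
    counts = Counter(tokens)
    total = sum(counts.values())
    return -sum(c / total * log(c / total) for c in counts.values())

post = "gram stain reveals gram positive cocci in clusters".split()
print(round(shannon_index(post), 3))  # higher H' indicates richer vocabulary
```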
@article {pmid38794107,
year = {2024},
author = {Shaghaghi, N and Fazlollahi, F and Shrivastav, T and Graham, A and Mayer, J and Liu, B and Jiang, G and Govindaraju, N and Garg, S and Dunigan, K and Ferguson, P},
title = {DOxy: A Dissolved Oxygen Monitoring System.},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {10},
pages = {},
doi = {10.3390/s24103253},
pmid = {38794107},
issn = {1424-8220},
abstract = {Dissolved Oxygen (DO) in water enables marine life. Measuring the prevalence of DO in a body of water is an important part of sustainability efforts because low oxygen levels are a primary indicator of contamination and distress in bodies of water. Therefore, aquariums and aquaculture of all types need near real-time dissolved oxygen monitoring and spend a lot of money on purchasing and maintaining DO meters that are either expensive, inefficient, or manually operated, in which case they must also ensure that manual readings are taken frequently, which is time consuming. Hence, a cost-effective and sustainable automated Internet of Things (IoT) system for this task is necessary and long overdue. DOxy is such an IoT system, under research and development at Santa Clara University's Ethical, Pragmatic, and Intelligent Computing (EPIC) Laboratory, which utilizes cost-effective, accessible, and sustainable Sensing Units (SUs) to measure the dissolved oxygen levels present in bodies of water and send their readings to a web-based cloud infrastructure for storage, analysis, and visualization. DOxy's SUs are equipped with a High-sensitivity Pulse Oximeter meant for measuring dissolved oxygen levels in human blood, not water. Hence, a number of parallel readings of water samples were gathered by both the High-sensitivity Pulse Oximeter and a standard dissolved oxygen meter. Then, two approaches for relating the readings were investigated. In the first, various machine learning models were trained and tested to produce a dynamic mapping of sensor readings to actual DO values. In the second, curve-fitting models were used to produce a successful conversion formula usable in the DOxy SUs offline. Both proved successful in producing accurate results.},
}
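The offline conversion formula described above maps raw pulse-oximeter readings to reference DO values via curve fitting. A minimal sketch with scipy, assuming an affine model and invented paired readings (the paper does not disclose its fitted functional form or data):

```python
import numpy as np
from scipy.optimize import curve_fit

raw = np.array([1200.0, 1500.0, 1800.0, 2100.0, 2400.0])  # invented oximeter values
do_ref = np.array([4.1, 5.6, 6.9, 8.2, 9.4])              # invented DO meter values (mg/L)

def model(x, a, b):
    # An affine mapping is an assumption; the paper's form may differ.
    return a * x + b

(a, b), _ = curve_fit(model, raw, do_ref)
print(f"DO ~= {a:.4f} * raw + {b:.2f}")  # formula usable offline on a sensing unit
```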
@article {pmid38794080,
year = {2024},
author = {Kitsiou, A and Sideri, M and Pantelelis, M and Simou, S and Mavroeidi, AG and Vgena, K and Tzortzaki, E and Kalloniatis, C},
title = {Specification of Self-Adaptive Privacy-Related Requirements within Cloud Computing Environments (CCE).},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {10},
pages = {},
doi = {10.3390/s24103227},
pmid = {38794080},
issn = {1424-8220},
support = {2550//Hellenic Foundation for Research and Innovation/ ; },
abstract = {This paper presents a novel approach to address the challenges of self-adaptive privacy in cloud computing environments (CCE). Under the Cloud-InSPiRe project, the aim is to provide an interdisciplinary framework and a beta-version tool for self-adaptive privacy design, effectively focusing on the integration of technical measures with social needs. To address this, a pilot taxonomy that aligns technical, infrastructural, and social requirements is proposed, informed by two supplementary surveys focusing on users' privacy needs and developers' perspectives on self-adaptive privacy. Through the integration of users' social identity-based practices and developers' insights, the taxonomy aims to provide clear guidance for developers, ensuring compliance with regulatory standards and fostering a user-centric approach to self-adaptive privacy design tailored to diverse user groups, ultimately enhancing satisfaction and confidence in cloud services.},
}
@article {pmid38794042,
year = {2024},
author = {Zimmerleiter, R and Greibl, W and Meininger, G and Duswald, K and Hannesschläger, G and Gattinger, P and Rohm, M and Fuczik, C and Holzer, R and Brandstetter, M},
title = {Sensor for Rapid In-Field Classification of Cannabis Samples Based on Near-Infrared Spectroscopy.},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {10},
pages = {},
doi = {10.3390/s24103188},
pmid = {38794042},
issn = {1424-8220},
support = {FO999886335//Austrian Research Promotion Agency/ ; },
mesh = {*Cannabis/chemistry/classification ; *Spectroscopy, Near-Infrared/methods ; Discriminant Analysis ; Least-Squares Analysis ; Humans ; Dronabinol/analysis ; },
abstract = {A rugged handheld sensor for rapid in-field classification of cannabis samples based on their THC content using ultra-compact near-infrared spectrometer technology is presented. The device is designed for use by the Austrian authorities to discriminate between legal and illegal cannabis samples directly at the place of intervention. Hence, the sensor allows direct measurement through commonly encountered transparent plastic packaging made from polypropylene or polyethylene without any sample preparation. The measurement time is below 20 s. Measured spectral data are evaluated using partial least squares discriminant analysis directly on the device's hardware, eliminating the need for internet connectivity for cloud computing. The classification result is visually indicated directly on the sensor via a colored LED. Validation of the sensor is performed on an independent data set acquired by non-expert users after a short introduction. Despite the challenging setting, the achieved classification accuracy is higher than 80%. Therefore, the handheld sensor has the potential to reduce the number of unnecessarily confiscated legal cannabis samples, which would lead to significant monetary savings for the authorities.},
}
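The on-device classifier above relies on partial least squares discriminant analysis (PLS-DA), which is ordinarily implemented as PLS regression on class labels followed by a decision threshold. A minimal sketch with scikit-learn on synthetic spectra (the component count, threshold, and data are assumptions):

```python
import numpy as np
from sklearn.cross_decomposition import PLSRegression

rng = np.random.default_rng(0)
X = rng.normal(size=(40, 128))   # synthetic NIR spectra: samples x wavelength channels
y = np.repeat([0, 1], 20)        # 0 = legal (low THC), 1 = illegal

pls = PLSRegression(n_components=5).fit(X, y)
pred = (pls.predict(X).ravel() > 0.5).astype(int)  # threshold the continuous PLS output
print("training accuracy:", (pred == y).mean())
```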
@article {pmid38794035,
year = {2024},
author = {Lin, J and Guan, Y},
title = {Load Prediction in Double-Channel Residual Self-Attention Temporal Convolutional Network with Weight Adaptive Updating in Cloud Computing.},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {10},
pages = {},
doi = {10.3390/s24103181},
pmid = {38794035},
issn = {1424-8220},
abstract = {When resource demand increases and decreases rapidly, container clusters in the cloud environment need to adjust the number of containers in a timely manner to ensure service quality. Resource load prediction is a prominent challenge accompanying the widespread adoption of cloud computing. A novel cloud computing load prediction method, the Double-channel residual Self-attention Temporal convolutional Network with Weight adaptive updating (DSTNW), is proposed to make the response of the container cluster more rapid and accurate. A Double-channel Temporal Convolution Network model (DTN) has been developed to capture long-term sequence dependencies and enhance feature extraction capabilities when the model handles long load sequences. Double-channel dilated causal convolution has been adopted to replace the single-channel dilated causal convolution in the DTN. A residual temporal self-attention mechanism (SM) has been proposed to improve the performance of the network and focus on features with significant contributions from the DTN. DTN and SM jointly constitute a dual-channel residual self-attention temporal convolutional network (DSTN). In addition, by evaluating the accuracy aspects of single and stacked DSTNs, an adaptive weight strategy has been proposed to assign corresponding weights to the single and stacked DSTNs. The experimental results highlight that the developed method has outstanding prediction performance for cloud computing in comparison with some state-of-the-art methods. The proposed method achieved an average improvement of 24.16% and 30.48% on the Container dataset and Google dataset, respectively.},
}
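The dilated causal convolution at the core of the DTN restricts each output step to past inputs only. A minimal single-layer sketch in PyTorch (channel counts and window length are invented; the paper's double-channel stacking and attention are omitted):

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class CausalConv1d(nn.Module):
    """One dilated causal convolution: left-pad so the output at time t
    sees only inputs up to time t."""
    def __init__(self, channels, kernel_size=3, dilation=2):
        super().__init__()
        self.pad = (kernel_size - 1) * dilation
        self.conv = nn.Conv1d(channels, channels, kernel_size, dilation=dilation)

    def forward(self, x):                           # x: (batch, channels, time)
        return self.conv(F.pad(x, (self.pad, 0)))   # pad the past side only

load = torch.randn(8, 1, 64)        # invented CPU-load windows
print(CausalConv1d(1)(load).shape)  # torch.Size([8, 1, 64]); no future leakage
```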
@article {pmid38794018,
year = {2024},
author = {Xie, Y and Meng, X and Nguyen, DT and Xiang, Z and Ye, G and Hu, L},
title = {A Discussion of Building a Smart SHM Platform for Long-Span Bridge Monitoring.},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {10},
pages = {},
doi = {10.3390/s24103163},
pmid = {38794018},
issn = {1424-8220},
support = {4000108996/13/NL/US//European Space Agency/ ; 4000116646/16/NL/US//European Space Agency/ ; },
abstract = {This paper explores the development of a smart Structural Health Monitoring (SHM) platform tailored for long-span bridge monitoring, using the Forth Road Bridge (FRB) as a case study. It discusses the selection of smart sensors available for real-time monitoring, the formulation of an effective data strategy encompassing the collection, processing, management, analysis, and visualization of monitoring data sets to support decision-making, and the establishment of a cost-effective and intelligent sensor network aligned with the objectives set through comprehensive communication with asset owners. Due to the high data rates and dense sensor installations, conventional processing techniques are inadequate for fulfilling monitoring functionalities and ensuring security. Cloud computing emerges as a widely adopted solution for processing and storing vast monitoring data sets. Drawing from the authors' experience in implementing long-span bridge monitoring systems in the UK and China, this paper compares the advantages and limitations of employing cloud computing for long-span bridge monitoring. Furthermore, it explores strategies for developing a robust data strategy and leveraging artificial intelligence (AI) and digital twin (DT) technologies to extract relevant information or patterns regarding asset health conditions. This information is then visualized through the interaction between physical and virtual worlds, facilitating timely and informed decision-making in managing critical road transport infrastructure.},
}
@article {pmid38786560,
year = {2024},
author = {Peralta, T and Menoscal, M and Bravo, G and Rosado, V and Vaca, V and Capa, D and Mulas, M and Jordá-Bordehore, L},
title = {Rock Slope Stability Analysis Using Terrestrial Photogrammetry and Virtual Reality on Ignimbritic Deposits.},
journal = {Journal of imaging},
volume = {10},
number = {5},
pages = {},
doi = {10.3390/jimaging10050106},
pmid = {38786560},
issn = {2313-433X},
abstract = {Puerto de Cajas serves as a vital high-altitude passage in Ecuador, connecting the coastal region to the city of Cuenca. The stability of this rocky massif is carefully managed through the assessment of blocks and discontinuities, ensuring safe travel. This study presents a novel approach, employing rapid and cost-effective methods to evaluate an unexplored area within the protected expanse of Cajas. Using terrestrial photogrammetry and strategically positioned geomechanical stations along the slopes, we generated a detailed point cloud capturing elusive terrain features and digitized the slope. Validation of the collected data was achieved by comparing directional data from Cloud Compare software with manual readings taken at control points using a digital compass integrated into a phone. The analysis encompasses three slopes, employing the SMR, Q-slope, and kinematic methodologies. Results from the SMR system closely align with kinematic analysis, indicating satisfactory slope quality. Nonetheless, continued vigilance in stability control remains imperative for ensuring road safety and preserving the site's integrity. Moreover, this research lays the groundwork for the creation of a publicly accessible 3D repository, enhancing visualization capabilities through Google Virtual Reality. This initiative not only aids in replicating the findings but also facilitates access to an augmented reality environment, thereby fostering collaborative research endeavors.},
}
@article {pmid38776642,
year = {2024},
author = {Li, X and Zhao, P and Liang, M and Ji, X and Zhang, D and Xie, Z},
title = {Dynamics changes of coastal aquaculture ponds based on the Google Earth Engine in Jiangsu Province, China.},
journal = {Marine pollution bulletin},
volume = {203},
number = {},
pages = {116502},
doi = {10.1016/j.marpolbul.2024.116502},
pmid = {38776642},
issn = {1879-3363},
abstract = {Monitoring the spatiotemporal variation in coastal aquaculture zones is essential to providing a scientific basis for formulating reasonable land management policies. This study uses the Google Earth Engine (GEE) remote sensing cloud platform to extract aquaculture information based on Landsat-series and Sentinel-2 images for six years spanning 1984 to 2021 (1984, 1990, 2000, 2010, 2016, and 2021), so as to analyze the changes in coastal aquaculture pond area, along with its spatiotemporal characteristics, in Jiangsu Province. The overall area of coastal aquaculture ponds in Jiangsu shows an increasing trend in the early period and a decreasing trend in the later period. Over the past 37 years, the area of coastal aquaculture ponds has increased by a total of 54,639.73 ha. This study can provide basic data for the sustainable development of coastal aquaculture in Jiangsu, and a reference for related studies in other regions.},
}
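The pond-extraction step above runs on Google Earth Engine. A sketch of one plausible building block using the earthengine-api, an NDWI water mask over a rough bounding box for coastal Jiangsu (the collection, bands, threshold, and geometry are assumptions; the paper's actual classification workflow is more involved):

```python
import ee  # earthengine-api; ee.Authenticate() is required on first use

ee.Initialize()
jiangsu = ee.Geometry.Rectangle([118.5, 31.5, 122.2, 35.1])  # rough coastal bounds

composite = (ee.ImageCollection('LANDSAT/LC08/C02/T1_L2')
             .filterBounds(jiangsu)
             .filterDate('2021-01-01', '2021-12-31')
             .median())
ndwi = composite.normalizedDifference(['SR_B3', 'SR_B5'])  # green vs. NIR bands
water = ndwi.gt(0.1)  # threshold chosen for illustration only
```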
@article {pmid38771196,
year = {2024},
author = {Hulagappa Nebagiri, M and Pillappa Hnumanthappa, L},
title = {Fractional social optimization-based migration and replica management algorithm for load balancing in distributed file system for cloud computing.},
journal = {Network (Bristol, England)},
volume = {},
number = {},
pages = {1-28},
doi = {10.1080/0954898X.2024.2353665},
pmid = {38771196},
issn = {1361-6536},
abstract = {Effective management of data is a major issue in a Distributed File System (DFS), like the cloud. This issue is handled by replicating files in an effective manner, which can minimize the time of data access and elevate data availability. This paper devises a Fractional Social Optimization Algorithm (FSOA) for replica management along with load balancing in a cloud DFS. Balancing the workload of the DFS is the main objective. Here, chunks are created by partitioning the file using Deep Fuzzy Clustering (DFC), and the chunks are then assigned to Virtual Machines (VMs) in a round-robin manner. Load balancing is then performed with the proposed FSOA, considering objectives such as resource use, energy consumption, and migration cost. The FSOA is formulated by uniting the Social optimization algorithm (SOA) and Fractional Calculus (FC). Replica management is done in the DFS using the proposed FSOA by considering these objectives. The FSOA has the smallest load of 0.299, smallest cost of 0.395, smallest energy consumption of 0.510, smallest overhead of 0.358, and smallest throughput of 0.537.},
}
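After DFC-based chunk creation, the paper assigns chunks to virtual machines in a round-robin manner. A minimal sketch of that placement step (chunk names and VM identifiers are invented; the FSOA load-balancing stage is out of scope here):

```python
from itertools import cycle

def assign_round_robin(chunks, vms):
    """Place file chunks on VMs in strict rotation."""
    placement = {vm: [] for vm in vms}
    for chunk, vm in zip(chunks, cycle(vms)):
        placement[vm].append(chunk)
    return placement

print(assign_round_robin([f"chunk{i}" for i in range(7)], ["vm1", "vm2", "vm3"]))
# {'vm1': ['chunk0', 'chunk3', 'chunk6'], 'vm2': ['chunk1', 'chunk4'], 'vm3': ['chunk2', 'chunk5']}
```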
@article {pmid38770301,
year = {2024},
author = {Qureshi, KM and Mewada, BG and Kaur, S and Khan, A and Al-Qahtani, MM and Qureshi, MRNM},
title = {Investigating industry 4.0 technologies in logistics 4.0 usage towards sustainable manufacturing supply chain.},
journal = {Heliyon},
volume = {10},
number = {10},
pages = {e30661},
pmid = {38770301},
issn = {2405-8440},
abstract = {In the era of Industry 4.0 (I4.0), automation and data analysis have undergone significant advancements, greatly impacting production management and operations management. Technologies such as the Internet of Things (IoT), robotics, cloud computing (CC), and big data, have played a crucial role in shaping Logistics 4.0 (L4.0) and improving the efficiency of the manufacturing supply chain (SC), ultimately contributing to sustainability goals. The present research investigates the role of I4.0 technologies within the framework of the extended theory of planned behavior (ETPB). The research explores various variables including subjective norms, attitude, perceived behavior control, leading to word-of-mouth, and purchase intention. By modeling these variables, the study aims to understand the influence of I4.0 technologies on L4.0 to establish a sustainable manufacturing SC. A questionnaire was administered to gather input from small and medium-sized firms (SMEs) in the manufacturing industry. An empirical study along with partial least squares structural equation modeling (SEM), was conducted to analyze the data. The findings indicate that the use of I4.0 technology in L4.0 influences subjective norms, which subsequently influence attitudes and personal behavior control. This, in turn, leads to word-of-mouth and purchase intention. The results provide valuable insights for shippers and logistics service providers empowering them to enhance their performance and contribute to achieving sustainability objectives. Consequently, this study contributes to promoting sustainability in the manufacturing SC by stimulating the adoption of I4.0 technologies in L4.0.},
}
@article {pmid38768167,
year = {2024},
author = {Vo, DH and Vo, AT and Dinh, CT and Tran, NP},
title = {Corporate restructuring and firm performance in Vietnam: The moderating role of digital transformation.},
journal = {PloS one},
volume = {19},
number = {5},
pages = {e0303491},
doi = {10.1371/journal.pone.0303491},
pmid = {38768167},
issn = {1932-6203},
mesh = {Vietnam ; Humans ; *Commerce ; Information Technology ; },
abstract = {In the digital age, firms should continually innovate and adapt to remain competitive and enhance performance. Innovation and adaptation require firms to take a holistic approach to their corporate structuring to ensure efficiency and effectiveness to stay competitive. This study examines how corporate restructuring impacts firm performance in Vietnam. We then investigate the moderating role of digital transformation in the corporate restructuring-firm performance nexus. We use content analysis, with a focus on particular terms, including "digitalization," "big data," "cloud computing," "blockchain," and "information technology" for 11 years, from 2011 to 2021. The frequency index from these keywords is developed to proxy the digital transformation for the Vietnamese listed firms. A final sample includes 118 Vietnamese listed firms with sufficient data for the analysis using the generalized method of moments (GMM) approach. The results indicate that corporate restructuring, including financial, portfolio, and operational restructuring, has a negative effect on firm performance in Vietnam. Digital transformation also negatively affects firm performance. However, corporate restructuring implemented in conjunction with digital transformation improves the performance of Vietnamese listed firms. These findings largely remain unchanged across various robustness analyses.},
}
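The digital transformation proxy in the study above is a frequency index over five keywords in firm disclosures. A minimal sketch of one plausible construction, counting keyword hits per thousand words (the authors' exact normalization is not stated, so this scaling is an assumption):

```python
import re

KEYWORDS = ["digitalization", "big data", "cloud computing",
            "blockchain", "information technology"]

def digital_index(text):
    """Keyword hits per 1,000 words of an annual report (illustrative)."""
    lowered = text.lower()
    words = re.findall(r"[a-z]+", lowered)
    hits = sum(len(re.findall(re.escape(k), lowered)) for k in KEYWORDS)
    return 1000.0 * hits / max(len(words), 1)

print(digital_index("The firm expanded cloud computing and big data platforms."))
```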
@article {pmid38753476,
year = {2024},
author = {Gupta, I and Saxena, D and Singh, AK and Lee, CN},
title = {A Multiple Controlled Toffoli Driven Adaptive Quantum Neural Network Model for Dynamic Workload Prediction in Cloud Environments.},
journal = {IEEE transactions on pattern analysis and machine intelligence},
volume = {PP},
number = {},
pages = {},
doi = {10.1109/TPAMI.2024.3402061},
pmid = {38753476},
issn = {1939-3539},
abstract = {The key challenges in cloud computing encompass dynamic resource scaling, load balancing, and power consumption. Accurate workload prediction is identified as a crucial strategy to address these challenges. Despite numerous methods proposed to tackle this issue, existing approaches fall short of capturing the high-variance nature of volatile and dynamic cloud workloads. Consequently, this paper presents a novel Multiple Controlled Toffoli-driven Adaptive Quantum Neural Network (MCT-AQNN) model to establish an empirical solution to complex, elastic, and challenging workload prediction problems by optimizing the exploration, adaptation, and exploitation proficiencies through quantum learning. The computational adaptability of quantum computing is ingrained with machine learning algorithms to derive more precise correlations from dynamic and complex workloads. The furnished input data points and hatched neural weights are refitted in the form of qubits while the controlling effects of Multiple Controlled Toffoli (MCT) gates are operated at the hidden and output layers of the Quantum Neural Network (QNN) for enhancing learning capabilities. Complementarily, a Uniformly Adaptive Quantum Machine Learning (UAQL) algorithm is developed to effectively train the QNN. Extensive experiments are conducted and comparisons are performed with state-of-the-art methods using four real-world benchmark datasets. Experimental results show that MCT-AQNN has up to 32%-96% higher accuracy than the existing approaches.},
}
@article {pmid38749656,
year = {2024},
author = {Koenig, Z and Yohannes, MT and Nkambule, LL and Zhao, X and Goodrich, JK and Kim, HA and Wilson, MW and Tiao, G and Hao, SP and Sahakian, N and Chao, KR and Walker, MA and Lyu, Y and , and Rehm, H and Neale, BM and Talkowski, ME and Daly, MJ and Brand, H and Karczewski, KJ and Atkinson, EG and Martin, AR},
title = {A harmonized public resource of deeply sequenced diverse human genomes.},
journal = {Genome research},
volume = {},
number = {},
pages = {},
doi = {10.1101/gr.278378.123},
pmid = {38749656},
issn = {1549-5469},
abstract = {Underrepresented populations are often excluded from genomic studies due in part to a lack of resources supporting their analyses. The 1000 Genomes Project (1kGP) and Human Genome Diversity Project (HGDP), which have recently been sequenced to high coverage, are valuable genomic resources because of the global diversity they capture and their open data sharing policies. Here, we harmonized a high quality set of 4,094 whole genomes from 80 populations in the HGDP and 1kGP with data from the Genome Aggregation Database (gnomAD) and identified over 153 million high-quality SNVs, indels, and SVs. We performed a detailed ancestry analysis of this cohort, characterizing population structure and patterns of admixture across populations, analyzing site frequency spectra, and measuring variant counts at global and subcontinental levels. We also demonstrate substantial added value from this dataset compared to the prior versions of the component resources, typically combined via liftOver and variant intersection; for example, we catalog millions of new genetic variants, mostly rare, compared to previous releases. In addition to unrestricted individual-level public release, we provide detailed tutorials for conducting many of the most common quality control steps and analyses with these data in a scalable cloud-computing environment and publicly release this new phased joint callset for use as a haplotype resource in phasing and imputation pipelines. This jointly called reference panel will serve as a key resource to support research of diverse ancestry populations.},
}
@article {pmid38746269,
year = {2024},
author = {Thiriveedhi, VK and Krishnaswamy, D and Clunie, D and Pieper, S and Kikinis, R and Fedorov, A},
title = {Cloud-based large-scale curation of medical imaging data using AI segmentation.},
journal = {Research square},
volume = {},
number = {},
pages = {},
doi = {10.21203/rs.3.rs-4351526/v1},
pmid = {38746269},
abstract = {Rapid advances in medical imaging Artificial Intelligence (AI) offer unprecedented opportunities for automatic analysis and extraction of data from large imaging collections. Computational demands of such modern AI tools may be difficult to satisfy with the capabilities available on premises. Cloud computing offers the promise of economical access and extreme scalability. Few studies examine the price/performance tradeoffs of using the cloud, in particular for medical image analysis tasks. We investigate the use of cloud-provisioned compute resources for AI-based curation of the National Lung Screening Trial (NLST) Computed Tomography (CT) images available from the National Cancer Institute (NCI) Imaging Data Commons (IDC). We evaluated NCI Cancer Research Data Commons (CRDC) Cloud Resources - Terra (FireCloud) and Seven Bridges-Cancer Genomics Cloud (SB-CGC) platforms - to perform automatic image segmentation with TotalSegmentator and pyradiomics feature extraction for a large cohort containing >126,000 CT volumes from >26,000 patients. Utilizing >21,000 Virtual Machines (VMs) over the course of the computation, we completed the analysis in under 9 hours, as compared to the estimated 522 days that would be needed on a single workstation. The total cost of utilizing the cloud for this analysis was $1,011.05. Our contributions include: 1) an evaluation of the numerous tradeoffs towards optimizing the use of cloud resources for large-scale image analysis; 2) CloudSegmentator, an open source reproducible implementation of the developed workflows, which can be reused and extended; 3) practical recommendations for utilizing the cloud for large-scale medical image computing tasks. We also share the results of the analysis: a total of 9,565,554 segmentations of the anatomic structures and the accompanying radiomics features in IDC as of release v18.},
}
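The per-scan workload in the study above pairs TotalSegmentator masks with pyradiomics feature extraction. A minimal sketch of the extraction step for a single CT volume (file paths are placeholders; the CloudSegmentator workflows orchestrate this across >126,000 volumes):

```python
from radiomics import featureextractor  # pip install pyradiomics

extractor = featureextractor.RadiomicsFeatureExtractor()
extractor.disableAllFeatures()
extractor.enableFeatureClassByName('firstorder')  # keep the sketch small

# Placeholder paths: a CT volume and one anatomic mask produced upstream
features = extractor.execute('ct_volume.nii.gz', 'organ_mask.nii.gz')
for name, value in features.items():
    if name.startswith('original_firstorder'):
        print(name, value)
```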
@article {pmid38743439,
year = {2024},
author = {Philippou, J and Yáñez Feliú, G and Rudge, TJ},
title = {WebCM: A Web-Based Platform for Multiuser Individual-Based Modeling of Multicellular Microbial Populations and Communities.},
journal = {ACS synthetic biology},
volume = {},
number = {},
pages = {},
doi = {10.1021/acssynbio.3c00486},
pmid = {38743439},
issn = {2161-5063},
abstract = {WebCM is a web platform that enables users to create, edit, run, and view individual-based simulations of multicellular microbial populations and communities on a remote compute server. WebCM builds upon the simulation software CellModeller in the back end and provides users with a web-browser-based modeling interface including model editing, execution, and playback. Multiple users can run and manage multiple simulations simultaneously, sharing the host hardware. Since it is based on CellModeller, it can utilize both GPU and CPU parallelization. The user interface provides real-time interactive 3D graphical representations for inspection of simulations at all time points, and the results can be downloaded for detailed offline analysis. It can be run on cloud computing services or on a local server, allowing collaboration within and between laboratories.},
}
@article {pmid38733003,
year = {2024},
author = {Lin, Z and Liang, J},
title = {Edge Caching Data Distribution Strategy with Minimum Energy Consumption.},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {9},
pages = {},
doi = {10.3390/s24092898},
pmid = {38733003},
issn = {1424-8220},
support = {61862003//National Natural Science Foundation of China/ ; },
abstract = {In the context of the rapid development of the Internet of Vehicles, virtual reality, automatic driving and the industrial Internet, the terminal devices in the network show explosive growth. As a result, more and more information is generated from the edge of the network, which makes the data throughput increase dramatically in the mobile communication network. As the key technology of the fifth-generation mobile communication network, mobile edge caching technology, which caches popular data on edge servers deployed at the edge of the network, avoids the data transmission delay of the backhaul link and the occurrence of network congestion. With the growing scale of the network, distributing hot data from cloud servers to edge servers will generate huge energy consumption. To realize the green and sustainable development of the communication industry and reduce the energy consumption of distribution of data that needs to be cached in edge servers, we make the first attempt to propose and solve the problem of edge caching data distribution with minimum energy consumption (ECDDMEC) in this paper. First, we model and formulate the problem as a constrained optimization problem and then prove its NP-hardness. Subsequently, we design a greedy algorithm with computational complexity of O(n²) to solve the problem approximately. Experimental results show that compared with the distribution strategy of each edge server directly requesting data from the cloud server, the strategy obtained by the algorithm can significantly reduce the energy consumption of data distribution.},
}
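The O(n²) greedy algorithm above approximates the minimum-energy distribution. A simplified sketch under an assumed cost model in which each edge server fetches the item either from the cloud or from a peer that already holds it (the paper's exact formulation differs in detail):

```python
def greedy_distribution(servers, cloud_cost, relay_cost):
    """Each server takes the cheapest currently available source;
    once served, it can relay the item to later servers."""
    holders, total = [], 0.0
    for s in sorted(servers, key=lambda s: cloud_cost[s]):
        best = min([cloud_cost[s]] + [relay_cost[h][s] for h in holders])
        total += best
        holders.append(s)
    return total

cloud = {"a": 5.0, "b": 6.0, "c": 7.0}   # invented cloud-to-edge energy costs
relay = {"a": {"b": 1.0, "c": 4.0}, "b": {"a": 1.0, "c": 2.0},
         "c": {"a": 4.0, "b": 2.0}}      # invented edge-to-edge relay costs
print(greedy_distribution(["a", "b", "c"], cloud, relay))  # 5.0 + 1.0 + 2.0 = 8.0
```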
@article {pmid38732864,
year = {2024},
author = {Emvoliadis, A and Vryzas, N and Stamatiadou, ME and Vrysis, L and Dimoulas, C},
title = {Multimodal Environmental Sensing Using AI & IoT Solutions: A Cognitive Sound Analysis Perspective.},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {9},
pages = {},
doi = {10.3390/s24092755},
pmid = {38732864},
issn = {1424-8220},
abstract = {This study presents a novel audio compression technique, tailored for environmental monitoring within multi-modal data processing pipelines. Considering the crucial role that audio data play in environmental evaluations, particularly in contexts with extreme resource limitations, our strategy substantially decreases bit rates to facilitate efficient data transfer and storage. This is accomplished without undermining the accuracy necessary for trustworthy air pollution analysis while simultaneously minimizing processing expenses. More specifically, our approach fuses a Deep-Learning-based model, optimized for edge devices, along with a conventional coding schema for audio compression. Once transmitted to the cloud, the compressed data undergo a decoding process, leveraging vast cloud computing resources for accurate reconstruction and classification. The experimental results indicate that our approach leads to a relatively minor decrease in accuracy, even at notably low bit rates, and demonstrates strong robustness in identifying data from labels not included in our training dataset.},
}
@article {pmid38732863,
year = {2024},
author = {Hanczewski, S and Stasiak, M and Weissenberg, M},
title = {An Analytical Model of IaaS Architecture for Determining Resource Utilization.},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {9},
pages = {},
doi = {10.3390/s24092758},
pmid = {38732863},
issn = {1424-8220},
support = {Grant 0313/SBAD/1312//Ministry of Education and Science/ ; },
abstract = {Cloud computing has become a major component of the modern IT ecosystem. A key contributor to this has been the development of Infrastructure as a Service (IaaS) architecture, in which users' virtual machines (VMs) are run on the service provider's physical infrastructure, making it possible to become independent of the need to purchase one's own physical machines (PMs). One of the main aspects to consider when designing such systems is achieving the optimal utilization of individual resources, such as processor, RAM, disk, and available bandwidth. In response to these challenges, the authors developed an analytical model (the ARU method) to determine the average utilization levels of the aforementioned resources. The effectiveness of the proposed analytical model was evaluated by comparing the results obtained by utilizing the model with those obtained by conducting a digital simulation of the operation of a cloud system according to the IaaS paradigm. The results show the effectiveness of the model regardless of the structure of the emerging requests, the variability of the capacity of individual resources, and the number of physical machines in the system. This translates into the applicability of the model in the design process of cloud systems.},
}
@article {pmid38633810,
year = {2024},
author = {Du, X and Novoa-Laurentiev, J and Plasaek, JM and Chuang, YW and Wang, L and Marshall, G and Mueller, SK and Chang, F and Datta, S and Paek, H and Lin, B and Wei, Q and Wang, X and Wang, J and Ding, H and Manion, FJ and Du, J and Bates, DW and Zhou, L},
title = {Enhancing Early Detection of Cognitive Decline in the Elderly: A Comparative Study Utilizing Large Language Models in Clinical Notes.},
journal = {medRxiv : the preprint server for health sciences},
volume = {},
number = {},
pages = {},
doi = {10.1101/2024.04.03.24305298},
pmid = {38633810},
support = {R44 AG081006/AG/NIA NIH HHS/United States ; },
abstract = {BACKGROUND: Large language models (LLMs) have shown promising performance in various healthcare domains, but their effectiveness in identifying specific clinical conditions in real medical records is less explored. This study evaluates LLMs for detecting signs of cognitive decline in real electronic health record (EHR) clinical notes, comparing their error profiles with traditional models. The insights gained will inform strategies for performance enhancement.
METHODS: This study, conducted at Mass General Brigham in Boston, MA, analyzed clinical notes from the four years prior to a 2019 diagnosis of mild cognitive impairment in patients aged 50 and older. We used a randomly annotated sample of 4,949 note sections, filtered with keywords related to cognitive functions, for model development. For testing, a random annotated sample of 1,996 note sections without keyword filtering was utilized. We developed prompts for two LLMs, Llama 2 and GPT-4, on HIPAA-compliant cloud-computing platforms using multiple approaches (e.g., both hard and soft prompting and error analysis-based instructions) to select the optimal LLM-based method. Baseline models included a hierarchical attention-based neural network and XGBoost. Subsequently, we constructed an ensemble of the three models using a majority vote approach.
RESULTS: GPT-4 demonstrated superior accuracy and efficiency compared to Llama 2, but did not outperform traditional models. The ensemble model outperformed the individual models, achieving a precision of 90.3%, a recall of 94.2%, and an F1-score of 92.2%. Notably, the ensemble model showed a significant improvement in precision, increasing from a range of 70%-79% to above 90%, compared to the best-performing single model. Error analysis revealed that 63 samples were incorrectly predicted by at least one model; however, only 2 cases (3.2%) were mutual errors across all models, indicating diverse error profiles among them.
CONCLUSIONS: LLMs and traditional machine learning models trained using local EHR data exhibited diverse error profiles. The ensemble of these models was found to be complementary, enhancing diagnostic performance. Future research should investigate integrating LLMs with smaller, localized models and incorporating medical data and domain knowledge to enhance performance on specific tasks.},
}
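The ensemble in the RESULTS section combines three classifiers by majority vote. A minimal sketch on invented binary predictions:

```python
import numpy as np

def majority_vote(*model_preds):
    """Label 1 wins where more than half of the models predict 1."""
    stacked = np.vstack(model_preds)
    return (stacked.sum(axis=0) > stacked.shape[0] / 2).astype(int)

gpt4 = np.array([1, 0, 1, 1, 0])   # invented per-note predictions
hann = np.array([1, 0, 0, 1, 0])   # hierarchical attention network baseline
xgb  = np.array([0, 0, 1, 1, 1])   # XGBoost baseline
print(majority_vote(gpt4, hann, xgb))  # [1 0 1 1 0]
```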
@article {pmid38719856,
year = {2024},
author = {Kent, RM and Barbosa, WAS and Gauthier, DJ},
title = {Controlling chaos using edge computing hardware.},
journal = {Nature communications},
volume = {15},
number = {1},
pages = {3886},
pmid = {38719856},
issn = {2041-1723},
support = {FA9550-22-1-0203//United States Department of Defense | U.S. Air Force (United States Air Force)/ ; },
abstract = {Machine learning provides a data-driven approach for creating a digital twin of a system - a digital model used to predict the system behavior. Having an accurate digital twin can drive many applications, such as controlling autonomous systems. Often, the size, weight, and power consumption of the digital twin or related controller must be minimized, ideally realized on embedded computing hardware that can operate without a cloud-computing connection. Here, we show that a nonlinear controller based on next-generation reservoir computing can tackle a difficult control problem: controlling a chaotic system to an arbitrary time-dependent state. The model is accurate, yet it is small enough to be evaluated on a field-programmable gate array typically found in embedded devices. Furthermore, the model only requires 25.0 ± 7.0 nJ per evaluation, well below other algorithms, even without systematic power optimization. Our work represents the first step in deploying efficient machine learning algorithms to the computing "edge."},
}
@article {pmid38711808,
year = {2024},
author = {Buchanan, BC and Tang, Y and Lopez, H and Casanova, NG and Garcia, JGN and Yoon, JY},
title = {Development of a cloud-based flow rate tool for eNAMPT biomarker detection.},
journal = {PNAS nexus},
volume = {3},
number = {5},
pages = {pgae173},
pmid = {38711808},
issn = {2752-6542},
abstract = {Increased levels of extracellular nicotinamide phosphoribosyltransferase (eNAMPT) are increasingly recognized as a highly useful biomarker of inflammatory disease and disease severity. In preclinical animal studies, a monoclonal antibody that neutralizes eNAMPT has been generated to successfully reduce the extent of inflammatory cascade activation. Thus, the rapid detection of eNAMPT concentration in plasma samples at the point of care (POC) would be of great utility in assessing the benefit of administering an anti-eNAMPT therapeutic. To determine the feasibility of this POC test, we conducted a particle immunoagglutination assay on a paper microfluidic platform and quantified its extent with a flow rate measurement in less than 1 min. A smartphone and cloud-based Google Colab were used to analyze the flow rates automatically. A horizontal flow model and an immunoagglutination binding model were evaluated to optimize the detection time, sample dilution, and particle concentration. This assay successfully detected eNAMPT in both human whole blood and plasma samples (diluted to 10 and 1%), with a limit of detection of 1-20 pg/mL (equivalent to 0.1-0.2 ng/mL in undiluted blood and plasma) and a linear range of 5-40 pg/mL. Furthermore, the smartphone POC assay distinguished clinical samples with low, mid, and high eNAMPT concentrations. Together, these results indicate this POC assay, which utilizes low-cost materials, time-effective methods, and a straightforward immunoassay (without surface immobilization), may reliably allow rapid determination of eNAMPT blood/plasma levels to aid patient stratification in clinical trials and guide ALT-100 mAb therapeutic decision-making.},
}
@article {pmid38707321,
year = {2024},
author = {Sankar M S, K and Gupta, S and Luthra, S and Kumar, A and Jagtap, S and Samadhiya, A},
title = {Empowering sustainable manufacturing: Unleashing digital innovation in spool fabrication industries.},
journal = {Heliyon},
volume = {10},
number = {9},
pages = {e29994},
pmid = {38707321},
issn = {2405-8440},
abstract = {In industrial landscapes, spool fabrication industries play a crucial role in the successful completion of numerous industrial projects by providing prefabricated modules. However, the implementation of digitalized sustainable practices in spool fabrication industries is progressing slowly and is still in its embryonic stage due to several challenges. To implement digitalized sustainable manufacturing (SM), digital technologies such as Internet of Things, Cloud computing, Big data analytics, Cyber-physical systems, Augmented reality, Virtual reality, and Machine learning are required in the context of sustainability. The scope of the present study entails prioritization of the enablers that promote the implementation of digitalized sustainable practices in spool fabrication industries using the Improved Fuzzy Stepwise Weight Assessment Ratio Analysis (IMF-SWARA) method integrated with Triangular Fuzzy Bonferroni Mean (TFBM). The enablers are identified through a systematic literature review and are validated by a team of seven experts through a questionnaire survey. The validated enablers are then analyzed using the integrated IMF-SWARA and TFBM approach. The results indicate that the most significant enablers are management support, leadership, and governmental policies and regulations to implement digitalized SM. The study provides a comprehensive analysis of digital SM enablers in the spool fabrication industry and offers guidelines for the transformation of conventional systems into digitalized SM practices.},
}
@article {pmid38705837,
year = {2024},
author = {Mishra, A and Kim, HS and Kumar, R and Srivastava, V},
title = {Advances in Vibrio-related infection management: an integrated technology approach for aquaculture and human health.},
journal = {Critical reviews in biotechnology},
volume = {},
number = {},
pages = {1-28},
doi = {10.1080/07388551.2024.2336526},
pmid = {38705837},
issn = {1549-7801},
abstract = {Vibrio species pose significant threats worldwide, causing mortalities in aquaculture and infections in humans. Global warming and the worldwide emergence of new Vibrio strains are intensifying these threats day by day. Control of Vibrio species requires effective monitoring, diagnosis, and treatment strategies at the global scale. Despite current efforts based on chemical, biological, and mechanical means, Vibrio control management faces limitations due to complicated implementation processes. This review explores the intricacies and challenges of Vibrio-related diseases, including accurate and cost-effective diagnosis and effective control. The global burden due to emerging Vibrio species further complicates management strategies. We propose an innovative integrated technology model that harnesses cutting-edge technologies to address these obstacles. The proposed model incorporates advanced tools, such as biosensing technologies, the Internet of Things (IoT), remote sensing devices, cloud computing, and machine learning. This model offers invaluable insights and supports better decision-making by integrating real-time ecological data and biological phenotype signatures. A major advantage of our approach lies in leveraging cloud-based analytics programs, efficiently extracting meaningful information from vast and complex datasets. Collaborating with data and clinical professionals ensures logical and customized solutions tailored to each unique situation. Aquaculture biotechnology that prioritizes sustainability may have a large impact on human health and the seafood industry. Our review underscores the importance of adopting this model, revolutionizing the prognosis and management of Vibrio-related infections, even under complex circumstances. Furthermore, this model has promising implications for aquaculture and public health, addressing the United Nations Sustainable Development Goals and their development agenda.},
}
@article {pmid38698084,
year = {2024},
author = {Han, Y and Wei, Z and Huang, G},
title = {An imbalance data quality monitoring based on SMOTE-XGBOOST supported by edge computing.},
journal = {Scientific reports},
volume = {14},
number = {1},
pages = {10151},
pmid = {38698084},
issn = {2045-2322},
support = {51975386//the National Natural Science Foundation of China/ ; N2022J014//Science and Technology Research and Development Program of China State Railway Group Co.,Ltd./ ; 2022020630-JH1/108//Science and Technology Program of Liaoning Province "Unveiling and Commanding"/ ; },
abstract = {Product assembly involves extensive production data that is characterized by high dimensionality, multiple samples, and data imbalance. The article proposes an edge computing-based framework for monitoring product assembly quality in the industrial Internet of Things. Edge computing technology relieves the pressure of aggregating enormous amounts of data to a cloud center for processing. To address the problem of data imbalance, we compared five sampling methods: Borderline SMOTE, Random Downsampling, Random Upsampling, SMOTE, and ADASYN. Finally, the quality monitoring model SMOTE-XGBoost is proposed, and the hyperparameters of the model are optimized by using the Grid Search method. The proposed framework and quality control methodology were applied to an assembly line of IGBT modules for a traction system, and the validity of the model was experimentally verified.},
}
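The SMOTE-XGBoost pipeline above oversamples the minority class before fitting a gradient-boosted classifier tuned by grid search. A minimal sketch on synthetic imbalanced data (the IGBT assembly data is not public, and the hyperparameter grid here is an assumption):

```python
from imblearn.over_sampling import SMOTE
from sklearn.datasets import make_classification
from sklearn.model_selection import GridSearchCV, train_test_split
from xgboost import XGBClassifier

# Synthetic stand-in for the imbalanced assembly-quality data
X, y = make_classification(n_samples=2000, n_features=20,
                           weights=[0.95, 0.05], random_state=0)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, stratify=y, random_state=0)

X_bal, y_bal = SMOTE(random_state=0).fit_resample(X_tr, y_tr)  # balance classes

grid = GridSearchCV(XGBClassifier(eval_metric="logloss"),
                    {"max_depth": [3, 5], "learning_rate": [0.05, 0.1]},
                    scoring="f1", cv=3)
grid.fit(X_bal, y_bal)
print(grid.best_params_, grid.score(X_te, y_te))
```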
@article {pmid38696761,
year = {2024},
author = {Peccoud, S and Berezin, CT and Hernandez, SI and Peccoud, J},
title = {PlasCAT: Plasmid cloud assembly tool.},
journal = {Bioinformatics (Oxford, England)},
volume = {},
number = {},
pages = {},
doi = {10.1093/bioinformatics/btae299},
pmid = {38696761},
issn = {1367-4811},
abstract = {SUMMARY: PlasCAT is an easy-to-use cloud-based bioinformatics tool that enables de novo plasmid sequence assembly from raw sequencing data. Non-technical users can now assemble sequences from long reads and short reads without ever touching a line of code. PlasCAT uses high-performance computing servers to reduce run times on assemblies and deliver results faster.
PlasCAT is freely available on the web at https://sequencing.genofab.com. The assembly pipeline source code and server code are available for download at https://bitbucket.org/genofabinc/workspace/projects/PLASCAT. Click the Cancel button to access the source code without authenticating. Web servers implemented in React.js and Python, with all major browsers supported.},
}
@article {pmid38695012,
year = {2024},
author = {Blindenbach, J and Kang, J and Hong, S and Karam, C and Lehner, T and Gürsoy, G},
title = {Ultra-secure storage and analysis of genetic data for the advancement of precision medicine.},
journal = {bioRxiv : the preprint server for biology},
volume = {},
number = {},
pages = {},
doi = {10.1101/2024.04.16.589793},
pmid = {38695012},
abstract = {Cloud computing provides the opportunity to store the ever-growing genotype-phenotype data sets needed to achieve the full potential of precision medicine. However, due to the sensitive nature of this data and the patchwork of data privacy laws across states and countries, additional security protections are proving necessary to ensure data privacy and security. Here we present SQUiD, a secure queryable database for storing and analyzing genotype-phenotype data. With SQUiD, genotype-phenotype data can be stored in a low-security, low-cost public cloud in the encrypted form, which researchers can securely query without the public cloud ever being able to decrypt the data. We demonstrate the usability of SQUiD by replicating various commonly used calculations such as polygenic risk scores, cohort creation for GWAS, MAF filtering, and patient similarity analysis both on synthetic and UK Biobank data. Our work represents a new and scalable platform enabling the realization of precision medicine without security and privacy concerns.},
}
@article {pmid38682960,
year = {2024},
author = {Drmota, P and Nadlinger, DP and Main, D and Nichol, BC and Ainley, EM and Leichtle, D and Mantri, A and Kashefi, E and Srinivas, R and Araneda, G and Ballance, CJ and Lucas, DM},
title = {Verifiable Blind Quantum Computing with Trapped Ions and Single Photons.},
journal = {Physical review letters},
volume = {132},
number = {15},
pages = {150604},
doi = {10.1103/PhysRevLett.132.150604},
pmid = {38682960},
issn = {1079-7114},
abstract = {We report the first hybrid matter-photon implementation of verifiable blind quantum computing. We use a trapped-ion quantum server and a client-side photonic detection system networked via a fiber-optic quantum link. The availability of memory qubits and deterministic entangling gates enables interactive protocols without postselection, key requirements for any scalable blind server that previous realizations could not provide. We quantify the privacy at ≲0.03 leaked classical bits per qubit. This experiment demonstrates a path to fully verified quantum computing in the cloud.},
}
@article {pmid38682533,
year = {2024},
author = {Schweitzer, M and Ostheimer, P and Lins, A and Romano, V and Steger, B and Baumgarten, D and Augustin, M},
title = {Transforming Tele-Ophthalmology: Utilizing Cloud Computing for Remote Eye Care.},
journal = {Studies in health technology and informatics},
volume = {313},
number = {},
pages = {215-220},
doi = {10.3233/SHTI240040},
pmid = {38682533},
issn = {1879-8365},
mesh = {*Cloud Computing ; *Ophthalmology ; *Telemedicine ; Humans ; Radiology Information Systems ; Information Storage and Retrieval/methods ; },
abstract = {BACKGROUND: Tele-ophthalmology is gaining recognition for its role in improving eye care accessibility via cloud-based solutions. The Google Cloud Platform (GCP) Healthcare API enables secure and efficient management of medical image data such as high-resolution ophthalmic images.
OBJECTIVES: This study investigates cloud-based solutions' effectiveness in tele-ophthalmology, with a focus on GCP's role in data management, annotation, and integration for a novel imaging device.
METHODS: Leveraging the Integrating the Healthcare Enterprise (IHE) Eye Care profile, the cloud platform was utilized as a PACS and integrated with the Open Health Imaging Foundation (OHIF) Viewer for image display and annotation capabilities for ophthalmic images.
RESULTS: The setup of a GCP DICOM storage and the OHIF Viewer facilitated remote image data analytics. Prolonged loading times and relatively large individual image file sizes indicated system challenges.
CONCLUSION: Cloud platforms have the potential to ease distributed data analytics, as needed for efficient tele-ophthalmology scenarios in research and clinical practice, by providing scalable and secure image management solutions.},
}
@article {pmid38680952,
year = {2016},
author = {Iorga, M and Scarfone, K},
title = {Using a Capability Oriented Methodology to Build Your Cloud Ecosystem.},
journal = {IEEE cloud computing},
volume = {3},
number = {2},
pages = {},
doi = {10.1109/mcc.2016.38},
pmid = {38680952},
issn = {2325-6095},
abstract = {Organizations often struggle to capture the necessary functional capabilities for each cloud-based solution adopted for their information systems. Identifying, defining, selecting, and prioritizing these functional capabilities and the security components that implement and enforce them is surprisingly challenging. This article explains recent developments by the National Institute of Standards and Technology (NIST) in addressing these challenges. The article focuses on the capability oriented methodology for orchestrating a secure cloud ecosystem proposed as part of the NIST Cloud Computing Security Reference Architecture. The methodology recognizes that risk may vary for cloud Actors within a single ecosystem, so it takes a risk-based approach to functional capabilities. The result is an assessment of which cloud Actor is responsible for implementing each security component and how implementation should be prioritized. A cloud Actor, especially a cloud Consumer, that follows the methodology can more easily make well-informed decisions regarding their cloud ecosystems.},
}
@article {pmid38677415,
year = {2024},
author = {van der Laan, EE and Hazenberg, P and Weerts, AH},
title = {Simulation of long-term storage dynamics of headwater reservoirs across the globe using public cloud computing infrastructure.},
journal = {The Science of the total environment},
volume = {},
number = {},
pages = {172678},
doi = {10.1016/j.scitotenv.2024.172678},
pmid = {38677415},
issn = {1879-1026},
abstract = {Reservoirs play an important role in relation to water security, flood risk, hydropower and natural flow regime. This study derives a novel dataset with a long-term daily water balance (reservoir volume, inflow, outflow, evaporation and precipitation) of headwater reservoirs and storage dynamics across the globe. The data is generated using cloud computing infrastructure and a high-resolution distributed hydrological model, wflow_sbm. Model results are validated against Earth-observed surface water area and in-situ measured reservoir volume, and show overall good model performance. Simulated headwater reservoir storage indicates that 19.4-24.4% of the reservoirs had a significant decrease in storage. This change is mainly driven by a decrease in reservoir inflow and an increase in evaporation. Deployment in a Kubernetes cloud environment with reproducible workflows shows that these kinds of simulations and analyses can be conducted in less than a day.},
}
@article {pmid38676279,
year = {2024},
author = {Abdullahi, I and Longo, S and Samie, M},
title = {Towards a Distributed Digital Twin Framework for Predictive Maintenance in Industrial Internet of Things (IIoT).},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {8},
pages = {},
doi = {10.3390/s24082663},
pmid = {38676279},
issn = {1424-8220},
abstract = {This study uses a wind turbine case study as a subdomain of Industrial Internet of Things (IIoT) to showcase an architecture for implementing a distributed digital twin in which all important aspects of a predictive maintenance solution in a DT use a fog computing paradigm, and the typical predictive maintenance DT is improved to offer better asset utilization and management through real-time condition monitoring, predictive analytics, and health management of selected components of wind turbines in a wind farm. Digital twin (DT) is a technology that sits at the intersection of Internet of Things, Cloud Computing, and Software Engineering to provide a suitable tool for replicating physical objects in the digital space. This can facilitate the implementation of asset management in manufacturing systems through predictive maintenance solutions leveraged by machine learning (ML). With DTs, a solution architecture can easily use data and software to implement asset management solutions such as condition monitoring and predictive maintenance using acquired sensor data from physical objects and computing capabilities in the digital space. While DT offers a good solution, it is an emerging technology that could be improved with better standards, architectural framework, and implementation methodologies. Researchers in both academia and industry have showcased DT implementations with different levels of success. However, DTs remain limited in standards and architectures that offer efficient predictive maintenance solutions with real-time sensor data and intelligent DT capabilities. An appropriate feedback mechanism is also needed to improve asset management operations.},
}
@article {pmid38668979,
year = {2024},
author = {Hsiao, J and Deng, LC and Moroz, LL and Chalasani, SH and Edsinger, E},
title = {Ocean to Tree: Leveraging Single-Molecule RNA-Seq to Repair Genome Gene Models and Improve Phylogenomic Analysis of Gene and Species Evolution.},
journal = {Methods in molecular biology (Clifton, N.J.)},
volume = {2757},
number = {},
pages = {461-490},
pmid = {38668979},
issn = {1940-6029},
mesh = {Animals ; *Phylogeny ; *Evolution, Molecular ; *RNA-Seq/methods ; *Ctenophora/genetics/classification ; Genome/genetics ; Computational Biology/methods ; Software ; Genomics/methods ; Models, Genetic ; },
abstract = {Understanding gene evolution across genomes and organisms, including ctenophores, can provide unexpected biological insights. It enables powerful integrative approaches that leverage sequence diversity to advance biomedicine. Sequencing and bioinformatic tools can be inexpensive and user-friendly, but numerous options and coding can intimidate new users. Distinct challenges exist in working with data from diverse species but may go unrecognized by researchers accustomed to gold-standard genomes. Here, we provide a high-level workflow and detailed pipeline to enable animal collection, single-molecule sequencing, and phylogenomic analysis of gene and species evolution. As a demonstration, we focus on (1) PacBio RNA-seq of the genome-sequenced ctenophore Mnemiopsis leidyi, (2) diversity and evolution of the mechanosensitive ion channel Piezo in genetic models and basal-branching animals, and (3) associated challenges and solutions to working with diverse species and genomes, including gene model updating and repair using single-molecule RNA-seq. We provide a Python Jupyter Notebook version of our pipeline (GitHub Repository: Ctenophore-Ocean-To-Tree-2023 https://github.com/000generic/Ctenophore-Ocean-To-Tree-2023) that can be run for free in the Google Colab cloud to replicate our findings or modified for specific or greater use. Our protocol enables users to design new sequencing projects in ctenophores, marine invertebrates, or other novel organisms. It provides a simple, comprehensive platform that can ease new user entry into running their evolutionary sequence analyses.},
}
@article {pmid38665579,
year = {2024},
author = {El Jaouhari, A and Arif, J and Samadhiya, A and Naz, F and Kumar, A},
title = {Exploring the application of ICTs in decarbonizing the agriculture supply chain: A literature review and research agenda.},
journal = {Heliyon},
volume = {10},
number = {8},
pages = {e29564},
doi = {10.1016/j.heliyon.2024.e29564},
pmid = {38665579},
issn = {2405-8440},
abstract = {The contemporary agricultural supply chain necessitates the integration of information and communication technologies to effectively mitigate the multifaceted challenges posed by climate change and rising global demand for food products. Furthermore, recent developments in information and communication technologies, such as blockchain, big data analytics, the internet of things, artificial intelligence, cloud computing, etc., have made this transformation possible. Each of these technologies plays a particular role in enabling the agriculture supply chain ecosystem to be intelligent enough to handle today's world's challenges. Thus, this paper reviews the crucial information and communication technologies-enabled agriculture supply chains to understand their potential uses and contemporary developments. The review is supported by 57 research papers from the Scopus database. Five research areas analyze the applications of the technology reviewed in the agriculture supply chain: food safety and traceability, security and information system management, wasting food, supervision and tracking, agricultural businesses and decision-making, and other applications not explicitly related to the agriculture supply chain. The study also emphasizes how information and communication technologies can help agriculture supply chains and promote agriculture supply chain decarbonization. An information and communication technologies application framework for a decarbonized agriculture supply chain is suggested based on the research's findings. The framework identifies the contribution of information and communication technologies to decision-making in agriculture supply chains. The review also offers guidelines to academics, policymakers, and practitioners on managing agriculture supply chains successfully for enhanced agricultural productivity and decarbonization.},
}
@article {pmid38665413,
year = {2023},
author = {Khazali, M and Lechner, W},
title = {Scalable quantum processors empowered by the Fermi scattering of Rydberg electrons.},
journal = {Communications physics},
volume = {6},
number = {1},
pages = {57},
doi = {10.1038/s42005-023-01174-4},
pmid = {38665413},
issn = {2399-3650},
abstract = {Quantum computing promises exponential speed-up compared to its classical counterpart. While neutral atom processors are the pioneering platform in terms of scalability, dipolar Rydberg gates impose the main bottlenecks on the scaling of these devices. This article presents an alternative scheme for neutral atom quantum processing, based on the Fermi scattering of a Rydberg electron from ground-state atoms in spin-dependent lattice geometries. Instead of relying on Rydberg pair-potentials, the interaction is controlled by engineering the electron cloud of a sole Rydberg atom. The present scheme addresses the scaling obstacles in Rydberg processors by exponentially suppressing the population of short-lived states and by operating in ultra-dense atomic lattices. The restoring forces in the molecule-type Rydberg-Fermi potential preserve trapping over a long interaction period. Furthermore, the proposed scheme mitigates different competing infidelity criteria, eliminates unwanted cross-talk, and significantly reduces the operation depth in running complicated quantum algorithms.},
}
@article {pmid38660213,
year = {2024},
author = {Ullah, R and Yahya, M and Mostarda, L and Alshammari, A and Alutaibi, AI and Sarwar, N and Ullah, F and Ullah, S},
title = {Intelligent decision making for energy efficient fog nodes selection and smart switching in the IOT: a machine learning approach.},
journal = {PeerJ. Computer science},
volume = {10},
number = {},
pages = {e1833},
pmid = {38660213},
issn = {2376-5992},
abstract = {With the emergence of Internet of Things (IoT) technology, a huge amount of data is generated, which is costly to transfer to cloud data centers in terms of security, bandwidth, and latency. Fog computing is an efficient paradigm for locally processing and manipulating IoT-generated data. It is difficult to configure fog nodes to provide all of the services required by the end devices because of their static configuration and poor processing and storage capacities. To enhance fog nodes' capabilities, it is essential to reconfigure them to accommodate a broader range and variety of hosted services. In this study, we focus on the placement of fog services and their dynamic reconfiguration in response to end-device requests. Due to its growing success and popularity in the IoT era, the Decision Tree (DT) machine learning model is implemented to predict the occurrence of requests and events in advance. The DT model enables a fog node to predict requests for a specific service in advance and reconfigure itself accordingly. The performance of the proposed model is evaluated in terms of high throughput, minimized energy consumption, and dynamic fog node smart switching. The simulation results demonstrate a notable increase in fog node hit ratios, scaling up to 99% for the majority of services, concurrently with a substantial reduction in miss ratios. Furthermore, energy consumption is reduced by over 50% compared to a static node.},
}
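A minimal sketch, in Python with scikit-learn, of the decision-tree idea in the preceding entry (pmid38660213): a fog node predicts which service will be requested next and reconfigures itself ahead of time. The features (hour, weekday, last requested service) and the synthetic request log are assumptions of this sketch, not the paper's published feature set.

    # Hypothetical features and synthetic data; not the paper's actual dataset.
    import numpy as np
    from sklearn.model_selection import train_test_split
    from sklearn.tree import DecisionTreeClassifier

    rng = np.random.default_rng(0)
    n = 1000
    hour = rng.integers(0, 24, n)
    weekday = rng.integers(0, 7, n)
    last = rng.integers(0, 3, n)  # id of the previously requested service
    # Synthetic pattern: mornings favor service 0, evenings service 1, otherwise repeat last.
    next_service = np.where(hour < 8, 0, np.where(hour > 18, 1, last))

    X = np.column_stack([hour, weekday, last])
    X_tr, X_te, y_tr, y_te = train_test_split(X, next_service, random_state=0)
    clf = DecisionTreeClassifier(max_depth=5).fit(X_tr, y_tr)
    print("hit ratio on held-out requests:", clf.score(X_te, y_te))
    # A fog node would call clf.predict(...) periodically and pre-load the predicted service.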
@article {pmid38660188,
year = {2024},
author = {Cambronero, ME and Martínez, MA and Llana, L and Rodríguez, RJ and Russo, A},
title = {Towards a GDPR-compliant cloud architecture with data privacy controlled through sticky policies.},
journal = {PeerJ. Computer science},
volume = {10},
number = {},
pages = {e1898},
pmid = {38660188},
issn = {2376-5992},
abstract = {Data privacy is one of the biggest challenges facing system architects at the system design stage, especially when cloud environments have to comply with laws such as the General Data Protection Regulation (GDPR). In this article, we want to help cloud providers comply with the GDPR by proposing a GDPR-compliant cloud architecture. To do this, we use model-driven engineering techniques to design cloud architectures and analyze cloud interactions. In particular, we develop a complete framework, called MDCT, which includes a Unified Modeling Language profile that allows us to define specific cloud scenarios and profile validation to ensure that certain required properties are met. The validation process is implemented through Object Constraint Language (OCL) rules, which allow us to describe the constraints in these models. To comply with many GDPR articles, the proposed cloud architecture considers data privacy and data tracking, enabling safe and secure data management and tracking in the context of the cloud. For this purpose, sticky policies associated with the data are incorporated to define permission for third parties to access the data and to track instances of data access. As a result, a cloud architecture designed with MDCT contains a set of OCL rules to validate it as a GDPR-compliant cloud architecture. Our tool models key GDPR points such as user consent/withdrawal, the purpose of access, and data transparency and auditing, and considers data privacy and data tracking with the help of sticky policies.},
}
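A minimal sketch of the sticky-policy idea from the preceding entry (pmid38660188): the policy travels with the data record, every access is checked against it, and each access attempt is logged for auditing. All field names are hypothetical; MDCT itself expresses policies in UML/OCL rather than Python, so this only illustrates the concept.

    # Sketch of a sticky policy attached to a data record; names are illustrative.
    from dataclasses import dataclass, field
    from datetime import datetime, timezone

    @dataclass
    class StickyPolicy:
        allowed_parties: set        # third parties the data subject consented to
        purpose: str                # purpose of access granted by the user
        consent_withdrawn: bool = False

    @dataclass
    class DataRecord:
        payload: dict
        policy: StickyPolicy
        audit_log: list = field(default_factory=list)

        def access(self, party: str, purpose: str):
            allowed = (not self.policy.consent_withdrawn
                       and party in self.policy.allowed_parties
                       and purpose == self.policy.purpose)
            # Every attempt is recorded, allowed or not, to support auditing.
            self.audit_log.append(
                (datetime.now(timezone.utc).isoformat(), party, purpose, allowed))
            if not allowed:
                raise PermissionError(f"{party} may not access this record for '{purpose}'")
            return self.payload

    record = DataRecord({"name": "Alice"}, StickyPolicy({"clinic-A"}, "treatment"))
    print(record.access("clinic-A", "treatment"))  # permitted and logged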
@article {pmid38660156,
year = {2024},
author = {Hassan, SR and Rehman, AU and Alsharabi, N and Arain, S and Quddus, A and Hamam, H},
title = {Design of load-aware resource allocation for heterogeneous fog computing systems.},
journal = {PeerJ. Computer science},
volume = {10},
number = {},
pages = {e1986},
pmid = {38660156},
issn = {2376-5992},
abstract = {The execution of delay-aware applications can be effectively handled by various computing paradigms, including fog computing, edge computing, and cloudlets. Cloud computing offers services in a centralized way through a cloud server. On the contrary, the fog computing paradigm offers services in a dispersed manner, providing services and computational facilities near the end devices. Due to the distributed provision of resources by the fog paradigm, this architecture is suitable for large-scale implementation of applications. Furthermore, fog computing offers a reduction in delay and network load as compared to cloud architecture. Resource distribution and load balancing are always important tasks in deploying efficient systems. In this research, we have proposed a heuristic-based approach that achieves a reduction in network consumption and delays by efficiently utilizing fog resources according to the load generated by the clusters of edge nodes. The proposed algorithm considers the magnitude of data produced at the edge clusters while allocating the fog resources. The results of the evaluations performed on different scales confirm the efficacy of the proposed approach in achieving optimal performance.},
}
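A minimal sketch of a load-aware greedy allocation in the spirit of the preceding entry (pmid38660156): edge clusters are assigned in decreasing order of generated data, each to the fog node with the most spare capacity. The cluster loads and node capacities are illustrative, not the paper's evaluation setup.

    # Greedy, load-aware mapping of edge clusters to fog nodes (illustrative numbers).
    def allocate(cluster_load, fog_capacity):
        remaining = dict(fog_capacity)
        assignment = {}
        for cluster, load in sorted(cluster_load.items(), key=lambda kv: -kv[1]):
            node = max(remaining, key=remaining.get)  # fog node with most spare capacity
            assignment[cluster] = node
            remaining[node] -= load
        return assignment

    clusters = {"c1": 40.0, "c2": 25.0, "c3": 10.0, "c4": 35.0}  # data volume per cluster
    fogs = {"f1": 60.0, "f2": 60.0}                              # processing capacity
    print(allocate(clusters, fogs))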
@article {pmid38658838,
year = {2024},
author = {Wei, W and Xia, X and Li, T and Chen, Q and Feng, X},
title = {Shaoxia: a web-based interactive analysis platform for single cell RNA sequencing data.},
journal = {BMC genomics},
volume = {25},
number = {1},
pages = {402},
pmid = {38658838},
issn = {1471-2164},
support = {82170971//the National Natural Science Foundations of China/ ; YJ201987//Fundamental Research Funds for the Central Universities/ ; 2021ZYD0090//Sichuan Science and Technology Program/ ; QDJF2019-3//Scientific Research Foundation, West China Hospital of Stomatology Sichuan University/ ; CIFMS 2019-I2M-5-004//CAMS Innovation Fund for Medical Sciences/ ; },
abstract = {BACKGROUND: In recent years, single-cell RNA sequencing (scRNA-seq) has become increasingly accessible to researchers in many fields. However, interpreting its data demands proficiency in multiple programming languages and bioinformatics skills, which prevents researchers without such expertise from exploring the information in scRNA-seq data. There is therefore a tremendous need for easy-to-use software covering all aspects of scRNA-seq data analysis.
RESULTS: We proposed a clear analysis framework for scRNA-seq data, which emphasized the fundamental and crucial roles of cell identity annotation, abstracting the analysis process into three stages: upstream analysis, cell annotation and downstream analysis. The framework can equip researchers with a comprehensive understanding of the analysis procedure and facilitate effective data interpretation. Leveraging the developed framework, we engineered Shaoxia, an analysis platform designed to democratize scRNA-seq analysis by accelerating processing through high-performance computing capabilities and offering a user-friendly interface accessible even to wet-lab researchers without programming expertise.
CONCLUSION: Shaoxia stands as a powerful and user-friendly open-source platform for automated scRNA-seq analysis, offering comprehensive functionality for streamlined functional genomics studies. Shaoxia is freely accessible at http://www.shaoxia.cloud, and its source code is publicly available at https://github.com/WiedenWei/shaoxia.},
}
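A minimal sketch of the three-stage framework described in the preceding entry (pmid38658838), using Scanpy. This is not Shaoxia's internal code: the demo dataset (downloaded by sc.datasets.pbmc3k), the parameter values, and the leidenalg dependency for clustering are assumptions of this sketch.

    # Three stages: upstream analysis, cell annotation, downstream analysis.
    import scanpy as sc

    adata = sc.datasets.pbmc3k()  # demo dataset (downloads on first use)

    # Stage 1: upstream analysis (QC, normalization, dimensionality reduction, clustering)
    sc.pp.filter_cells(adata, min_genes=200)
    sc.pp.filter_genes(adata, min_cells=3)
    sc.pp.normalize_total(adata, target_sum=1e4)
    sc.pp.log1p(adata)
    sc.pp.highly_variable_genes(adata, n_top_genes=2000)
    sc.pp.pca(adata)
    sc.pp.neighbors(adata)
    sc.tl.leiden(adata)  # requires the leidenalg package

    # Stage 2: cell annotation (marker genes per cluster, to be matched to cell types)
    sc.tl.rank_genes_groups(adata, groupby="leiden")

    # Stage 3: downstream analysis, e.g. visualization of the annotated populations
    sc.tl.umap(adata)
    sc.pl.umap(adata, color="leiden")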
@article {pmid38638340,
year = {2024},
author = {Abbas, Q and Alyas, T and Alghamdi, T and Alkhodre, AB and Albouq, S and Niazi, M and Tabassum, N},
title = {Redefining governance: a critical analysis of sustainability transformation in e-governance.},
journal = {Frontiers in big data},
volume = {7},
number = {},
pages = {1349116},
pmid = {38638340},
issn = {2624-909X},
abstract = {With the rapid growth of information and communication technologies, governments worldwide are embracing digital transformation to enhance service delivery and governance practices. In the rapidly evolving landscape of information technology (IT), secure data management stands as a cornerstone for organizations aiming to safeguard sensitive information. Robust data modeling techniques are pivotal in structuring and organizing data, ensuring its integrity, and facilitating efficient retrieval and analysis. As the world increasingly emphasizes sustainability, integrating eco-friendly practices into data management processes becomes imperative. This study focuses on the specific context of Pakistan and investigates the potential of cloud computing in advancing e-governance capabilities. Cloud computing offers scalability, cost efficiency, and enhanced data security, making it an ideal technology for digital transformation. Through an extensive literature review, analysis of case studies, and interviews with stakeholders, this research explores the current state of e-governance in Pakistan, identifies the challenges faced, and proposes a framework for leveraging cloud computing to overcome these challenges. The findings reveal that cloud computing can significantly enhance the accessibility, scalability, and cost-effectiveness of e-governance services, thereby improving citizen engagement and satisfaction. This study provides valuable insights for policymakers, government agencies, and researchers interested in the digital transformation of e-governance in Pakistan and offers a roadmap for leveraging cloud computing technologies in similar contexts. The findings contribute to the growing body of knowledge on e-governance and cloud computing, supporting the advancement of digital governance practices globally. This research identifies monitoring parameters necessary to establish a sustainable e-governance system incorporating big data and cloud computing. The proposed framework, Monitoring and Assessment System using Cloud (MASC), is validated through secondary data analysis and successfully fulfills the research objectives. By leveraging big data and cloud computing, governments can revolutionize their digital governance practices, driving transformative changes and enhancing efficiency and effectiveness in public administration.},
}
@article {pmid38628614,
year = {2024},
author = {Wang, TH and Kao, CC and Chang, TH},
title = {Ensemble Machine Learning for Predicting 90-Day Outcomes and Analyzing Risk Factors in Acute Kidney Injury Requiring Dialysis.},
journal = {Journal of multidisciplinary healthcare},
volume = {17},
number = {},
pages = {1589-1602},
pmid = {38628614},
issn = {1178-2390},
abstract = {PURPOSE: Our objectives were to (1) employ ensemble machine learning algorithms utilizing real-world clinical data to predict 90-day prognosis, including dialysis dependence and mortality, following the first hospitalized dialysis and (2) identify the significant factors associated with overall outcomes.
PATIENTS AND METHODS: We identified hospitalized patients with acute kidney injury requiring dialysis (AKI-D) from the Taipei Medical University Clinical Research Database (TMUCRD) from January 2008 to December 2020. The extracted data comprise demographics, comorbidities, medications, and laboratory parameters. Ensemble machine learning models were developed utilizing real-world clinical data through the Google Cloud Platform.
RESULTS: The study analyzed 1080 patients in the dialysis-dependent module, out of which 616 received regular dialysis after 90 days. Our ensemble model, consisting of 25 feedforward neural network models, demonstrated the best performance with an AUROC of 0.846. We identified the baseline creatinine value, assessed at least 90 days before the initial dialysis, as the most crucial factor. We selected 2358 patients, 984 of whom were deceased after 90 days, for the survival module. The ensemble model, comprising 15 feedforward neural network models and 10 gradient-boosted decision tree models, achieved superior performance with an AUROC of 0.865. The pre-dialysis creatinine value, tested within 90 days prior to the initial dialysis, was identified as the most significant factor.
CONCLUSION: Ensemble machine learning models outperform the logistic regression models reported in the existing literature in predicting outcomes of AKI-D. Our study, which includes a large sample size from three different hospitals, supports the significance of the creatinine value tested before the first hospitalized dialysis in determining overall prognosis. Healthcare providers could benefit from utilizing our validated prediction model to improve clinical decision-making and enhance patient care for the high-risk population.},
}
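A minimal sketch of a soft-voting ensemble of feedforward neural networks, echoing the 25-model ensemble reported in the preceding entry (pmid38628614). The data here are synthetic and the ensemble is trimmed to five members for brevity; the study used curated clinical features and Google Cloud tooling.

    # Soft-voting ensemble of small feedforward networks on synthetic data.
    from sklearn.datasets import make_classification
    from sklearn.ensemble import VotingClassifier
    from sklearn.metrics import roc_auc_score
    from sklearn.model_selection import train_test_split
    from sklearn.neural_network import MLPClassifier

    X, y = make_classification(n_samples=2000, n_features=30, random_state=0)
    X_tr, X_te, y_tr, y_te = train_test_split(X, y, random_state=0)

    members = [(f"ffnn{i}",
                MLPClassifier(hidden_layer_sizes=(64, 32), random_state=i, max_iter=500))
               for i in range(5)]  # 5 members for brevity; the paper used 25
    ensemble = VotingClassifier(members, voting="soft").fit(X_tr, y_tr)
    proba = ensemble.predict_proba(X_te)[:, 1]
    print("AUROC:", round(roc_auc_score(y_te, proba), 3))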
@article {pmid38628390,
year = {2024},
author = {Fujinami, H and Kuraishi, S and Teramoto, A and Shimada, S and Takahashi, S and Ando, T and Yasuda, I},
title = {Development of a novel endoscopic hemostasis-assisted navigation AI system in the standardization of post-ESD coagulation.},
journal = {Endoscopy international open},
volume = {12},
number = {4},
pages = {E520-E525},
pmid = {38628390},
issn = {2364-3722},
abstract = {Background and study aims: While gastric endoscopic submucosal dissection (ESD) has become a treatment with fewer complications, delayed bleeding remains a challenge. Post-ESD coagulation (PEC) is performed to prevent delayed bleeding. Therefore, we developed an artificial intelligence (AI) system to detect vessels that require PEC in real time. Materials and methods: Training data were extracted from 153 gastric ESD videos with sufficient images taken with a second-look endoscopy (SLE) and annotated as follows: (1) vessels that showed bleeding during SLE without PEC; (2) vessels that did not bleed during SLE with PEC; and (3) vessels that did not bleed even without PEC. The training model was created using Google Cloud Vertex AI, and a program was created to display the vessels requiring PEC in real time using a bounding box. The AI was evaluated on 12 unseen test videos, including four cases that required additional coagulation during SLE. Results: The test video validation indicated that 109 vessels on the ulcer required cauterization. Of these, 80 vessels (73.4%) were correctly determined as not requiring additional treatment. However, 25 vessels (22.9%) that did not require PEC were overestimated. In the four videos that required additional coagulation in SLE, the AI was able to detect all bleeding vessels. Conclusions: The effectiveness and safety of this endoscopic treatment-assisted AI system that identifies visible vessels requiring PEC should be confirmed in future studies.},
}
@article {pmid38625954,
year = {2024},
author = {Frimpong, T and Hayfron Acquah, JB and Missah, YM and Dawson, JK and Ayawli, BBK and Baah, P and Sam, SA},
title = {Securing cloud data using secret key 4 optimization algorithm (SK4OA) with a non-linearity run time trend.},
journal = {PloS one},
volume = {19},
number = {4},
pages = {e0301760},
pmid = {38625954},
issn = {1932-6203},
mesh = {*Algorithms ; *Information Storage and Retrieval ; Cloud Computing ; Computer Security ; Microcomputers ; },
abstract = {Cloud computing alludes to the on-demand availability of personal computer framework resources, primarily information storage and processing power, without the customer's direct personal involvement. Cloud computing has developed dramatically among many organizations due to its benefits such as cost savings, resource pooling, broad network access, and ease of management; nonetheless, security has been a major concern. Researchers have proposed several cryptographic methods to offer cloud data security; however, their execution times grow linearly with data size. A Security Key 4 Optimization Algorithm (SK4OA) with a non-linear run time is proposed in this paper. The run time of SK4OA is determined by its secret key rather than the size of the data, so it can transmit large volumes of data with minimal bandwidth and resist security attacks such as brute force, since its execution timings are unpredictable. A data set from Kaggle was used to determine the algorithm's mean and standard deviation after thirty (30) executions. Data sizes of 3 KB, 5 KB, 8 KB, 12 KB, and 16 KB were used in this study. An empirical analysis was conducted against RC4, Salsa20, and ChaCha20 based on encryption time, decryption time, throughput, and memory utilization. The analysis showed that SK4OA generated the lowest mean non-linear run time of 5.545±2.785 when 16 KB of data was executed. Additionally, SK4OA's standard deviation was greater, indicating that the observed data varied far from the mean. However, RC4, Salsa20, and ChaCha20 showed smaller standard deviations, making them more clustered around the mean, resulting in predictable run times.},
}
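A minimal sketch of the benchmarking protocol only (thirty executions, mean and standard deviation of run time), as used to compare SK4OA against RC4, Salsa20, and ChaCha20 in the preceding entry. The cipher below is a hypothetical keyed-keystream stand-in, not SK4OA, whose construction is described in the paper.

    # Timing harness: 30 runs over a 16 KB payload, report mean and stdev.
    import hashlib
    import os
    import statistics
    import time

    def toy_cipher(key: bytes, data: bytes) -> bytes:
        # Stand-in stream cipher (SHA-256 counter keystream XOR); NOT SK4OA.
        out, counter = bytearray(), 0
        while len(out) < len(data):
            out += hashlib.sha256(key + counter.to_bytes(8, "big")).digest()
            counter += 1
        return bytes(b ^ k for b, k in zip(data, out))

    key, data = os.urandom(32), os.urandom(16 * 1024)  # 16 KB, as in the study
    times = []
    for _ in range(30):                                # thirty executions
        t0 = time.perf_counter()
        toy_cipher(key, data)
        times.append((time.perf_counter() - t0) * 1000)  # milliseconds
    print(f"mean={statistics.mean(times):.3f} ms, stdev={statistics.stdev(times):.3f} ms")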
@article {pmid38610575,
year = {2024},
author = {Ocampo, AF and Fida, MR and Elmokashfi, A and Bryhni, H},
title = {Assessing the Cloud-RAN in the Linux Kernel: Sharing Computing and Network Resources.},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {7},
pages = {},
pmid = {38610575},
issn = {1424-8220},
abstract = {Cloud-based Radio Access Network (Cloud-RAN) leverages virtualization to enable the coexistence of multiple virtual Base Band Units (vBBUs) with collocated workloads on a single edge computer, aiming for economic and operational efficiency. However, this coexistence can cause performance degradation in vBBUs due to resource contention. In this paper, we conduct an empirical analysis of vBBU performance on a Linux RT-Kernel, highlighting the impact of resource sharing with user-space tasks and Kernel threads. Furthermore, we evaluate CPU management strategies such as CPU affinity and CPU isolation as potential solutions to these performance challenges. Our results highlight that the implementation of CPU affinity can significantly reduce throughput variability by up to 40%, decrease vBBU's NACK ratios, and reduce vBBU scheduling latency within the Linux RT-Kernel. Collectively, these findings underscore the potential of CPU management strategies to enhance vBBU performance in Cloud-RAN environments, enabling more efficient and stable network operations. The paper concludes with a discussion on the efficient realization of Cloud-RAN, elucidating the benefits of implementing proposed CPU affinity allocations. The demonstrated enhancements, including reduced scheduling latency and improved end-to-end throughput, affirm the practicality and efficacy of the proposed strategies for optimizing Cloud-RAN deployments.},
}
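A minimal sketch of the CPU-affinity strategy evaluated for vBBUs in the preceding entry (pmid38610575): pinning a latency-critical process to dedicated cores so it does not contend with collocated workloads. The core ids are illustrative, and os.sched_setaffinity is Linux-only.

    # Pin the calling process to a fixed set of cores (Linux only).
    import os

    def pin_to_cores(cores):
        os.sched_setaffinity(0, cores)  # 0 = the calling process
        print("now restricted to cores:", sorted(os.sched_getaffinity(0)))

    if hasattr(os, "sched_setaffinity"):
        pin_to_cores({2, 3})  # e.g., reserve cores 2-3 for the latency-critical task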
@article {pmid38610476,
year = {2024},
author = {Liang, YP and Chang, CM and Chung, CC},
title = {Implementation of Lightweight Convolutional Neural Networks with an Early Exit Mechanism Utilizing 40 nm CMOS Process for Fire Detection in Unmanned Aerial Vehicles.},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {7},
pages = {},
pmid = {38610476},
issn = {1424-8220},
support = {MOST-111-2221-E-194-049-//Ministry of Science and Technology of Taiwan/ ; },
abstract = {The advancement of unmanned aerial vehicles (UAVs) enables early detection of numerous disasters. Efforts have been made to automate the monitoring of data from UAVs, with machine learning methods recently attracting significant interest. These solutions often face challenges with high computational costs and energy usage. Conventionally, data from UAVs are processed using cloud computing, where they are sent to the cloud for analysis. However, this method might not meet the real-time needs of disaster relief scenarios. In contrast, edge computing provides real-time processing at the site but still struggles with computational and energy efficiency issues. To overcome these obstacles and enhance resource utilization, this paper presents a convolutional neural network (CNN) model with an early exit mechanism designed for fire detection in UAVs. This model is implemented using TSMC 40 nm CMOS technology, which aids in hardware acceleration. Notably, the neural network has a modest parameter count of 11.2 k. In the hardware computation part, the CNN circuit completes fire detection in approximately 230,000 cycles. Power-gating techniques are also used to turn off inactive memory, contributing to reduced power consumption. The experimental results show that this neural network reaches a maximum accuracy of 81.49% in the hardware implementation stage. After automatic layout and routing, the CNN hardware accelerator can operate at 300 MHz, consuming 117 mW of power.},
}
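A minimal PyTorch sketch of the early-exit mechanism named in the preceding entry (pmid38610476): a small classifier head after the first convolutional block lets confident samples skip the remaining layers, cutting computation. The layer sizes and the 0.9 confidence threshold are illustrative, not the paper's 11.2 k-parameter hardware design.

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class EarlyExitCNN(nn.Module):
        def __init__(self, n_classes=2, threshold=0.9):
            super().__init__()
            self.block1 = nn.Sequential(
                nn.Conv2d(3, 8, 3, padding=1), nn.ReLU(), nn.MaxPool2d(2))
            self.exit1 = nn.Linear(8 * 16 * 16, n_classes)   # early classifier head
            self.block2 = nn.Sequential(
                nn.Conv2d(8, 16, 3, padding=1), nn.ReLU(), nn.MaxPool2d(2))
            self.exit2 = nn.Linear(16 * 8 * 8, n_classes)    # final classifier head
            self.threshold = threshold

        def forward(self, x):  # expects a single 3x32x32 image per batch at inference
            h = self.block1(x)
            logits1 = self.exit1(h.flatten(1))
            if not self.training and F.softmax(logits1, dim=1).max() >= self.threshold:
                return logits1                                # confident: exit early
            return self.exit2(self.block2(h).flatten(1))

    model = EarlyExitCNN().eval()
    print(model(torch.randn(1, 3, 32, 32)).shape)  # torch.Size([1, 2])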
@article {pmid38610447,
year = {2024},
author = {Gomes, B and Soares, C and Torres, JM and Karmali, K and Karmali, S and Moreira, RS and Sobral, P},
title = {An Efficient Edge Computing-Enabled Network for Used Cooking Oil Collection.},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {7},
pages = {},
pmid = {38610447},
issn = {1424-8220},
abstract = {In Portugal, more than 98% of domestic cooking oil is disposed of improperly every day. This prevents its recycling or reconversion into other forms of energy, and it may also become a harmful contaminant of soil and water. Driven by the utility of recycled cooking oil, and leveraging the exponential growth of ubiquitous computing approaches, we propose an IoT smart solution for domestic used cooking oil (UCO) collection bins. We call this approach SWAN, which stands for Smart Waste Accumulation Network. It is deployed and evaluated in Portugal. It consists of a countrywide network of collection bin units, available in public areas. Two metrics are considered to evaluate the system's success: (i) user engagement, and (ii) used cooking oil collection efficiency. The presented system should (i) perform under scenarios of temporary communication network failures, and (ii) be scalable to accommodate an ever-growing number of installed collection units. Thus, we depart from the traditional cloud computing paradigm and rely on edge node infrastructure to process, store, and act upon the locally collected data, treating communication as a delay-tolerant task, i.e., an edge computing solution. We conduct a comparative analysis revealing the benefits of the edge computing-enabled collection bin vs. a cloud computing solution. The studied period considers four years of collected data. An exponential increase in the amount of used cooking oil collected is identified, with the developed solution being responsible for surpassing the national collection totals of previous years. During the same period, we also improved the collection process, as we were able to more accurately estimate the optimal collection and system maintenance intervals.},
}
@article {pmid38610327,
year = {2024},
author = {Armijo, A and Zamora-Sánchez, D},
title = {Integration of Railway Bridge Structural Health Monitoring into the Internet of Things with a Digital Twin: A Case Study.},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {7},
pages = {},
pmid = {38610327},
issn = {1424-8220},
support = {ZL-2020/00902//Basque Government/ ; GA10112353//European Commission/ ; GA10108395//European Commission/ ; },
abstract = {Structural health monitoring (SHM) is critical for ensuring the safety of infrastructure such as bridges. This article presents a digital twin solution for the SHM of railway bridges using low-cost wireless accelerometers and machine learning (ML). The system architecture combines on-premises edge computing and cloud analytics to enable efficient real-time monitoring and complete storage of relevant time-history datasets. After train crossings, the accelerometers stream raw vibration data, which are processed in the frequency domain and analyzed using machine learning to detect anomalies that indicate potential structural issues. The digital twin approach is demonstrated on an in-service railway bridge for which vibration data were collected over two years under normal operating conditions. By learning allowable ranges for vibration patterns, the digital twin model identifies abnormal spectral peaks that indicate potential changes in structural integrity. The long-term pilot proves that this affordable SHM system can provide automated and real-time warnings of bridge damage and also supports the use of in-house-designed sensors with lower cost and edge computing capabilities such as those used in the demonstration. The successful on-premises-cloud hybrid implementation provides a cost effective and scalable model for expanding monitoring to thousands of railway bridges, democratizing SHM to improve safety by avoiding catastrophic failures.},
}
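A minimal sketch of the anomaly-screening idea in the preceding entry (pmid38610327): allowable ranges for the dominant vibration frequency are learned from baseline train crossings, and a new crossing is flagged when its spectral peak drifts outside them. The sampling rate, mode frequency, and 3-sigma band are synthetic assumptions, not the bridge's measured values.

    # Frequency-domain anomaly screening on synthetic accelerometer records.
    import numpy as np

    fs = 200.0  # sampling rate in Hz (illustrative)

    def dominant_freq(signal):
        spec = np.abs(np.fft.rfft(signal * np.hanning(len(signal))))
        freqs = np.fft.rfftfreq(len(signal), d=1 / fs)
        return freqs[spec.argmax()]

    rng = np.random.default_rng(1)
    t = np.arange(0, 10, 1 / fs)
    baseline = [np.sin(2 * np.pi * 3.1 * t) + 0.3 * rng.standard_normal(t.size)
                for _ in range(50)]                       # 50 normal crossings
    peaks = np.array([dominant_freq(s) for s in baseline])
    low, high = peaks.mean() - 3 * peaks.std(), peaks.mean() + 3 * peaks.std()

    # A stiffness loss would shift the mode; simulate a crossing at 2.6 Hz instead of 3.1 Hz.
    new_crossing = np.sin(2 * np.pi * 2.6 * t) + 0.3 * rng.standard_normal(t.size)
    f = dominant_freq(new_crossing)
    print(f"peak {f:.2f} Hz, allowable [{low:.2f}, {high:.2f}] ->",
          "ANOMALY" if not low <= f <= high else "ok")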
@article {pmid38610235,
year = {2024},
author = {Gaffurini, M and Flammini, A and Ferrari, P and Fernandes Carvalho, D and Godoy, EP and Sisinni, E},
title = {End-to-End Emulation of LoRaWAN Architecture and Infrastructure in Complex Smart City Scenarios Exploiting Containers.},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {7},
pages = {},
pmid = {38610235},
issn = {1424-8220},
support = {1033 17/06/2022, CN00000023//European Union/ ; },
abstract = {In a LoRaWAN network, the backend is generally distributed as Software as a Service (SaaS) based on container technology, and recently, a containerized version of the LoRaWAN node stack has also become available. Exploiting the disaggregation of LoRaWAN components, this paper focuses on the emulation of complex end-to-end architectures and infrastructures for smart city scenarios, leveraging lightweight virtualization technology. The fundamental metrics to gain insights and evaluate the scaling complexity of the emulated scenario are defined. Then, the methodology is applied to use cases taken from a real LoRaWAN application in a smart city with hundreds of nodes. As a result, the proposed approach based on containers allows for the following: (i) deployment of functionalities on diverse distributed hosts; (ii) the use of the very same software running on real nodes; (iii) simple configuration and management of the emulation process; (iv) affordable costs. Both on-premises and cloud servers are considered as emulation platforms to evaluate the resource requirements and emulation cost of the proposed approach. For instance, emulating one hour of an entire LoRaWAN network with hundreds of nodes requires very affordable hardware that, if realized with a cloud-based computing platform, may cost less than USD 1.},
}
@article {pmid38609681,
year = {2024},
author = {Gupta, P and Shukla, DP},
title = {Demi-decadal land use land cover change analysis of Mizoram, India, with topographic correction using machine learning algorithm.},
journal = {Environmental science and pollution research international},
volume = {},
number = {},
pages = {},
pmid = {38609681},
issn = {1614-7499},
abstract = {Mizoram (India) is part of UNESCO's biodiversity hotspots in India and is primarily populated by tribes who engage in shifting agriculture; hence, the land use land cover (LULC) pattern of the state is frequently changing. We have used Landsat 5 and 8 satellite images to prepare LULC maps at five-year intervals from 2000 to 2020. The atmospherically corrected images were pre-processed to remove cloud cover and then classified into six classes: waterbodies, farmland, settlement, open forest, dense forest, and bare land. We applied four machine learning (ML) algorithms for classification, namely, random forest (RF), classification and regression tree (CART), minimum distance (MD), and support vector machine (SVM), to the images from 2000 to 2020. With 80% training and 20% testing data, we found that the RF classifier works best, with higher accuracy than the other classifiers. The average overall accuracy (OA) and Kappa coefficient (KC) from 2000 to 2020 were 84.00% and 0.79 when the RF classifier was used. When using SVM, CART, and MD, the average OA and KC were 78.06%, 0.73; 78.60%, 0.72; and 73.32%, 0.65, respectively. We utilised three methods of topographic correction, namely, C-correction, SCS (sun canopy sensor) correction, and SCS + C correction, to reduce misclassification due to shadow effects. SCS + C correction worked best for this region; hence, we prepared the LULC maps on SCS + C corrected satellite images, using the RF classifier demi-decadally from 2000 to 2020. The OA for 2000, 2005, 2010, 2015, and 2020 was found to be 84%, 81%, 81%, 85%, and 89%, respectively, using RF. Dense forest decreased from 2000 to 2020 with an increase in open forest, settlement, and farmland; nevertheless, where farmland was low, there was an increase in barren land. The results were significantly improved by the topographic correction, and misclassification was considerably reduced.},
}
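A minimal sketch of the classification step in the preceding entry (pmid38609681): a random forest trained on per-pixel band values with an 80/20 train/test split, scored by overall accuracy and Kappa. The bands and labels below are random placeholders, so the scores are meaningless; only the pipeline shape is illustrated.

    # RF LULC classification skeleton with OA and Kappa on a held-out 20% split.
    import numpy as np
    from sklearn.ensemble import RandomForestClassifier
    from sklearn.metrics import accuracy_score, cohen_kappa_score
    from sklearn.model_selection import train_test_split

    classes = ["water", "farmland", "settlement", "open_forest", "dense_forest", "bare_land"]
    rng = np.random.default_rng(0)
    X = rng.random((3000, 6))                   # 6 spectral band values per labelled pixel
    y = rng.integers(0, len(classes), 3000)     # placeholder labels (random, for shape only)

    X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.2, random_state=0)
    rf = RandomForestClassifier(n_estimators=200, random_state=0).fit(X_tr, y_tr)
    pred = rf.predict(X_te)
    print("OA:", accuracy_score(y_te, pred), "Kappa:", cohen_kappa_score(y_te, pred))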
@article {pmid38609409,
year = {2024},
author = {Zhang, Y and Geng, H and Su, L and He, S and Lu, L},
title = {An efficient polynomial-based verifiable computation scheme on multi-source outsourced data.},
journal = {Scientific reports},
volume = {14},
number = {1},
pages = {8512},
pmid = {38609409},
issn = {2045-2322},
support = {U22B2038//Research and Verification of Key Technologies for Secure and Efficient Federated Learning/ ; },
abstract = {With the development of cloud computing, users are more inclined to outsource complex computing tasks to cloud servers with strong computing capacity, and the cloud returns the final calculation results. However, the cloud is not completely trustworthy: it may leak user data and even return incorrect calculations on purpose. Therefore, it is important to verify the results of computing tasks without revealing the privacy of the users. Among all computing tasks, polynomial calculation is widely used in information security, linear algebra, signal processing, and other fields. Most existing polynomial-based verifiable computation schemes require that the input of the polynomial function come from a single data source, which means that the data must be signed by a single user. However, in practical applications the input of the polynomial may come from multiple users. To solve this problem, researchers have proposed schemes for multi-source outsourced data, but these schemes share the common problem of low efficiency. To improve efficiency, this paper proposes an efficient polynomial-based verifiable computation scheme on multi-source outsourced data. We optimize the polynomials using Horner's method to increase the speed of verification, in which the addition gates and the multiplication gates can be interleaved to represent the polynomial function. To adapt to this structure, we design the corresponding homomorphic verification tag, so that the input of the polynomial can come from multiple data sources. We prove the correctness and rationality of the scheme, and carry out numerical analysis and evaluation to verify its efficiency. The experimental results indicate that data contributors can sign 1000 new data items in merely 2 s, while the verification of a delegated polynomial function of degree 100 requires only 18 ms. These results confirm that the proposed scheme outperforms existing schemes.},
}
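A minimal sketch of Horner's method, the rewriting named in the preceding entry (pmid38609409): p(x) = a_n x^n + ... + a_0 is evaluated as interleaved multiply-add gates, the structure the paper's verification tags are designed around. The scheme's cryptographic tags themselves are not shown here.

    def horner(coeffs, x):
        """Evaluate a polynomial given coefficients [a_n, ..., a_1, a_0]."""
        acc = 0
        for a in coeffs:
            acc = acc * x + a  # one multiplication gate and one addition gate per step
        return acc

    # p(x) = 2x^3 - 6x^2 + 2x - 1 evaluated at x = 3: 54 - 54 + 6 - 1 = 5
    assert horner([2, -6, 2, -1], 3) == 5
    print(horner([2, -6, 2, -1], 3))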
@article {pmid38606391,
year = {2024},
author = {Li, S and Nair, R and Naqvi, SM},
title = {Acoustic and Text Features Analysis for Adult ADHD Screening: A Data-Driven Approach Utilizing DIVA Interview.},
journal = {IEEE journal of translational engineering in health and medicine},
volume = {12},
number = {},
pages = {359-370},
pmid = {38606391},
issn = {2168-2372},
mesh = {Adult ; Humans ; *Attention Deficit Disorder with Hyperactivity/diagnosis ; Treatment Outcome ; Magnetic Resonance Imaging ; },
abstract = {Attention Deficit Hyperactivity Disorder (ADHD) is a neurodevelopmental disorder commonly seen in childhood that leads to behavioural changes in social development and communication patterns. It often continues undiagnosed into adulthood due to a global shortage of psychiatrists, resulting in delayed diagnoses with lasting consequences for individuals' well-being and for society. Recently, machine learning methodologies have been incorporated into healthcare systems to facilitate the diagnosis and enhance the potential prediction of treatment outcomes for mental health conditions. In ADHD detection, previous research focused on utilizing functional magnetic resonance imaging (fMRI) or electroencephalography (EEG) signals, which require costly equipment and trained personnel for data collection. In recent years, speech and text modalities have garnered increasing attention due to their cost-effectiveness and non-wearable sensing in data collection. In this research, conducted in collaboration with the Cumbria, Northumberland, Tyne and Wear NHS Foundation Trust, we gathered audio data from both ADHD patients and normal controls based on the clinically popular Diagnostic Interview for ADHD in adults (DIVA). Subsequently, we transformed the speech data into text modalities through the Google Cloud Speech API. We extracted both acoustic and text features from the data, encompassing traditional acoustic features (e.g., MFCC), specialized feature sets (e.g., eGeMAPS), as well as deep-learned linguistic and semantic features derived from pre-trained deep learning models. These features are employed in conjunction with a support vector machine for ADHD classification, yielding promising outcomes in the utilization of audio and text data for effective adult ADHD screening. Clinical impact: This research introduces a transformative approach to ADHD diagnosis, employing speech and text analysis to facilitate early and more accessible detection, particularly beneficial in areas with limited psychiatric resources. Clinical and Translational Impact Statement: The successful application of machine learning techniques in analyzing audio and text data for ADHD screening represents a significant advancement in mental health diagnostics, paving the way for its integration into clinical settings and potentially improving patient outcomes on a broader scale.},
}
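A minimal sketch of the acoustic half of the pipeline in the preceding entry (pmid38606391): MFCC features feeding a support vector machine. The audio here is synthetic sine tones standing in for interview recordings, and the two groups are placeholders; the study additionally used eGeMAPS, deep-learned text features, and the Google Cloud Speech API, none of which are shown.

    # MFCC features + SVM on synthetic audio (placeholder for DIVA recordings).
    import librosa
    import numpy as np
    from sklearn.model_selection import cross_val_score
    from sklearn.svm import SVC

    rng = np.random.default_rng(0)
    sr = 16000

    def mfcc_features(y):
        mfcc = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=13)
        return mfcc.mean(axis=1)  # one 13-dimensional vector per recording

    # Two synthetic "groups" differing in spectral content (not real ADHD/control data).
    recordings = [np.sin(2 * np.pi * f * np.arange(sr) / sr) + 0.1 * rng.standard_normal(sr)
                  for f in [220] * 20 + [330] * 20]
    X = np.stack([mfcc_features(y) for y in recordings])
    y = np.array([0] * 20 + [1] * 20)

    print("CV accuracy:", cross_val_score(SVC(), X, y, cv=5).mean())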
@article {pmid38601602,
year = {2024},
author = {Sachdeva, S and Bhatia, S and Al Harrasi, A and Shah, YA and Anwer, K and Philip, AK and Shah, SFA and Khan, A and Ahsan Halim, S},
title = {Unraveling the role of cloud computing in health care system and biomedical sciences.},
journal = {Heliyon},
volume = {10},
number = {7},
pages = {e29044},
pmid = {38601602},
issn = {2405-8440},
abstract = {Cloud computing has emerged as a transformative force in healthcare and biomedical sciences, offering scalable, on-demand resources for managing vast amounts of data. This review explores the integration of cloud computing within these fields, highlighting its pivotal role in enhancing data management, security, and accessibility. We examine the application of cloud computing in various healthcare domains, including electronic medical records, telemedicine, and personalized patient care, as well as its impact on bioinformatics research, particularly in genomics, proteomics, and metabolomics. The review also addresses the challenges and ethical considerations associated with cloud-based healthcare solutions, such as data privacy and cybersecurity. By providing a comprehensive overview, we aim to assist readers in understanding the significance of cloud computing in modern medical applications and its potential to revolutionize both patient care and biomedical research.},
}
@article {pmid38591672,
year = {2024},
author = {Hicks, CB and Martinez, TJ},
title = {Massively scalable workflows for quantum chemistry: BigChem and ChemCloud.},
journal = {The Journal of chemical physics},
volume = {160},
number = {14},
pages = {},
doi = {10.1063/5.0190834},
pmid = {38591672},
issn = {1089-7690},
abstract = {Electronic structure theory, i.e., quantum chemistry, is the fundamental building block for many problems in computational chemistry. We present a new distributed computing framework (BigChem), which allows for an efficient solution of many quantum chemistry problems in parallel. BigChem is designed to be easily composable and leverages industry-standard middleware (e.g., Celery, RabbitMQ, and Redis) for distributed approaches to large scale problems. BigChem can harness any collection of worker nodes, including ones on cloud providers (such as AWS or Azure), local clusters, or supercomputer centers (and any mixture of these). BigChem builds upon MolSSI packages, such as QCEngine to standardize the operation of numerous computational chemistry programs, demonstrated here with Psi4, xtb, geomeTRIC, and TeraChem. BigChem delivers full utilization of compute resources at scale, offers a programmable canvas for designing sophisticated quantum chemistry workflows, and is fault tolerant to node failures and network disruptions. We demonstrate linear scalability of BigChem running computational chemistry workloads on up to 125 GPUs. Finally, we present ChemCloud, a web API to BigChem and successor to TeraChem Cloud. ChemCloud delivers scalable and secure access to BigChem over the Internet.},
}
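A minimal sketch of the distributed-queue pattern BigChem builds on, using Celery with RabbitMQ as broker and Redis as result backend (the middleware named in the preceding entry). The broker URLs and the task body are placeholders; BigChem's actual task wrappers (which hand work to QC programs through QCEngine) are richer than this.

    # Fan independent jobs out to any available Celery workers.
    from celery import Celery, group

    app = Celery("bigchem_sketch",
                 broker="amqp://guest@localhost//",   # RabbitMQ (placeholder URL)
                 backend="redis://localhost:6379/0")  # Redis (placeholder URL)

    @app.task
    def run_energy(molecule: str, method: str) -> str:
        # A real worker would dispatch this to a QC program (e.g., Psi4) via QCEngine.
        return f"energy({molecule}, {method}) computed"

    if __name__ == "__main__":
        # Requires a running worker, e.g.: celery -A bigchem_sketch worker
        batch = group(run_energy.s(mol, "b3lyp/6-31g*") for mol in ["H2O", "NH3", "CH4"])
        print(batch.apply_async().get(timeout=60))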
@article {pmid38589881,
year = {2024},
author = {Holl, F and Clarke, L and Raffort, T and Serres, E and Archer, L and Saaristo, P},
title = {The Red Cross Red Crescent Health Information System (RCHIS): an electronic medical records and health information management system for the red cross red crescent emergency response units.},
journal = {Conflict and health},
volume = {18},
number = {1},
pages = {28},
pmid = {38589881},
issn = {1752-1505},
abstract = {BACKGROUND: The Red Cross and Red Crescent Movement (RCRC) utilizes specialized Emergency Response Units (ERUs) for international disaster response. However, data collection and reporting within ERUs have been time-consuming and paper-based. The Red Cross Red Crescent Health Information System (RCHIS) was developed to improve clinical documentation and reporting, ensuring accuracy and ease of use while increasing compliance with reporting standards.
CASE PRESENTATION: RCHIS is an Electronic Medical Record (EMR) and Health Information System (HIS) designed for RCRC ERUs. It can be accessed on Android tablets or Windows laptops, both online and offline. The system securely stores data on Microsoft Azure cloud, with synchronization facilitated through a local ERU server. The functional architecture covers all clinical functions of ERU clinics and hospitals, incorporating user-friendly features. A pilot study was conducted with the Portuguese Red Cross (PRC) during a large-scale event. Thirteen super users were trained and subsequently trained the staff. During the four-day pilot, 77 user accounts were created, and 243 patient files were documented. Feedback indicated that RCHIS was easy to use, requiring minimal training time, and had sufficient training for full utilization. Real-time reporting facilitated coordination with the civil defense authority.
CONCLUSIONS: The development and pilot use of RCHIS demonstrated its feasibility and efficacy within RCRC ERUs. The system addressed the need for an EMR and HIS solution, enabling comprehensive clinical documentation and supporting administrative reporting functions. The pilot study validated the training of trainers' approach and paved the way for further domestic use of RCHIS. RCHIS has the potential to improve patient safety, quality of care, and reporting efficiency within ERUs. Automated reporting reduces the burden on ERU leadership, while electronic compilation enhances record completeness and correctness. Ongoing feedback collection and feature development continue to enhance RCHIS's functionality. Further trainings took place in 2023 and preparations for international deployments are under way. RCHIS represents a significant step toward improved emergency medical care and coordination within the RCRC and has implications for similar systems in other Emergency Medical Teams.},
}
@article {pmid38586319,
year = {2024},
author = {Chen, A and Yu, S and Yang, X and Huang, D and Ren, Y},
title = {IoT data security in outsourced databases: A survey of verifiable database.},
journal = {Heliyon},
volume = {10},
number = {7},
pages = {e28117},
pmid = {38586319},
issn = {2405-8440},
abstract = {With the swift advancement of cloud computing and the Internet of Things (IoT), to address the issue of massive data storage, IoT devices opt to offload their data to cloud servers so as to alleviate the pressure of resident storage and computation. However, storing local data in an outsourced database is bound to face the danger of tampering. To handle the above problem, the verifiable database (VDB), initially suggested in 2011, has garnered sustained interest from researchers. The concept of VDB enables resource-limited clients to securely outsource extremely large databases to untrusted servers, where users can retrieve database records and modify them by allocating new values, and any attempts at tampering will be detected. This paper provides a systematic summary of VDB. First, a definition of VDB is given, along with correctness and security proofs. VDB schemes based on commitment constructions are then introduced, mainly divided into vector commitments and polynomial commitments. Next, VDB schemes based on delegated polynomial functions are introduced, mainly in combination with Merkle trees and forward-secure symmetric searchable encryption. We then classify the current VDB schemes according to four different assumptions. Besides, we classify the established VDB schemes built upon two different groups. Finally, we introduce the applications and future development of VDB. To our knowledge, this is the first VDB review paper to date.},
}
@article {pmid38585837,
year = {2024},
author = {Mimar, S and Paul, AS and Lucarelli, N and Border, S and Santo, BA and Naglah, A and Barisoni, L and Hodgin, J and Rosenberg, AZ and Clapp, W and Sarder, P and , },
title = {ComPRePS: An Automated Cloud-based Image Analysis tool to democratize AI in Digital Pathology.},
journal = {bioRxiv : the preprint server for biology},
volume = {},
number = {},
pages = {},
pmid = {38585837},
support = {R01 DK118431/DK/NIDDK NIH HHS/United States ; R21 DK128668/DK/NIDDK NIH HHS/United States ; R01 DK114485/DK/NIDDK NIH HHS/United States ; U01 DK133090/DK/NIDDK NIH HHS/United States ; R01 DK129541/DK/NIDDK NIH HHS/United States ; OT2 OD033753/OD/NIH HHS/United States ; },
abstract = {Artificial intelligence (AI) has extensive applications in a wide range of disciplines including healthcare and clinical practice. Advances in high-resolution whole-slide brightfield microscopy allow for the digitization of histologically stained tissue sections, producing gigapixel-scale whole-slide images (WSI). The significant improvement in computing and revolution of deep neural network (DNN)-based AI technologies over the last decade allow us to integrate massively parallelized computational power, cutting-edge AI algorithms, and big data storage, management, and processing. Applied to WSIs, AI has created opportunities for improved disease diagnostics and prognostics with the ultimate goal of enhancing precision medicine and resulting patient care. The National Institutes of Health (NIH) has recognized the importance of developing standardized principles for data management and discovery for the advancement of science and proposed the Findable, Accessible, Interoperable, Reusable (FAIR) Data Principles [1] with the goal of building a modernized biomedical data resource ecosystem to establish collaborative research communities. In line with this mission and to democratize AI-based image analysis in digital pathology, we propose ComPRePS: an end-to-end automated Computational Renal Pathology Suite which combines massive scalability, on-demand cloud computing, and an easy-to-use web-based user interface for data upload, storage, management, slide-level visualization, and domain expert interaction. Moreover, our platform is equipped with both in-house and collaborator developed sophisticated AI algorithms in the back-end server for image analysis to identify clinically relevant micro-anatomic functional tissue units (FTU) and to extract image features.},
}
@article {pmid38584872,
year = {2024},
author = {Copeland, CJ and Roddy, JW and Schmidt, AK and Secor, PR and Wheeler, TJ},
title = {VIBES: a workflow for annotating and visualizing viral sequences integrated into bacterial genomes.},
journal = {NAR genomics and bioinformatics},
volume = {6},
number = {2},
pages = {lqae030},
pmid = {38584872},
issn = {2631-9268},
support = {R01 AI138981/AI/NIAID NIH HHS/United States ; R01 GM132600/GM/NIGMS NIH HHS/United States ; },
abstract = {Bacteriophages are viruses that infect bacteria. Many bacteriophages integrate their genomes into the bacterial chromosome and become prophages. Prophages may substantially burden or benefit host bacteria fitness, acting in some cases as parasites and in others as mutualists. Some prophages have been demonstrated to increase host virulence. The increasing ease of bacterial genome sequencing provides an opportunity to deeply explore prophage prevalence and insertion sites. Here we present VIBES (Viral Integrations in Bacterial genomES), a workflow intended to automate prophage annotation in complete bacterial genome sequences. VIBES provides additional context to prophage annotations by annotating bacterial genes and viral proteins in user-provided bacterial and viral genomes. The VIBES pipeline is implemented as a Nextflow-driven workflow, providing a simple, unified interface for execution on local, cluster and cloud computing environments. For each step of the pipeline, a container including all necessary software dependencies is provided. VIBES produces results in simple tab-separated format and generates intuitive and interactive visualizations for data exploration. Despite VIBES's primary emphasis on prophage annotation, its generic alignment-based design allows it to be deployed as a general-purpose sequence similarity search manager. We demonstrate the utility of the VIBES prophage annotation workflow by searching for 178 Pf phage genomes across 1072 Pseudomonas spp. genomes.},
}
@article {pmid38578775,
year = {2024},
author = {Nawaz Tareen, F and Alvi, AN and Alsamani, B and Alkhathami, M and Alsadie, D and Alosaimi, N},
title = {EOTE-FSC: An efficient offloaded task execution for fog enabled smart cities.},
journal = {PloS one},
volume = {19},
number = {4},
pages = {e0298363},
pmid = {38578775},
issn = {1932-6203},
mesh = {Cities ; *Algorithms ; *Communication ; Health Facilities ; Information Science ; },
abstract = {Smart cities ease the lifestyle of their community members with the help of Information and Communication Technology (ICT). They provide better water, waste, and energy management, enhance the security and safety of citizens, and offer better health facilities. Most of these applications are based on IoT sensor networks that are deployed in different application areas according to demand. Due to limited processing capabilities, sensor nodes cannot process multiple tasks simultaneously and need to offload some of their tasks to remotely placed cloud servers, which may cause delays. To reduce the delay, computing nodes acting as fog-computing nodes are placed in different vicinities to execute the offloaded tasks. It has been observed that offloaded tasks are not uniformly received by fog computing nodes: some fog nodes may receive more tasks while others receive fewer. This may cause an increase in overall task execution time. Furthermore, these tasks have different priority levels and must be executed before their deadlines. In this work, an Efficient Offloaded Task Execution for Fog enabled Smart cities (EOTE-FSC) is proposed. EOTE-FSC proposes a load balancing mechanism by modifying the greedy algorithm to efficiently distribute the offloaded tasks to its attached fog nodes to reduce the overall task execution time. This results in the successful execution of most of the tasks within their deadlines. In addition, EOTE-FSC modifies the task sequencing with deadline algorithm for the fog node to optimally execute the offloaded tasks in such a way that most of the high-priority tasks are entertained. The load balancing results of EOTE-FSC are compared with the state-of-the-art well-known Round Robin, Greedy, Round Robin with longest job first, and Round Robin with shortest job first algorithms, while the fog computing results of EOTE-FSC are compared with the First Come First Serve (FCFS) algorithm. The results show that EOTE-FSC effectively offloads the tasks onto fog nodes, and the maximum load on the fog computing nodes is reduced by up to 29%, 27.3%, 23%, and 24.4% as compared to the Round Robin, Greedy, Round Robin with LJF, and Round Robin with SJF algorithms, respectively. Moreover, the proposed EOTE-FSC executes the maximum number of offloaded high-priority tasks as compared to the FCFS algorithm within the same computing capacity of fog nodes.},
}
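A minimal sketch of the two ideas in EOTE-FSC from the preceding entry, in simplified form: tasks are spread across fog nodes by a load-aware greedy rule, then each node runs its queue highest-priority first among tasks that can still meet their deadline. The task attributes and numbers are illustrative, not the paper's evaluation workload.

    # Simplified load balancing + priority/deadline sequencing (illustrative only).
    def balance(tasks, nodes):
        load = {n: 0.0 for n in nodes}
        placement = {n: [] for n in nodes}
        for task in sorted(tasks, key=lambda t: -t["exec"]):
            n = min(load, key=load.get)  # node with the least accumulated work
            placement[n].append(task)
            load[n] += task["exec"]
        return placement

    def schedule(queue):
        done, clock = [], 0.0
        for t in sorted(queue, key=lambda t: (-t["priority"], t["deadline"])):
            if clock + t["exec"] <= t["deadline"]:  # run only tasks that can finish in time
                clock += t["exec"]
                done.append(t["id"])
        return done

    tasks = [{"id": i, "exec": e, "deadline": d, "priority": p}
             for i, (e, d, p) in enumerate([(2, 5, 3), (1, 3, 1), (3, 9, 2), (2, 6, 3)])]
    for node, queue in balance(tasks, ["f1", "f2"]).items():
        print(node, "executes", schedule(queue))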
@article {pmid38568312,
year = {2024},
author = {Khan, NS and Roy, SK and Talukdar, S and Billah, M and Iqbal, A and Zzaman, RU and Chowdhury, A and Mahtab, SB and Mallick, J},
title = {Empowering real-time flood impact assessment through the integration of machine learning and Google Earth Engine: a comprehensive approach.},
journal = {Environmental science and pollution research international},
volume = {},
number = {},
pages = {},
pmid = {38568312},
issn = {1614-7499},
abstract = {Floods cause substantial losses to life and property, especially in flood-prone regions like northwestern Bangladesh. Timely and precise evaluation of flood impacts is critical for effective flood management and decision-making. This research demonstrates an integrated approach utilizing machine learning and Google Earth Engine to enable real-time flood assessment. Synthetic aperture radar (SAR) data from Sentinel-1 and the Google Earth Engine platform were employed to generate near real-time flood maps of the 2020 flood in Kurigram and Lalmonirhat. An automatic thresholding technique quantified flooded areas. For land use/land cover (LULC) analysis, Sentinel-2's high resolution and machine learning models like artificial neural networks (ANN), random forests (RF), and support vector machines (SVM) were leveraged. ANN delivered the best LULC mapping with 0.94 accuracy based on metrics including accuracy, kappa, mean F1 score, mean sensitivity, mean specificity, mean positive predictive value, mean negative predictive value, mean precision, mean recall, mean detection rate, and mean balanced accuracy. Results showed over 600,000 people exposed at peak inundation in July, about 17% of the population. The machine learning-enabled LULC maps reliably identified vulnerable areas to prioritize flood management. Over half of the croplands flooded in July. This research demonstrates the potential of integrating SAR, machine learning, and cloud computing to empower authorities through real-time monitoring and accurate LULC mapping essential for effective flood response. The proposed comprehensive methodology can assist stakeholders in developing data-driven flood management strategies to reduce impacts.},
}
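A minimal sketch of the Sentinel-1 flood-mapping step on Google Earth Engine's Python API, in the spirit of the preceding entry (pmid38568312). It requires an authenticated Earth Engine account (ee.Authenticate() on first use); the -16 dB backscatter threshold, dates, and region rectangle are illustrative assumptions, not the study's calibrated values.

    # SAR backscatter thresholding for open water on Google Earth Engine.
    import ee

    ee.Initialize()  # run ee.Authenticate() once beforehand

    region = ee.Geometry.Rectangle([88.9, 25.5, 89.9, 26.3])  # roughly Kurigram/Lalmonirhat
    s1 = (ee.ImageCollection("COPERNICUS/S1_GRD")
          .filterBounds(region)
          .filterDate("2020-07-01", "2020-07-31")
          .filter(ee.Filter.listContains("transmitterReceiverPolarisation", "VV"))
          .select("VV"))

    flood = s1.min().lt(-16)  # low backscatter taken as open water (threshold assumed)
    flooded_km2 = (flood.multiply(ee.Image.pixelArea())
                   .reduceRegion(ee.Reducer.sum(), region, scale=30)
                   .getNumber("VV").divide(1e6))
    print("flooded area (km^2):", flooded_km2.getInfo())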
@article {pmid38560228,
year = {2024},
author = {Gheni, HM and AbdulRahaim, LA and Abdellatif, A},
title = {Real-time driver identification in IoV: A deep learning and cloud integration approach.},
journal = {Heliyon},
volume = {10},
number = {7},
pages = {e28109},
pmid = {38560228},
issn = {2405-8440},
abstract = {The Internet of Vehicles (IoV) emerges as a pivotal extension of the Internet of Things (IoT), specifically geared towards transforming the automotive landscape. In this evolving ecosystem, the demand for a seamless end-to-end system becomes paramount for enhancing operational efficiency and safety. Hence, this study introduces an innovative method for real-time driver identification by integrating cloud computing with deep learning. Utilizing the integrated capabilities of Google Cloud, Thingsboard, and Apache Kafka, the developed solution tailored for IoV technology is adept at managing real-time data collection, processing, prediction, and visualization, with resilience against sensor data anomalies. For driver identification, the proposed approach combines Convolutional Neural Networks (CNN) with multi-head self-attention. The proposed model is validated on two datasets: the Security dataset and a self-collected dataset. The results show that the proposed model surpasses previous works by achieving an accuracy and F1 score of 99.95%. Even when challenged with data anomalies, the model maintains a high accuracy of 96.2%. By achieving accurate driver identification, the proposed end-to-end IoV system can aid in optimizing fleet management, vehicle security, personalized driving experiences, insurance, and risk assessment. This emphasizes its potential for road safety and more effective transportation management.},
}
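A minimal PyTorch sketch of the combination named in the preceding entry (pmid38560228): a 1-D CNN front-end over windows of driving signals followed by multi-head self-attention across time steps. The channel counts, window length, and number of drivers are placeholders, not the paper's architecture.

    import torch
    import torch.nn as nn

    class DriverIdNet(nn.Module):
        def __init__(self, n_signals=10, n_drivers=5):
            super().__init__()
            self.conv = nn.Sequential(
                nn.Conv1d(n_signals, 32, kernel_size=5, padding=2), nn.ReLU(),
                nn.Conv1d(32, 64, kernel_size=5, padding=2), nn.ReLU())
            self.attn = nn.MultiheadAttention(embed_dim=64, num_heads=4, batch_first=True)
            self.head = nn.Linear(64, n_drivers)

        def forward(self, x):                 # x: (batch, n_signals, time)
            h = self.conv(x).transpose(1, 2)  # -> (batch, time, 64)
            h, _ = self.attn(h, h, h)         # self-attention across time steps
            return self.head(h.mean(dim=1))   # pooled representation -> driver logits

    model = DriverIdNet()
    print(model(torch.randn(8, 10, 128)).shape)  # torch.Size([8, 5])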
@article {pmid38559152,
year = {2024},
author = {Li, Y and Xue, F and Li, B and Yang, Y and Fan, Z and Shu, J and Yang, X and Wang, X and Lin, J and Copana, C and Zhao, B},
title = {Analyzing bivariate cross-trait genetic architecture in GWAS summary statistics with the BIGA cloud computing platform.},
journal = {bioRxiv : the preprint server for biology},
volume = {},
number = {},
pages = {},
pmid = {38559152},
abstract = {As large-scale biobanks provide increasing access to deep phenotyping and genomic data, genome-wide association studies (GWAS) are rapidly uncovering the genetic architecture behind various complex traits and diseases. GWAS publications typically make their summary-level data (GWAS summary statistics) publicly available, enabling further exploration of genetic overlaps between phenotypes gathered from different studies and cohorts. However, systematically analyzing high-dimensional GWAS summary statistics for thousands of phenotypes can be both logistically challenging and computationally demanding. In this paper, we introduce BIGA (https://bigagwas.org/), a website that aims to offer unified data analysis pipelines and processed data resources for cross-trait genetic architecture analyses using GWAS summary statistics. We have developed a framework to implement statistical genetics tools on a cloud computing platform, combined with extensive curated GWAS data resources. Through BIGA, users can upload data, submit jobs, and share results, providing the research community with a convenient tool for consolidating GWAS data and generating new insights.},
}
@article {pmid38559026,
year = {2024},
author = {Marini, S and Barquero, A and Wadhwani, AA and Bian, J and Ruiz, J and Boucher, C and Prosperi, M},
title = {OCTOPUS: Disk-based, Multiplatform, Mobile-friendly Metagenomics Classifier.},
journal = {bioRxiv : the preprint server for biology},
volume = {},
number = {},
pages = {},
pmid = {38559026},
support = {R01 AI141810/AI/NIAID NIH HHS/United States ; R01 AI145552/AI/NIAID NIH HHS/United States ; R01 AI170187/AI/NIAID NIH HHS/United States ; },
abstract = {Portable genomic sequencers such as Oxford Nanopore's MinION enable real-time applications in both clinical and environmental health, e.g., detection of bacterial outbreaks. However, there is a bottleneck in the downstream analytics when bioinformatics pipelines are unavailable, e.g., when cloud processing is unreachable due to absence of Internet connection, or only low-end computing devices can be carried on site. For instance, metagenomics classifiers usually require a large amount of memory or specific operating systems/libraries. In this work, we present platform-friendly software for portable metagenomic analysis of Nanopore data, the Oligomer-based Classifier of Taxonomic Operational and Pan-genome Units via Singletons (OCTOPUS). OCTOPUS is written in Java and reimplements several features of the popular Kraken2 and KrakenUniq software, adding original components for improving metagenomics classification on incomplete/sampled reference databases (e.g., selections of bacteria of public health priority), which makes it well suited to running on smartphones or tablets. We indexed both OCTOPUS and Kraken2 on a bacterial database with ~4,000 reference genomes, then simulated a positive (bacterial genomes from the same species, but different genomes) and two negative (viral, mammalian) Nanopore test sets. On the bacterial test set OCTOPUS yielded sensitivity and precision comparable to Kraken2 (94.4% and 99.8% versus 94.5% and 99.1%, respectively). On non-bacterial sequences (mammals and viral), OCTOPUS dramatically decreased (4- to 16-fold) the false positive rate when compared to Kraken2 (2.1% and 0.7% versus 8.2% and 11.2%, respectively). We also developed customized databases including viruses and the World Health Organization's set of bacteria of concern for drug resistance, tested with real Nanopore data on an Android smartphone. OCTOPUS is publicly available at https://github.com/DataIntellSystLab/OCTOPUS and https://github.com/Ruiz-HCI-Lab/OctopusMobile.},
}
@article {pmid38555378,
year = {2024},
author = {Du, J and Dong, G and Ning, J and Xu, Z and Yang, R},
title = {Identity-based controlled delegated outsourcing data integrity auditing scheme.},
journal = {Scientific reports},
volume = {14},
number = {1},
pages = {7582},
pmid = {38555378},
issn = {2045-2322},
support = {2023SKY007//Yunnan Minzu University Graduate Research Innovation Fund Project/ ; 61662089//National Natural Science Foundation of China/ ; },
abstract = {With the continuous development of cloud computing, the application of cloud storage has become more and more popular. To ensure the integrity and availability of cloud data, scholars have proposed several cloud data auditing schemes. Still, most fall short in handling outsourced data integrity, controlled outsourcing, and source file auditing. Therefore, we propose a controlled delegation outsourcing data integrity auditing scheme based on the identity-based encryption model. Our proposed scheme allows users to specify a dedicated agent to assist in uploading data to the cloud. These authorized proxies use recognizable identities for authentication and authorization, thus avoiding the need for cumbersome certificate management in a secure distributed computing system. While solving the above problems, our scheme adopts a bucket-based red-black tree structure to efficiently realize dynamic data updating, supporting constant data updates and structural rebalancing and thus achieving highly efficient data operations. We define the security model of the scheme in detail and prove the scheme's security under the underlying hardness assumption. In the performance analysis section, the proposed scheme is analyzed experimentally in comparison with other schemes, and the results show that the proposed scheme is efficient and secure.},
}
@article {pmid38546988,
year = {2024},
author = {Chen, X and Xu, G and Xu, X and Jiang, H and Tian, Z and Ma, T},
title = {Multicenter Hierarchical Federated Learning With Fault-Tolerance Mechanisms for Resilient Edge Computing Networks.},
journal = {IEEE transactions on neural networks and learning systems},
volume = {PP},
number = {},
pages = {},
doi = {10.1109/TNNLS.2024.3362974},
pmid = {38546988},
issn = {2162-2388},
abstract = {In the realm of federated learning (FL), the conventional dual-layered architecture, comprising a central parameter server and peripheral devices, often encounters challenges due to its significant reliance on the central server for communication and security. This dependence becomes particularly problematic in scenarios involving potential malfunctions of devices and servers. While existing device-edge-cloud hierarchical FL (HFL) models alleviate some dependence on central servers and reduce communication overheads, they primarily focus on load balancing within edge computing networks and fall short of achieving complete decentralization and edge-centric model aggregation. Addressing these limitations, we introduce the multicenter HFL (MCHFL) framework. This innovative framework replaces the traditional single central server architecture with a distributed network of robust global aggregation centers located at the edge, inherently enhancing fault tolerance crucial for maintaining operational integrity amidst edge network disruptions. Our comprehensive experiments with the MNIST, FashionMNIST, and CIFAR-10 datasets demonstrate the MCHFL's superior performance. Notably, even under high paralysis ratios of up to 50%, the MCHFL maintains high accuracy levels, with maximum accuracy reductions of only 2.60%, 5.12%, and 16.73% on these datasets, respectively. This performance significantly surpasses the notable accuracy declines observed in traditional single-center models under similar conditions. To the best of our knowledge, the MCHFL is the first edge multicenter FL framework with theoretical underpinnings. Our extensive experimental results across various datasets validate the MCHFL's effectiveness, showcasing its higher accuracy, faster convergence speed, and stronger robustness compared to single-center models, thereby establishing it as a pioneering paradigm in edge multicenter FL.},
}
@article {pmid38545518,
year = {2024},
author = {Lock, C and Toh, EMS and Keong, NC},
title = {Structural volumetric and Periodic Table DTI patterns in Complex Normal Pressure Hydrocephalus-Toward the principles of a translational taxonomy.},
journal = {Frontiers in human neuroscience},
volume = {18},
number = {},
pages = {1188533},
pmid = {38545518},
issn = {1662-5161},
abstract = {INTRODUCTION: We previously proposed a novel taxonomic framework to describe the diffusion tensor imaging (DTI) profiles of white matter tracts by their diffusivity and neural properties. We have shown the relevance of this strategy toward interpreting brain tissue signatures in Classic Normal Pressure Hydrocephalus vs. comparator cohorts of mild traumatic brain injury and Alzheimer's disease. In this iteration of the Periodic Table of DTI Elements, we examined patterns of tissue distortion in Complex NPH (CoNPH) and validated the methodology against an open-access dataset of healthy subjects, to expand its accessibility to a larger community.
METHODS: DTI measures for 12 patients with CoNPH with multiple comorbidities and 45 cognitively normal controls from the ADNI database were derived using the image processing pipeline on the brainlife.io open cloud computing platform. Using the Periodic Table algorithm, DTI profiles for CoNPH vs. controls were mapped according to injury patterns.
RESULTS: Structural volumes in most structures tested were significantly lower and the lateral ventricles higher in CoNPH vs. controls. In CoNPH, significantly lower fractional anisotropy (FA) and higher mean, axial, and radial diffusivities (MD, L1, and L2 and 3, respectively) were observed in white matter related to the lateral ventricles. Most diffusivity measures across supratentorial and infratentorial structures were significantly higher in CoNPH, with the largest differences in the cerebellum cortex. In subcortical deep gray matter structures, CoNPH and controls differed most significantly in the hippocampus, with the CoNPH group having a significantly lower FA and higher MD, L1, and L2 and 3. Cerebral and cerebellar white matter demonstrated more potential reversibility of injury compared to cerebral and cerebellar cortices.
DISCUSSION: The findings of widespread and significant reductions in subcortical deep gray matter structures, in comparison to healthy controls, support the hypothesis that Complex NPH cohorts retain imaging features associated with Classic NPH. The use of the algorithm of the Periodic Table allowed for greater consistency in the interpretation of DTI results by focusing on patterns of injury rather than an over-reliance on the interrogation of individual measures by statistical significance alone. Our aim is to provide a prototype that could be refined for an approach toward the concept of a "translational taxonomy."},
}
@article {pmid38544154,
year = {2024},
author = {Kang, S and Lee, S and Jung, Y},
title = {Design of Network-on-Chip-Based Restricted Coulomb Energy Neural Network Accelerator on FPGA Device.},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {6},
pages = {},
pmid = {38544154},
issn = {1424-8220},
support = {00144288, 00144290//Ministry of Trade, Industry and Energy/ ; },
abstract = {Sensor applications in internet of things (IoT) systems, coupled with artificial intelligence (AI) technology, are becoming an increasingly significant part of modern life. For low-latency AI computation in IoT systems, there is a growing preference for edge-based computing over cloud-based alternatives. The restricted coulomb energy neural network (RCE-NN) is a machine learning algorithm well-suited for implementation on edge devices due to its simple learning and recognition scheme. In addition, because the RCE-NN generates neurons as needed, it is easy to adjust the network structure and learn additional data. Therefore, the RCE-NN can provide edge-based real-time processing for various sensor applications. However, previous RCE-NN accelerators have limited scalability when the number of neurons increases. In this paper, we propose a network-on-chip (NoC)-based RCE-NN accelerator and present the results of implementation on a field-programmable gate array (FPGA). NoC is an effective solution for managing massive interconnections. The proposed RCE-NN accelerator utilizes a hierarchical-star (H-star) topology, which efficiently handles a large number of neurons, along with routers specifically designed for the RCE-NN. These approaches result in only a slight decrease in the maximum operating frequency as the number of neurons increases. Consequently, the maximum operating frequency of the proposed RCE-NN accelerator with 512 neurons increased by 126.1% compared to a previous RCE-NN accelerator. This enhancement was verified with two datasets for gas and sign language recognition, achieving accelerations of up to 54.8% in learning time and up to 45.7% in recognition time. The NoC scheme of the proposed RCE-NN accelerator is an appropriate solution to ensure the scalability of the neural network while providing high-performance on-chip learning and recognition.},
}
@article {pmid38544035,
year = {2024},
author = {Zhan, Y and Xie, W and Shi, R and Huang, Y and Zheng, X},
title = {Dynamic Privacy-Preserving Anonymous Authentication Scheme for Condition-Matching in Fog-Cloud-Based VANETs.},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {6},
pages = {},
pmid = {38544035},
issn = {1424-8220},
support = {61872091//National Natural Science Foundation of China/ ; JCKY20 19102C001//National Defense Basic Research Program of China/ ; 62372110//National Natural Science Foundation of China/ ; 2023J02008//Fujian Provincial Natural Science of Foundation/ ; 2020B0101090005//Key-Area Research and Development Program of Guangdong Province/ ; YSPTZX202145//The specific research fund of The Innovation Platform for Academician of Hainan Province/ ; 2022HZ022022//Major Special Project for Industrial Science and Technology in Fujian Province/ ; 2022H0012//Industrial Guiding Project in Fujian/ ; 2022L3003//Special Project of Central Finance Guiding Local Development/ ; },
abstract = {Secure group communication in Vehicle Ad hoc Networks (VANETs) over open channels remains a challenging task. To enable secure group communications with conditional privacy, it is necessary to establish a secure session using Authenticated Key Agreement (AKA). However, existing AKAs suffer from problems such as cross-domain dynamic group session key negotiation and heavy computational burdens on the Trusted Authority (TA) and vehicles. To address these challenges, we propose a dynamic privacy-preserving anonymous authentication scheme for condition matching in fog-cloud-based VANETs. The scheme employs general Elliptic Curve Cryptosystem (ECC) technology and fog-cloud computing methods to decrease computational overhead for On-Board Units (OBUs) and supports multiple TAs for improved service quality and robustness. Furthermore, certificateless technology relieves TAs of key management burdens. The security analysis indicates that our solution satisfies the communication security and privacy requirements. Experimental simulations verify that our method achieves optimal overall performance with lower computational costs and smaller communication overhead compared to state-of-the-art solutions.},
}
@article {pmid38540411,
year = {2024},
author = {Yuan, DY and Park, JH and Li, Z and Thomas, R and Hwang, DM and Fu, L},
title = {A New Cloud-Native Tool for Pharmacogenetic Analysis.},
journal = {Genes},
volume = {15},
number = {3},
pages = {},
pmid = {38540411},
issn = {2073-4425},
support = {LMMD Strategic Innovation Fund//Sunnybrook Health Sciences Centre/ ; },
mesh = {Humans ; *Pharmacogenomic Testing ; *Pharmacogenetics/methods ; High-Throughput Nucleotide Sequencing/methods ; Genomics/methods ; Computational Biology ; },
abstract = {BACKGROUND: The advancement of next-generation sequencing (NGS) technologies provides opportunities for large-scale Pharmacogenetic (PGx) studies and pre-emptive PGx testing to cover a wide range of genotypes present in diverse populations. However, NGS-based PGx testing is limited by the lack of comprehensive computational tools to support genetic data analysis and clinical decisions.
METHODS: Bioinformatics utilities specialized for human genomics and the latest cloud-based technologies were used to develop a bioinformatics pipeline for analyzing the genomic sequence data and reporting PGx genotypes. A database was created and integrated in the pipeline for filtering the actionable PGx variants and clinical interpretations. Strict quality verification procedures were conducted on variant calls with the whole genome sequencing (WGS) dataset of the 1000 Genomes Project (G1K). The accuracy of PGx allele identification was validated using the WGS dataset of the Pharmacogenetics Reference Materials from the Centers for Disease Control and Prevention (CDC).
RESULTS: The newly created bioinformatics pipeline, Pgxtools, can analyze genomic sequence data, identify actionable variants in 13 PGx relevant genes, and generate reports annotated with specific interpretations and recommendations based on clinical practice guidelines. Verified with two independent methods, we have found that Pgxtools consistently identifies variants more accurately than the results in the G1K dataset on GRCh37 and GRCh38.
CONCLUSIONS: Pgxtools provides an integrated workflow for large-scale genomic data analysis and PGx clinical decision support. Implemented with cloud-native technologies, it is highly portable in a wide variety of environments from a single laptop to High-Performance Computing (HPC) clusters and cloud platforms for different production scales and requirements.},
}
@article {pmid38535044,
year = {2024},
author = {Kukkar, A and Kumar, Y and Sandhu, JK and Kaur, M and Walia, TS and Amoon, M},
title = {DengueFog: A Fog Computing-Enabled Weighted Random Forest-Based Smart Health Monitoring System for Automatic Dengue Prediction.},
journal = {Diagnostics (Basel, Switzerland)},
volume = {14},
number = {6},
pages = {},
pmid = {38535044},
issn = {2075-4418},
abstract = {Dengue is a distinctive and fatal infectious disease that spreads through female Aedes aegypti mosquitoes. It is a notable concern for developing countries due to its low diagnosis rate. Dengue has a strikingly high mortality level compared to other diseases due to severe platelet depletion. Hence, it can be categorized as a life-threatening fever compared to other fevers of the same class. Additionally, it has been shown that dengue fever shares many of the same symptoms as other flu-like fevers. On the other hand, the research community is closely monitoring the popular research fields related to IoT, fog, and cloud computing for the diagnosis and prediction of diseases. IoT, fog, and cloud-based technologies are used for constructing a number of health care systems. Accordingly, in this study, a DengueFog monitoring system was created based on fog computing for the prediction and detection of dengue infection. Additionally, the proposed DengueFog system includes a weighted random forest (WRF) classifier to monitor and predict dengue infection. The proposed system's efficacy was evaluated using data on dengue infection. This dataset was gathered between 2016 and 2018 from several hospitals in the Delhi-NCR region. The accuracy, F-value, recall, precision, error rate, and specificity metrics were used to assess the simulation results of the suggested monitoring system. It was demonstrated that the proposed DengueFog monitoring system with WRF outperformed traditional classifiers.},
}
@article {pmid38531975,
year = {2024},
author = {Ali, I and Wassif, K and Bayomi, H},
title = {Dimensionality reduction for images of IoT using machine learning.},
journal = {Scientific reports},
volume = {14},
number = {1},
pages = {7205},
pmid = {38531975},
issn = {2045-2322},
abstract = {Sensors, wearables, mobile devices, and other Internet of Things (IoT) devices are becoming increasingly integrated into all aspects of our lives. They are capable of gathering enormous amounts of data, such as image data, which can then be sent to the cloud for processing. However, this results in an increase in network traffic and latency. To overcome these difficulties, edge computing has been proposed as a paradigm for computing that brings processing closer to the location where data is produced. This paper explores the merging of cloud and edge computing for IoT and investigates approaches using machine learning for dimensionality reduction of images on the edge, employing a deep learning-based autoencoder approach and principal component analysis (PCA). The encoded data is then sent to the cloud server, where it is used directly for any machine learning task without significantly impacting the accuracy of the data processed in the cloud. The proposed approach has been evaluated on an object detection task using a set of 4000 images randomly chosen from three datasets: the COCO, human detection, and HDA datasets. Results show that a 77% reduction in data did not have a significant impact on the object detection task's accuracy.},
}
@article {pmid38531933,
year = {2024},
author = {Huettmann, F and Andrews, P and Steiner, M and Das, AK and Philip, J and Mi, C and Bryans, N and Barker, B},
title = {A super SDM (species distribution model) 'in the cloud' for better habitat-association inference with a 'big data' application of the Great Gray Owl for Alaska.},
journal = {Scientific reports},
volume = {14},
number = {1},
pages = {7213},
pmid = {38531933},
issn = {2045-2322},
support = {-EWHALE lab//University of Alaska Fairbanks/ ; -EWHALE lab//University of Alaska Fairbanks/ ; },
abstract = {The currently available distribution and range maps for the Great Grey Owl (GGOW; Strix nebulosa) are ambiguous, contradictory, imprecise, outdated, often hand-drawn, and thus not quantified, not data-based, and not scientific. In this study, we present a proof of concept with a biological application, demonstrating technical and biological workflow progress built on the latest global open-access 'Big Data' sharing and open-source methods in R and geographic information systems (OGIS and QGIS), assessed with six recent multi-evidence citizen-science sightings of the GGOW. This proposed workflow can be applied for quantified inference in any species-habitat model, such as those typically applied with species distribution models (SDMs). Using Random Forest-an ensemble-type model of Machine Learning following Leo Breiman's approach of inference from predictions-we present a Super SDM for GGOWs in Alaska running on Oracle Cloud Infrastructure (OCI). These Super SDMs were based on the best publicly available data (410 occurrences + 1% new assessment sightings) and over 100 environmental GIS habitat predictors ('Big Data'). The compiled global open-access data and the associated workflow overcome, for the first time, the limitations of traditionally used PCs and laptops. This work breaks new ground and has real-world implications for conservation and land management for the GGOW, for Alaska, and for other species worldwide as a 'new' baseline. As this research field remains dynamic, Super SDMs can have limits and are not yet the ultimate and final statement on species-habitat associations, but they summarize all publicly available data and information on a topic in a quantified and testable fashion, allowing fine-tuning and improvements as needed. At minimum, they allow for low-cost rapid assessment and a great leap forward toward analyses that are more ecological and inclusive of all information at hand. Using GGOWs, we aim here to correct the perception of this species toward a more inclusive, holistic, and scientifically correct assessment of this urban-adapted owl in the Anthropocene, rather than a mysterious wilderness-inhabiting species (aka the 'Phantom of the North'). Such a Super SDM has never before been created for any bird species and opens new perspectives for impact assessment, policy, and global sustainability.},
}
@article {pmid38528619,
year = {2024},
author = {Budge, J and Carrell, T and Yaqub, M and Wafa, H and Waltham, M and Pilecka, I and Kelly, J and Murphy, C and Palmer, S and Wang, Y and Clough, RE},
title = {The ARIA trial protocol: a randomised controlled trial to assess the clinical, technical, and cost-effectiveness of a cloud-based, ARtificially Intelligent image fusion system in comparison to standard treatment to guide endovascular Aortic aneurysm repair.},
journal = {Trials},
volume = {25},
number = {1},
pages = {214},
pmid = {38528619},
issn = {1745-6215},
support = {NIHR201004//Invention for Innovation Programme/ ; },
mesh = {Humans ; *Aortic Aneurysm, Abdominal/diagnostic imaging/surgery ; Cost-Benefit Analysis ; Cloud Computing ; *Endovascular Procedures/methods ; *Blood Vessel Prosthesis Implantation/adverse effects ; Treatment Outcome ; Retrospective Studies ; Randomized Controlled Trials as Topic ; Multicenter Studies as Topic ; },
abstract = {BACKGROUND: Endovascular repair of aortic aneurysmal disease is established due to perceived advantages in patient survival, reduced postoperative complications, and shorter hospital lengths of stay. High spatial and contrast resolution 3D CT angiography images are used to plan the procedures and inform device selection and manufacture, but in standard care, the surgery is performed using image-guidance from 2D X-ray fluoroscopy with injection of nephrotoxic contrast material to visualise the blood vessels. This study aims to assess the benefit to patients, practitioners, and the health service of a novel image fusion medical device (Cydar EV), which allows this high-resolution 3D information to be available to operators at the time of surgery.
METHODS: The trial is a multi-centre, open label, two-armed randomised controlled clinical trial of 340 patients, randomised 1:1 to either standard treatment in endovascular aneurysm repair or treatment using Cydar EV, a CE-marked medical device comprising cloud computing, augmented intelligence, and computer vision. The primary outcome is procedural time, with secondary outcomes of procedural efficiency, technical effectiveness, patient outcomes, and cost-effectiveness. Patients with a clinical diagnosis of AAA or TAAA suitable for endovascular repair and able to provide written informed consent will be invited to participate.
DISCUSSION: This trial is the first randomised controlled trial evaluating advanced image fusion technology in endovascular aortic surgery and is well placed to evaluate the effect of this technology on patient outcomes and cost to the NHS.
TRIAL REGISTRATION: ISRCTN13832085. Dec. 3, 2021.},
}
@article {pmid38528564,
year = {2024},
author = {Zhang, S and Li, H and Jing, Q and Shen, W and Luo, W and Dai, R},
title = {Anesthesia decision analysis using a cloud-based big data platform.},
journal = {European journal of medical research},
volume = {29},
number = {1},
pages = {201},
pmid = {38528564},
issn = {2047-783X},
support = {2022JJ70061//Natural Science Foundation of Hunan Province/ ; 22A0011//Key Fund Project of Hunan Provincial Department of Education/ ; W20243113//Health Commission of Hunan Province/ ; 82103641 and 82071347//National Natural Science Foundation of China/ ; },
mesh = {Humans ; Big Data ; *Anesthesiology ; Cloud Computing ; *Anesthesia ; *Anesthetics ; Decision Support Techniques ; },
abstract = {Big data technologies have proliferated since the dawn of the cloud-computing era. Traditional data storage, extraction, transformation, and analysis technologies have thus become unsuitable for the large volume, diversity, high processing speed, and low value density of big data in medical strategies, which require the development of novel big data application technologies. In this regard, we investigated the most recent big data platform breakthroughs in anesthesiology and designed an anesthesia decision model based on a cloud system for storing and analyzing massive amounts of data from anesthetic records. The presented Anesthesia Decision Analysis Platform performs distributed computing on medical records via several programming tools, and provides services such as keyword search, data filtering, and basic statistics to reduce inaccurate and subjective judgments by decision-makers. Importantly, it can potentially improve anesthetic strategies and create individualized anesthesia decisions, lowering the likelihood of perioperative complications.},
}
@article {pmid38524844,
year = {2024},
author = {Mukuka, A},
title = {Data on mathematics teacher educators' proficiency and willingness to use technology: A structural equation modelling analysis.},
journal = {Data in brief},
volume = {54},
number = {},
pages = {110307},
pmid = {38524844},
issn = {2352-3409},
abstract = {The role of Mathematics Teacher Educators (MTEs) in preparing future teachers to effectively integrate technology into their mathematics instruction is of paramount importance yet remains an underexplored domain. Technology has the potential to enhance the development of 21st-century skills, such as problem-solving and critical thinking, which are essential for students in the era of the fourth industrial revolution. However, the rapid evolution of technology and the emergence of new trends like data analytics, the Internet of Things, machine learning, cloud computing, and artificial intelligence present new challenges in the realm of mathematics teaching and learning. Consequently, MTEs need to equip prospective teachers with the knowledge and skills to harness technology in innovative ways within their future mathematics classrooms. This paper presents and describes data from a survey of 104 MTEs in Zambia. The study focuses on MTEs' proficiency, perceived usefulness, perceived ease of use, and willingness to incorporate technology in their classrooms. This data-driven article aims to unveil patterns and trends within the dataset, with the objective of offering insights rather than drawing definitive conclusions. The article also highlights the data collection process and outlines the procedure for assessing the measurement model of the hypothesised relationships among variables through structural equation modelling analysis. The data described in this article not only sheds light on the current landscape but also serves as a valuable resource for mathematics teacher training institutions and other stakeholders seeking to understand the requisites for MTEs to foster technological skills among prospective teachers of mathematics.},
}
@article {pmid38520921,
year = {2024},
author = {Tadi, AA and Alhadidi, D and Rueda, L},
title = {PPPCT: Privacy-Preserving framework for Parallel Clustering Transcriptomics data.},
journal = {Computers in biology and medicine},
volume = {173},
number = {},
pages = {108351},
doi = {10.1016/j.compbiomed.2024.108351},
pmid = {38520921},
issn = {1879-0534},
mesh = {Humans ; *Privacy ; *Software ; Algorithms ; Gene Expression Profiling ; Cluster Analysis ; Sequence Analysis, RNA ; },
abstract = {Single-cell transcriptomics data provides crucial insights into patients' health, yet poses significant privacy concerns. Genomic data privacy attacks can have deep implications, encompassing not only the patients' health information but also extending widely to compromise their families'. Moreover, the permanence of leaked data exacerbates the challenges, making retraction an impossibility. While extensive efforts have been directed towards clustering single-cell transcriptomics data, addressing critical challenges, especially in the realm of privacy, remains pivotal. This paper introduces an efficient, fast, privacy-preserving approach for clustering single-cell RNA-sequencing (scRNA-seq) datasets. The key contributions include ensuring data privacy, achieving high-quality clustering, accommodating the high dimensionality inherent in the datasets, and maintaining reasonable computation time for big-scale datasets. Our proposed approach utilizes the map-reduce scheme to parallelize clustering, addressing intensive calculation challenges. Intel Software Guard eXtension (SGX) processors are used to ensure the security of sensitive code and data during processing. Additionally, the approach incorporates a logarithm transformation as a preprocessing step, employs non-negative matrix factorization for dimensionality reduction, and utilizes parallel k-means for clustering. The approach fully leverages the computing capabilities of all processing resources within a secure private cloud environment. Experimental results demonstrate the efficacy of our approach in preserving patient privacy while surpassing state-of-the-art methods in both clustering quality and computation time. Our method consistently achieves a minimum of 7% higher Adjusted Rand Index (ARI) than existing approaches, contingent on dataset size. Additionally, due to parallel computations and dimensionality reduction, our approach exhibits efficiency, converging to very good results in less than 10 seconds for a scRNA-seq dataset with 5000 genes and 6000 cells when prioritizing privacy and under two seconds without privacy considerations. Availability and implementation: code and datasets are available at https://github.com/University-of-Windsor/PPPCT.},
}
@article {pmid38514837,
year = {2024},
author = {Hajiaghabozorgi, M and Fischbach, M and Albrecht, M and Wang, W and Myers, CL},
title = {BridGE: a pathway-based analysis tool for detecting genetic interactions from GWAS.},
journal = {Nature protocols},
volume = {},
number = {},
pages = {},
pmid = {38514837},
issn = {1750-2799},
support = {R21CA235352//U.S. Department of Health & Human Services | NIH | Center for Information Technology (Center for Information Technology, National Institutes of Health)/ ; R01HG005084//U.S. Department of Health & Human Services | NIH | Center for Information Technology (Center for Information Technology, National Institutes of Health)/ ; R01HG005853//U.S. Department of Health & Human Services | NIH | Center for Information Technology (Center for Information Technology, National Institutes of Health)/ ; BAND-19-615151//Weston Brain Institute/ ; },
abstract = {Genetic interactions have the potential to modulate phenotypes, including human disease. In principle, genome-wide association studies (GWAS) provide a platform for detecting genetic interactions; however, traditional methods for identifying them, which tend to focus on testing individual variant pairs, lack statistical power. In this protocol, we describe a novel computational approach, called Bridging Gene sets with Epistasis (BridGE), for discovering genetic interactions between biological pathways from GWAS data. We present a Python-based implementation of BridGE along with instructions for its application to a typical human GWAS cohort. The major stages include initial data processing and quality control, construction of a variant-level genetic interaction network, measurement of pathway-level genetic interactions, evaluation of statistical significance using sample permutations and generation of results in a standardized output format. The BridGE software pipeline includes options for running the analysis on multiple cores and multiple nodes for users who have access to computing clusters or a cloud computing environment. In a cluster computing environment with 10 nodes and 100 GB of memory per node, the method can be run in less than 24 h for typical human GWAS cohorts. Using BridGE requires knowledge of running Python programs and basic shell script programming experience.},
}
@article {pmid38506901,
year = {2024},
author = {Sahu, KS and Dubin, JA and Majowicz, SE and Liu, S and Morita, PP},
title = {Revealing the Mysteries of Population Mobility Amid the COVID-19 Pandemic in Canada: Comparative Analysis With Internet of Things-Based Thermostat Data and Google Mobility Insights.},
journal = {JMIR public health and surveillance},
volume = {10},
number = {},
pages = {e46903},
pmid = {38506901},
issn = {2369-2960},
mesh = {Humans ; Pandemics ; *Internet of Things ; Search Engine ; *COVID-19/epidemiology ; Alberta/epidemiology ; Health Policy ; },
abstract = {BACKGROUND: The COVID-19 pandemic necessitated public health policies to limit human mobility and curb infection spread. Human mobility, which is often underestimated, plays a pivotal role in health outcomes, impacting both infectious and chronic diseases. Collecting precise mobility data is vital for understanding human behavior and informing public health strategies. Google's GPS-based location tracking, which is compiled in Google Mobility Reports, became the gold standard for monitoring outdoor mobility during the pandemic. However, indoor mobility remains underexplored.
OBJECTIVE: This study investigates in-home mobility data from ecobee's smart thermostats in Canada (February 2020 to February 2021) and compares it directly with Google's residential mobility data. By assessing the suitability of smart thermostat data, we aim to shed light on indoor mobility patterns, contributing valuable insights to public health research and strategies.
METHODS: Motion sensor data were acquired from the ecobee "Donate Your Data" initiative via Google's BigQuery cloud platform. Concurrently, residential mobility data were sourced from the Google Mobility Report. This study centered on 4 Canadian provinces-Ontario, Quebec, Alberta, and British Columbia-during the period from February 15, 2020, to February 14, 2021. Data processing, analysis, and visualization were conducted on the Microsoft Azure platform using Python (Python Software Foundation) and R programming languages (R Foundation for Statistical Computing). Our investigation involved assessing changes in mobility relative to the baseline in both data sets, with the strength of this relationship assessed using Pearson and Spearman correlation coefficients. We scrutinized daily, weekly, and monthly variations in mobility patterns across the data sets and performed anomaly detection for further insights.
RESULTS: The results revealed noteworthy week-to-week and month-to-month shifts in population mobility within the chosen provinces, aligning with pandemic-driven policy adjustments. Notably, the ecobee data exhibited a robust correlation with Google's data set. Examination of Google's daily patterns detected more pronounced mobility fluctuations during weekdays, a trend not mirrored in the ecobee data. Anomaly detection successfully identified substantial mobility deviations coinciding with policy modifications and cultural events.
CONCLUSIONS: This study's findings illustrate the substantial influence of the Canadian stay-at-home and work-from-home policies on population mobility. This impact was discernible through both Google's out-of-house residential mobility data and ecobee's in-house smart thermostat data. As such, we deduce that smart thermostats represent a valid tool for facilitating intelligent monitoring of population mobility in response to policy-driven shifts.},
}
@article {pmid38495592,
year = {2024},
author = {Wang, H and Chen, H and Wang, Y},
title = {Analysis of Hot Topics Regarding Global Smart Elderly Care Research - 1997-2021.},
journal = {China CDC weekly},
volume = {6},
number = {9},
pages = {157-161},
pmid = {38495592},
issn = {2096-7071},
abstract = {WHAT IS ALREADY KNOWN ABOUT THIS TOPIC?: With the assistance of the internet, big data, cloud computing, and other technologies, the concept of smart elderly care has emerged.
WHAT IS ADDED BY THIS REPORT?: This study presents information on the countries or regions that have conducted research on smart elderly care, as well as identifies global hotspots and development trends in this field.
WHAT ARE THE IMPLICATIONS FOR PUBLIC HEALTH PRACTICE?: The results of this study suggest that future research should focus on fall detection, health monitoring, and guidance systems that are user-friendly and contribute to the creation of smarter, safer communities for the well-being of the elderly.},
}
@article {pmid38495055,
year = {2024},
author = {Li, J and Xiong, Y and Feng, S and Pan, C and Guo, X},
title = {CloudProteoAnalyzer: scalable processing of big data from proteomics using cloud computing.},
journal = {Bioinformatics advances},
volume = {4},
number = {1},
pages = {vbae024},
pmid = {38495055},
issn = {2635-0041},
support = {R01 AT011618/AT/NCCIH NIH HHS/United States ; },
abstract = {SUMMARY: Shotgun proteomics is widely used in many system biology studies to determine the global protein expression profiles of tissues, cultures, and microbiomes. Many non-distributed computer algorithms have been developed for users to process proteomics data on their local computers. However, the amount of data acquired in a typical proteomics study has grown rapidly in recent years, owing to the increasing throughput of mass spectrometry and the expanding scale of study designs. This presents a big data challenge for researchers to process proteomics data in a timely manner. To overcome this challenge, we developed a cloud-based parallel computing application to offer end-to-end proteomics data analysis software as a service (SaaS). A web interface was provided to users to upload mass spectrometry-based proteomics data, configure parameters, submit jobs, and monitor job status. The data processing was distributed across multiple nodes in a supercomputer to achieve scalability for large datasets. Our study demonstrated SaaS for proteomics as a viable solution for the community to scale up the data processing using cloud computing.
AVAILABILITY AND IMPLEMENTATION: This application is available online at https://sipros.oscer.ou.edu/ or https://sipros.unt.edu for free use. The source code is available at https://github.com/Biocomputing-Research-Group/CloudProteoAnalyzer under the GPL version 3.0 license.},
}
@article {pmid38491365,
year = {2024},
author = {Clements, J and Goina, C and Hubbard, PM and Kawase, T and Olbris, DJ and Otsuna, H and Svirskas, R and Rokicki, K},
title = {NeuronBridge: an intuitive web application for neuronal morphology search across large data sets.},
journal = {BMC bioinformatics},
volume = {25},
number = {1},
pages = {114},
pmid = {38491365},
issn = {1471-2105},
mesh = {Animals ; *Software ; Neurons ; *Connectome ; Microscopy, Electron ; Drosophila ; },
abstract = {BACKGROUND: Neuroscience research in Drosophila is benefiting from large-scale connectomics efforts using electron microscopy (EM) to reveal all the neurons in a brain and their connections. To exploit this knowledge base, researchers relate a connectome's structure to neuronal function, often by studying individual neuron cell types. Vast libraries of fly driver lines expressing fluorescent reporter genes in sets of neurons have been created and imaged using confocal light microscopy (LM), enabling the targeting of neurons for experimentation. However, creating a fly line for driving gene expression within a single neuron found in an EM connectome remains a challenge, as it typically requires identifying a pair of driver lines where only the neuron of interest is expressed in both. This task and other emerging scientific workflows require finding similar neurons across large data sets imaged using different modalities.
RESULTS: Here, we present NeuronBridge, a web application for easily and rapidly finding putative morphological matches between large data sets of neurons imaged using different modalities. We describe the functionality and construction of the NeuronBridge service, including its user-friendly graphical user interface (GUI), extensible data model, serverless cloud architecture, and massively parallel image search engine.
CONCLUSIONS: NeuronBridge fills a critical gap in the Drosophila research workflow and is used by hundreds of neuroscience researchers around the world. We offer our software code, open APIs, and processed data sets for integration and reuse, and provide the application as a service at http://neuronbridge.janelia.org .},
}
@article {pmid38475170,
year = {2024},
author = {Tripathi, A and Waqas, A and Venkatesan, K and Yilmaz, Y and Rasool, G},
title = {Building Flexible, Scalable, and Machine Learning-Ready Multimodal Oncology Datasets.},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {5},
pages = {},
pmid = {38475170},
issn = {1424-8220},
support = {2234836//National Science Foundation/ ; 2234468//National Science Foundation/ ; 1903466//National Science Foundation/ ; },
mesh = {Humans ; Reproducibility of Results ; *Neoplasms ; },
abstract = {The advancements in data acquisition, storage, and processing techniques have resulted in the rapid growth of heterogeneous medical data. Integrating radiological scans, histopathology images, and molecular information with clinical data is essential for developing a holistic understanding of the disease and optimizing treatment. The need for integrating data from multiple sources is further pronounced in complex diseases such as cancer for enabling precision medicine and personalized treatments. This work proposes Multimodal Integration of Oncology Data System (MINDS)-a flexible, scalable, and cost-effective metadata framework for efficiently fusing disparate data from public sources such as the Cancer Research Data Commons (CRDC) into an interconnected, patient-centric framework. MINDS consolidates over 41,000 cases from across repositories while achieving a high compression ratio relative to the 3.78 PB source data size. It offers sub-5-s query response times for interactive exploration. MINDS offers an interface for exploring relationships across data types and building cohorts for developing large-scale multimodal machine learning models. By harmonizing multimodal data, MINDS aims to potentially empower researchers with greater analytical ability to uncover diagnostic and prognostic insights and enable evidence-based personalized care. MINDS tracks granular end-to-end data provenance, ensuring reproducibility and transparency. The cloud-native architecture of MINDS can handle exponential data growth in a secure, cost-optimized manner while ensuring substantial storage optimization, replication avoidance, and dynamic access capabilities. Auto-scaling, access controls, and other mechanisms guarantee pipelines' scalability and security. MINDS overcomes the limitations of existing biomedical data silos via an interoperable metadata-driven approach that represents a pivotal step toward the future of oncology data integration.},
}
@article {pmid38475051,
year = {2024},
author = {Gaba, P and Raw, RS and Kaiwartya, O and Aljaidi, M},
title = {B-SAFE: Blockchain-Enabled Security Architecture for Connected Vehicle Fog Environment.},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {5},
pages = {},
pmid = {38475051},
issn = {1424-8220},
support = {00//nottingham trent university/ ; },
abstract = {Vehicles are no longer stand-alone mechanical entities due to the advancements in vehicle-to-vehicle (V2V) and vehicle-to-infrastructure (V2I) communication-centric Internet of Connected Vehicles (IoV) frameworks. However, the advancement in connected vehicles leads to another serious security threat, online vehicle hijacking, where the steering control of vehicles can be hacked online. The feasibility of traditional security solutions in IoV environments is very limited, considering the intermittent network connectivity to cloud servers and vehicle-centric computing capability constraints. In this context, this paper presents a Blockchain-enabled Security Architecture for a connected vehicular Fog networking Environment (B-SAFE). Firstly, blockchain security and vehicular fog networking are introduced as preliminaries of the framework. Secondly, a three-layer architecture of B-SAFE is presented, focusing on vehicular communication, blockchain at fog nodes, and the cloud as trust and reward management for vehicles. Thirdly, details of the blockchain implementation at fog nodes are presented, along with a flowchart and algorithm. The performance evaluation of the proposed B-SAFE framework attests to its benefits in terms of trust, reward points, and threshold calculation.},
}
@article {pmid38474954,
year = {2024},
author = {Vercheval, N and Royen, R and Munteanu, A and Pižurica, A},
title = {PCGen: A Fully Parallelizable Point Cloud Generative Model.},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {5},
pages = {},
pmid = {38474954},
issn = {1424-8220},
support = {174B0911//Flanders AI Research Programme/ ; G094122N//Fonds Wetenschappelijk Onderzoek (FWO) project/ ; },
abstract = {Generative models have the potential to revolutionize 3D extended reality. A primary obstacle is that augmented and virtual reality need real-time computing. Current state-of-the-art point cloud random generation methods are not fast enough for these applications. We introduce a vector-quantized variational autoencoder model (VQVAE) that can synthesize high-quality point clouds in milliseconds. Unlike previous work in VQVAEs, our model offers a compact sample representation suitable for conditional generation and data exploration, with potential applications in rapid prototyping. We achieve this result by combining architectural improvements with an innovative approach for probabilistic random generation. First, we rethink current parallel point cloud autoencoder structures, and we propose several solutions to improve robustness, efficiency, and reconstruction quality. Notable contributions in the decoder architecture include an innovative computation layer to process the shape semantic information, an attention mechanism that helps the model focus on different areas, and a filter to cover possible sampling errors. Second, we introduce a parallel sampling strategy for VQVAE models consisting of a double encoding system, where a variational autoencoder learns how to generate the complex discrete distribution of the VQVAE, not only allowing quick inference but also describing the shape with a few global variables. We compare the proposed decoder and our VQVAE model with established and concurrent work, and we prove, one by one, the validity of each individual contribution.},
}
@article {pmid38474952,
year = {2024},
author = {AlSaleh, I and Al-Samawi, A and Nissirat, L},
title = {Novel Machine Learning Approach for DDoS Cloud Detection: Bayesian-Based CNN and Data Fusion Enhancements.},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {5},
pages = {},
pmid = {38474952},
issn = {1424-8220},
support = {GRANT5,340//King Faisal University/ ; },
abstract = {Cloud computing has revolutionized the information technology landscape, offering businesses the flexibility to adapt to diverse business models without the need for costly on-site servers and network infrastructure. A recent survey reveals that 95% of enterprises have already embraced cloud technology, with 79% of their workloads migrating to cloud environments. However, the deployment of cloud technology introduces significant cybersecurity risks, including network security vulnerabilities, data access control challenges, and the ever-looming threat of cyber-attacks such as Distributed Denial of Service (DDoS) attacks, which pose substantial risks to both cloud and network security. While Intrusion Detection Systems (IDS) have traditionally been employed for DDoS attack detection, prior studies have been constrained by various limitations. In response to these challenges, we present an innovative machine learning approach for DDoS cloud detection, known as the Bayesian-based Convolutional Neural Network (BaysCNN) model. Leveraging the CICDDoS2019 dataset, which encompasses 88 features, we employ Principal Component Analysis (PCA) for dimensionality reduction. Our BaysCNN model comprises 19 layers of analysis, forming the basis for training and validation. Our experimental findings conclusively demonstrate that the BaysCNN model significantly enhances the accuracy of DDoS cloud detection, achieving an impressive average accuracy rate of 99.66% across 13 multi-class attacks. To further elevate the model's performance, we introduce the Data Fusion BaysFusCNN approach, encompassing 27 layers. By leveraging Bayesian methods to estimate uncertainties and integrating features from multiple sources, this approach attains an even higher average accuracy of 99.79% across the same 13 multi-class attacks. Our proposed methodology not only offers valuable insights for the development of robust machine learning-based intrusion detection systems but also enhances the reliability and scalability of IDS in cloud computing environments. This empowers organizations to proactively mitigate security risks and fortify their defenses against malicious cyber-attacks.},
}
@article {pmid38469580,
year = {2024},
author = {Yakubu, B and Appiah, EM and Adu, AF},
title = {Pangenome Analysis of Helicobacter pylori Isolates from Selected Areas of Africa Indicated Diverse Antibiotic Resistance and Virulence Genes.},
journal = {International journal of genomics},
volume = {2024},
number = {},
pages = {5536117},
pmid = {38469580},
issn = {2314-4378},
abstract = {The challenge facing Helicobacter pylori (H. pylori) infection management in some parts of Africa is the evolution of drug-resistant species, the lack of gold standard in diagnostic methods, and the ineffectiveness of current vaccines against the bacteria. It is being established that even though clinical consequences linked to the bacteria vary geographically, there is rather a generic approach to treatment. This situation has remained problematic in the successful fight against the bacteria in parts of Africa. As a result, this study compared the genomes of selected H. pylori isolates from selected areas of Africa and evaluated their virulence and antibiotic drug resistance, distinguishing those that are highly pathogenic and associated with specific clinical outcomes from those that are less virulent and rarely associated with clinical outcomes. 146 genomes of H. pylori isolated from selected locations of Africa were sampled, and bioinformatic tools such as Abricate, CARD RGI, MLST, Prokka, Roary, Phandango, Google Sheets, and iTOL were used to compare the isolates and their antibiotic resistance or susceptibility. Over 20,000 virulence and AMR genes were observed. About 95% of the isolates were genetically diverse, 90% of the isolates harbored shell genes, and 50% harbored cloud and core genes. Some isolates did not retain the cagA and vacA genes. Clarithromycin, metronidazole, amoxicillin, and tinidazole were resistant to most AMR genes (vacA, cagA, oip, and bab). Conclusion: This study found both virulence and AMR genes, in differing quantities, in all H. pylori strains across the selected geographies in Africa. MLST, Pangenome, and ORF analyses showed disparities among the isolates. In general, this could imply diversity in genetics, evolution, and protein production. Therefore, generic administration of antibiotics such as clarithromycin, amoxicillin, and erythromycin as treatment methods in the African subregion could be contributing to the spread of the bacterium's antibiotic resistance.},
}
@article {pmid38468957,
year = {2024},
author = {Tripathy, SS and Bebortta, S and Chowdhary, CL and Mukherjee, T and Kim, S and Shafi, J and Ijaz, MF},
title = {FedHealthFog: A federated learning-enabled approach towards healthcare analytics over fog computing platform.},
journal = {Heliyon},
volume = {10},
number = {5},
pages = {e26416},
pmid = {38468957},
issn = {2405-8440},
abstract = {The emergence of the federated learning (FL) technique in fog-enabled healthcare systems has leveraged enhanced privacy towards safeguarding sensitive patient information over heterogeneous computing platforms. In this paper, we introduce the FedHealthFog framework, which was meticulously developed to overcome the difficulties of distributed learning in resource-constrained IoT-enabled healthcare systems, particularly those sensitive to delays and energy efficiency. Conventional federated learning approaches face challenges stemming from substantial compute requirements and significant communication costs. This is primarily due to their reliance on a singular server for the aggregation of global data, which results in inefficient training models. We present a transformational approach to address these problems by elevating strategically placed fog nodes to the position of local aggregators within the federated learning architecture. A sophisticated greedy heuristic technique is used to optimize the choice of a fog node as the global aggregator in each communication cycle between edge devices and the cloud. The FedHealthFog system notably achieves reductions in communication latency of 87.01%, 26.90%, and 71.74%, and in energy consumption of 57.98%, 34.36%, and 35.37%, respectively, for the three benchmark algorithms analyzed in this study. The effectiveness of FedHealthFog is strongly supported by the outcomes of our experiments against cutting-edge alternatives, while it simultaneously reduces the number of global aggregation cycles. These findings highlight FedHealthFog's potential to transform federated learning in resource-constrained IoT environments for delay-sensitive applications.},
}
@article {pmid38466691,
year = {2024},
author = {Shafi, I and Din, S and Farooq, S and Díez, IT and Breñosa, J and Espinosa, JCM and Ashraf, I},
title = {Design and development of patient health tracking, monitoring and big data storage using Internet of Things and real time cloud computing.},
journal = {PloS one},
volume = {19},
number = {3},
pages = {e0298582},
pmid = {38466691},
issn = {1932-6203},
mesh = {Humans ; *Cloud Computing ; *Internet of Things ; Pandemics ; Monitoring, Physiologic ; Information Storage and Retrieval ; },
abstract = {With the outbreak of the COVID-19 pandemic, social isolation and quarantine have become commonplace across the world. IoT health monitoring solutions eliminate the need for regular doctor visits and interactions among patients and medical personnel. Many patients in wards or intensive care units require continuous monitoring of their health. Continuous patient monitoring is a hectic practice in hospitals with limited staff; in a pandemic situation like COVID-19, it becomes much more difficult when hospitals are working at full capacity and there is still a risk of medical workers being infected. In this study, we propose an Internet of Things (IoT)-based patient health monitoring system that collects real-time data on important health indicators such as pulse rate, blood oxygen saturation, and body temperature, but can be expanded to include more parameters. Our system is comprised of a hardware component that collects and transmits data from sensors to a cloud-based storage system, where it can be accessed and analyzed by healthcare specialists. The ESP-32 microcontroller interfaces with the multiple sensors and wirelessly transmits the collected data to the cloud storage system. Our system utilizes a pulse oximeter to measure blood oxygen saturation and body temperature, as well as a heart rate monitor to measure pulse rate. A web-based interface is also implemented, allowing healthcare practitioners to access and visualize the collected data in real-time, making remote patient monitoring easier. Overall, our IoT-based patient health monitoring system represents a significant advancement in remote patient monitoring, allowing healthcare practitioners to access real-time data on important health metrics and detect potential health issues before they escalate.},
}
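As a rough illustration of the data path described above (sensors to cloud storage), the following hedged Python sketch posts one stubbed vitals reading as JSON to a hypothetical REST endpoint. The URL, payload schema, and sensor values are assumptions for demonstration, not the paper's actual API.

import json, time, urllib.request

CLOUD_URL = "https://example.com/api/vitals"  # hypothetical endpoint

def read_sensors():
    # In the real system these values would come from the pulse oximeter
    # and heart-rate monitor wired to the ESP-32.
    return {"pulse_bpm": 72, "spo2_pct": 98.0, "temp_c": 36.8,
            "timestamp": time.time()}

def push_reading():
    body = json.dumps(read_sensors()).encode()
    req = urllib.request.Request(CLOUD_URL, data=body,
                                 headers={"Content-Type": "application/json"})
    with urllib.request.urlopen(req) as resp:   # POST one reading
        return resp.status

if __name__ == "__main__":
    print(push_reading())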
@article {pmid38460568,
year = {2024},
author = {Ghiandoni, GM and Evertsson, E and Riley, DJ and Tyrchan, C and Rathi, PC},
title = {Augmenting DMTA using predictive AI modelling at AstraZeneca.},
journal = {Drug discovery today},
volume = {29},
number = {4},
pages = {103945},
doi = {10.1016/j.drudis.2024.103945},
pmid = {38460568},
issn = {1878-5832},
mesh = {*Artificial Intelligence ; *Biological Assay ; Drug Discovery ; },
abstract = {Design-Make-Test-Analyse (DMTA) is the discovery cycle through which molecules are designed, synthesised, and assayed to produce data that in turn are analysed to inform the next iteration. The process is repeated until viable drug candidates are identified, often requiring many cycles before reaching a sweet spot. The advent of artificial intelligence (AI) and cloud computing presents an opportunity to innovate drug discovery to reduce the number of cycles needed to yield a candidate. Here, we present the Predictive Insight Platform (PIP), a cloud-native modelling platform developed at AstraZeneca. The impact of PIP in each step of DMTA, as well as its architecture, integration, and usage, are discussed and used to provide insights into the future of drug discovery.},
}
@article {pmid38455562,
year = {2024},
author = {Gokool, S and Mahomed, M and Brewer, K and Naiken, V and Clulow, A and Sibanda, M and Mabhaudhi, T},
title = {Crop mapping in smallholder farms using unmanned aerial vehicle imagery and geospatial cloud computing infrastructure.},
journal = {Heliyon},
volume = {10},
number = {5},
pages = {e26913},
pmid = {38455562},
issn = {2405-8440},
abstract = {Smallholder farms are major contributors to agricultural production, food security, and socio-economic growth in many developing countries. However, they generally lack the resources to fully maximize their potential. Consequently, they require innovative, evidence-based and lower-cost solutions to optimize their productivity. Recently, precision agricultural practices facilitated by unmanned aerial vehicles (UAVs) have gained traction in the agricultural sector and have great potential for smallholder farm applications. Furthermore, advances in geospatial cloud computing have opened new and exciting possibilities in the remote sensing arena. In light of these recent developments, the focus of this study was to explore and demonstrate the utility of the advanced image processing capabilities of the Google Earth Engine (GEE) geospatial cloud computing platform for processing and analysing a very high spatial resolution multispectral UAV image to map land use land cover (LULC) within smallholder farms. The results showed that LULC could be mapped at a 0.50 m spatial resolution with an overall accuracy of 91%. Overall, we found GEE to be an extremely useful platform for conducting advanced image analysis on UAV imagery and for rapid communication of results. Notwithstanding the limitations of the study, the findings presented herein are quite promising and clearly demonstrate how modern agricultural practices can be implemented to facilitate improved agricultural management among smallholder farmers.},
}
@article {pmid38453988,
year = {2024},
author = {Inam, S and Kanwal, S and Firdous, R and Hajjej, F},
title = {Blockchain based medical image encryption using Arnold's cat map in a cloud environment.},
journal = {Scientific reports},
volume = {14},
number = {1},
pages = {5678},
pmid = {38453988},
issn = {2045-2322},
abstract = {Improved software for processing medical images has inspired tremendous interest in modern medicine in recent years. Modern healthcare equipment generates huge amounts of data, such as scanned medical images and computerized patient information, which must be secured for future use. Diversity in the healthcare industry, namely in the form of medical data, is one of the largest challenges for researchers. Cloud environments and blockchain technology have each demonstrated their usefulness; the purpose of this study is to combine both technologies for safe and secure transactions. Storing or sending medical data through public clouds exposes information to potential eavesdropping, data breaches, and unauthorized access, so encrypting data before transmission is crucial to mitigate these security risks. As a result, a Blockchain based Chaotic Arnold's cat map Encryption Scheme (BCAES) is proposed in this paper. The BCAES first encrypts the image using an Arnold's cat map encryption scheme, then sends the encrypted image to the cloud server and stores the signed document of the plain image on the blockchain. As the blockchain is often considered more secure due to its distributed nature and consensus mechanism, the data receiver can verify the integrity and authenticity of the image after decryption using the signed document stored on the blockchain. Various analysis techniques have been used to examine the proposed scheme. The results of analyses such as key sensitivity analysis, key space analysis, information entropy, histogram correlation of adjacent pixels, Number of Pixel Change Rate, Peak Signal-to-Noise Ratio, and Unified Average Changing Intensity, and of similarity analyses such as Mean Square Error and Structural Similarity Index Measure, illustrate that our proposed scheme is an efficient encryption scheme compared with recent literature, surpassing previous endeavors.},
}
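For readers unfamiliar with the permutation underlying BCAES, this is a minimal sketch of Arnold's cat map on a square N x N image: pixel (x, y) moves to ((x + y) mod N, (x + 2y) mod N) each round, and iterating scrambles the image (the map is periodic, so enough iterations restore it). Indexing conventions vary across implementations, and the full scheme's chaotic keys and blockchain-backed signatures are omitted here.

import numpy as np

def arnold_cat(img: np.ndarray, iterations: int = 1) -> np.ndarray:
    n = img.shape[0]
    assert img.shape[0] == img.shape[1], "map is defined on square images"
    out = img
    for _ in range(iterations):
        scrambled = np.empty_like(out)
        for x in range(n):
            for y in range(n):
                # one application of the cat map permutation
                scrambled[(x + y) % n, (x + 2 * y) % n] = out[x, y]
        out = scrambled
    return out

img = np.arange(16).reshape(4, 4)
print(arnold_cat(img, iterations=3))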
@article {pmid38452470,
year = {2024},
author = {Zhong, C and Darbandi, M and Nassr, M and Latifian, A and Hosseinzadeh, M and Jafari Navimipour, N},
title = {A new cloud-based method for composition of healthcare services using deep reinforcement learning and Kalman filtering.},
journal = {Computers in biology and medicine},
volume = {172},
number = {},
pages = {108152},
doi = {10.1016/j.compbiomed.2024.108152},
pmid = {38452470},
issn = {1879-0534},
mesh = {Humans ; *Cloud Computing ; Reproducibility of Results ; *Delivery of Health Care ; },
abstract = {Healthcare has significantly contributed to the well-being of individuals around the globe; nevertheless, further benefits could be derived from a more streamlined healthcare system without incurring additional costs. Recently, the main attributes of cloud computing, such as on-demand service, high scalability, and virtualization, have brought many benefits across many areas, especially in medical services. Cloud computing is considered an important element in healthcare services, enhancing their performance and efficacy. The current state of the healthcare industry requires healthcare products and services to be supplied in ways that increase viability for everyone involved. Developing new approaches for discovering and selecting healthcare services in the cloud has become more critical due to the rising popularity of these kinds of services. Given the diverse array of healthcare services, service composition enables the execution of intricate operations by integrating the functionalities of multiple services into a single procedure. However, many methods in this field encounter several issues, such as high energy consumption, cost, and response time. This article introduces a novel layered method for selecting and evaluating healthcare services to find optimal service selection and composition solutions based on Deep Reinforcement Learning (Deep RL), Kalman filtering, and repeated training, addressing the aforementioned issues. The results reveal that the proposed method achieves acceptable results in terms of availability, reliability, energy consumption, and response time when compared to other methods.},
}
@article {pmid38449567,
year = {2024},
author = {Wang, J and Yin, J and Nguyen, MH and Wang, J and Xu, W},
title = {Editorial: Big scientific data analytics on HPC and cloud.},
journal = {Frontiers in big data},
volume = {7},
number = {},
pages = {1353988},
doi = {10.3389/fdata.2024.1353988},
pmid = {38449567},
issn = {2624-909X},
}
@article {pmid38449564,
year = {2024},
author = {Saad, M and Enam, RN and Qureshi, R},
title = {Optimizing multi-objective task scheduling in fog computing with GA-PSO algorithm for big data application.},
journal = {Frontiers in big data},
volume = {7},
number = {},
pages = {1358486},
pmid = {38449564},
issn = {2624-909X},
abstract = {As the volume and velocity of Big Data continue to grow, traditional cloud computing approaches struggle to meet the demands of real-time processing and low latency. Fog computing, with its distributed network of edge devices, emerges as a compelling solution. However, efficient task scheduling in fog computing remains a challenge due to its inherently multi-objective nature, balancing factors like execution time, response time, and resource utilization. This paper proposes a hybrid Genetic Algorithm (GA)-Particle Swarm Optimization (PSO) algorithm to optimize multi-objective task scheduling in fog computing environments. The hybrid approach combines the strengths of GA and PSO, achieving effective exploration and exploitation of the search space and improved performance compared to traditional single-algorithm approaches. With varying task inputs, the proposed hybrid algorithm improved execution time by 85.68%, 84%, and 51.03% compared with GA, Hybrid PWOA, and PSO, respectively; response time by 67.28%, 54.24%, and 75.40%; and completion time by 68.69%, 98.91%, and 75.90%. With varying numbers of fog nodes, it improved execution time by 84.87%, 88.64%, and 85.07%; response time by 65.92%, 80.51%, and 85.26%; and completion time by 67.60%, 81.34%, and 85.23%.},
}
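A toy sketch of the hybrid idea, under stated assumptions: candidate schedules assign tasks to fog nodes, a PSO-style pull toward the global best provides exploitation, and GA crossover/mutation add exploration. The makespan cost model is a simplification, not the paper's exact multi-objective formulation.

import random

N_TASKS, N_NODES = 20, 4
TASK_COST = [random.uniform(1, 10) for _ in range(N_TASKS)]

def makespan(assign):
    """Cost of a schedule: the load of the busiest fog node."""
    loads = [0.0] * N_NODES
    for task, node in enumerate(assign):
        loads[node] += TASK_COST[task]
    return max(loads)

def crossover(a, b):
    cut = random.randrange(1, N_TASKS)
    return a[:cut] + b[cut:]

def mutate(assign, rate=0.1):
    return [random.randrange(N_NODES) if random.random() < rate else g
            for g in assign]

def ga_pso(pop_size=30, iters=100):
    pop = [[random.randrange(N_NODES) for _ in range(N_TASKS)]
           for _ in range(pop_size)]
    best = min(pop, key=makespan)
    for _ in range(iters):
        # PSO-like move: pull each particle toward the global best
        pop = [[b if random.random() < 0.5 else g
                for g, b in zip(ind, best)] for ind in pop]
        # GA step: crossover with a random mate, then mutate
        pop = [mutate(crossover(ind, random.choice(pop))) for ind in pop]
        best = min(pop + [best], key=makespan)
    return best, makespan(best)

print(ga_pso()[1])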
@article {pmid38435622,
year = {2024},
author = {Mehmood, T and Latif, S and Jamail, NSM and Malik, A and Latif, R},
title = {LSTMDD: an optimized LSTM-based drift detector for concept drift in dynamic cloud computing.},
journal = {PeerJ. Computer science},
volume = {10},
number = {},
pages = {e1827},
pmid = {38435622},
issn = {2376-5992},
abstract = {This study aims to investigate the problem of concept drift in cloud computing and emphasizes the importance of early detection for enabling optimum resource utilization and offering an effective solution. The analysis includes synthetic and real-world cloud datasets, stressing the need for appropriate drift detectors tailored to the cloud domain. A modified version of Long Short-Term Memory (LSTM) called the LSTM Drift Detector (LSTMDD) is proposed and compared with other top drift detection techniques using prediction error as the primary evaluation metric. LSTMDD is optimized to improve performance in detecting anomalies in non-Gaussian distributed cloud environments. The experiments show that LSTMDD outperforms other methods for gradual and sudden drift in the cloud domain. The findings suggest that machine learning techniques such as LSTMDD could be a promising approach to addressing the problem of concept drift in cloud computing, leading to more efficient resource allocation and improved performance.},
}
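A conceptual sketch of error-based drift detection in the spirit of LSTMDD follows: a forecaster predicts the next workload value, and drift is flagged when the rolling mean prediction error exceeds a multiple of a calibrated baseline. The naive last-value predictor stands in for the trained LSTM, and all thresholds and data are illustrative.

from collections import deque

def detect_drift(series, window=20, threshold=2.0):
    errors, drifts = deque(maxlen=window), []
    baseline = None
    for t in range(1, len(series)):
        pred = series[t - 1]            # stand-in for lstm.predict(...)
        errors.append(abs(series[t] - pred))
        if len(errors) == window:
            mean_err = sum(errors) / window
            if baseline is None:
                baseline = mean_err     # calibrate on the first full window
            elif mean_err > threshold * baseline:
                drifts.append(t)        # rolling error has blown up: drift
    return drifts

stable = [10 + (i % 3) for i in range(100)]        # well-behaved workload
drifted = [30 + 5 * (i % 5) for i in range(50)]    # regime change
print(detect_drift(stable + drifted))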
@article {pmid38429324,
year = {2024},
author = {Yin, X and Fang, W and Liu, Z and Liu, D},
title = {A novel multi-scale CNN and Bi-LSTM arbitration dense network model for low-rate DDoS attack detection.},
journal = {Scientific reports},
volume = {14},
number = {1},
pages = {5111},
pmid = {38429324},
issn = {2045-2322},
support = {2021GX056//the Key Technologies R\&D Program of Weifang/ ; 2023GX063//the Key Technologies R\&D Program of Weifang/ ; KJRC2021002//the Foundation for the Talents by the Weifang University of Science and Technology/ ; ZR2021MF086//the Natural Science Foundation of Shandong Province/ ; 2019GNC106034//the Key R\&D Program of Shandong Province under Grant/ ; },
abstract = {Low-rate distributed denial of service attacks, also known as LDDoS attacks, pose notorious security risks in cloud computing networks. They overload cloud servers and degrade network service quality through a stealthy strategy. Furthermore, this kind of low-ratio, pulse-like abnormal traffic leads to a serious data-scale problem. As a result, existing models for detecting minority and adversarial LDDoS attacks are insufficient in both detection accuracy and time consumption. This paper proposes a novel multi-scale Convolutional Neural Network (CNN) and bidirectional Long Short-Term Memory (bi-LSTM) arbitration dense network model (called MSCBL-ADN) for learning and detecting LDDoS attack behaviors under the constraints of limited datasets and time consumption. The MSCBL-ADN incorporates CNNs for preliminary spatial feature extraction and embedding-based bi-LSTM for temporal relationship extraction. It then employs an arbitration network to re-weigh feature importance for higher accuracy, and finally uses a two-block densely connected network to perform the final classification. Experimental results on the popular ISCX-2016-SlowDos dataset demonstrate that the proposed MSCBL-ADN model achieves a significant improvement in detection accuracy and superior time performance over state-of-the-art models.},
}
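A hedged Keras sketch of the general architecture named above: parallel Conv1D branches at several kernel sizes for multi-scale spatial features, a bidirectional LSTM for temporal relationships, and a dense classification head. Layer sizes are illustrative assumptions, and the paper's arbitration network and two-block dense connections are omitted for brevity.

from tensorflow import keras
from tensorflow.keras import layers

def build_model(timesteps=50, features=16, n_classes=2):
    inp = keras.Input(shape=(timesteps, features))
    # multi-scale convolutions: one branch per kernel size
    branches = [layers.Conv1D(32, k, padding="same", activation="relu")(inp)
                for k in (3, 5, 7)]
    x = layers.Concatenate()(branches)
    x = layers.Bidirectional(layers.LSTM(64))(x)   # temporal relationships
    x = layers.Dense(64, activation="relu")(x)
    out = layers.Dense(n_classes, activation="softmax")(x)
    model = keras.Model(inp, out)
    model.compile(optimizer="adam",
                  loss="sparse_categorical_crossentropy",
                  metrics=["accuracy"])
    return model

build_model().summary()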
@article {pmid38421498,
year = {2024},
author = {Mahato, T and Parida, BR and Bar, S},
title = {Assessing tea plantations biophysical and biochemical characteristics in Northeast India using satellite data.},
journal = {Environmental monitoring and assessment},
volume = {196},
number = {3},
pages = {327},
pmid = {38421498},
issn = {1573-2959},
support = {F.4-5(209-FRP)/2015/BSR//University Grants Commission/ ; },
mesh = {*Environmental Monitoring ; *Camellia sinensis ; India ; Nitrogen ; Tea ; },
abstract = {Despite advancements in using multi-temporal satellite data to assess long-term changes in Northeast India's tea plantations, a research gap exists in understanding the intricate interplay between biophysical and biochemical characteristics. Further exploration is crucial for precise, sustainable monitoring and management. In this study, satellite-derived vegetation indices and near-proximal sensor data were deployed to deduce various physico-chemical characteristics and to evaluate the health conditions of tea plantations in northeast India. The districts of Sonitpur, Jorhat, Sibsagar, Dibrugarh, and Tinsukia in Assam were selected, as they are the major contributors to the tea industry in India. The Sentinel-2A (2022) data were processed in the Google Earth Engine (GEE) cloud platform and utilized for analyzing the tea plantations' biochemical and biophysical properties. Leaf chlorophyll (Cab) and nitrogen contents were determined using the Normalized Area Over Reflectance Curve (NAOC) index and flavanol contents, respectively. Biophysical and biochemical parameters of the tea assessed during the spring season (March-April) of 2022 revealed that tea plantations located in Tinsukia and Dibrugarh were much healthier than those in the other districts of Assam, as is evident from the satellite-derived Enhanced Vegetation Index (EVI), Modified Soil Adjusted Vegetation Index (MSAVI), Leaf Area Index (LAI), and Fraction of Absorbed Photosynthetically Active Radiation (fPAR), including the Cab and nitrogen contents. The Cab of healthy tea plants varied from 25 to 35 µg/cm[2]. Pearson correlation between satellite-derived Cab and nitrogen and field measurements showed an R[2] of 0.61-0.62 (p-value < 0.001). This study offered vital information about land alterations and tea health conditions, which can be crucial for conservation, monitoring, and management practices.},
}
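To show the kind of validation statistic reported above, here is a small Python sketch computing the Pearson correlation between satellite-derived and field-measured chlorophyll values; the arrays are made-up placeholders, not the study's data.

from scipy.stats import pearsonr

satellite_cab = [26.1, 28.4, 31.0, 33.2, 29.5, 34.8]   # µg/cm^2, illustrative
field_cab     = [25.0, 29.0, 30.2, 34.1, 28.0, 35.5]

r, p_value = pearsonr(satellite_cab, field_cab)
print(f"r = {r:.2f}, R^2 = {r*r:.2f}, p = {p_value:.4f}")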
@article {pmid38420486,
year = {2024},
author = {Liu, X and Wider, W and Fauzi, MA and Jiang, L and Udang, LN and Hossain, SFA},
title = {The evolution of smart hotels: A bibliometric review of the past, present and future trends.},
journal = {Heliyon},
volume = {10},
number = {4},
pages = {e26472},
pmid = {38420486},
issn = {2405-8440},
abstract = {This study provides a bibliometric analysis of smart hotel research, drawing from 613 publications in the Web of Science (WoS) database to examine scholarly trends and developments in this dynamic field. Smart hotels, characterized by integrating advanced technologies such as AI, IoT, cloud computing, and big data, aim to redefine customer experiences and operational efficiency. Utilizing co-citation and co-word analysis techniques, the research delves into the depth of literature from past to future trends. In co-citation analysis, clusters including "Sustainable Hotel and Green Hotel", "Theories Integration in Smart Hotel Research", and "Consumers' Decisions about Green Hotels" underscore the pivotal areas of past and current research. Co-word analysis further reveals emergent trend clusters: "The New Era of Sustainable Tourism", "Elevating Standards and Guest Loyalty", and "Hotels' New Sustainable Blueprint in Modern Travel". These clusters reflect the industry's evolving focus on sustainability and technology-enhanced guest experiences. Theoretically, this research bridges gaps in smart hotel literature, proposing new frameworks for understanding customer decisions amid technological advancements and environmental responsibilities. Practically, it offers valuable insights for hotel managers, guiding technology integration strategies for enhanced efficiency and customer loyalty while underscoring the critical role of green strategies and sustainability.},
}
@article {pmid38420393,
year = {2024},
author = {Mukred, M and Mokhtar, UA and Hawash, B and AlSalman, H and Zohaib, M},
title = {The adoption and use of learning analytics tools to improve decision making in higher learning institutions: An extension of technology acceptance model.},
journal = {Heliyon},
volume = {10},
number = {4},
pages = {e26315},
pmid = {38420393},
issn = {2405-8440},
abstract = {Learning Analytics Tools (LATs) can be used for informed decision-making regarding teaching strategies and their continuous enhancement. LATs should therefore be adopted in higher learning institutions, but several factors hinder their implementation, primarily the lack of an implementation model. In this study, the focus is directed towards examining LAT adoption in Higher Learning Institutions (HLIs), with emphasis on the determinants of the adoption process. The study mainly aims to design a model of LAT adoption and use in the above context to improve the institutions' decision-making, and accordingly it adopts an extended version of the Technology Acceptance Model (TAM) as the underpinning theory. Five experts validated the employed survey instrument, and 500 questionnaire copies were distributed through e-mails, from which 275 copies were retrieved from Saudi employees working at public HLIs. The gathered data were subjected to Partial Least Squares-Structural Equation Modeling (PLS-SEM) to analyze and test the proposed conceptual model. Based on the findings, the perceived usefulness of LATs plays a significant role as a determinant of their adoption; other determinants include top management support, financial support, and the government's role in LAT acceptance and adoption among HLIs. The findings also supported the contribution of LAT adoption and acceptance towards making informed decisions and highlighted the need for big data facilities and cloud computing capability to support LATs' usefulness. The findings have significant implications for successful LAT implementation among HLIs, providing clear insights into the factors that can enhance adoption and acceptance. They also lay the basis for future studies to further validate the effect of LATs on decision-making among HLIs. Furthermore, the obtained findings offer practical guidance for policymakers and educational leaders aiming to implement LATs using a multi-layered method that considers other aspects in addition to the perceptions of the individual user.},
}
@article {pmid38409183,
year = {2024},
author = {Grossman, RL and Boyles, RR and Davis-Dusenbery, BN and Haddock, A and Heath, AP and O'Connor, BD and Resnick, AC and Taylor, DM and Ahalt, S},
title = {A Framework for the Interoperability of Cloud Platforms: Towards FAIR Data in SAFE Environments.},
journal = {Scientific data},
volume = {11},
number = {1},
pages = {241},
pmid = {38409183},
issn = {2052-4463},
support = {HHSN261201400008C/CA/NCI NIH HHS/United States ; },
mesh = {*Cloud Computing ; *Electronic Health Records ; },
abstract = {As the number of cloud platforms supporting scientific research grows, there is an increasing need to support interoperability between two or more cloud platforms. A well accepted core concept is to make data in cloud platforms Findable, Accessible, Interoperable and Reusable (FAIR). We introduce a companion concept that applies to cloud-based computing environments that we call a Secure and Authorized FAIR Environment (SAFE). SAFE environments require data and platform governance structures and are designed to support the interoperability of sensitive or controlled access data, such as biomedical data. A SAFE environment is a cloud platform that has been approved through a defined data and platform governance process as authorized to hold data from another cloud platform and exposes appropriate APIs for the two platforms to interoperate.},
}
@article {pmid38404043,
year = {2024},
author = {Rusinovich, Y and Rusinovich, V and Buhayenka, A and Liashko, V and Sabanov, A and Holstein, DJF and Aldmour, S and Doss, M and Branzan, D},
title = {Classification of anatomic patterns of peripheral artery disease with automated machine learning (AutoML).},
journal = {Vascular},
volume = {},
number = {},
pages = {17085381241236571},
doi = {10.1177/17085381241236571},
pmid = {38404043},
issn = {1708-539X},
abstract = {AIM: The aim of this study was to investigate the potential of novel automated machine learning (AutoML) in vascular medicine by developing a discriminative artificial intelligence (AI) model for the classification of anatomical patterns of peripheral artery disease (PAD).
MATERIAL AND METHODS: Random open-source angiograms of lower limbs were collected using a web-indexed search. An experienced researcher in vascular medicine labelled the angiograms according to the most applicable grade of femoropopliteal disease in the Global Limb Anatomic Staging System (GLASS). An AutoML model was trained using the Vertex AI (Google Cloud) platform to classify the angiograms according to the GLASS grade with a multi-label algorithm. Following deployment, we conducted a test using 25 random angiograms (five from each GLASS grade). After the initial evaluation, the model was tuned through incremental training, introducing new angiograms up to the limit of the allocated quota, to determine the effect on the software's performance.
RESULTS: We collected 323 angiograms to create the AutoML model. Among these, 80 angiograms were labelled as grade 0 of femoropopliteal disease in GLASS, 114 as grade 1, 34 as grade 2, 25 as grade 3 and 70 as grade 4. After 4.5 h of training, the AI model was deployed. The AI self-assessed average precision was 0.77 (0 is minimal and 1 is maximal). During the testing phase, the AI model successfully determined the GLASS grade in 100% of the cases. The agreement with the researcher was almost perfect with the number of observed agreements being 22 (88%), Kappa = 0.85 (95% CI 0.69-1.0). The best results were achieved in predicting GLASS grade 0 and grade 4 (initial precision: 0.76 and 0.84). However, the AI model exhibited poorer results in classifying GLASS grade 3 (initial precision: 0.2) compared to other grades. Disagreements between the AI and the researcher were associated with the low resolution of the test images. Incremental training expanded the initial dataset by 23% to a total of 417 images, which improved the model's average precision by 11% to 0.86.
CONCLUSION: After a brief training period with a limited dataset, AutoML has demonstrated its potential in identifying and classifying the anatomical patterns of PAD, operating unhindered by the factors that can affect human analysts, such as fatigue or lack of experience. This technology bears the potential to revolutionize outcome prediction and standardize evidence-based revascularization strategies for patients with PAD, leveraging its adaptability and ability to continuously improve with additional data. The pursuit of further research in AutoML within the field of vascular medicine is both promising and warranted. However, it necessitates additional financial support to realize its full potential.},
}
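As a quick illustration of the agreement statistic reported above, the following sketch computes Cohen's kappa between hypothetical AI-assigned and researcher-assigned GLASS grades; the two label lists are invented for demonstration, not the study data.

from sklearn.metrics import cohen_kappa_score

researcher = [0, 0, 1, 1, 2, 2, 3, 3, 4, 4]   # reference labels
ai_model   = [0, 0, 1, 1, 2, 3, 3, 2, 4, 4]   # model predictions

# kappa corrects raw agreement for agreement expected by chance
print("kappa =", cohen_kappa_score(researcher, ai_model))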
@article {pmid38403304,
year = {2024},
author = {Wu, ZF and Yang, SJ and Yang, YQ and Wang, ZQ and Ai, L and Zhu, GH and Zhu, WF},
title = {[Current situation and development trend of digital traditional Chinese medicine pharmacy].},
journal = {Zhongguo Zhong yao za zhi = Zhongguo zhongyao zazhi = China journal of Chinese materia medica},
volume = {49},
number = {2},
pages = {285-293},
doi = {10.19540/j.cnki.cjcmm.20230904.301},
pmid = {38403304},
issn = {1001-5302},
mesh = {Humans ; Medicine, Chinese Traditional ; Artificial Intelligence ; Technology, Pharmaceutical ; Drug Industry ; *Pharmacy ; *Drugs, Chinese Herbal ; },
abstract = {The 21st century is a highly information-driven era, and traditional Chinese medicine(TCM) pharmacy is also moving towards digitization and informatization. New technologies such as artificial intelligence and big data with information technology as the core are being integrated into various aspects of drug research, manufacturing, evaluation, and application, promoting interaction between these stages and improving the quality and efficiency of TCM preparations. This, in turn, provides better healthcare services to the general population. The deep integration of emerging technologies such as artificial intelligence, big data, and cloud computing with the TCM pharmaceutical industry will innovate TCM pharmaceutical technology, accelerate the research and industrialization process of TCM pharmacy, provide cutting-edge technological support to the global scientific community, boost the efficiency of the TCM industry, and promote economic and social development. Drawing from recent developments in TCM pharmacy in China, this paper discussed the current research status and future trends in digital TCM pharmacy, aiming to provide a reference for future research in this field.},
}
@article {pmid38400504,
year = {2024},
author = {Alasmary, H},
title = {ScalableDigitalHealth (SDH): An IoT-Based Scalable Framework for Remote Patient Monitoring.},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {4},
pages = {},
pmid = {38400504},
issn = {1424-8220},
support = {The authors extend their appreciation to the Deanship of Scientific Research at King Khalid University for funding this work through large group Research Project under grant number RGP2/312/44//King Khalid University/ ; },
mesh = {Aged ; Humans ; *Awareness ; *Benchmarking ; Blood Pressure ; Body Temperature ; Monitoring, Physiologic ; },
abstract = {Addressing the increasing demand for remote patient monitoring, especially among the elderly and mobility-impaired, this study proposes the "ScalableDigitalHealth" (SDH) framework. The framework integrates smart digital health solutions with latency-aware edge computing autoscaling, providing a novel approach to remote patient monitoring. By leveraging IoT technology and application autoscaling, the "SDH" enables the real-time tracking of critical health parameters, such as ECG, body temperature, blood pressure, and oxygen saturation. These vital metrics are efficiently transmitted in real time to AWS cloud storage through a layered networking architecture. The contributions are two-fold: (1) establishing real-time remote patient monitoring and (2) developing a scalable architecture that features latency-aware horizontal pod autoscaling for containerized healthcare applications. The architecture incorporates a scalable IoT-based architecture and an innovative microservice autoscaling strategy in edge computing, driven by dynamic latency thresholds and enhanced by the integration of custom metrics. This work ensures heightened accessibility, cost-efficiency, and rapid responsiveness to patient needs, marking a significant leap forward in the field. By dynamically adjusting pod numbers based on latency, the system optimizes system responsiveness, particularly in edge computing's proximity-based processing. This innovative fusion of technologies not only revolutionizes remote healthcare delivery but also enhances Kubernetes performance, preventing unresponsiveness during high usage.},
}
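A minimal sketch of the latency-aware scaling decision described above, assuming a simple threshold-band policy: add a pod when observed p95 latency exceeds an upper bound, remove one when it falls below a lower bound. Real deployments would drive Kubernetes horizontal pod autoscaling through custom metrics; the thresholds and function here are illustrative.

def desired_replicas(current, p95_latency_ms,
                     upper_ms=250.0, lower_ms=80.0,
                     min_pods=1, max_pods=10):
    if p95_latency_ms > upper_ms:          # too slow: add a pod
        return min(current + 1, max_pods)
    if p95_latency_ms < lower_ms:          # over-provisioned: remove one
        return max(current - 1, min_pods)
    return current                         # within band: hold steady

print(desired_replicas(current=3, p95_latency_ms=310.0))  # -> 4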
@article {pmid38400486,
year = {2024},
author = {Dhiman, P and Saini, N and Gulzar, Y and Turaev, S and Kaur, A and Nisa, KU and Hamid, Y},
title = {A Review and Comparative Analysis of Relevant Approaches of Zero Trust Network Model.},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {4},
pages = {},
pmid = {38400486},
issn = {1424-8220},
support = {This research was funded by the United Arab Emirates UAEU-ZU Joint Research Grant G00003819 (Fund No.: 12R138) Emirates Center for Mobility Research.//United Arab Emirates University/ ; },
abstract = {The Zero Trust security architecture emerged as an intriguing approach for overcoming the shortcomings of standard network security solutions. This extensive survey provides a meticulous explanation of the underlying principles of Zero Trust, as well as an assessment of the many strategies and possibilities for effective implementation. The survey begins by examining the role of authentication and access control within Zero Trust Architectures, and subsequently investigates innovative authentication and access control solutions across different scenarios. It then explores traditional techniques for encryption, micro-segmentation, and security automation in more depth, emphasizing their importance in achieving a secure Zero Trust environment. Zero Trust Architecture is explained in brief, along with a taxonomy of Zero Trust network features. This review article provides useful insights into the Zero Trust paradigm, its approaches, problems, and future research objectives for scholars, practitioners, and policymakers. This survey contributes to the growth and implementation of secure network architectures in critical infrastructures by developing a deeper knowledge of Zero Trust.},
}
@article {pmid38400360,
year = {2024},
author = {Li, W and Zhou, H and Lu, Z and Kamarthi, S},
title = {Navigating the Evolution of Digital Twins Research through Keyword Co-Occurence Network Analysis.},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {4},
pages = {},
pmid = {38400360},
issn = {1424-8220},
abstract = {Digital twin technology has become increasingly popular and has revolutionized data integration and system modeling across various industries, such as manufacturing, energy, and healthcare. This study aims to explore the evolving research landscape of digital twins using Keyword Co-occurrence Network (KCN) analysis. We analyze metadata from 9639 peer-reviewed articles published between 2000 and 2023. The results unfold in two parts. The first part examines trends and keyword interconnection over time, and the second part maps sensing technology keywords to six application areas. This study reveals that research on digital twins is rapidly diversifying, with focused themes such as predictive and decision-making functions. Additionally, there is an emphasis on real-time data and point cloud technologies. The advent of federated learning and edge computing also highlights a shift toward distributed computation, prioritizing data privacy. This study confirms that digital twins have evolved into complex systems that can conduct predictive operations through advanced sensing technologies. The discussion also identifies challenges in sensor selection and empirical knowledge integration.},
}
@article {pmid38400338,
year = {2024},
author = {Wiryasaputra, R and Huang, CY and Lin, YJ and Yang, CT},
title = {An IoT Real-Time Potable Water Quality Monitoring and Prediction Model Based on Cloud Computing Architecture.},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {4},
pages = {},
pmid = {38400338},
issn = {1424-8220},
support = {112-2622-E-029-003,112-2621-M-029-004, and 110-2221-E-029-020-MY3//the National Science and Technology Council (NSTC), Taiwan R.O.C./ ; },
mesh = {Humans ; Artificial Intelligence ; Cloud Computing ; *Drinking Water ; *Internet of Things ; Data Accuracy ; },
abstract = {In order to achieve the Sustainable Development Goals (SDGs), it is imperative to ensure the safety of drinking water. The characteristics of each drinkable water source, encompassing taste, aroma, and appearance, are unique. Inadequate water infrastructure and treatment can affect these features and may also threaten public health. This study utilizes the Internet of Things (IoT) in developing a monitoring system, particularly for water quality, to reduce the risk of contracting diseases. Water quality component data, such as water temperature, alkalinity or acidity, and contaminants, were obtained through a series of linked sensors. An Arduino microcontroller board acquired all the data, and the Narrow Band-IoT (NB-IoT) transmitted them to the web server. Because human resources to observe the water quality physically are limited, the monitoring was complemented by real-time notification alerts via a telephone text messaging application. The water quality data were monitored using Grafana in web mode, and binary machine learning classifiers were applied to predict whether the water was drinkable based on the collected data, which were stored in a database. Both decision tree and non-decision-tree classifiers were evaluated within the artificial intelligence framework. With a split of 60% for training, 20% for validation, and 10% for testing, the decision tree (DT) model outperformed the Gradient Boosting (GB), Random Forest (RF), Neural Network (NN), and Support Vector Machine (SVM) modeling approaches. Through the monitoring and prediction of results, the authorities can sample the water sources every two weeks.},
}
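To make the modeling setup concrete, here is a hedged scikit-learn sketch of a decision tree potability classifier using the study's stated 60/20/10 train/validation/test ratios (the remaining 10% is left unallocated, as those ratios imply); the features and labels are synthetic stand-ins for the sensor database.

import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier

rng = np.random.default_rng(0)
X = rng.normal(size=(1000, 3))        # e.g. temperature, pH, contaminants
y = (X[:, 1] > 0).astype(int)         # toy potability label

# 60% train, then 20% validation, 10% test, 10% unallocated
X_train, X_rest, y_train, y_rest = train_test_split(
    X, y, train_size=0.6, random_state=0)
X_val, X_rest2, y_val, y_rest2 = train_test_split(
    X_rest, y_rest, train_size=0.5, random_state=0)      # 20% overall
X_test, _, y_test, _ = train_test_split(
    X_rest2, y_rest2, train_size=0.5, random_state=0)    # 10% overall

clf = DecisionTreeClassifier(max_depth=5).fit(X_train, y_train)
print("validation accuracy:", clf.score(X_val, y_val))
print("test accuracy:", clf.score(X_test, y_test))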
@article {pmid38400323,
year = {2024},
author = {Pan, S and Huang, C and Fan, J and Shi, Z and Tong, J and Wang, H},
title = {Optimizing Internet of Things Fog Computing: Through Lyapunov-Based Long Short-Term Memory Particle Swarm Optimization Algorithm for Energy Consumption Optimization.},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {4},
pages = {},
pmid = {38400323},
issn = {1424-8220},
abstract = {In the era of continuous development in Internet of Things (IoT) technology, smart services are penetrating various facets of societal life, leading to a growing demand for interconnected devices. Many contemporary devices are no longer mere data producers but also consumers of data. As a result, massive amounts of data are transmitted to the cloud, but the latency generated in edge-to-cloud communication is unacceptable for many tasks. In response, this paper introduces a novel contribution: a layered computing network built on the principles of fog computing, accompanied by a newly devised algorithm designed to optimize user tasks and allocate computing resources within rechargeable networks. The proposed algorithm, a synergy of Lyapunov-based optimization, dynamic Long Short-Term Memory (LSTM) networks, and Particle Swarm Optimization (PSO), allows for predictive task allocation. The fog servers dynamically train LSTM networks to effectively forecast the data features of user tasks, facilitating proper offloading decisions based on task priorities. In response to the challenge of slower hardware upgrades in edge devices compared to user demands, the algorithm optimizes the utilization of low-power devices and addresses performance limitations. Additionally, this paper considers the unique characteristics of rechargeable networks, where computing nodes acquire energy through charging. Utilizing Lyapunov functions for dynamic resource control enables nodes with abundant resources to maximize their potential, significantly reducing energy consumption and enhancing overall performance. The simulation results demonstrate that our algorithm surpasses traditional methods in terms of energy efficiency and resource allocation optimization. Despite limited prediction accuracy at the Fog Servers (FS), the proposed approach significantly improves overall performance, enhancing the efficiency and the user experience of Internet of Things systems in terms of latency and energy consumption.},
}
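A compact sketch of the Lyapunov-style control idea, under stated assumptions: each node keeps an energy virtual queue, and a task goes to the node minimizing a drift-plus-penalty score that trades queue backlog against latency. The weights and the queue update below are textbook Lyapunov optimization, not the paper's exact formulation.

def choose_node(queues, latencies, energy_cost, v=1.0):
    """queues[i]: energy virtual queue; latencies[i]: estimated delay;
    energy_cost[i]: energy the task would consume at node i."""
    scores = [queues[i] * energy_cost[i] + v * latencies[i]
              for i in range(len(queues))]
    return min(range(len(queues)), key=scores.__getitem__)

def update_queue(q, consumed, harvested):
    """Virtual-queue update: backlog grows with consumption and drains
    with harvested (charged) energy, never dropping below zero."""
    return max(q + consumed - harvested, 0.0)

queues, latencies, energy = [2.0, 0.5], [5.0, 9.0], [1.0, 1.2]
node = choose_node(queues, latencies, energy, v=1.0)
queues[node] = update_queue(queues[node], energy[node], harvested=0.8)
print(node, queues)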
@article {pmid38400319,
year = {2024},
author = {Brata, KC and Funabiki, N and Panduman, YYF and Fajrianti, ED},
title = {An Enhancement of Outdoor Location-Based Augmented Reality Anchor Precision through VSLAM and Google Street View.},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {4},
pages = {},
pmid = {38400319},
issn = {1424-8220},
abstract = {Outdoor Location-Based Augmented Reality (LAR) applications require precise positioning for seamless integration of virtual content into immersive experiences. However, common solutions in outdoor LAR applications rely on traditional smartphone sensor fusion methods, such as the Global Positioning System (GPS) and compasses, which often lack the accuracy needed for precise AR content alignment. In this paper, we introduce an innovative approach to enhance LAR anchor precision in outdoor environments. We leveraged Visual Simultaneous Localization and Mapping (VSLAM) technology, in combination with innovative cloud-based methodologies, and harnessed the extensive visual reference database of Google Street View (GSV) to address these accuracy limitations. For the evaluation, 10 Point of Interest (POI) locations were used as anchor point coordinates in the experiments. We comprehensively compared the accuracy of our approach with that of the common sensor fusion LAR solution, involving accuracy benchmarking and running-load performance testing. The results demonstrate substantial enhancements in overall positioning accuracy compared to conventional GPS-based approaches for aligning AR anchor content in the real world.},
}
@article {pmid38376453,
year = {2024},
author = {Horstmann, A and Riggs, S and Chaban, Y and Clare, DK and de Freitas, G and Farmer, D and Howe, A and Morris, KL and Hatton, D},
title = {A service-based approach to cryoEM facility processing pipelines at eBIC.},
journal = {Acta crystallographica. Section D, Structural biology},
volume = {80},
number = {Pt 3},
pages = {174-180},
pmid = {38376453},
issn = {2059-7983},
mesh = {*Software ; *Image Processing, Computer-Assisted/methods ; Cryoelectron Microscopy/methods ; Workflow ; Cloud Computing ; },
abstract = {Electron cryo-microscopy image-processing workflows are typically composed of elements that may, broadly speaking, be categorized as high-throughput workloads which transition to high-performance workloads as preprocessed data are aggregated. The high-throughput elements are of particular importance in the context of live processing, where an optimal response is highly coupled to the temporal profile of the data collection. In other words, each movie should be processed as quickly as possible at the earliest opportunity. The high level of disconnected parallelization in the high-throughput problem directly allows a completely scalable solution across a distributed computer system, with the only technical obstacle being an efficient and reliable implementation. The cloud computing frameworks primarily developed for the deployment of high-availability web applications provide an environment with a number of appealing features for such high-throughput processing tasks. Here, an implementation of an early-stage processing pipeline for electron cryotomography experiments using a service-based architecture deployed on a Kubernetes cluster is discussed in order to demonstrate the benefits of this approach and how it may be extended to scenarios of considerably increased complexity.},
}
@article {pmid38370642,
year = {2024},
author = {McMurry, AJ and Gottlieb, DI and Miller, TA and Jones, JR and Atreja, A and Crago, J and Desai, PM and Dixon, BE and Garber, M and Ignatov, V and Kirchner, LA and Payne, PRO and Saldanha, AJ and Shankar, PRV and Solad, YV and Sprouse, EA and Terry, M and Wilcox, AB and Mandl, KD},
title = {Cumulus: A federated EHR-based learning system powered by FHIR and AI.},
journal = {medRxiv : the preprint server for health sciences},
volume = {},
number = {},
pages = {},
pmid = {38370642},
support = {NU38OT000286/OT/OSTLTS CDC HHS/United States ; U01 TR002623/TR/NCATS NIH HHS/United States ; U01 TR002997/TR/NCATS NIH HHS/United States ; U18DP006500/ACL/ACL HHS/United States ; },
abstract = {OBJECTIVE: To address challenges in large-scale electronic health record (EHR) data exchange, we sought to develop, deploy, and test an open source, cloud-hosted app 'listener' that accesses standardized data across the SMART/HL7 Bulk FHIR Access application programming interface (API).
METHODS: We advance a model for scalable, federated, data sharing and learning. Cumulus software is designed to address key technology and policy desiderata including local utility, control, and administrative simplicity as well as privacy preservation during robust data sharing, and AI for processing unstructured text.
RESULTS: Cumulus relies on containerized, cloud-hosted software, installed within a healthcare organization's security envelope. Cumulus accesses EHR data via the Bulk FHIR interface and streamlines automated processing and sharing. The modular design enables use of the latest AI and natural language processing tools and supports provider autonomy and administrative simplicity. In an initial test, Cumulus was deployed across five healthcare systems, each partnered with public health. Cumulus outputs patient counts, which were aggregated into a table stratified by variables of interest to enable population health studies. All code is available open source. A policy stipulating that only aggregate data leave the institution greatly facilitated data sharing agreements.
DISCUSSION AND CONCLUSION: Cumulus addresses barriers to data sharing based on (1) federally required support for standard APIs, (2) increasing use of cloud computing, and (3) advances in AI. There is potential for scalability to support learning across myriad network configurations and use cases.},
}
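For orientation, this is a minimal sketch of the SMART/HL7 Bulk FHIR Access flow that a Cumulus-style listener builds on: kick off a system-level $export, then poll the returned status URL until the export manifest is ready. The base URL and bearer token are placeholders, and production use requires SMART Backend Services authorization rather than a hard-coded token.

import time, requests

BASE = "https://fhir.example.org"            # hypothetical FHIR server
HEADERS = {"Accept": "application/fhir+json",
           "Prefer": "respond-async",
           "Authorization": "Bearer <token>"}

# Kick-off request: the server replies 202 with a status URL
kickoff = requests.get(f"{BASE}/$export", headers=HEADERS)
status_url = kickoff.headers["Content-Location"]

while True:
    status = requests.get(status_url,
                          headers={"Authorization": "Bearer <token>"})
    if status.status_code == 200:            # export complete
        manifest = status.json()             # NDJSON file URLs per resource
        break
    time.sleep(int(status.headers.get("Retry-After", "30")))

print([f["type"] for f in manifest["output"]])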
@article {pmid38370229,
year = {2024},
author = {Yadav, N and Pattabiraman, B and Tummuru, NR and Soundharajan, BS and Kasiviswanathan, KS and Adeloye, AJ and Sen, S and Maurya, M and Vijayalakshmanan, S},
title = {Toward improving water-energy-food nexus through dynamic energy management of solar powered automated irrigation system.},
journal = {Heliyon},
volume = {10},
number = {4},
pages = {e25359},
pmid = {38370229},
issn = {2405-8440},
abstract = {This paper focuses on developing a water- and energy-saving, reliable irrigation system using state-of-the-art computing, communication, and optimal energy management frameworks. The framework integrates real-time soil moisture and weather forecasting information to decide the time of irrigation and the quantity of water required for potato crops, which is made available to users across a region through the cloud-based irrigation decision support system. This is accomplished through various modules such as data acquisition, soil moisture forecasting, smart irrigation scheduling, and an energy management scheme. The main emphasis is on the electrical segment, which demonstrates an energy management scheme for a PV-battery based grid-connected system to operate the irrigation system valves and water pump. The proposed scheme is verified through simulation and dSpace-based real-time experimental studies. Overall, the proposed energy management system demonstrates an improvement in optimal onsite solar power generation and storage capacity to power the solar pump, saving electrical energy as well as water in order to establish an improved solar-irrigation system. Finally, the proposed system achieved water and energy savings of around 9.24 % for potato crops under full irrigation, enhancing the Water-Energy-Food Nexus at field scale.},
}
@article {pmid38365804,
year = {2024},
author = {Beteri, J and Lyimo, JG and Msinde, JV},
title = {The influence of climatic and environmental variables on sunflower planting season suitability in Tanzania.},
journal = {Scientific reports},
volume = {14},
number = {1},
pages = {3906},
pmid = {38365804},
issn = {2045-2322},
mesh = {Seasons ; *Helianthus ; Tanzania ; Temperature ; Plants ; *Asteraceae ; },
abstract = {Crop survival and growth require identifying correlations between suitable planting seasons and relevant climatic and environmental characteristics. Climatic and environmental conditions may cause water and heat stress at critical stages of crop development, affecting planting suitability. Consequently, this may affect crop yield and productivity. This study assesses the influence of climatic and environmental variables on rain-fed sunflower planting season suitability in Tanzania. Data on rainfall, temperature, slope, elevation, soil, and land use/cover were accessed from publicly available sources using Google Earth Engine, a cloud-based geospatial computing platform for remotely sensed datasets. The Tanzania sunflower production calendar of 2022 was adopted to mark the start and end limits of planting across the country, and the default climate and environmental parameters from the FAO database were used. In addition, Pearson correlation was used to evaluate the relationship of rainfall and temperature with the Normalized Difference Vegetation Index (NDVI) from 2000 to 2020 at five-year intervals for January-April and June-September, the high- and poor-suitability seasons. The results showed that planting suitability of sunflower in Tanzania is driven more by rainfall than by temperature. Intra-annual planting suitability increases gradually from the short- to the long-rain season and diminishes towards the dry season of the year, with the January-April planting window showing the highest suitability (41.65%) and June-September the lowest (0.05%). Though not statistically significant, rainfall and NDVI were positively correlated with r = 0.65 and 0.75, whereas temperature and NDVI were negatively correlated with r = -0.6 and -0.77. We recommend sunflower subsector interventions that consider appropriate intra-regional and seasonal diversity as an important adaptive mechanism to ensure high sunflower yields.},
}
@article {pmid38360949,
year = {2024},
author = {Periola, AA and Alonge, AA and Ogudo, KA},
title = {Ocean warming events resilience capability in underwater computing platforms.},
journal = {Scientific reports},
volume = {14},
number = {1},
pages = {3781},
pmid = {38360949},
issn = {2045-2322},
abstract = {Underwater data centers (UDCs) use the ocean's cold-water resources for free cooling and have low cooling costs. However, UDC cooling is affected by marine heat waves and underwater seismic events, thereby affecting UDC operational continuity. Though feasible, the use of reservoirs for UDC cooling is non-scalable due to high computing overhead and an inability to support continuity through long-duration marine heat waves. The presented research proposes a mobile UDC (capable of migration) to address this challenge. The proposed UDC migrates from ocean regions with high underwater ground displacement to regions with little or no underwater ground displacement. It supports multiple client underwater applications without requiring clients to develop, deploy, and launch their own UDCs. The manner of resource utilization is influenced by each client's service level agreement. Hence, the proposed UDC provides resilient services to clients and their applications. Analysis shows that using the mobile UDC instead of the existing reservoir UDC approach enhances operational duration and power usage effectiveness by 8.9-48.5% and 55.6-70.7% on average, respectively. In addition, the overhead is reduced by an average of 95.8-99.4%.},
}
@article {pmid38355983,
year = {2024},
author = {Kashyap, P and Shivgan, K and Patil, S and Raja, BR and Mahajan, S and Banerjee, S and Tallur, S},
title = {Unsupervised deep learning framework for temperature-compensated damage assessment using ultrasonic guided waves on edge device.},
journal = {Scientific reports},
volume = {14},
number = {1},
pages = {3751},
pmid = {38355983},
issn = {2045-2322},
support = {RD/0118-ISROC00-006//Indian Space Research Organisation/ ; CRG/2021/001959//Science and Engineering Research Board/ ; },
abstract = {Fueled by the rapid development of machine learning (ML) and greater access to cloud computing and graphics processing units, various deep learning based models have been proposed for improving the performance of ultrasonic guided wave structural health monitoring (GW-SHM) systems, especially to counter complexity and heterogeneity in data due to varying environmental factors (e.g., temperature) and types of damage. Such models typically comprise millions of trainable parameters, and therefore add to the cost of deployment due to requirements of cloud connectivity and processing, thus limiting the scale of deployment of GW-SHM. In this work, we propose an alternative solution that leverages the TinyML framework for development of lightweight ML models that can be deployed directly on embedded edge devices. The utility of our solution is illustrated by presenting an unsupervised learning framework for damage detection in a honeycomb composite sandwich structure with disbond and delamination types of damage, validated using data generated by finite element simulations and experiments performed at various temperatures in the range 0-90 °C. We demonstrate a fully-integrated solution using a Xilinx Artix-7 FPGA for data acquisition and control, and edge-inference of damage. Despite the limited number of features, the lightweight model shows reasonably high accuracy, thereby enabling detection of small defects with improved sensitivity on an edge device for online GW-SHM.},
}
@article {pmid38351164,
year = {2024},
author = {Feng, Q and Niu, B and Ren, Y and Su, S and Wang, J and Shi, H and Yang, J and Han, M},
title = {A 10-m national-scale map of ground-mounted photovoltaic power stations in China of 2020.},
journal = {Scientific data},
volume = {11},
number = {1},
pages = {198},
pmid = {38351164},
issn = {2052-4463},
support = {42001367//National Natural Science Foundation of China (National Science Foundation of China)/ ; },
abstract = {We provide a remote sensing derived dataset of large-scale ground-mounted photovoltaic (PV) power stations in China for 2020, with a high spatial resolution of 10 meters. The dataset was produced on the Google Earth Engine (GEE) cloud computing platform using a random forest classifier and an active learning strategy. Specifically, ground samples were carefully collected across China via both field survey and visual interpretation. Afterwards, spectral and texture features were calculated from publicly available Sentinel-2 imagery. Meanwhile, topographic features consisting of slope and aspect, which are sensitive to PV locations, were also included, aiming to construct a multi-dimensional and discriminative feature space. Finally, the trained random forest model was used to predict PV power stations across China in parallel on GEE. Technical validation was carefully performed across China and achieved a satisfactory accuracy of over 89%. As the first publicly released 10-m national-scale distribution dataset of China's ground-mounted PV power stations, it can provide a data reference for researchers in fields such as energy, land, remote sensing, and environmental sciences.},
}
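A hedged sketch of this kind of pipeline with the Earth Engine Python API follows: sample Sentinel-2 bands at labelled points, train a random forest, and classify the mosaic. It assumes an authenticated Earth Engine session; the sample asset path and label property name are placeholders, and the paper's spectral, texture, and topographic feature engineering is omitted.

import ee
ee.Initialize()  # requires prior Earth Engine authentication

# Annual Sentinel-2 surface reflectance median composite
image = (ee.ImageCollection("COPERNICUS/S2_SR")
         .filterDate("2020-01-01", "2020-12-31")
         .median())
bands = ["B2", "B3", "B4", "B8"]             # blue, green, red, NIR

points = ee.FeatureCollection("users/example/pv_samples")  # hypothetical
training = image.select(bands).sampleRegions(
    collection=points, properties=["class"], scale=10)

classifier = ee.Classifier.smileRandomForest(numberOfTrees=100).train(
    features=training, classProperty="class", inputProperties=bands)

pv_map = image.select(bands).classify(classifier)  # server-side result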
@article {pmid38351065,
year = {2024},
author = {Chuntakaruk, H and Hengphasatporn, K and Shigeta, Y and Aonbangkhen, C and Lee, VS and Khotavivattana, T and Rungrotmongkol, T and Hannongbua, S},
title = {FMO-guided design of darunavir analogs as HIV-1 protease inhibitors.},
journal = {Scientific reports},
volume = {14},
number = {1},
pages = {3639},
pmid = {38351065},
issn = {2045-2322},
mesh = {Humans ; Darunavir/pharmacology ; *HIV Protease Inhibitors/pharmacology/chemistry ; *HIV-1/genetics ; Molecular Docking Simulation ; Sulfonamides/pharmacology ; *HIV Infections ; Viral Proteins/genetics ; HIV Protease/metabolism ; Mutation ; Drug Resistance, Viral/genetics ; },
abstract = {The prevalence of HIV-1 infection continues to pose a significant global public health issue, highlighting the need for antiretroviral drugs that target viral proteins to reduce viral replication. One such target is HIV-1 protease (PR), responsible for cleaving viral polyproteins, leading to the maturation of viral proteins. While darunavir (DRV) is a potent HIV-1 PR inhibitor, drug resistance can arise due to mutations in HIV-1 PR. To address this issue, we developed a novel approach using the fragment molecular orbital (FMO) method and structure-based drug design to create DRV analogs. Using combinatorial programming, we generated novel analogs that are freely accessible via the Combined Analog generator Tool (CAT), implemented in an on-the-cloud mode in Google Colab. The designed analogs underwent cascade screening through molecular docking with HIV-1 PR wild-type and major mutations at the active site. Molecular dynamics (MD) simulations were then used to assess the ligand binding and susceptibility of the screened designed analogs. Our findings indicate that the three designed analogs guided by FMO, 19-0-14-3, 19-8-10-0, and 19-8-14-3, are superior to DRV and have the potential to serve as efficient PR inhibitors. These findings demonstrate the effectiveness of our approach and its potential to be used in further studies for developing new antiretroviral drugs.},
}
@article {pmid38350039,
year = {2024},
author = {Bell, J and Decker, B and Eichmann, A and Palkovich, C and Reji, C},
title = {Effectiveness of Virtual Reality for Upper Extremity Function and Motor Performance of Children With Cerebral Palsy: A Systematic Review.},
journal = {The American journal of occupational therapy : official publication of the American Occupational Therapy Association},
volume = {78},
number = {2},
pages = {},
doi = {10.5014/ajot.2024.050374},
pmid = {38350039},
issn = {0272-9490},
mesh = {Child ; Humans ; Young Adult ; Adult ; *Cerebral Palsy ; Upper Extremity ; *Virtual Reality ; Language ; },
abstract = {IMPORTANCE: Research on the functional and motor performance impact of virtual reality (VR) as an intervention tool for children with cerebral palsy (CP) is limited.
OBJECTIVE: To understand whether VR is an effective intervention to improve upper extremity (UE) function and motor performance of children diagnosed with CP.
DATA SOURCES: Databases used in the search were EBSCOhost, One Search, PubMed, Cloud Source, CINAHL, SPORTDiscus, and Google Scholar.
Studies published from 2006 to 2021 were included if children had a diagnosis of CP and were age 21 yr or younger, VR was used as an intervention, and measures of UE function and motor performance were used.
FINDINGS: Twenty-one studies were included, and the results provided promising evidence for improvements in areas of UE function, motor performance, and fine motor skills when VR is used as an intervention. To yield noticeable UE improvements in children with CP, VR should be implemented for 30 to 60 min/session and for at least 360 min over more than 3 wk. Additional areas of improvement include gross motor skills, functional mobility, occupational performance, and intrinsic factors.
CONCLUSIONS AND RELEVANCE: The use of VR as an intervention for children with CP to improve UE function and motor performance is supported. More randomized controlled trials with larger sample sizes focusing on similar outcomes and intervention frequencies are needed to determine the most effective type of VR for use in clinical occupational therapy. Plain-Language Summary: This systematic review explains how virtual reality (VR) has been used as an intervention with children with cerebral palsy (CP). The review synthesizes the results of 21 research studies of children who had a diagnosis of CP and who were 21 years old or younger. The findings support using VR to improve upper extremity performance, motor performance, and fine motor skills. The findings also show that occupational therapy practitioners should use a VR intervention at a minimum frequency of 30 to 60 minutes per session and for at least 360 minutes over more than 3 weeks to yield noticeable improvements in upper extremity, motor performance, and fine motor skills for children with CP.},
}
@article {pmid38347885,
year = {2024},
author = {Bhattacharjee, T and Kiwuwa-Muyingo, S and Kanjala, C and Maoyi, ML and Amadi, D and Ochola, M and Kadengye, D and Gregory, A and Kiragga, A and Taylor, A and Greenfield, J and Slaymaker, E and Todd, J and , },
title = {INSPIRE datahub: a pan-African integrated suite of services for harmonising longitudinal population health data using OHDSI tools.},
journal = {Frontiers in digital health},
volume = {6},
number = {},
pages = {1329630},
pmid = {38347885},
issn = {2673-253X},
abstract = {INTRODUCTION: Population health data integration remains a critical challenge in low- and middle-income countries (LMIC), hindering the generation of actionable insights to inform policy and decision-making. This paper proposes a pan-African, Findable, Accessible, Interoperable, and Reusable (FAIR) research architecture and infrastructure named the INSPIRE datahub. This cloud-based Platform-as-a-Service (PaaS) and on-premises setup aims to enhance the discovery, integration, and analysis of clinical, population-based surveys, and other health data sources.
METHODS: The INSPIRE datahub, part of the Implementation Network for Sharing Population Information from Research Entities (INSPIRE), employs the Observational Health Data Sciences and Informatics (OHDSI) open-source stack of tools and the Observational Medical Outcomes Partnership (OMOP) Common Data Model (CDM) to harmonise data from African longitudinal population studies. Operating on Microsoft Azure and Amazon Web Services cloud platforms, and on on-premises servers, the architecture offers adaptability and scalability for other cloud providers and technology infrastructure. The OHDSI-based tools enable a comprehensive suite of services for data pipeline development, profiling, mapping, extraction, transformation, loading, documentation, anonymization, and analysis.
RESULTS: The INSPIRE datahub's "On-ramp" services facilitate the integration of data and metadata from diverse sources into the OMOP CDM. The datahub supports the implementation of OMOP CDM across data producers, harmonizing source data semantically with standard vocabularies and structurally conforming to OMOP table structures. Leveraging OHDSI tools, the datahub performs quality assessment and analysis of the transformed data. It ensures FAIR data by establishing metadata flows, capturing provenance throughout the ETL processes, and providing accessible metadata for potential users. The ETL provenance is documented in a machine- and human-readable Implementation Guide (IG), enhancing transparency and usability.
CONCLUSION: The pan-African INSPIRE datahub presents a scalable and systematic solution for integrating health data in LMICs. By adhering to FAIR principles and leveraging established standards like OMOP CDM, this architecture addresses the current gap in generating evidence to support policy and decision-making for improving the well-being of LMIC populations. The federated research network provisions allow data producers to maintain control over their data, fostering collaboration while respecting data privacy and security concerns. A use-case demonstrated the pipeline using OHDSI and other open-source tools.},
}
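The structural harmonisation step described above (conforming source records to OMOP table structures with standard vocabularies) can be sketched in miniature. The source field names are assumptions and the person table is reduced to three columns; only the gender concept IDs (8507 male, 8532 female) are standard OMOP vocabulary values.

    # Sketch: conform one source survey record to a pared-down OMOP-style
    # "person" table. Source field names are illustrative; 8507/8532 are the
    # standard OMOP gender concept IDs for male/female.
    import sqlite3

    SOURCE_RECORD = {"participant_id": 101, "sex": "F", "birth_year": 1984}
    GENDER_CONCEPTS = {"M": 8507, "F": 8532}

    conn = sqlite3.connect(":memory:")
    conn.execute("""CREATE TABLE person (
        person_id INTEGER PRIMARY KEY,
        gender_concept_id INTEGER,
        year_of_birth INTEGER)""")
    conn.execute(
        "INSERT INTO person VALUES (?, ?, ?)",
        (SOURCE_RECORD["participant_id"],
         GENDER_CONCEPTS[SOURCE_RECORD["sex"]],
         SOURCE_RECORD["birth_year"]))
    print(conn.execute("SELECT * FROM person").fetchall())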
@article {pmid38345858,
year = {2024},
author = {Zandesh, Z},
title = {Privacy, Security, and Legal Issues in the Health Cloud: Structured Review for Taxonomy Development.},
journal = {JMIR formative research},
volume = {8},
number = {},
pages = {e38372},
pmid = {38345858},
issn = {2561-326X},
abstract = {BACKGROUND: Privacy in our digital world is a very complicated topic, especially where the technological achievements of cloud computing meet its multidimensional context. Here, privacy is an extended concept that is sometimes framed as legal, philosophical, or even technical. Consequently, there is a need to harmonize it with other aspects of health care in order to provide a new ecosystem. This new ecosystem can lead to a paradigm shift involving the reconstruction and redesign of some of the most important and essential requirements, such as privacy concepts, legal issues, and security services. Cloud computing in the health domain has markedly contributed to other technologies, such as mobile health, the health Internet of Things, and wireless body area networks, with their increasing numbers of embedded applications. Other dependent applications commonly used in health businesses, such as social networks, as well as some newly introduced applications, have issues regarding privacy transparency boundaries and privacy-preserving principles, which have made policy making difficult in the field.
OBJECTIVE: One way to overcome this challenge is to develop a taxonomy to identify all relevant factors. A taxonomy serves to bring conceptual clarity to the set of alternatives in in-person health care delivery. This study aimed to construct a comprehensive taxonomy for privacy in the health cloud, which also provides a prospective landscape for privacy in related technologies.
METHODS: A search was performed for relevant published English papers in databases, including Web of Science, IEEE Digital Library, Google Scholar, Scopus, and PubMed. A total of 2042 papers were related to the health cloud privacy concept according to predefined keywords and search strings. Taxonomy designing was performed using the deductive methodology.
RESULTS: This taxonomy has 3 layers. The first layer has 4 main dimensions, including cloud, data, device, and legal. The second layer has 15 components, and the final layer has related subcategories (n=57). This taxonomy covers some related concepts, such as privacy, security, confidentiality, and legal issues, which are categorized here and defined by their expansion and distinctive boundaries. The main merits of this taxonomy are its ability to clarify privacy terms for different scenarios and signalize the privacy multidisciplinary objectification in eHealth.
CONCLUSIONS: This taxonomy can cover health industry requirements with its specifications, like health data and scenarios, which are considered the most complicated among businesses and industries. Therefore, the use of this taxonomy could be generalized and customized to other domains and businesses that have fewer complications. Moreover, this taxonomy has different stakeholders, including people, organizations, and systems. If the antecedent effort in the taxonomy is proven, subject matter experts could enhance the extent of privacy in the health cloud by verifying, evaluating, and revising this taxonomy.},
}
@article {pmid38345524,
year = {2024},
author = {McCoy, ES and Park, SK and Patel, RP and Ryan, DF and Mullen, ZJ and Nesbitt, JJ and Lopez, JE and Taylor-Blake, B and Vanden, KA and Krantz, JL and Hu, W and Garris, RL and Snyder, MG and Lima, LV and Sotocinal, SG and Austin, JS and Kashlan, AD and Shah, S and Trocinski, AK and Pudipeddi, SS and Major, RM and Bazick, HO and Klein, MR and Mogil, JS and Wu, G and Zylka, MJ},
title = {Development of PainFace software to simplify, standardize, and scale up mouse grimace analyses.},
journal = {Pain},
volume = {},
number = {},
pages = {},
doi = {10.1097/j.pain.0000000000003187},
pmid = {38345524},
issn = {1872-6623},
support = {R01NS114259/NS/NINDS NIH HHS/United States ; },
abstract = {Facial grimacing is used to quantify spontaneous pain in mice and other mammals, but scoring relies on humans with different levels of proficiency. Here, we developed a cloud-based software platform called PainFace (http://painface.net) that uses machine learning to detect 4 facial action units of the mouse grimace scale (orbitals, nose, ears, whiskers) and score facial grimaces of black-coated C57BL/6 male and female mice on a 0 to 8 scale. Platform accuracy was validated in 2 different laboratories, with 3 conditions that evoke grimacing: laparotomy surgery, bilateral hindpaw injection of carrageenan, and intraplantar injection of formalin. PainFace can generate up to 1 grimace score per second from a standard 30 frames/s video, making it possible to quantify facial grimacing over time, and operates at a speed that scales with computing power. By analyzing the frequency distribution of grimace scores, we found that mice spent 7x more time in a "high grimace" state following laparotomy surgery relative to sham surgery controls. Our study shows that PainFace reproducibly quantifies facial grimaces indicative of nonevoked spontaneous pain and enables laboratories to standardize and scale up facial grimace analyses.},
}
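The frequency-distribution analysis described above (time spent in a "high grimace" state, computed from per-second scores) can be sketched as follows; the threshold and the synthetic score series are illustrative assumptions, since the abstract does not give the paper's cutoff.

    # Sketch: share of scored seconds spent in a "high grimace" state, from
    # per-second scores on the 0-8 mouse grimace scale. The threshold of 6
    # and the synthetic scores are illustrative, not the paper's values.
    import numpy as np

    rng = np.random.default_rng(0)
    scores = rng.integers(0, 9, size=1800)      # stand-in: 30 min of scores

    def high_grimace_fraction(scores, threshold=6):
        """Fraction of seconds with a grimace score at or above threshold."""
        return float((np.asarray(scores) >= threshold).mean())

    print(f"high-grimace fraction: {high_grimace_fraction(scores):.3f}")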
@article {pmid38344670,
year = {2024},
author = {Simpson, RL and Lee, JA and Li, Y and Kang, YJ and Tsui, C and Cimiotti, JP},
title = {Medicare meets the cloud: the development of a secure platform for the storage and analysis of claims data.},
journal = {JAMIA open},
volume = {7},
number = {1},
pages = {ooae007},
pmid = {38344670},
issn = {2574-2531},
support = {R01 HS026232/HS/AHRQ HHS/United States ; },
abstract = {INTRODUCTION: Cloud-based solutions are a modern-day necessity for data-intensive computing. This case report describes in detail the development and implementation of Amazon Web Services (AWS) at Emory: a secure, reliable, and scalable platform to store and analyze identifiable research data from the Centers for Medicare and Medicaid Services (CMS).
MATERIALS AND METHODS: Interdisciplinary teams from CMS, MBL Technologies, and Emory University collaborated to ensure compliance with CMS policy that consolidates laws, regulations, and other drivers of information security and privacy.
RESULTS: A dedicated team of individuals ensured successful transition from a physical storage server to a cloud-based environment. This included implementing access controls, vulnerability scanning, and audit logs that are reviewed regularly with a remediation plan. User adaptation required specific training to overcome the challenges of cloud computing.
CONCLUSION: Challenges created opportunities for lessons learned through the creation of an end-product accepted by CMS and shared across disciplines university-wide.},
}
@article {pmid38339714,
year = {2024},
author = {González-Herbón, R and González-Mateos, G and Rodríguez-Ossorio, JR and Domínguez, M and Alonso, S and Fuertes, JJ},
title = {An Approach to Develop Digital Twins in Industry.},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {3},
pages = {},
pmid = {38339714},
issn = {1424-8220},
support = {Grant PID2020-117890RB-I00//Ministerio de Ciencia e Innovación/ ; },
abstract = {The industry is currently undergoing a digital revolution driven by the integration of several enabling technologies. These include automation, robotics, cloud computing, industrial cybersecurity, systems integration, digital twins, etc. Of particular note is the increasing use of digital twins, which offer significant added value by providing realistic and fully functional process simulations. This paper proposes an approach for developing digital twins in industrial environments. The novelty lies in not only focusing on obtaining the model of the industrial system and integrating virtual reality and/or augmented reality but also in emphasizing the importance of incorporating other enabling technologies of Industry 4.0, such as system integration, connectivity with standard and specific industrial protocols, cloud services, or new industrial automation systems, to enhance the capabilities of the digital twin. Furthermore, the software tools that can be used to achieve this integration are proposed. Unity is chosen as the real-time 3D development tool for its cross-platform capability and streamlined industrial system modeling. The integration of augmented reality is facilitated by the Vuforia SDK. Node-RED is selected as the system integration option, and communications are carried out with the MQTT protocol. Finally, cloud-based services are recommended for effective data storage and processing. Furthermore, this approach has been used to develop a digital twin of a robotic electro-pneumatic cell.},
}
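The communication layer described above (Node-RED integration over MQTT) can be sketched with the paho-mqtt client publishing one twin state update; the broker address and topic naming are assumptions, and a Node-RED flow would subscribe to the same topic to route the data onward.

    # Sketch: publish one digital-twin state update over MQTT. Broker host
    # and topic are illustrative assumptions; a Node-RED flow could
    # subscribe to the same topic to route the data onward.
    import json
    import time

    import paho.mqtt.client as mqtt

    BROKER_HOST = "localhost"            # assumed local broker, e.g. Mosquitto
    TOPIC = "factory/cell1/twin/state"   # hypothetical topic naming scheme

    client = mqtt.Client()  # paho-mqtt 1.x style; 2.x also takes a CallbackAPIVersion
    client.connect(BROKER_HOST, 1883, keepalive=60)

    state = {"timestamp": time.time(), "pressure_bar": 5.8, "valve_open": True}
    client.publish(TOPIC, json.dumps(state), qos=1)
    client.disconnect()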
@article {pmid38339672,
year = {2024},
author = {Lu, Y and Zhou, L and Zhang, A and Zha, S and Zhuo, X and Ge, S},
title = {Application of Deep Learning and Intelligent Sensing Analysis in Smart Home.},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {3},
pages = {},
pmid = {38339672},
issn = {1424-8220},
abstract = {Deep learning technology can improve sensing efficiency and discover potential patterns in data; in the field of smart homes, it has further improved the efficiency of user behavior recognition, making the recognition process more intelligent and humanized. This paper analyzes the optical sensors commonly used in smart homes and their working principles through case studies and explores the technical framework of user behavior recognition based on optical sensors. At the same time, CiteSpace (Basic version 6.2.R6) software is used to visualize and analyze the related literature, elaborate the main research hotspots and evolutionary changes of optical sensor-based smart home user behavior recognition, and summarize the future research trends. Finally, fully utilizing the advantages of cloud computing technology, such as scalability and on-demand services, and combining typical life situations with the requirements of smart home users, a smart home data collection and processing technology framework based on elderly fall monitoring scenarios is designed. Based on the comprehensive research results, the application and positive impact of optical sensors in smart home user behavior recognition were analyzed, and inspiration was provided for future smart home user experience research.},
}
@article {pmid38339591,
year = {2024},
author = {Ehtisham, M and Hassan, MU and Al-Awady, AA and Ali, A and Junaid, M and Khan, J and Abdelrahman Ali, YA and Akram, M},
title = {Internet of Vehicles (IoV)-Based Task Scheduling Approach Using Fuzzy Logic Technique in Fog Computing Enables Vehicular Ad Hoc Network (VANET).},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {3},
pages = {},
pmid = {38339591},
issn = {1424-8220},
support = {NU/IFC/2/SERC/-/47//Najran University/ ; Authors would like to acknowledge the support of the Deputy for Research and Innovation Ministry of Education, Kingdom of Saudi Arabia, for this research through a grant (NU/IFC/2/SERC/-/47) under the Institutional Funding Committee at Najran University,//Najran University/ ; },
abstract = {The intelligent transportation system (ITS) relies heavily on the vehicular ad hoc network (VANET) and the internet of vehicles (IoV), which combine cloud and fog computing to improve task processing capabilities. As an extension of the cloud, the fog infrastructure sits close to the VANET, fostering an environment favorable to smart cars with IT equipment and effective task management oversight. In a VANET, vehicle processing power, bandwidth, and time are all limited, and vehicles move at high speed. It is critical to satisfy the vehicles' requirements for minimal latency and fast reaction times when offloading duties to the fog layer. We propose a fuzzy logic-based task scheduling system in VANET to minimize latency and improve response time when offloading tasks in the IoV. The proposed method effectively transfers workloads to the fog computing layer while considering the constrained resources of car nodes. After choosing a suitable processing unit, the algorithm sends the job and its associated resources to the fog layer. The dataset contains crisp values for fog computing covering system utilization, latency, and task deadline time for over 5000 entries. The task execution, latency, task deadline, storage, CPU, and bandwidth utilizations are used for the fuzzy set values. We proved the effectiveness of our proposed task scheduling framework via simulation tests, outperforming current algorithms by improving the task completion ratio by 13%, decreasing average turnaround time by 9%, minimizing makespan time by 15%, and reducing average latency within the network parameters. The proposed technique shows better results and responses than previous techniques by scheduling tasks toward fog layers with less response time and minimizing the overall time from task submission to completion.},
}
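The fuzzy-logic decision at the core of such a scheduler can be sketched with triangular membership functions and a two-rule base; the membership shapes, rule set, and defuzzification below are illustrative assumptions, not the paper's design.

    # Sketch: tiny fuzzy-logic offloading decision. Membership shapes and
    # the two-rule base are illustrative assumptions, not the paper's design.
    def tri(x, a, b, c):
        """Triangular membership function peaking at b on support [a, c]."""
        if x <= a or x >= c:
            return 0.0
        return (x - a) / (b - a) if x <= b else (c - x) / (c - b)

    def offload_score(latency_ms, cpu_util):
        """Degree (0..1) to which a task should be sent to the fog layer."""
        lat_high = tri(latency_ms, 50, 150, 250)
        cpu_high = tri(cpu_util, 0.5, 0.8, 1.1)
        lat_low = tri(latency_ms, -50, 0, 100)
        r_offload = min(lat_high, cpu_high)   # rule 1: fuzzy AND -> offload
        r_local = lat_low                     # rule 2: low latency -> local
        total = r_offload + r_local
        return 0.5 if total == 0 else r_offload / total

    print(offload_score(latency_ms=180, cpu_util=0.9))  # near 1 -> offload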
@article {pmid38339582,
year = {2024},
author = {Hassan, MU and Al-Awady, AA and Ali, A and Iqbal, MM and Akram, M and Jamil, H},
title = {Smart Resource Allocation in Mobile Cloud Next-Generation Network (NGN) Orchestration with Context-Aware Data and Machine Learning for the Cost Optimization of Microservice Applications.},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {3},
pages = {},
pmid = {38339582},
issn = {1424-8220},
support = {NU/IFC/2/SERC/-/47//Deputy for Research and Innovation Ministry of Education, Kingdom of Saudi Arabia/ ; },
abstract = {Mobile cloud computing (MCC) provides resources to users to handle smart mobile applications. In MCC, task scheduling is the solution for mobile users' context-aware, computation-resource-rich applications. Most existing approaches have achieved only a moderate service reliability rate due to a lack of instance-centric resource estimation and task offloading, which is a statistically NP-hard problem. Current intelligent scheduling processes, built on traditional task offloading approaches, cannot address such NP-hard problems. To address this problem, the authors design an efficient context-aware service offloading approach based on instance-centric measurements. The revised machine learning model employs task adaptation to make decisions regarding task offloading. The proposed MCVS scheduling algorithm predicts the usage rates of individual microservices for a practical task scheduling scheme, considering mobile device time, cost, network, location, and central processing unit (CPU) power to train the data. One notable feature of the microservice software architecture is its capacity to facilitate the scalability, flexibility, and independent deployment of individual components. A series of simulation results show the efficiency of the proposed technique based on offloading, CPU usage, and execution time metrics. The experimental results show the learning rate in training and testing in comparison with existing approaches, demonstrating efficient training and task offloading phases. The proposed system has lower costs and uses less energy to offload microservices in MCC. Graphical results are presented to illustrate the effectiveness of the proposed model. For a service arrival rate of 80%, the proposed model achieves an average 4.5% service offloading rate and 0.18% CPU usage rate compared with state-of-the-art approaches. The proposed method demonstrates efficiency in terms of cost and energy savings for microservice offloading in MCC.},
}
@article {pmid38339552,
year = {2024},
author = {Parracciani, C and Gigante, D and Bonini, F and Grassi, A and Morbidini, L and Pauselli, M and Valenti, B and Lilli, E and Antonielli, F and Vizzari, M},
title = {Leveraging Google Earth Engine for a More Effective Grassland Management: A Decision Support Application Perspective.},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {3},
pages = {},
pmid = {38339552},
issn = {1424-8220},
mesh = {Animals ; Humans ; *Ecosystem ; *Grassland ; Search Engine ; Biodiversity ; Agriculture ; Livestock ; },
abstract = {Grasslands cover a substantial portion of the earth's surface and agricultural land and are crucial for human well-being and livestock farming. Ranchers and grassland management authorities face challenges in effectively controlling herders' grazing behavior and grassland utilization due to underdeveloped infrastructure and poor communication in pastoral areas. Cloud-based grazing management and decision support systems (DSS) are needed to address this issue, promote sustainable grassland use, and preserve their ecosystem services. These systems should enable rapid and large-scale grassland growth and utilization monitoring, providing a basis for decision-making in managing grazing and grassland areas. In this context, this study contributes to the objectives of the EU LIFE IMAGINE project, aiming to develop a Web-GIS app for conserving and monitoring Umbria's grasslands and promoting more informed decisions for more sustainable livestock management. The app, called "Praterie" and developed in Google Earth Engine, utilizes historical Sentinel-2 satellite data and harmonic modeling of the EVI (Enhanced Vegetation Index) to estimate vegetation growth curves and maturity periods for the forthcoming vegetation cycle. The app is updated in quasi-real time and enables users to visualize estimates for the upcoming vegetation cycle, including the maximum greenness, the days remaining to the subsequent maturity period, the accuracy of the harmonic models, and the grassland greenness status in the previous 10 days. Even though future additional developments can improve the informative value of the Praterie app, this platform can contribute to optimizing livestock management and biodiversity conservation by providing timely and accurate data about grassland status and growth curves.},
}
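The harmonic modeling of EVI described above can be sketched outside Earth Engine as an ordinary least-squares fit of a first-order annual harmonic; the synthetic series below stands in for a real per-pixel Sentinel-2 EVI time series.

    # Sketch: fit EVI(t) ~ c0 + c1*cos(2*pi*t) + c2*sin(2*pi*t), t in years,
    # the first-order harmonic commonly used for seasonal vegetation curves.
    # The EVI series here is synthetic stand-in data.
    import numpy as np

    t = np.linspace(0, 2, 60)                  # two years of observations
    evi = (0.35 + 0.2 * np.cos(2 * np.pi * (t - 0.35))
           + np.random.default_rng(1).normal(0, 0.02, t.size))

    X = np.column_stack([np.ones_like(t),
                         np.cos(2 * np.pi * t),
                         np.sin(2 * np.pi * t)])
    (c0, c1, c2), *_ = np.linalg.lstsq(X, evi, rcond=None)

    amplitude = np.hypot(c1, c2)               # seasonal swing above the mean
    phase = np.arctan2(c2, c1)                 # peak occurs at t = phase/(2*pi)
    print(f"mean EVI {c0:.2f}, amplitude {amplitude:.2f}, "
          f"peak near day {365 * phase / (2 * np.pi):.0f}")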
@article {pmid38339545,
year = {2024},
author = {Gragnaniello, M and Borghese, A and Marrazzo, VR and Maresca, L and Breglio, G and Irace, A and Riccio, M},
title = {Real-Time Myocardial Infarction Detection Approaches with a Microcontroller-Based Edge-AI Device.},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {3},
pages = {},
pmid = {38339545},
issn = {1424-8220},
support = {PNC0000007//Italian Ministry for Universities and Research (MUR)/ ; },
mesh = {Humans ; *Myocardial Infarction/diagnosis ; Heart ; *Heart Diseases ; Myocardium ; Algorithms ; },
abstract = {Myocardial Infarction (MI), commonly known as heart attack, is a cardiac condition characterized by damage to a portion of the heart, specifically the myocardium, due to the disruption of blood flow. Given its recurring and often asymptomatic nature, there is the need for continuous monitoring using wearable devices. This paper proposes a single-microcontroller-based system designed for the automatic detection of MI based on the Edge Computing paradigm. Two solutions for MI detection are evaluated, based on Machine Learning (ML) and Deep Learning (DL) techniques. The developed algorithms are based on two different approaches currently available in the literature, and they are optimized for deployment on low-resource hardware. A feasibility assessment of their implementation on a single 32-bit microcontroller with an ARM Cortex-M4 core was examined, and a comparison in terms of accuracy, inference time, and memory usage was detailed. For ML techniques, significant data processing for feature extraction, coupled with a simpler Neural Network (NN) is involved. On the other hand, the second method, based on DL, employs a Spectrogram Analysis for feature extraction and a Convolutional Neural Network (CNN) with a longer inference time and higher memory utilization. Both methods employ the same low power hardware reaching an accuracy of 89.40% and 94.76%, respectively. The final prototype is an energy-efficient system capable of real-time detection of MI without the need to connect to remote servers or the cloud. All processing is performed at the edge, enabling NN inference on the same microcontroller.},
}
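The DL variant described above (spectrogram feature extraction feeding a convolutional network) can be sketched as follows; the sampling rate, window parameters, layer sizes, and two-class output are assumptions sized loosely for a microcontroller budget, not the authors' architecture.

    # Sketch: spectrogram of one ECG window feeding a tiny CNN (MI vs.
    # normal). Sampling rate, window sizes, and layers are illustrative.
    import numpy as np
    import tensorflow as tf
    from scipy.signal import spectrogram

    FS = 250                                        # assumed sampling rate (Hz)
    ecg = np.random.default_rng(0).normal(size=FS * 4)  # stand-in 4 s window

    _, _, sxx = spectrogram(ecg, fs=FS, nperseg=64, noverlap=32)
    x = np.log1p(sxx)[np.newaxis, ..., np.newaxis]      # (1, freq, time, 1)

    model = tf.keras.Sequential([
        tf.keras.layers.Conv2D(8, 3, activation="relu", input_shape=x.shape[1:]),
        tf.keras.layers.MaxPooling2D(),
        tf.keras.layers.Conv2D(16, 3, activation="relu"),
        tf.keras.layers.GlobalAveragePooling2D(),
        tf.keras.layers.Dense(2, activation="softmax"),  # MI / normal
    ])
    model.compile(optimizer="adam", loss="sparse_categorical_crossentropy")
    print(model.predict(x, verbose=0))              # untrained; shapes only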
@article {pmid38332408,
year = {2024},
author = {Huang, Z and Herbozo Contreras, LF and Yu, L and Truong, ND and Nikpour, A and Kavehei, O},
title = {S4D-ECG: A Shallow State-of-the-Art Model for Cardiac Abnormality Classification.},
journal = {Cardiovascular engineering and technology},
volume = {},
number = {},
pages = {},
pmid = {38332408},
issn = {1869-4098},
abstract = {PURPOSE: This study introduces an algorithm specifically designed for processing unprocessed 12-lead electrocardiogram (ECG) data, with the primary aim of detecting cardiac abnormalities.
METHODS: The proposed model integrates the Diagonal State Space Sequence (S4D) model into its architecture, leveraging its effectiveness in capturing dynamics within time-series data. The S4D model is designed with stacked S4D layers for processing raw input data and a simplified decoder using a dense layer for predicting abnormality types. Experimental optimization determines the optimal number of S4D layers, striking a balance between computational efficiency and predictive performance. This comprehensive approach ensures the model's suitability for real-time processing on hardware devices with limited capabilities, offering a streamlined yet effective solution for heart monitoring.
RESULTS: Among the notable features of this algorithm is its strong resilience to noise, enabling the algorithm to achieve an average F1-score of 81.2% and an AUROC of 95.5% in generalization. The model underwent testing specifically on the lead II ECG signal, exhibiting consistent performance with an F1-score of 79.5% and an AUROC of 95.7%.
CONCLUSION: The algorithm is characterized by the elimination of pre-processing features and a low-complexity architecture that makes it easily implementable on numerous computing devices. Consequently, it exhibits considerable potential for practical applications in analyzing real-world ECG data. This model can be placed on the cloud for diagnosis. The model was also tested on lead II of the ECG alone and demonstrated promising results, supporting its potential for on-device application.},
}
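The diagonal state-space mechanism underlying S4D can be sketched as a discretized linear recurrence with a diagonal complex state matrix; the initialization and sizes below are illustrative, and the sketch omits the parameterization and training machinery of the actual model.

    # Sketch: discretized diagonal state-space recurrence,
    #   x[k+1] = A_bar * x[k] + B_bar * u[k],  y[k] = Re(C . x[k]),
    # the diagonal structure S4D exploits. Initialization is illustrative.
    import numpy as np

    N, T, dt = 8, 100, 0.01
    rng = np.random.default_rng(0)
    A = -0.5 + 1j * np.pi * np.arange(N)       # diagonal continuous-time poles
    B = np.ones(N, dtype=complex)
    C = rng.normal(size=N) + 1j * rng.normal(size=N)

    A_bar = np.exp(A * dt)                     # zero-order-hold, elementwise
    B_bar = (A_bar - 1.0) / A * B              # valid because A is diagonal

    u = rng.normal(size=T)                     # input sequence (e.g. ECG samples)
    x = np.zeros(N, dtype=complex)
    y = np.empty(T)
    for k in range(T):
        x = A_bar * x + B_bar * u[k]
        y[k] = (C @ x).real
    print(y[:5])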
@article {pmid38327871,
year = {2024},
author = {Schönherr, S and Schachtl-Riess, JF and Di Maio, S and Filosi, M and Mark, M and Lamina, C and Fuchsberger, C and Kronenberg, F and Forer, L},
title = {Performing highly parallelized and reproducible GWAS analysis on biobank-scale data.},
journal = {NAR genomics and bioinformatics},
volume = {6},
number = {1},
pages = {lqae015},
pmid = {38327871},
issn = {2631-9268},
abstract = {Genome-wide association studies (GWAS) are transforming genetic research and enable the detection of novel genotype-phenotype relationships. In the last two decades, over 60 000 genetic associations across thousands of traits have been discovered using a GWAS approach. Due to increasing sample sizes, researchers are increasingly faced with computational challenges. A reproducible, modular and extensible pipeline with a focus on parallelization is essential to simplify data analysis and to allow researchers to devote their time to other essential tasks. Here we present nf-gwas, a Nextflow pipeline to run biobank-scale GWAS analysis. The pipeline automatically performs numerous pre- and post-processing steps, integrates regression modeling from the REGENIE package and supports single-variant, gene-based and interaction testing. It includes an extensive reporting functionality that allows users to inspect thousands of phenotypes and to navigate interactive Manhattan plots directly in the web browser. The pipeline is tested using the unit-style testing framework nf-test, a crucial requirement in clinical and pharmaceutical settings. Furthermore, we validated the pipeline against published GWAS datasets and benchmarked the pipeline on high-performance computing and cloud infrastructures to provide cost estimations to end users. nf-gwas is a highly parallelized, scalable and well-tested Nextflow pipeline to perform GWAS analysis in a reproducible manner.},
}
@article {pmid38324613,
year = {2024},
author = {Swetnam, TL and Antin, PB and Bartelme, R and Bucksch, A and Camhy, D and Chism, G and Choi, I and Cooksey, AM and Cosi, M and Cowen, C and Culshaw-Maurer, M and Davey, R and Davey, S and Devisetty, U and Edgin, T and Edmonds, A and Fedorov, D and Frady, J and Fonner, J and Gillan, JK and Hossain, I and Joyce, B and Lang, K and Lee, T and Littin, S and McEwen, I and Merchant, N and Micklos, D and Nelson, A and Ramsey, A and Roberts, S and Sarando, P and Skidmore, E and Song, J and Sprinkle, MM and Srinivasan, S and Stanzione, D and Strootman, JD and Stryeck, S and Tuteja, R and Vaughn, M and Wali, M and Wall, M and Walls, R and Wang, L and Wickizer, T and Williams, J and Wregglesworth, J and Lyons, E},
title = {CyVerse: Cyberinfrastructure for open science.},
journal = {PLoS computational biology},
volume = {20},
number = {2},
pages = {e1011270},
pmid = {38324613},
issn = {1553-7358},
mesh = {Humans ; *Artificial Intelligence ; *Software ; Cloud Computing ; Publishing ; },
abstract = {CyVerse, the largest publicly-funded open-source research cyberinfrastructure for life sciences, has played a crucial role in advancing data-driven research since the 2010s. As the technology landscape evolved with the emergence of cloud computing platforms, machine learning and artificial intelligence (AI) applications, CyVerse has enabled access by providing interfaces, Software as a Service (SaaS), and cloud-native Infrastructure as Code (IaC) to leverage new technologies. CyVerse services enable researchers to integrate institutional and private computational resources, custom software, perform analyses, and publish data in accordance with open science principles. Over the past 13 years, CyVerse has registered more than 124,000 verified accounts from 160 countries and was used for over 1,600 peer-reviewed publications. Since 2011, 45,000 students and researchers have been trained to use CyVerse. The platform has been replicated and deployed in three countries outside the US, with additional private deployments on commercial clouds for US government agencies and multinational corporations. In this manuscript, we present a strategic blueprint for creating and managing SaaS cyberinfrastructure and IaC as free and open-source software.},
}
@article {pmid38323147,
year = {2024},
author = {Lewis, EC and Zhu, S and Oladimeji, AT and Igusa, T and Martin, NM and Poirier, L and Trujillo, AJ and Reznar, MM and Gittelsohn, J},
title = {Design of an innovative digital application to facilitate access to healthy foods in low-income urban settings.},
journal = {mHealth},
volume = {10},
number = {},
pages = {2},
pmid = {38323147},
issn = {2306-9740},
support = {R34 HL145368/HL/NHLBI NIH HHS/United States ; T32 DK062707/DK/NIDDK NIH HHS/United States ; },
abstract = {BACKGROUND: Under-resourced urban minority communities in the United States are characterized by food environments with low access to healthy foods, high food insecurity, and high rates of diet-related chronic disease. In Baltimore, Maryland, low access to healthy food largely results from a distribution gap between small food sources (retailers) and their suppliers. Digital interventions have the potential to address this gap, while keeping costs low.
METHODS: In this paper, we describe the technical (I) front-end design and (II) back-end development process of the Baltimore Urban food Distribution (BUD) application (app). We identify and detail four main phases of the process: (I) information architecture; (II) low and high-fidelity wireframes; (III) prototype; and (IV) back-end components, while considering formative research and a pre-pilot test of a preliminary version of the BUD app.
RESULTS: Our lessons learned provide valuable insight into developing a stable app with a user-friendly experience and interface, and accessible cloud computing services for advanced technical features.
CONCLUSIONS: Next steps will involve a pilot trial of the app in Baltimore, and eventually, other urban and rural settings nationwide. Once iterative feedback is incorporated into the app, all code will be made publicly available via an open source repository to encourage adaptation for desired communities.
TRIAL REGISTRATION: ClinicalTrials.gov NCT05010018.},
}
@article {pmid38321247,
year = {2024},
author = {Pacios, D and Vázquez-Poletti, JL and Dhuri, DB and Atri, D and Moreno-Vozmediano, R and Lillis, RJ and Schetakis, N and Gómez-Sanz, J and Iorio, AD and Vázquez, L},
title = {A serverless computing architecture for Martian aurora detection with the Emirates Mars Mission.},
journal = {Scientific reports},
volume = {14},
number = {1},
pages = {3029},
pmid = {38321247},
issn = {2045-2322},
support = {101007638//Horizon 2020 Framework Programme/ ; G1502//New York University Abu Dhabi/ ; S1560//Advanced Technology Research Council/ ; },
abstract = {Remote sensing technologies are experiencing a surge in adoption for monitoring Earth's environment, demanding more efficient and scalable methods for image analysis. This paper presents a new approach for the Emirates Mars Mission (Hope probe): a serverless computing architecture designed to analyze images of Martian auroras, a key aspect in understanding the Martian atmosphere. Harnessing the power of OpenCV and machine learning algorithms, our architecture offers image classification, object detection, and segmentation in a swift and cost-effective manner. Leveraging the scalability and elasticity of cloud computing, this innovative system is capable of managing high volumes of image data, adapting to fluctuating workloads. This technology, applied to the study of Martian auroras within the HOPE Mission, not only solves a complex problem but also paves the way for future applications in the broad field of remote sensing.},
}
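The image-analysis entry point of such a serverless architecture can be sketched as a cloud-function-style handler; here a simple brightness threshold stands in for the trained classification and segmentation models, and the handler signature and cutoff are assumptions.

    # Sketch: cloud-function-style handler that flags candidate bright
    # emission by thresholding; the cutoff and handler signature are
    # assumptions, and trained models would replace the threshold in practice.
    import cv2
    import numpy as np

    BRIGHTNESS_THRESHOLD = 200                 # assumed 8-bit brightness cutoff

    def handler(event, context=None):
        """Cloud-function-style entry point: image bytes in, JSON out."""
        img = cv2.imdecode(np.frombuffer(event["image_bytes"], np.uint8),
                           cv2.IMREAD_GRAYSCALE)
        _, mask = cv2.threshold(img, BRIGHTNESS_THRESHOLD, 255, cv2.THRESH_BINARY)
        bright_fraction = cv2.countNonZero(mask) / mask.size
        return {"aurora_candidate": bool(bright_fraction > 0.01),
                "bright_fraction": round(float(bright_fraction), 4)}

    # Local usage with a synthetic frame:
    frame = np.zeros((128, 128), np.uint8)
    frame[40:60, 40:90] = 230
    ok, buf = cv2.imencode(".png", frame)
    print(handler({"image_bytes": buf.tobytes()}))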
@article {pmid38315519,
year = {2024},
author = {Xu, J},
title = {The Current Status and Promotional Strategies for Cloud Migration of Hospital Information Systems in China: Strengths, Weaknesses, Opportunities, and Threats Analysis.},
journal = {JMIR medical informatics},
volume = {12},
number = {},
pages = {e52080},
pmid = {38315519},
issn = {2291-9694},
abstract = {BACKGROUND: In the 21st century, Chinese hospitals have witnessed innovative medical business models, such as online diagnosis and treatment, cross-regional multidepartment consultation, and real-time sharing of medical test results, that surpass traditional hospital information systems (HISs). The introduction of cloud computing provides an excellent opportunity for hospitals to address these challenges. However, there is currently no comprehensive research assessing the cloud migration of HISs in China. This lack may hinder the widespread adoption and secure implementation of cloud computing in hospitals.
OBJECTIVE: The objective of this study is to comprehensively assess external and internal factors influencing the cloud migration of HISs in China and propose promotional strategies.
METHODS: Academic articles from January 1, 2007, to February 21, 2023, on the topic were searched in PubMed and HuiyiMd databases, and relevant documents such as national policy documents, white papers, and survey reports were collected from authoritative sources for analysis. A systematic assessment of factors influencing cloud migration of HISs in China was conducted by combining a Strengths, Weaknesses, Opportunities, and Threats (SWOT) analysis and literature review methods. Then, various promotional strategies based on different combinations of external and internal factors were proposed.
RESULTS: After conducting a thorough search and review, this study included 94 academic articles and 37 relevant documents. The analysis of these documents reveals the increasing application of and research on cloud computing in Chinese hospitals, and that it has expanded to 22 disciplinary domains. However, more than half (n=49, 52%) of the documents primarily focused on task-specific cloud-based systems in hospitals, while only 22% (n=21 articles) discussed integrated cloud platforms shared across the entire hospital, medical alliance, or region. The SWOT analysis showed that cloud computing adoption in Chinese hospitals benefits from policy support, capital investment, and social demand for new technology. However, it also faces threats like loss of digital sovereignty, supplier competition, cyber risks, and insufficient supervision. Factors driving cloud migration for HISs include medical big data analytics and use, interdisciplinary collaboration, health-centered medical service provision, and successful cases. Barriers include system complexity, security threats, lack of strategic planning and resource allocation, relevant personnel shortages, and inadequate investment. This study proposes 4 promotional strategies: encouraging more hospitals to migrate, enhancing hospitals' capabilities for migration, establishing a provincial-level unified medical hybrid multi-cloud platform, and strengthening legal frameworks while providing robust technical support.
CONCLUSIONS: Cloud computing is an innovative technology that has gained significant attention from both the Chinese government and the global community. In order to effectively support the rapid growth of a novel, health-centered medical industry, it is imperative for Chinese health authorities and hospitals to seize this opportunity by implementing comprehensive strategies aimed at encouraging hospitals to migrate their HISs to the cloud.},
}
@article {pmid38312948,
year = {2024},
author = {Ssekagiri, A and Jjingo, D and Bbosa, N and Bugembe, DL and Kateete, DP and Jordan, IK and Kaleebu, P and Ssemwanga, D},
title = {HIVseqDB: a portable resource for NGS and sample metadata integration for HIV-1 drug resistance analysis.},
journal = {Bioinformatics advances},
volume = {4},
number = {1},
pages = {vbae008},
pmid = {38312948},
issn = {2635-0041},
support = {MC_UU_00027/5/MRC_/Medical Research Council/United Kingdom ; },
abstract = {SUMMARY: Human immunodeficiency virus (HIV) remains a public health threat, with drug resistance being a major concern in HIV treatment. Next-generation sequencing (NGS) is a powerful tool for identifying low-abundance drug resistance mutations (LA-DRMs) that conventional Sanger sequencing cannot reliably detect. To fully understand the significance of LA-DRMs, it is necessary to integrate NGS data with clinical and demographic data. However, freely available tools for NGS-based HIV-1 drug resistance analysis do not integrate these data. This poses a challenge for interpreting the impact of LA-DRMs, mainly in resource-limited settings, due to the shortage of bioinformatics expertise. To address this challenge, we present HIVseqDB, a portable, secure, and user-friendly resource for integrating NGS data with associated clinical and demographic data for analysis of HIV drug resistance. HIVseqDB currently supports uploading of NGS data and associated sample data, HIV-1 drug resistance data analysis, browsing of uploaded data, and browsing and visualization of analysis results. Each function of HIVseqDB corresponds to an individual Django application. This ensures efficient incorporation of additional features with minimal effort. HIVseqDB can be deployed on various computing environments, such as on-premises high-performance computing facilities and cloud-based platforms.
HIVseqDB is available at https://github.com/AlfredUg/HIVseqDB. A deployed instance of HIVseqDB is available at https://hivseqdb.org.},
}
@article {pmid38308984,
year = {2024},
author = {Lan, L and Wang, YG and Chen, HS and Gao, XR and Wang, XK and Yan, XF},
title = {Improving on mapping long-term surface water with a novel framework based on the Landsat imagery series.},
journal = {Journal of environmental management},
volume = {353},
number = {},
pages = {120202},
doi = {10.1016/j.jenvman.2024.120202},
pmid = {38308984},
issn = {1095-8630},
mesh = {*Water ; *Environmental Monitoring/methods ; Satellite Imagery ; Environment ; Algorithms ; },
abstract = {Surface water plays a crucial role in the ecological environment and societal development. Remote sensing detection serves as a significant approach to understand the temporal and spatial change in surface water series (SWS) and to directly construct long-term SWS. Limited by various factors such as cloud, cloud shadow, and problematic satellite sensor monitoring, existing surface water mapping datasets might be short and incomplete due to losing raw information on certain dates. Improved algorithms are desired to increase the completeness and quality of SWS datasets. The present study proposes an automated framework to detect SWS, based on the Google Earth Engine and Landsat satellite imagery. This framework incorporates a raw image filtering algorithm to increase the number of available images, thereby expanding the completeness. It improves OTSU thresholding by replacing anomalous thresholds with the median value, thus enhancing the accuracy of SWS datasets. Gaps caused by the Landsat 7 ETM+ SLC-off failure are repaired with the random forest algorithm and morphological operations. The results show that this novel framework effectively expands the long-term series of SWS for three surface water bodies with distinct geomorphological patterns. The evaluation of confusion matrices suggests good performance in extracting surface water, with overall accuracy ranging from 0.96 to 0.97, user's accuracy between 0.96 and 0.98, producer's accuracy ranging from 0.83 to 0.89, and the Matthews correlation coefficient ranging from 0.87 to 0.9 for several spectral water indices (NDWI, MNDWI, ANNDWI, and AWEI). Compared with the Global Reservoirs Surface Area Dynamics (GRSAD) dataset, our constructed datasets improve the completeness of SWS datasets by 27.01%-91.89% for the selected water bodies. The proposed framework for detecting SWS shows good potential for enlarging and completing long-term global-scale SWS datasets, capable of supporting assessments of surface-water-related environmental management and disaster prevention.},
}
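The thresholding refinement described above (replacing anomalous per-image OTSU thresholds with a median value) can be sketched on synthetic water-index data; the anomaly bounds and the two-mode NDWI distributions are illustrative assumptions.

    # Sketch: per-image Otsu thresholds on a water index, with anomalous
    # thresholds replaced by the series median. Bounds and the synthetic
    # two-mode NDWI data are illustrative.
    import numpy as np
    from skimage.filters import threshold_otsu

    rng = np.random.default_rng(2)
    images = [np.concatenate([rng.normal(-0.3, 0.1, 4000),            # dry land
                              rng.normal(0.4, 0.1, 1000 + 500 * i)])  # water
              for i in range(8)]

    thresholds = np.array([threshold_otsu(img) for img in images])
    median_t = np.median(thresholds)

    # Replace thresholds outside an assumed plausible range by the median.
    cleaned = np.where((thresholds < -0.1) | (thresholds > 0.3),
                       median_t, thresholds)
    water_masks = [img > t for img, t in zip(images, cleaned)]
    print(np.round(thresholds, 3), round(float(median_t), 3))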
@article {pmid38303478,
year = {2024},
author = {Lv, W and Chen, J and Cheng, S and Qiu, X and Li, D},
title = {QoS-driven resource allocation in fog radio access network: A VR service perspective.},
journal = {Mathematical biosciences and engineering : MBE},
volume = {21},
number = {1},
pages = {1573-1589},
doi = {10.3934/mbe.2024068},
pmid = {38303478},
issn = {1551-0018},
abstract = {While immersive media services represented by virtual reality (VR) are booming, they are facing fundamental challenges, i.e., soaring multimedia applications, large operation costs and scarce spectrum resources. It is difficult to simultaneously address these service challenges in a conventional radio access network (RAN) system. These problems motivated us to explore a quality-of-service (QoS)-driven resource allocation framework from the VR service perspective based on the fog radio access network (F-RAN) architecture. We elaborated details of deployment on the caching allocation, dynamic base station (BS) clustering, statistical beamforming and cost strategy under the QoS constraints in the F-RAN architecture. The key solutions aimed to break through the bottleneck of the network design and to deeply integrate the network-computing resources from the different perspectives of cloud, network, edge and terminal through collaboration and integration. Accordingly, we provided a tailored algorithm to solve the corresponding formulation problem. This is the first design of VR services based on caching and statistical beamforming under the F-RAN. A case study is provided to demonstrate the advantage of our proposed framework compared with existing schemes. Finally, we concluded the article and discussed possible open research problems.},
}
@article {pmid38303438,
year = {2024},
author = {Niu, Q and Li, H and Liu, Y and Qin, Z and Zhang, LB and Chen, J and Lyu, Z},
title = {Toward the Internet of Medical Things: Architecture, trends and challenges.},
journal = {Mathematical biosciences and engineering : MBE},
volume = {21},
number = {1},
pages = {650-678},
doi = {10.3934/mbe.2024028},
pmid = {38303438},
issn = {1551-0018},
mesh = {*Artificial Intelligence ; Big Data ; Cloud Computing ; Internet ; *Internet of Things ; },
abstract = {In recent years, the growing pervasiveness of wearable technology has created new opportunities for medical and emergency rescue operations to protect users' health and safety, such as cost-effective medical solutions, more convenient healthcare and quick hospital treatments, which make it easier for the Internet of Medical Things (IoMT) to evolve. The study first presents an overview of the IoMT before introducing the IoMT architecture. Later, it presents an overview of the core technologies of the IoMT, including cloud computing, big data, and artificial intelligence, and elucidates their utilization within the healthcare system. Further, several emerging challenges, such as cost-effectiveness, security, privacy, accuracy and power consumption, are discussed, and potential solutions for these challenges are also suggested.},
}
@article {pmid38301786,
year = {2024},
author = {Shrestha, N and Kolarik, NE and Brandt, JS},
title = {Mesic vegetation persistence: A new approach for monitoring spatial and temporal changes in water availability in dryland regions using cloud computing and the sentinel and Landsat constellations.},
journal = {The Science of the total environment},
volume = {917},
number = {},
pages = {170491},
doi = {10.1016/j.scitotenv.2024.170491},
pmid = {38301786},
issn = {1879-1026},
abstract = {Climate change and anthropogenic activity pose severe threats to water availability in drylands. A better understanding of water availability response to these threats could improve our ability to adapt and mitigate climate and anthropogenic effects. Here, we present a Mesic Vegetation Persistence (MVP) workflow that takes every usable image in the Sentinel (10-m) and Landsat (30-m) archives to generate a dense time-series of water availability that is continuously updated as new images become available in Google Earth Engine. MVP takes advantage of the fact that mesic vegetation can be used as a proxy for available water in drylands. Our MVP workflow combines a novel moisture-based index (the moisture change index, MCI) with a vegetation index (the Modified Chlorophyll Absorption Ratio Vegetation Index, MCARI2). MCI is the difference in soil moisture condition between an individual pixel's state and the dry and wet reference reflectance in the image, derived using the 5th and 95th percentiles of the visible and shortwave infra-red drought index (VSDI). We produced and validated our MVP products across drylands of the western U.S., covering a broad range of elevation, land use, and ecoregions. MVP outperforms NDVI, a commonly-employed index for mesic ecosystem health, in both rangeland and forested ecosystems, and in mesic habitats with particularly high and low vegetation cover. We applied our MVP product at case study sites and found that MVP more accurately characterizes differences in mesic persistence, late-season water availability, and restoration success compared to NDVI. MVP could be applied as an indicator of change in a variety of contexts to provide a greater understanding of how water availability changes as a result of climate and management. Our MVP product for the western U.S. is freely available within a Google Earth Engine Web App, and the MVP workflow is replicable for other dryland regions.},
}
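One plausible reading of the MCI construction described above can be sketched with NumPy. The VSDI formula follows its commonly published form from blue, red, and shortwave infra-red reflectance, and the normalized-difference form of the MCI is an assumption rather than the authors' exact computation.

    # Sketch: VSDI per pixel, dry/wet references from its 5th and 95th
    # percentiles, and a normalized MCI. The VSDI formula follows its common
    # published form; the MCI normalization is an assumed reading.
    import numpy as np

    rng = np.random.default_rng(3)
    blue, red, swir = (rng.uniform(0.02, 0.4, (100, 100)) for _ in range(3))

    vsdi = 1.0 - ((swir - blue) + (red - blue))

    dry_ref = np.percentile(vsdi, 5)     # driest state in this image
    wet_ref = np.percentile(vsdi, 95)    # wettest state in this image

    # 0 ~ at the dry reference, 1 ~ at the wet reference.
    mci = (vsdi - dry_ref) / (wet_ref - dry_ref)
    print(float(mci.min()), float(mci.max()))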
@article {pmid38293581,
year = {2024},
author = {Zurqani, HA},
title = {The first generation of a regional-scale 1-m forest canopy cover dataset using machine learning and google earth engine cloud computing platform: A case study of Arkansas, USA.},
journal = {Data in brief},
volume = {52},
number = {},
pages = {109986},
pmid = {38293581},
issn = {2352-3409},
abstract = {Forest canopy cover (FCC) is essential in forest assessment and management, affecting ecosystem services such as carbon sequestration, wildlife habitat, and water regulation. Ongoing advancements in techniques for accurately and efficiently mapping and extracting FCC information require a thorough evaluation of their validity and reliability. The primary objectives of this study are to: (1) create a large-scale FCC dataset with a 1-meter spatial resolution, (2) assess the spatial distribution of FCC at a regional scale, and (3) investigate differences in FCC areas among the Global Forest Change (Hansen et al., 2013) and U.S. Forest Service Tree Canopy Cover products at various spatial scales in Arkansas (i.e., county and city levels). This study utilized high-resolution aerial imagery and a machine learning algorithm processed and analyzed using the Google Earth Engine cloud computing platform to produce the FCC dataset. The accuracy of this dataset was validated using one-third of the reference locations obtained from the Global Forest Change (Hansen et al., 2013) dataset and the National Agriculture Imagery Program (NAIP) aerial imagery with a 0.6-m spatial resolution. The results showed that the dataset successfully identified FCC at a 1-m resolution in the study area, with overall accuracy ranging between 83.31% and 94.35% per county. Spatial comparison results between the produced FCC dataset and the Hansen et al. (2013) and USFS products indicated a strong positive correlation, with R² values ranging between 0.94 and 0.98 for county and city levels. This dataset provides valuable information for monitoring, forecasting, and managing forest resources in Arkansas and beyond. The methodology followed in this study enhances efficiency, cost-effectiveness, and scalability, as it enables the processing of large-scale datasets with high computational demands in a cloud-based environment. It also demonstrates that machine learning and cloud computing technologies can generate high-resolution forest cover datasets, which might be helpful in other regions of the world.},
}
@article {pmid38292471,
year = {2024},
author = {Li, W and Zhang, Z and Xie, B and He, Y and He, K and Qiu, H and Lu, Z and Jiang, C and Pan, X and He, Y and Hu, W and Liu, W and Que, T and Hu, Y},
title = {HiOmics: A cloud-based one-stop platform for the comprehensive analysis of large-scale omics data.},
journal = {Computational and structural biotechnology journal},
volume = {23},
number = {},
pages = {659-668},
pmid = {38292471},
issn = {2001-0370},
abstract = {Comprehensively analyzing the vast amount of omics data generated by high-throughput sequencing technology is of utmost importance for scientists. In this context, we propose HiOmics, a cloud-based platform equipped with nearly 300 plugins designed for the comprehensive analysis and visualization of omics data. HiOmics utilizes the Element Plus framework to craft a user-friendly interface and harnesses Docker container technology to ensure the reliability and reproducibility of data analysis results. Furthermore, HiOmics employs the Workflow Description Language and the Cromwell engine to construct workflows, ensuring the portability of data analysis and simplifying the examination of intricate data. Additionally, the HiOmics team developed DataCheck, a Golang-based tool that verifies and converts data formats. Finally, by leveraging the object storage technology and batch computing capabilities of public cloud platforms, HiOmics enables the storage and processing of large-scale data while maintaining resource independence among users.},
}
@article {pmid38289970,
year = {2024},
author = {Abbasi, IA and Jan, SU and Alqahtani, AS and Khan, AS and Algarni, F},
title = {A lightweight and robust authentication scheme for the healthcare system using public cloud server.},
journal = {PloS one},
volume = {19},
number = {1},
pages = {e0294429},
pmid = {38289970},
issn = {1932-6203},
mesh = {Humans ; *Confidentiality ; *Telemedicine ; Computer Security ; Delivery of Health Care ; Privacy ; },
abstract = {Cloud computing is vital in various applications, such as healthcare, transportation, governance, and mobile computing. When using a public cloud server, it is mandatory to be secured against all known threats, because even a minor disturbance by an attacker severely threatens the whole system. A public cloud server faces numerous threats; an adversary can easily enter the server to access sensitive information, especially in the healthcare industry, which offers services to patients, researchers, labs, and hospitals in a flexible way with minimal operational costs. It is challenging to make such a system reliable and to ensure the privacy and security of a cloud-enabled healthcare system. In this regard, numerous security mechanisms have been proposed in past decades. These protocols either suffer from replay attacks, require three to four round trips to complete, or impose heavy computation, which means that security is not balanced with performance. Thus, this work uses a fuzzy extractor method to propose a robust security method for a cloud-enabled healthcare system based on Elliptic Curve Cryptography (ECC). The proposed scheme's security has been examined formally with BAN logic, ROM and ProVerif, and informally using pragmatic illustration and discussion of different attacks. The proposed security mechanism is analyzed in terms of communication and computation costs. Upon comparing the proposed protocol with prior work, it has been demonstrated that our scheme is 33.91% better in communication costs and 35.39% superior to its competitors in computation costs.},
}
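The ECC primitive that such authentication schemes build on can be sketched with the cryptography package: an ECDH exchange on P-256 followed by key derivation. This shows the building block only; the paper's contribution (the fuzzy extractor and the full authentication protocol) is not reproduced here.

    # Sketch: ECDH on curve P-256 plus HKDF key derivation, the kind of ECC
    # building block such schemes use. The paper's fuzzy extractor and full
    # authentication protocol are not reproduced here.
    from cryptography.hazmat.primitives import hashes
    from cryptography.hazmat.primitives.asymmetric import ec
    from cryptography.hazmat.primitives.kdf.hkdf import HKDF

    server_key = ec.generate_private_key(ec.SECP256R1())
    device_key = ec.generate_private_key(ec.SECP256R1())

    # Each side combines its private key with the peer's public key.
    shared_1 = device_key.exchange(ec.ECDH(), server_key.public_key())
    shared_2 = server_key.exchange(ec.ECDH(), device_key.public_key())
    assert shared_1 == shared_2

    session_key = HKDF(algorithm=hashes.SHA256(), length=32,
                       salt=None, info=b"session").derive(shared_1)
    print(session_key.hex())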
@article {pmid38289917,
year = {2024},
author = {Sun, Y and Du, X and Niu, S and Zhou, S},
title = {A lightweight attribute-based signcryption scheme based on cloud-fog assisted in smart healthcare.},
journal = {PloS one},
volume = {19},
number = {1},
pages = {e0297002},
pmid = {38289917},
issn = {1932-6203},
mesh = {Humans ; *Computer Security ; *Algorithms ; Big Data ; Cloud Computing ; Delivery of Health Care ; },
abstract = {In the environment of big data of the Internet of Things, smart healthcare is developed in combination with cloud computing. However, with the generation of massive data in smart healthcare systems and the need for real-time data processing, traditional cloud computing is no longer suitable for resource-constrained devices in the Internet of Things. In order to address this issue, we combine the advantages of fog computing and propose a cloud-fog assisted attribute-based signcryption for smart healthcare. In the constructed "cloud-fog-terminal" three-layer model, before signcryption the patient (data owner) first offloads some of the heavy computational burden to fog nodes, and the doctor (data user) likewise outsources some complicated operations to fog nodes before unsigncryption by providing a blinded private key, which greatly reduces the calculation overhead of the resource-constrained devices of patient and doctor and improves calculation efficiency. Thus it implements a lightweight signcryption algorithm. Security analysis confirms that the proposed scheme achieves indistinguishability under chosen ciphertext attack and existential unforgeability under chosen message attack if the computational bilinear Diffie-Hellman problem and the decisional bilinear Diffie-Hellman problem hold. Furthermore, performance analysis demonstrates that our new scheme has less computational overhead for both doctors and patients, so it offers higher computational efficiency and is well-suited for application scenarios of smart healthcare.},
}
@article {pmid38283301,
year = {2024},
author = {Amjad, S and Akhtar, A and Ali, M and Afzal, A and Shafiq, B and Vaidya, J and Shamail, S and Rana, O},
title = {Orchestration and Management of Adaptive IoT-centric Distributed Applications.},
journal = {IEEE internet of things journal},
volume = {11},
number = {3},
pages = {3779-3791},
pmid = {38283301},
issn = {2327-4662},
support = {R35 GM134927/GM/NIGMS NIH HHS/United States ; },
abstract = {Current Internet of Things (IoT) devices provide a diverse range of functionalities, ranging from measurement and dissemination of sensory data observation, to computation services for real-time data stream processing. In extreme situations such as emergencies, a significant benefit of IoT devices is that they can help gain a more complete situational understanding of the environment. However, this requires the ability to utilize IoT resources while taking into account location, battery life, and other constraints of the underlying edge and IoT devices. A dynamic approach is proposed for orchestration and management of distributed workflow applications using services available in cloud data centers, deployed on servers, or IoT devices at the network edge. Our proposed approach is specifically designed for knowledge-driven business process workflows that are adaptive, interactive, evolvable and emergent. A comprehensive empirical evaluation shows that the proposed approach is effective and resilient to situational changes.},
}
@article {pmid38273718,
year = {2024},
author = {Wu, Y and Sanati, O and Uchimiya, M and Krishnamurthy, K and Wedell, J and Hoch, JC and Edison, AS and Delaglio, F},
title = {SAND: Automated Time-Domain Modeling of NMR Spectra Applied to Metabolite Quantification.},
journal = {Analytical chemistry},
volume = {96},
number = {5},
pages = {1843-1851},
pmid = {38273718},
issn = {1520-6882},
support = {P41 GM111135/GM/NIGMS NIH HHS/United States ; },
mesh = {*Algorithms ; Magnetic Resonance Spectroscopy ; *Magnetic Resonance Imaging ; Software ; Metabolomics ; },
abstract = {Developments in untargeted nuclear magnetic resonance (NMR) metabolomics enable the profiling of thousands of biological samples. The exploitation of this rich source of information requires a detailed quantification of spectral features. However, the development of a consistent and automatic workflow has been challenging because of extensive signal overlap. To address this challenge, we introduce the software Spectral Automated NMR Decomposition (SAND). SAND follows on from the previous success of time-domain modeling and automatically quantifies entire spectra without manual interaction. The SAND approach uses hybrid optimization with Markov chain Monte Carlo methods, employing subsampling in both time and frequency domains. In particular, SAND randomly divides the time-domain data into training and validation sets to help avoid overfitting. We demonstrate the accuracy of SAND, which provides a correlation of ∼0.9 with ground truth on cases including highly overlapped simulated data sets, a two-compound mixture, and a urine sample spiked with different amounts of a four-compound mixture. We further demonstrate an automated annotation using correlation networks derived from SAND decomposed peaks, and on average, 74% of peaks for each compound can be recovered in single clusters. SAND is available in NMRbox, the cloud computing environment for NMR software hosted by the Network for Advanced NMR (NAN). Since the SAND method uses time-domain subsampling (i.e., random subset of time-domain points), it has the potential to be extended to a higher dimensionality and nonuniformly sampled data.},
}
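As a toy illustration of the time-domain idea in the SAND entry above, the hedged sketch below fits a single decaying cosine (one "peak") to a synthetic FID by least squares, holding out a random subsample of time points for validation, loosely mirroring SAND's train/validation subsampling. The model, parameters, and split are invented for illustration; the actual SAND software uses hybrid MCMC optimization over entire spectra.

    # Hedged sketch: time-domain fit of one exponentially decaying cosine to
    # a synthetic FID, with a random hold-out of time points (loosely in the
    # spirit of SAND's subsampling; not the SAND code).
    import numpy as np
    from scipy.optimize import curve_fit

    rng = np.random.default_rng(0)
    t = np.linspace(0.0, 1.0, 2048)                 # acquisition times (s)
    a0, f0, r0, phi0 = 1.0, 120.0, 8.0, 0.3         # amplitude, Hz, decay, phase
    fid = a0 * np.exp(-r0 * t) * np.cos(2 * np.pi * f0 * t + phi0) \
          + rng.normal(0, 0.05, t.size)

    def model(t, a, f, r, phi):
        return a * np.exp(-r * t) * np.cos(2 * np.pi * f * t + phi)

    # Random train/validation split over time-domain points.
    idx = rng.permutation(t.size)
    train, val = idx[:1536], idx[1536:]
    popt, _ = curve_fit(model, t[train], fid[train], p0=[0.8, 118.0, 5.0, 0.0])
    rmse_val = np.sqrt(np.mean((model(t[val], *popt) - fid[val]) ** 2))
    print("fitted (a, f, r, phi):", np.round(popt, 3), "val RMSE:", round(rmse_val, 4))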
@article {pmid38270978,
year = {2024},
author = {Dral, PO and Ge, F and Hou, YF and Zheng, P and Chen, Y and Barbatti, M and Isayev, O and Wang, C and Xue, BX and Pinheiro, M and Su, Y and Dai, Y and Chen, Y and Zhang, L and Zhang, S and Ullah, A and Zhang, Q and Ou, Y},
title = {MLatom 3: A Platform for Machine Learning-Enhanced Computational Chemistry Simulations and Workflows.},
journal = {Journal of chemical theory and computation},
volume = {20},
number = {3},
pages = {1193-1213},
pmid = {38270978},
issn = {1549-9626},
abstract = {Machine learning (ML) is increasingly becoming a common tool in computational chemistry. At the same time, the rapid development of ML methods requires a flexible software framework for designing custom workflows. MLatom 3 is a program package designed to leverage the power of ML to enhance typical computational chemistry simulations and to create complex workflows. This open-source package provides plenty of choices to users, who can run simulations via command-line options, input files, or scripts using MLatom as a Python package, both on their own computers and on the online XACS cloud computing service at XACScloud.com. Computational chemists can calculate energies and thermochemical properties, optimize geometries, run molecular and quantum dynamics, and simulate (ro)vibrational, one-photon UV/vis absorption, and two-photon absorption spectra with ML, quantum mechanical, and combined models. Users can choose from an extensive library of methods containing pretrained ML models and quantum mechanical approximations such as AIQM1, which approaches coupled-cluster accuracy. Developers can build their own models using various ML algorithms. The great flexibility of MLatom is largely due to the extensive use of interfaces to many state-of-the-art software packages and libraries.},
}
@article {pmid38269892,
year = {2024},
author = {Renato, A and Luna, D and Benítez, S},
title = {Development of an ASR System for Medical Conversations.},
journal = {Studies in health technology and informatics},
volume = {310},
number = {},
pages = {664-668},
doi = {10.3233/SHTI231048},
pmid = {38269892},
issn = {1879-8365},
mesh = {Humans ; *Communication ; Language ; Speech ; Acoustics ; *Physicians ; },
abstract = {In this work, we document the development of an ASR system for transcribing conversations between patient and doctor and point out the critical aspects of the domain. The system was trained on an acoustic corpus of spontaneous speech with a domain language model and a supervised phonetic dictionary. Its performance was compared with two systems: (a) NeMo end-to-end Conformers in Spanish and (b) the Google Cloud ASR (Automatic Speech Recognition) API. The evaluation was carried out on a set of 208 teleconsultations recorded during 2020. WER (Word Error Rate) was evaluated for the ASR output, and recall and F1 for recognized medical entities. In conclusion, the developed system performed best, reaching 72.5% accuracy in the teleconsultation domain and an F1 of 0.80 for entity recognition.},
}
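The WER metric reported in the ASR entry above is the word-level edit distance between hypothesis and reference, normalized by the reference length. A minimal sketch (standard dynamic programming; not the authors' evaluation code, and the example sentences are invented):

    # Minimal word error rate (WER): Levenshtein distance over word tokens,
    # divided by the number of reference words.
    def wer(reference: str, hypothesis: str) -> float:
        ref, hyp = reference.split(), hypothesis.split()
        # dp[i][j] = edits to turn the first i reference words
        # into the first j hypothesis words
        dp = [[0] * (len(hyp) + 1) for _ in range(len(ref) + 1)]
        for i in range(len(ref) + 1):
            dp[i][0] = i
        for j in range(len(hyp) + 1):
            dp[0][j] = j
        for i in range(1, len(ref) + 1):
            for j in range(1, len(hyp) + 1):
                sub = dp[i - 1][j - 1] + (ref[i - 1] != hyp[j - 1])
                dp[i][j] = min(sub, dp[i - 1][j] + 1, dp[i][j - 1] + 1)
        return dp[len(ref)][len(hyp)] / max(len(ref), 1)

    print(wer("el paciente presenta fiebre alta",
              "el paciente present fiebre"))   # 2 edits / 5 words = 0.4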
@article {pmid38257526,
year = {2024},
author = {Malik, AW and Bhatti, DS and Park, TJ and Ishtiaq, HU and Ryou, JC and Kim, KI},
title = {Cloud Digital Forensics: Beyond Tools, Techniques, and Challenges.},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {2},
pages = {},
pmid = {38257526},
issn = {1424-8220},
support = {RS-2022-00144000//National Research Foundation of Korea/ ; 2022-0-01200//Information & Communications Technology Planning & Evaluation/ ; },
abstract = {Cloud computing technology is rapidly becoming ubiquitous and indispensable. However, its widespread adoption also exposes organizations and individuals to a broad spectrum of potential threats. Despite the multiple advantages the cloud offers, organizations remain cautious about migrating their data and applications to the cloud due to fears of data breaches and security compromises. In light of these concerns, this study has conducted an in-depth examination of a variety of articles to enhance the comprehension of the challenges related to safeguarding and fortifying data within the cloud environment. Furthermore, the research has scrutinized several well-documented data breaches, analyzing the financial consequences they inflicted. It also examines the distinctions between conventional digital forensics and the forensic procedures specific to cloud computing. As a result of this investigation, the study concludes by proposing potential opportunities for further research in this critical domain. By doing so, it contributes to our collective understanding of the complex panorama of cloud data protection and security, while acknowledging the evolving nature of technology and the need for ongoing exploration and innovation in this field. This study also helps in understanding the compound annual growth rate (CAGR) of cloud digital forensics, which is found to be quite high at ≈16.53% from 2023 to 2031. Moreover, its market is expected to reach ≈USD 36.9 billion by 2031; presently, it is ≈USD 11.21 billion, which shows that there are great opportunities for investment in this area. Finally, the study strategically addresses emerging challenges in cloud digital forensics, providing a comprehensive approach to navigating and overcoming the complexities associated with the evolving landscape of cloud computing.},
}
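The market figures quoted in the entry above can be sanity-checked: growing ≈USD 11.21 billion at the stated CAGR over 2023-2031 should land near ≈USD 36.9 billion. A quick check (the 8-year horizon is an assumption about how the source counts years):

    # Sanity check of the quoted cloud-forensics market CAGR.
    start, end, years = 11.21, 36.9, 8          # USD billions, 2023 -> 2031
    implied_cagr = (end / start) ** (1 / years) - 1
    print(f"implied CAGR: {implied_cagr:.2%}")  # ~16.1%, close to the stated ~16.53%
    # The small residual presumably comes from rounding in the market estimates.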
@article {pmid38248999,
year = {2024},
author = {Molnár, T and Király, G},
title = {Forest Disturbance Monitoring Using Cloud-Based Sentinel-2 Satellite Imagery and Machine Learning.},
journal = {Journal of imaging},
volume = {10},
number = {1},
pages = {},
pmid = {38248999},
issn = {2313-433X},
support = {TKP2021-NKTA-43//Ministry of Innovation and Technology of Hungary/ ; },
abstract = {Forest damage has become more frequent in Hungary in the last decades, and remote sensing offers a powerful tool for monitoring it rapidly and cost-effectively. A combined approach was developed to utilise high-resolution ESA Sentinel-2 satellite imagery, Google Earth Engine cloud computing, and field-based forest inventory data. Maps and charts were derived from vegetation indices (NDVI and Z∙NDVI) of satellite images to detect forest disturbances in the Hungarian study site for the period of 2017-2020. The NDVI maps were classified to reveal forest disturbances, and the cloud-based method successfully showed drought and frost damage in the oak-dominated Nagyerdő forest of Debrecen. Differences in the reactions to damage between tree species were visible on the index maps; therefore, a random forest machine learning classifier was applied to show the spatial distribution of dominant species. An accuracy assessment was accomplished with confusion matrices that compared classified index maps to field-surveyed data, demonstrating 99.1% producer, 71% user, and 71% total accuracies for forest damage and 81.9% for tree species. Based on the results of this study and the resilience of Google Earth Engine, the presented method has the potential to be extended to monitor all of Hungary in a faster, more accurate way using systematically collected field data, the latest satellite imagery, and artificial intelligence.},
}
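For readers unfamiliar with the Google Earth Engine workflow used in the entry above, here is a hedged sketch of a standardized NDVI anomaly (a Z·NDVI-style index) from Sentinel-2. The dataset ID and band names are real Earth Engine identifiers; the geometry, dates, thresholds, and the exact anomaly definition are illustrative assumptions, not the authors' script.

    # Hedged Earth Engine sketch: seasonal Sentinel-2 NDVI composite and a
    # standardized anomaly against a multi-year baseline (illustrative; the
    # paper's exact Z*NDVI formulation may differ).
    import ee
    ee.Initialize()                                      # assumes prior ee.Authenticate()

    aoi = ee.Geometry.Point(21.63, 47.53).buffer(5000)   # assumed study area

    def add_ndvi(img):
        return img.addBands(img.normalizedDifference(["B8", "B4"]).rename("NDVI"))

    s2 = (ee.ImageCollection("COPERNICUS/S2_SR")
          .filterBounds(aoi)
          .filter(ee.Filter.lt("CLOUDY_PIXEL_PERCENTAGE", 20))
          .map(add_ndvi)
          .select("NDVI"))

    baseline = s2.filterDate("2017-01-01", "2019-12-31")
    mean, std = baseline.mean(), baseline.reduce(ee.Reducer.stdDev())
    target = s2.filterDate("2020-06-01", "2020-08-31").median()

    z_ndvi = target.subtract(mean).divide(std)           # standardized anomaly
    print(z_ndvi.reduceRegion(ee.Reducer.mean(), aoi, 10).getInfo())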
@article {pmid38248542,
year = {2024},
author = {Willingham, TB and Stowell, J and Collier, G and Backus, D},
title = {Leveraging Emerging Technologies to Expand Accessibility and Improve Precision in Rehabilitation and Exercise for People with Disabilities.},
journal = {International journal of environmental research and public health},
volume = {21},
number = {1},
pages = {},
pmid = {38248542},
issn = {1660-4601},
support = {90REGE0011/ACL/ACL HHS/United States ; },
mesh = {Humans ; Artificial Intelligence ; Quality of Life ; *Medicine ; Exercise ; *Disabled Persons ; },
abstract = {Physical rehabilitation and exercise training have emerged as promising solutions for improving health, restoring function, and preserving quality of life in populations that face disparate health challenges related to disability. Despite the immense potential for rehabilitation and exercise to help people with disabilities live longer, healthier, and more independent lives, people with disabilities can experience physical, psychosocial, environmental, and economic barriers that limit their ability to participate in rehabilitation, exercise, and other physical activities. Together, these barriers contribute to health inequities in people with disabilities, by disproportionately limiting their ability to participate in health-promoting physical activities, relative to people without disabilities. Therefore, there is great need for research and innovation focusing on the development of strategies to expand accessibility and promote participation in rehabilitation and exercise programs for people with disabilities. Here, we discuss how cutting-edge technologies related to telecommunications, wearables, virtual and augmented reality, artificial intelligence, and cloud computing are providing new opportunities to improve accessibility in rehabilitation and exercise for people with disabilities. In addition, we highlight new frontiers in digital health technology and emerging lines of scientific research that will shape the future of precision care strategies for people with disabilities.},
}
@article {pmid38248199,
year = {2024},
author = {Yan, Z and Lin, X and Zhang, X and Xu, J and Qu, H},
title = {Identity-Based Matchmaking Encryption with Equality Test.},
journal = {Entropy (Basel, Switzerland)},
volume = {26},
number = {1},
pages = {},
pmid = {38248199},
issn = {1099-4300},
abstract = {The identity-based encryption with equality test (IBEET) has become a hot research topic in cloud computing, as it provides an equality test for ciphertexts generated under different identities while preserving confidentiality. Subsequently, for the sake of the confidentiality and authenticity of the data, the identity-based signcryption with equality test (IBSC-ET) was put forward. Nevertheless, the existing schemes do not consider the anonymity of the sender and the receiver, which leads to the potential leakage of sensitive personal information. How to ensure confidentiality, authenticity, and anonymity in the IBEET setting remains a significant challenge. In this paper, we put forward the concept of the identity-based matchmaking encryption with equality test (IBME-ET) to address this issue. We formalized the system model, the definition, and the security models of the IBME-ET and then put forward a concrete scheme. Furthermore, our scheme was confirmed to be secure and practical by proving its security and evaluating its performance.},
}
@article {pmid38247937,
year = {2024},
author = {Kim, J and Jang, H and Koh, H},
title = {MiMultiCat: A Unified Cloud Platform for the Analysis of Microbiome Data with Multi-Categorical Responses.},
journal = {Bioengineering (Basel, Switzerland)},
volume = {11},
number = {1},
pages = {},
pmid = {38247937},
issn = {2306-5354},
support = {2021R1C1C1013861//National Research Foundation of Korea/ ; },
abstract = {The field of the human microbiome is rapidly growing due to the recent advances in high-throughput sequencing technologies. Meanwhile, there have also been many new analytic pipelines, methods and/or tools developed for microbiome data preprocessing and analytics. They are usually focused on microbiome data with continuous (e.g., body mass index) or binary responses (e.g., diseased vs. healthy), yet multi-categorical responses that have more than two categories are also common in reality. In this paper, we introduce a new unified cloud platform, named MiMultiCat, for the analysis of microbiome data with multi-categorical responses. The two main distinguishing features of MiMultiCat are as follows: First, MiMultiCat streamlines a long sequence of microbiome data preprocessing and analytic procedures on user-friendly web interfaces; as such, it is easy to use for many people in various disciplines (e.g., biology, medicine, public health). Second, MiMultiCat performs both association testing and prediction modeling extensively. For association testing, MiMultiCat handles both ecological (e.g., alpha and beta diversity) and taxonomical (e.g., phylum, class, order, family, genus, species) contexts through covariate-adjusted or unadjusted analysis. For prediction modeling, MiMultiCat employs the random forest and gradient boosting algorithms that are well suited to microbiome data while providing nice visual interpretations. We demonstrate its use through the reanalysis of gut microbiome data on obesity with body mass index categories. MiMultiCat is freely available on our web server.},
}
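The prediction-modeling half of the MiMultiCat entry above amounts to fitting tree ensembles to taxa abundance tables with a multi-categorical outcome. A hedged sketch on synthetic data follows (illustrative only; MiMultiCat is a web platform and this is not its code, and the abundance-generating process is invented):

    # Hedged sketch: random forest on a synthetic microbiome-like abundance
    # table with a 3-category response (e.g., BMI class). Not MiMultiCat code.
    import numpy as np
    from sklearn.ensemble import RandomForestClassifier
    from sklearn.model_selection import cross_val_score

    rng = np.random.default_rng(1)
    n_samples, n_taxa = 200, 50
    X = rng.dirichlet(np.ones(n_taxa), size=n_samples)   # relative abundances
    # Let the outcome depend weakly on two taxa, then cut into 3 classes.
    score = X[:, 0] * 5 - X[:, 1] * 3 + rng.normal(0, 0.05, n_samples)
    y = np.digitize(score, np.quantile(score, [1 / 3, 2 / 3]))  # classes 0, 1, 2

    clf = RandomForestClassifier(n_estimators=500, random_state=0)
    acc = cross_val_score(clf, X, y, cv=5, scoring="accuracy")
    print("5-fold accuracy: %.2f +/- %.2f" % (acc.mean(), acc.std()))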
@article {pmid38235187,
year = {2024},
author = {Xun, D and Wang, R and Zhang, X and Wang, Y},
title = {Microsnoop: A generalist tool for microscopy image representation.},
journal = {Innovation (Cambridge (Mass.))},
volume = {5},
number = {1},
pages = {100541},
pmid = {38235187},
issn = {2666-6758},
abstract = {Accurate profiling of microscopy images from small scale to high throughput is an essential procedure in basic and applied biological research. Here, we present Microsnoop, a novel deep learning-based representation tool trained on large-scale microscopy images using masked self-supervised learning. Microsnoop can process various complex and heterogeneous images, and we classified images into three categories: single-cell, full-field, and batch-experiment images. Our benchmark study on 10 high-quality evaluation datasets, containing over 2,230,000 images, demonstrated Microsnoop's robust and state-of-the-art microscopy image representation ability, surpassing existing generalist and even several custom algorithms. Microsnoop can be integrated with other pipelines to perform tasks such as super-resolution histopathology image analysis and multimodal analysis. Furthermore, Microsnoop can be adapted to various hardware and can be easily deployed on local or cloud computing platforms. We will regularly retrain and reevaluate the model using community-contributed data to consistently improve Microsnoop.},
}
@article {pmid38235176,
year = {2024},
author = {Putra, IMS and Siahaan, D and Saikhu, A},
title = {SNLI Indo: A recognizing textual entailment dataset in Indonesian derived from the Stanford Natural Language Inference dataset.},
journal = {Data in brief},
volume = {52},
number = {},
pages = {109998},
pmid = {38235176},
issn = {2352-3409},
abstract = {Recognizing textual entailment (RTE) is an essential task in natural language processing (NLP). It is the task of determining the inference relationship between text fragments (premise and hypothesis), of which the inference relationship is either entailment (true), contradiction (false), or neutral (undetermined). The most popular approach for RTE is neural networks, which has resulted in the best RTE models. Neural network approaches, in particular deep learning, are data-driven and, consequently, the quantity and quality of the data significantly influences the performance of these approaches. Therefore, we introduce SNLI Indo, a large-scale RTE dataset in the Indonesian language, which was derived from the Stanford Natural Language Inference (SNLI) corpus by translating the original sentence pairs. SNLI is a large-scale dataset that contains premise-hypothesis pairs that were generated using a crowdsourcing framework. The SNLI dataset is comprised of a total of 569,027 sentence pairs with the distribution of sentence pairs as follows: 549,365 pairs for training, 9,840 pairs for model validation, and 9,822 pairs for testing. We translated the original sentence pairs of the SNLI dataset from English to Indonesian using the Google Cloud Translation API. The existence of SNLI Indo addresses the resource gap in the field of NLP for the Indonesian language. Even though large datasets are available in other languages, in particular English, the SNLI Indo dataset enables a more optimal development of deep learning models for RTE in the Indonesian language.},
}
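The translation step described in the SNLI Indo entry above used the Google Cloud Translation API. Below is a hedged sketch of a sentence-pair translation loop with the v2 client library (the client calls are the real API surface; the credentials setup, example pair, and batching strategy are assumptions, and this is not the authors' pipeline):

    # Hedged sketch: translating SNLI premise/hypothesis pairs to Indonesian
    # with the Google Cloud Translation v2 client. Assumes the environment
    # variable GOOGLE_APPLICATION_CREDENTIALS points at a service-account key.
    from google.cloud import translate_v2 as translate

    client = translate.Client()
    pairs = [("A man inspects the uniform of a figure.", "The man is sleeping.")]

    translated = []
    for premise, hypothesis in pairs:
        out = client.translate([premise, hypothesis],
                               source_language="en", target_language="id")
        translated.append((out[0]["translatedText"], out[1]["translatedText"]))
    print(translated)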
@article {pmid38235174,
year = {2024},
author = {Koulgi, P and Jumani, S},
title = {Dataset of temporal trends of surface water area across India's rivers and basins.},
journal = {Data in brief},
volume = {52},
number = {},
pages = {109991},
pmid = {38235174},
issn = {2352-3409},
abstract = {This dataset [1] quantifies the extent and rate of annual change in surface water area (SWA) across India's rivers and basins over a period of 30 years spanning 1991 to 2020. This data has been derived from the Global Surface Water Explorer, which maps historical terrestrial surface water occurrence globally using the Landsat satellite image archive since 1984, at a spatial resolution of 30 m/pixel and a temporal resolution of once a month. This monthly time-series was used to create annual composites of wet-season (October, November, December), dry-season (February, March, April), and permanent (October, November, December, February, March, April) surface water extent, which were then used to estimate annual rates of change. To estimate SWA trends for both river networks and their basins, we conducted our analysis at two spatial scales - (1) cross-sectional reaches (transects) across river networks, and (2) sub-basins within river catchments. For each reach and sub-basin (henceforth basin), temporal trends in wet-season, dry-season, and permanent SWA were estimated using the non-parametric Sen's slope estimator. For every valid reach and basin, the time series of invalid or missing data was also computed as a fractional area to inform the level of certainty associated with the reported SWA trend estimates. In addition to a Zenodo data repository, this data [1] is presented as an interactive web application (https://sites.google.com/view/surface-water-trends-india/; henceforth Website) to allow users to visualize the trends of permanent, wet-season, and dry-season water along with the extent of missing data for individual transects or basins across India. The Website provides a simple user interface to enable users to download seasonal time-series of SWA for any region of interest at the scale of the river network or basin. The Website also provides details about accessing the annual permanent, dry and wet season composites, which are stored as publicly accessible cloud assets on the Google Earth Engine platform. The spatial (basin and reach) and temporal (wet season, dry season, and permanent water scenarios) scales of information provided in this dataset yield a granular understanding of water systems in India. We envision this dataset to serve as a baseline information layer that can be used in combination with other data sources to support regional analysis of hydrologic trends, watershed-based analysis, and conservation planning. Specific applications include, but are not limited to, monitoring and identifying at-risk wetlands, visualizing and measuring changes to surface water extent before and after water infrastructure projects (such as dams and water abstraction projects), mapping drought prone regions, and mapping natural and anthropogenic changes to SWA along river networks. Intended users include, but are not limited to, students, academics, decision-makers, planners, policymakers, activists, and others interested in water-related issues.},
}
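The trend statistic used in the dataset entry above, Sen's slope, is available in SciPy as the Theil-Sen estimator. A hedged sketch on a synthetic annual surface-water-area series (the numbers are invented, not the published data):

    # Hedged sketch: Sen's (Theil-Sen) slope for an annual surface water area
    # series, as estimated per reach/basin in the dataset above.
    import numpy as np
    from scipy.stats import theilslopes

    years = np.arange(1991, 2021)
    rng = np.random.default_rng(7)
    swa = 120 - 0.8 * (years - 1991) + rng.normal(0, 3, years.size)  # km^2

    slope, intercept, lo, hi = theilslopes(swa, years)
    print(f"Sen's slope: {slope:.2f} km^2/yr (95% CI {lo:.2f} to {hi:.2f})")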
@article {pmid38231538,
year = {2024},
author = {Gheisari, M and Ghaderzadeh, M and Li, H and Taami, T and Fernández-Campusano, C and Sadeghsalehi, H and Afzaal Abbasi, A},
title = {Mobile Apps for COVID-19 Detection and Diagnosis for Future Pandemic Control: Multidimensional Systematic Review.},
journal = {JMIR mHealth and uHealth},
volume = {12},
number = {},
pages = {e44406},
pmid = {38231538},
issn = {2291-5222},
mesh = {Humans ; *COVID-19 ; Pandemics/prevention & control ; Artificial Intelligence ; SARS-CoV-2 ; *Mobile Applications ; COVID-19 Testing ; },
abstract = {BACKGROUND: In the modern world, mobile apps are essential for human advancement, and pandemic control is no exception. The use of mobile apps and technology for the detection and diagnosis of COVID-19 has been the subject of numerous investigations, although no thorough analysis of mobile app-based COVID-19 pandemic prevention has been conducted, leaving a gap.
OBJECTIVE: With the intention of helping software companies and clinical researchers, this study provides comprehensive information regarding the different fields in which mobile apps were used to diagnose COVID-19 during the pandemic.
METHODS: In this systematic review, 535 studies were found after searching 5 major research databases (ScienceDirect, Scopus, PubMed, Web of Science, and IEEE). Of these, only 42 (7.9%) studies concerned with diagnosing and detecting COVID-19 were chosen after applying inclusion and exclusion criteria using the PRISMA (Preferred Reporting Items for Systematic Reviews and Meta-Analyses) protocol.
RESULTS: Mobile apps were categorized into 6 areas based on the content of these 42 studies: contact tracing, data gathering, data visualization, artificial intelligence (AI)-based diagnosis, rule- and guideline-based diagnosis, and data transformation. Patients with COVID-19 were identified via mobile apps using a variety of clinical, geographic, demographic, radiological, serological, and laboratory data. Most studies concentrated on using AI methods to identify people who might have COVID-19. Additionally, symptoms, cough sounds, and radiological images were used more frequently compared to other data types. Deep learning techniques, such as convolutional neural networks, performed comparatively better in the processing of health care data than other types of AI techniques, which improved the diagnosis of COVID-19.
CONCLUSIONS: Mobile apps could soon play a significant role as a powerful tool for data collection, epidemic health data analysis, and the early identification of suspected cases. These technologies can work with the internet of things, cloud storage, 5th-generation technology, and cloud computing. Processing pipelines can be moved to mobile device processing cores using new deep learning methods, such as lightweight neural networks. In the event of future pandemics, mobile apps will play a critical role in rapid diagnosis using various image data and clinical symptoms. Consequently, the rapid diagnosis of these diseases can improve the management of their effects and obtain excellent results in treating patients.},
}
@article {pmid38228707,
year = {2024},
author = {Simaiya, S and Lilhore, UK and Sharma, YK and Rao, KBVB and Maheswara Rao, VVR and Baliyan, A and Bijalwan, A and Alroobaea, R},
title = {A hybrid cloud load balancing and host utilization prediction method using deep learning and optimization techniques.},
journal = {Scientific reports},
volume = {14},
number = {1},
pages = {1337},
pmid = {38228707},
issn = {2045-2322},
abstract = {Virtual machine (VM) integration methods have proven effective for optimizing load balancing in cloud data centers. The main challenge with VM integration methods is the trade-off among cost effectiveness, quality of service, performance, optimal resource utilization and avoidance of service level agreement violations. Deep learning methods are widely used in existing research on cloud load balancing. However, capturing the noisy, multilayered fluctuations in workload remains a problem due to limited resource-level provisioning. The long short-term memory (LSTM) model plays a vital role in the prediction of server load and workload provisioning. This research presents a hybrid model using deep learning with Particle Swarm Optimization and a Genetic Algorithm ("DPSO-GA") for dynamic workload provisioning in cloud computing. The proposed model works in two phases. The first phase utilizes a hybrid PSO-GA approach to address the prediction challenge by combining the benefits of these two methods in fine-tuning the hyperparameters. In the second phase, CNN-LSTM is utilized: before the CNN-LSTM approach is used to forecast resource consumption, it is trained with the hybrid PSO-GA approach. In the proposed framework, a one-dimensional CNN and an LSTM are used to forecast cloud resource utilization at subsequent time steps. The LSTM module models temporal information to predict the upcoming VM workload, while the CNN module extracts complex distinguishing features from the VM workload statistics. The proposed model jointly accounts for multi-resource utilization, which helps overcome load-balancing and over-provisioning issues. Comprehensive simulations are carried out on the Google cluster traces benchmark dataset to verify the efficiency of the proposed DPSO-GA technique in enhancing the distribution of resources and load balancing for the cloud. The proposed model achieves outstanding results in terms of precision, accuracy and load allocation.},
}
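The CNN-LSTM forecaster at the core of the DPSO-GA entry above can be sketched in a few lines of Keras; the layer sizes and window length below are the kind of hyperparameters the paper's PSO-GA phase would tune, not the paper's actual settings, and the workload series is synthetic.

    # Hedged sketch: 1D-CNN + LSTM forecaster for next-step resource
    # utilization from sliding windows (synthetic data; not the paper's code).
    import numpy as np
    import tensorflow as tf

    rng = np.random.default_rng(3)
    series = np.sin(np.linspace(0, 60, 2000)) * 0.4 + 0.5 \
             + rng.normal(0, 0.03, 2000)                   # CPU utilization in [0,1]

    window = 32
    X = np.stack([series[i:i + window] for i in range(len(series) - window)])
    y = series[window:]
    X = X[..., None]                                       # (samples, time, features)

    model = tf.keras.Sequential([
        tf.keras.layers.Input(shape=(window, 1)),
        tf.keras.layers.Conv1D(32, 3, activation="relu"),  # local workload patterns
        tf.keras.layers.MaxPooling1D(2),
        tf.keras.layers.LSTM(64),                          # temporal dependencies
        tf.keras.layers.Dense(1),
    ])
    model.compile(optimizer="adam", loss="mse")
    model.fit(X, y, epochs=3, batch_size=64, verbose=0)
    print("one-step forecast:", float(model.predict(X[-1:], verbose=0)[0, 0]))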
@article {pmid38218894,
year = {2024},
author = {Zhao, Y and Sazlina, SG and Rokhani, FZ and Chinna, K and Su, J and Chew, BH},
title = {The expectations and acceptability of a smart nursing home model among Chinese older adults: a mixed methods study.},
journal = {BMC nursing},
volume = {23},
number = {1},
pages = {40},
pmid = {38218894},
issn = {1472-6955},
abstract = {BACKGROUND: Smart nursing homes (SNHs) integrate advanced technologies, including IoT, digital health, big data, AI, and cloud computing, to optimise remote clinical services, monitor abnormal events, enhance decision-making, and support daily activities for older residents, ensuring overall well-being in a safe and cost-effective environment. This study developed and validated a 24-item Expectation and Acceptability of Smart Nursing Homes Questionnaire (EASNH-Q), and examined the levels of expectations and acceptability of SNHs and associated factors among older adults in China.
METHODS: This was an exploratory sequential mixed methods study, where the qualitative case study was conducted in Hainan and Dalian, while the survey was conducted in Xi'an, Nanjing, Shenyang, and Xiamen. The validation of EASNH-Q also included exploratory and confirmatory factor analyses. Multinomial logistic regression analysis was used to estimate the determinants of expectations and acceptability of SNHs.
RESULTS: The newly developed EASNH-Q uses a Likert Scale ranging from 1 (strongly disagree) to 5 (strongly agree), and underwent validation and refinement from 49 items to the final 24 items. The content validity indices for relevance, comprehensibility, and comprehensiveness were all above 0.95. The expectations and acceptability of SNHs exhibited a strong correlation (r = 0.85, p < 0.01), and good test-retest reliability for expectation (0.90) and acceptability (0.81). The highest tertile of expectations (χ²=28.89, p < 0.001) and acceptability (χ²=25.64, p < 0.001) towards SNHs were significantly associated with the willingness to relocate to such facilities. Older adults with self-efficacy in applying smart technologies (OR: 28.0) and those expressing a willingness to move to a nursing home (OR: 3.0) were more likely to have the highest tertile of expectations compared to those in the lowest tertile. Similarly, older adults with self-efficacy in applying smart technologies were more likely to be in the highest tertile of acceptability of SNHs (OR: 13.8).
CONCLUSIONS: EASNH-Q demonstrated commendable validity, reliability, and stability. The majority of Chinese older adults have high expectations for and accept SNHs. Self-efficacy in applying smart technologies and willingness to relocate to a nursing home were associated with high expectations and acceptability of SNHs.},
}
@article {pmid38218892,
year = {2024},
author = {Putzier, M and Khakzad, T and Dreischarf, M and Thun, S and Trautwein, F and Taheri, N},
title = {Implementation of cloud computing in the German healthcare system.},
journal = {NPJ digital medicine},
volume = {7},
number = {1},
pages = {12},
pmid = {38218892},
issn = {2398-6352},
abstract = {With the advent of artificial intelligence and Big Data projects, the necessity for a transition from analog medicine to modern-day solutions such as cloud computing becomes unavoidable. Even though this need is now common knowledge, the process is not always easy to start. Legislative changes, for example at the level of the European Union, are helping the respective healthcare systems to take the necessary steps. This article provides an overview of how a German university hospital is dealing with European data protection laws on the integration of cloud computing into everyday clinical practice. By describing our model approach, we aim to identify opportunities and possible pitfalls to sustainably influence digitization in Germany.},
}
@article {pmid38218746,
year = {2024},
author = {Chen, M and Wei, Z and Li, L and Zhang, K},
title = {Edge computing-based proactive control method for industrial product manufacturing quality prediction.},
journal = {Scientific reports},
volume = {14},
number = {1},
pages = {1288},
pmid = {38218746},
issn = {2045-2322},
abstract = {With the emergence of intelligent manufacturing, new-generation information technologies such as big data and artificial intelligence are rapidly integrating with the manufacturing industry. One of the primary applications is to assist manufacturing plants in predicting product quality. Traditional predictive models primarily focus on establishing high-precision classification or regression models, with less emphasis on imbalanced data, a specific but common scenario for quality prediction in practical industrial environments. A SMOTE-XGBoost quality prediction active control method based on jointly optimized hyperparameters is proposed to address the problem of imbalanced data classification in product quality prediction. In addition, edge computing technology is introduced to address issues in industrial manufacturing such as the large bandwidth load and resource limitations associated with traditional cloud computing models. Finally, the practicality and effectiveness of the proposed method are validated through a case study of a brake disc production line. Experimental results indicate that the proposed method outperforms other classification methods in brake disc quality prediction.},
}
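The imbalance-handling step in the entry above pairs SMOTE oversampling with an XGBoost classifier. A hedged sketch on synthetic imbalanced data (the library calls are real; the features and parameter values stand in for the paper's brake-disc data and tuned hyperparameters):

    # Hedged sketch: SMOTE oversampling of the minority (defective) class
    # followed by XGBoost classification. Synthetic stand-in data; not the
    # paper's code.
    from sklearn.datasets import make_classification
    from sklearn.model_selection import train_test_split
    from sklearn.metrics import classification_report
    from imblearn.over_sampling import SMOTE
    from xgboost import XGBClassifier

    X, y = make_classification(n_samples=2000, n_features=12, weights=[0.95],
                               random_state=0)             # ~5% defect class
    X_tr, X_te, y_tr, y_te = train_test_split(X, y, stratify=y, random_state=0)

    # Balance only the training split, then fit the booster.
    X_res, y_res = SMOTE(random_state=0).fit_resample(X_tr, y_tr)
    clf = XGBClassifier(n_estimators=300, max_depth=4, eval_metric="logloss")
    clf.fit(X_res, y_res)
    print(classification_report(y_te, clf.predict(X_te), digits=3))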
@article {pmid38215330,
year = {2024},
author = {Zhao, B and Chen, WN and Wei, FF and Liu, X and Pei, Q and Zhang, J},
title = {PEGA: A Privacy-Preserving Genetic Algorithm for Combinatorial Optimization.},
journal = {IEEE transactions on cybernetics},
volume = {PP},
number = {},
pages = {},
doi = {10.1109/TCYB.2023.3346863},
pmid = {38215330},
issn = {2168-2275},
abstract = {Evolutionary algorithms (EAs), such as the genetic algorithm (GA), offer an elegant way to handle combinatorial optimization problems (COPs). However, limited by expertise and resources, most users lack the capability to implement EAs for solving COPs. An intuitive and promising solution is to outsource evolutionary operations to a cloud server; however, this poses privacy concerns. To this end, this article proposes a novel computing paradigm called evolutionary computation as a service (ECaaS), where a cloud server renders evolutionary computation services for users while ensuring their privacy. Following the concept of ECaaS, this article presents PEGA, a privacy-preserving GA designed specifically for COPs. PEGA enables users, regardless of their domain expertise or resource availability, to outsource COPs to a cloud server that holds a competitive GA and approximates the optimal solution while safeguarding privacy. Notably, PEGA features the following characteristics. First, PEGA empowers users without domain expertise or sufficient resources to solve COPs effectively. Second, PEGA protects the privacy of users by preventing the leakage of optimization problem details. Third, PEGA performs comparably to the conventional GA when approximating the optimal solution. To realize its functionality, we implement PEGA in a twin-server architecture and evaluate it on two widely known COPs: (1) the traveling salesman problem (TSP) and (2) the 0/1 knapsack problem (KP). In particular, we utilize encryption to protect users' privacy and carefully design a suite of secure computing protocols to support the evolutionary operators of the GA on encrypted chromosomes. Privacy analysis demonstrates that PEGA successfully preserves the confidentiality of COP contents. Experimental results on several TSP and KP datasets reveal that PEGA performs equivalently to the conventional GA in approximating the optimal solution.},
}
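To make the outsourced-GA idea in the PEGA entry above concrete, here is a hedged sketch of the underlying plaintext genetic algorithm for TSP. In PEGA the cloud servers would execute selection, crossover, and mutation over encrypted chromosomes via secure two-server protocols; that cryptographic layer is deliberately omitted here.

    # Hedged sketch: plain genetic algorithm for TSP with order crossover and
    # swap mutation. PEGA runs operators like these on *encrypted* chromosomes;
    # this plaintext version only illustrates the evolutionary loop.
    import random

    random.seed(0)
    n = 20
    cities = [(random.random(), random.random()) for _ in range(n)]

    def tour_length(tour):
        return sum(((cities[tour[i]][0] - cities[tour[(i + 1) % n]][0]) ** 2 +
                    (cities[tour[i]][1] - cities[tour[(i + 1) % n]][1]) ** 2) ** 0.5
                   for i in range(n))

    def crossover(p1, p2):
        # Order crossover (OX): copy a slice from p1, fill the rest in p2's order.
        a, b = sorted(random.sample(range(n), 2))
        child = [None] * n
        child[a:b] = p1[a:b]
        rest = [c for c in p2 if c not in child]
        child[:a], child[b:] = rest[:a], rest[a:]
        return child

    pop = [random.sample(range(n), n) for _ in range(100)]
    for gen in range(300):
        pop.sort(key=tour_length)
        elite = pop[:20]                              # truncation selection
        children = []
        while len(children) < 80:
            c = crossover(*random.sample(elite, 2))
            if random.random() < 0.2:                 # swap mutation
                i, j = random.sample(range(n), 2)
                c[i], c[j] = c[j], c[i]
            children.append(c)
        pop = elite + children
    best = min(pop, key=tour_length)
    print("best tour length:", round(tour_length(best), 3))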
@article {pmid38215168,
year = {2024},
author = {Sun, X and Sun, W and Wang, Z},
title = {Novel enterprises digital transformation influence empirical study.},
journal = {PloS one},
volume = {19},
number = {1},
pages = {e0296693},
pmid = {38215168},
issn = {1932-6203},
mesh = {China ; *Big Data ; *Cloud Computing ; Commerce ; Empirical Research ; },
abstract = {With the rapid development of technologies such as cloud computing and big data, various levels of government departments in the country have successively introduced digital subsidy policies to promote enterprises' digital transformation. However, the effectiveness of these policies and their ability to truly achieve policy objectives have become pressing concerns across society. Against this backdrop, this paper employs a moderated mediation effects model to empirically analyze the incentive effects of financial subsidies on the digital transformation of A-share listed manufacturing companies in the Shanghai and Shenzhen stock markets from 2013 to 2022. The research findings indicate a significant promotion effect of financial subsidies on the digital transformation of manufacturing enterprises, especially demonstrating a notable incentive impact on the digital transformation of large enterprises, non-asset-intensive enterprises, technology-intensive enterprises, and non-labor-intensive enterprises. However, the incentive effect on the digital transformation of small and medium-sized enterprises (SMEs), asset-intensive enterprises, non-technology-intensive enterprises, and labor-intensive enterprises is not significant. Notably, the expansion of financial subsidies positively influences the augmentation of R&D investment within manufacturing enterprises, subsequently providing indirect encouragement for their digital transformation. Additionally, the incorporation of the degree of marketization implies its potential to moderate both the direct and indirect impacts of financial subsidies on enterprise digital transformation. This study enriches the research on the mechanism of the role of financial subsidies in digital transformation and provides empirical evidence on how market participation influences the effects of financial subsidies, thereby assisting policymakers in comprehensively understanding the impact of financial subsidy policies on different types of enterprises.},
}
@article {pmid38215070,
year = {2024},
author = {Fan, Y},
title = {Load balance-aware dynamic cloud-edge-end collaborative offloading strategy.},
journal = {PloS one},
volume = {19},
number = {1},
pages = {e0296897},
pmid = {38215070},
issn = {1932-6203},
mesh = {*Awareness ; *Cloud Computing ; },
abstract = {Cloud-edge-end (CEE) computing is a hybrid computing paradigm that converges the principles of edge and cloud computing. In the design of CEE systems, a crucial challenge is to develop efficient offloading strategies to achieve the collaboration of edge and cloud offloading. Although CEE offloading problems have been widely studied under various backgrounds and methodologies, load balance, an indispensable mechanism in CEE systems for ensuring the full utilization of edge resources, is a factor that has not yet been accounted for. To fill this research gap, we develop a dynamic load balance-aware CEE offloading strategy. First, we propose a load evolution model to characterize the influence of offloading strategies on the system load dynamics and, on this basis, establish a latency model as a performance metric for different offloading strategies. We then formulate an optimal control model to seek the optimal offloading strategy that minimizes latency. Second, we analyze the feasibility of typical optimal control numerical methods for solving our proposed model, and develop a numerical method based on the framework of the genetic algorithm. Third, through a series of numerical experiments, we verify the proposed method. The results show that our method is effective.},
}
@article {pmid38212989,
year = {2024},
author = {Peltzer, A and Mohr, C and Stadermann, KB and Zwick, M and Schmid, R},
title = {nf-core/nanostring: a pipeline for reproducible NanoString nCounter analysis.},
journal = {Bioinformatics (Oxford, England)},
volume = {40},
number = {1},
pages = {},
pmid = {38212989},
issn = {1367-4811},
support = {//Boehringer Ingelheim Pharma GmbH & Co/ ; },
mesh = {*Software ; *Language ; Cloud Computing ; Workflow ; Quality Control ; },
abstract = {MOTIVATION: The NanoString™ nCounter® technology is a widely used targeted quantification platform for the analysis of the expression of up to ∼800 genes. While the manufacturer's software tools can perform the analysis in an interactive, GUI-driven approach, there is no portable and user-friendly workflow available that can be used to perform reproducible analysis of multiple samples simultaneously in a scalable fashion on different computing infrastructures.
RESULTS: Here, we present the nf-core/nanostring open-source pipeline to perform a comprehensive analysis including quality control and additional features such as expression visualization, annotation with additional metadata and input creation for differential gene expression analysis. The workflow features an easy installation, comprehensive documentation, open-source code with the possibility for further extensions, a strong portability across multiple computing environments and detailed quality metrics reporting covering all parts of the pipeline. nf-core/nanostring has been implemented in the Nextflow workflow language and supports Docker, Singularity and Podman container technologies as well as Conda environments, enabling easy deployment on any Nextflow-compatible system, including most widely used cloud computing environments such as Google Cloud Platform (GCP) or Amazon Web Services (AWS).
AVAILABILITY AND IMPLEMENTATION: The source code, documentation and installation instructions, as well as results for continuous tests, are freely available at https://github.com/nf-core/nanostring and https://nf-co.re/nanostring.
}
@article {pmid38212192,
year = {2024},
author = {Ayeni, KI and Berry, D and Ezekiel, CN and Warth, B},
title = {Enhancing microbiome research in sub-Saharan Africa.},
journal = {Trends in microbiology},
volume = {32},
number = {2},
pages = {111-115},
doi = {10.1016/j.tim.2023.11.003},
pmid = {38212192},
issn = {1878-4380},
mesh = {Humans ; Africa South of the Sahara ; *Microbiota ; },
abstract = {While there are lighthouse examples of microbiome research in sub-Saharan Africa (SSA), a significant proportion of local researchers face several challenges. Here, we highlight prevailing issues limiting microbiome research in SSA and suggest potential technological, societal, and research-based solutions. We emphasize the need for considerable investment in infrastructures, training, and appropriate funding to democratize modern technologies with a view to providing useful data to improve human health.},
}
@article {pmid38203138,
year = {2024},
author = {An, X and Cai, B and Chai, L},
title = {Research on Over-the-Horizon Perception Distance Division of Optical Fiber Communication Based on Intelligent Roadways.},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {1},
pages = {},
pmid = {38203138},
issn = {1424-8220},
abstract = {With the construction and application of more and more intelligent networking demonstration projects, a large number of advanced roadside digital infrastructures are deployed on both sides of intelligent roads. These devices sense the road situation in real time through algorithms and transmit it to edge computing units and cloud control platforms through high-speed optical fiber transmission networks. This article proposes a cloud-edge-terminal architecture system based on cloud-edge cooperation, as well as a data exchange protocol for the cloud control basic platform. The over-the-horizon scene division and the optical fiber network communication model are verified by deploying intelligent roadside devices on an intelligent highway. At the same time, this article uses the optical fiber network communication algorithm and the ModelScope large model to run inference on real-time video data. The actual data results show that the StreamYOLO (Stream You Only Look Once) model can use the streaming perception method to detect and continuously track target vehicles in real-time videos. Finally, the method proposed in this article was experimentally validated in an actual smart highway digital infrastructure construction project. The experimental results demonstrate the high application value and promising prospects of optical fiber networks for over-the-horizon perception distance division in intelligent roadway construction.},
}
@article {pmid38203103,
year = {2023},
author = {Sheik, AT and Maple, C and Epiphaniou, G and Dianati, M},
title = {Securing Cloud-Assisted Connected and Autonomous Vehicles: An In-Depth Threat Analysis and Risk Assessment.},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {1},
pages = {},
doi = {10.3390/s24010241},
pmid = {38203103},
issn = {1424-8220},
abstract = {As threat vectors and adversarial capabilities evolve, Cloud-Assisted Connected and Autonomous Vehicles (CCAVs) are becoming more vulnerable to cyberattacks. Several established threat analysis and risk assessment (TARA) methodologies are publicly available to address the evolving threat landscape. However, these methodologies inadequately capture the threat data of CCAVs, resulting in poorly defined threat boundaries or the reduced efficacy of the TARA. This is due to multiple factors, including complex hardware-software interactions, rapid technological advancements, outdated security frameworks, heterogeneous standards and protocols, and human errors in CCAV systems. To address these factors, this study begins by systematically evaluating TARA methods and applying the Spoofing, Tampering, Repudiation, Information disclosure, Denial of service, and Elevation of privileges (STRIDE) threat model and Damage, Reproducibility, Exploitability, Affected Users, and Discoverability (DREAD) risk assessment to target system architectures. This study identifies vulnerabilities, quantifies risks, and methodically examines defined data processing components. In addition, this study offers an attack tree to delineate attack vectors and provides a novel defense taxonomy against identified risks. This article demonstrates the efficacy of the TARA in systematically capturing compromised security requirements, threats, limits, and associated risks with greater precision. By doing so, we further discuss the challenges in protecting hardware-software assets against multi-staged attacks due to emerging vulnerabilities. As a result, this research informs advanced threat analyses and risk management strategies for enhanced security engineering of cyberphysical CCAV systems.},
}
@article {pmid38203078,
year = {2023},
author = {Suo, L and Ma, H and Jiao, W and Liu, X},
title = {Job-Deadline-Guarantee-Based Joint Flow Scheduling and Routing Scheme in Data Center Networks.},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {1},
pages = {},
pmid = {38203078},
issn = {1424-8220},
support = {62101415//National Natural Science Foundation of China/ ; },
abstract = {Many emerging Internet of Things (IoT) applications deployed on cloud platforms have strict latency requirements or deadline constraints, and thus meeting the deadlines is crucial to ensure the quality of service for users and the revenue for service providers in these delay-stringent IoT applications. Efficient flow scheduling in data center networks (DCNs) plays a major role in reducing the execution time of jobs and has garnered significant attention in recent years. However, only few studies have attempted to combine job-level flow scheduling and routing to guarantee meeting the deadlines of multi-stage jobs. In this paper, an efficient heuristic joint flow scheduling and routing (JFSR) scheme is proposed. First, targeting the maximization of the number of jobs whose deadlines are met, we formulate the joint flow scheduling and routing optimization problem for multiple multi-stage jobs. Second, due to its mathematical intractability, this problem is decomposed into two sub-problems: inter-coflow scheduling and intra-coflow scheduling. In the first sub-problem, coflows from different jobs are scheduled according to their relative remaining times; in the second sub-problem, an iterative coflow scheduling and routing (ICSR) algorithm is designed to alternately optimize the routing path and bandwidth allocation for each scheduled coflow. Finally, simulation results demonstrate that the proposed JFSR scheme can significantly increase the number of jobs whose deadlines are met in DCNs.},
}
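The inter-coflow stage of the JFSR entry above orders coflows from different jobs by their relative remaining time. A hedged toy sketch of such a priority rule follows; the data structures and the exact definition of "relative remaining time" (slack before the deadline divided by remaining transfer time) are assumptions for illustration, and the routing/bandwidth step (ICSR) is not reproduced.

    # Hedged sketch of a relative-remaining-time priority rule for coflows.
    # The slack-ratio definition below is an assumption, not the paper's
    # exact formulation.
    from dataclasses import dataclass

    @dataclass
    class Coflow:
        job: str
        remaining_bytes: float      # data left to transfer
        rate: float                 # achievable transfer rate (bytes/s)
        deadline: float             # job deadline (s from now)

        def relative_remaining_time(self):
            transfer_time = self.remaining_bytes / self.rate
            return (self.deadline - transfer_time) / transfer_time  # slack ratio

    coflows = [Coflow("A", 8e9, 1e9, 12.0),
               Coflow("B", 2e9, 1e9, 2.5),
               Coflow("C", 5e9, 1e9, 20.0)]

    # Serve the most urgent coflows (least slack) first: B, then A, then C.
    for cf in sorted(coflows, key=Coflow.relative_remaining_time):
        print(cf.job, round(cf.relative_remaining_time(), 2))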
@article {pmid38203015,
year = {2023},
author = {Oyucu, S and Polat, O and Türkoğlu, M and Polat, H and Aksöz, A and Ağdaş, MT},
title = {Ensemble Learning Framework for DDoS Detection in SDN-Based SCADA Systems.},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {1},
pages = {},
pmid = {38203015},
issn = {1424-8220},
support = {101084323//European Union's Horizon Europe research and innovation programme/ ; },
abstract = {Supervisory Control and Data Acquisition (SCADA) systems play a crucial role in overseeing and controlling renewable energy sources like solar, wind, hydro, and geothermal resources. Nevertheless, with the expansion of conventional SCADA network infrastructures, there arise significant challenges in managing and scaling due to increased size, complexity, and device diversity. Using Software Defined Networking (SDN) technology in traditional SCADA network infrastructure offers management, scaling and flexibility benefits. However, as the integration of SDN-based SCADA systems with modern technologies such as the Internet of Things, cloud computing, and big data analytics increases, cybersecurity becomes a major concern for these systems. Therefore, cyber-physical energy systems (CPES) should be considered together with all energy systems. One of the most dangerous types of cyber-attacks against SDN-based SCADA systems is Distributed Denial of Service (DDoS) attacks. DDoS attacks disrupt the management of energy resources, causing service interruptions and increasing operational costs. Therefore, the first step to protect against DDoS attacks in SDN-based SCADA systems is to develop an effective intrusion detection system. This paper proposes a Decision Tree-based Ensemble Learning technique to detect DDoS attacks in SDN-based SCADA systems by accurately distinguishing between normal and DDoS attack traffic. For training and testing the ensemble learning models, normal and DDoS attack traffic data are obtained over a specific simulated experimental network topology. Techniques based on feature selection and hyperparameter tuning are used to optimize the performance of the decision tree ensemble models. Experimental results show that feature selection, combination of different decision tree ensemble models, and hyperparameter tuning can lead to a more accurate machine learning model with better performance detecting DDoS attacks against SDN-based SCADA systems.},
}
@article {pmid38203012,
year = {2023},
author = {Rodríguez-Azar, PI and Mejía-Muñoz, JM and Cruz-Mejía, O and Torres-Escobar, R and López, LVR},
title = {Fog Computing for Control of Cyber-Physical Systems in Industry Using BCI.},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {1},
pages = {},
pmid = {38203012},
issn = {1424-8220},
abstract = {Brain-computer interfaces use signals from the brain, such as EEG, to determine brain states, which in turn can be used to issue commands, for example, to control industrial machinery. While cloud computing can aid in the creation and operation of industrial multi-user BCI systems, the vast amount of data generated from EEG signals can lead to slow response times and bandwidth problems. Fog computing reduces latency in high-demand computation networks. Hence, this paper introduces a fog computing solution for BCI processing. The solution consists of fog nodes that incorporate machine learning algorithms to convert EEG signals into commands to control a cyber-physical system. The machine learning module uses a deep learning encoder to generate feature images from EEG signals, which are subsequently classified into commands by a random forest. The classification scheme was compared across various classifiers, with the random forest obtaining the best performance. Additionally, the fog computing approach was compared with a cloud-only approach through the use of a fog computing simulator. The results indicate that the fog computing method resulted in less latency compared to the solely cloud computing approach.},
}
@article {pmid38202896,
year = {2023},
author = {Feng, YC and Zeng, SY and Liang, TY},
title = {Part2Point: A Part-Oriented Point Cloud Reconstruction Framework.},
journal = {Sensors (Basel, Switzerland)},
volume = {24},
number = {1},
pages = {},
pmid = {38202896},
issn = {1424-8220},
support = {NSTC112-2221-E992-068//National Science and Technology Council in Taiwan/ ; },
abstract = {Three-dimensional object modeling is necessary for developing virtual and augmented reality applications. Traditionally, application engineers must manually use art software to edit object shapes or exploit LiDAR to scan physical objects to construct 3D models, which is very time-consuming and costly work. Fortunately, GPUs have recently provided a cost-effective solution for massive data computation. With GPU support, many studies have proposed 3D model generators based on different learning architectures, which can automatically convert 2D object pictures into 3D object models with good performance. However, as the demand for model resolution increases, the required computing time and memory space increase as significantly as the parameters of the learning architecture, which seriously degrades the efficiency of 3D model construction and the feasibility of resolution improvement. To resolve this problem, this paper proposes a part-oriented point cloud reconstruction framework called Part2Point. This framework segments the object's parts, reconstructs the point cloud for individual object parts, and combines the part point clouds into the complete object point cloud. Therefore, it can reduce the number of learning network parameters at the same resolution, effectively minimizing computation time and the required memory space. Moreover, it can improve the resolution of the reconstructed point cloud so that the reconstructed model can present more details of object parts.},
}
@article {pmid38200074,
year = {2024},
author = {Chen, C and Gong, L and Luo, X and Wang, F},
title = {Research on a new management model of distribution Internet of Things.},
journal = {Scientific reports},
volume = {14},
number = {1},
pages = {995},
pmid = {38200074},
issn = {2045-2322},
support = {2020-KJLH-PH-006//Science and Technology Project of Zhejiang Electric Power Company/ ; },
abstract = {Based on the controllable-intelligence characteristics of the Internet of Things (IoT) and the functional and transmission-delay requirements of the new distribution network, this study proposes a method that combines edge collaborative computing with the distribution network station area, and builds a distribution network management structure model around the Packet Transport Network (PTN) structure. A multi-terminal node distribution model of the distributed IoT is established. Finally, a distribution IoT management model is constructed based on an edge multi-node cooperative reasoning algorithm and a collaborative computing architecture model. The purpose of this paper is to solve the problem of large inference delays caused by heavy computing tasks on distribution cloud servers. The final results show that the model reduces the inference delay of cloud computing when a large number of smart device terminals of the distribution IoT are connected to the network.},
}
@article {pmid38197934,
year = {2024},
author = {Cheong, RCT and Jawad, S and Adams, A and Campion, T and Lim, ZH and Papachristou, N and Unadkat, S and Randhawa, P and Joseph, J and Andrews, P and Taylor, P and Kunz, H},
title = {Enhancing paranasal sinus disease detection with AutoML: efficient AI development and evaluation via magnetic resonance imaging.},
journal = {European archives of oto-rhino-laryngology : official journal of the European Federation of Oto-Rhino-Laryngological Societies (EUFOS) : affiliated with the German Society for Oto-Rhino-Laryngology - Head and Neck Surgery},
volume = {281},
number = {4},
pages = {2153-2158},
pmid = {38197934},
issn = {1434-4726},
mesh = {Humans ; *Artificial Intelligence ; Machine Learning ; Magnetic Resonance Imaging ; Head ; *Paranasal Sinus Diseases/diagnostic imaging ; },
abstract = {PURPOSE: Artificial intelligence (AI) in the form of automated machine learning (AutoML) offers a new potential breakthrough to overcome the barrier of entry for non-technically trained physicians. A Clinical Decision Support System (CDSS) for screening purposes using AutoML could be beneficial to ease the clinical burden in the radiological workflow for paranasal sinus diseases.
METHODS: The main target of this work was the automated evaluation of model performance and the assessment of the feasibility of training the Vertex AI image classification model on the Google Cloud AutoML platform to automatically classify the presence or absence of sinonasal disease. The dataset is the Open Access Series of Imaging Studies (OASIS-3) MRI head dataset, consensus-labelled by three specialised head and neck consultant radiologists. A total of 1313 unique non-TSE T2w MRI head sessions were used from the OASIS-3 repository.
RESULTS: The best-performing image classification model achieved a precision of 0.928. Demonstrating the feasibility and high performance of the Vertex AI image classification model to automatically detect the presence or absence of sinonasal disease on MRI.
CONCLUSION: AutoML allows for potential deployment to optimise diagnostic radiology workflows and lay the foundation for further AI research in radiology and otolaryngology. The usage of AutoML could serve as a formal requirement for a feasibility study.},
}
@article {pmid38195165,
year = {2024},
author = {Chen, J and Yin, D and Wong, HYH and Duan, X and Yu, KHO and Ho, JWK},
title = {Vulture: cloud-enabled scalable mining of microbial reads in public scRNA-seq data.},
journal = {GigaScience},
volume = {13},
number = {},
pages = {},
pmid = {38195165},
issn = {2047-217X},
support = {//Innovation and Technology Commission - Hong Kong/ ; },
mesh = {Humans ; Benchmarking ; *Carcinoma, Hepatocellular/genetics ; DNA Copy Number Variations ; Hepatitis B virus ; *Liver Neoplasms ; Single-Cell Gene Expression Analysis ; },
abstract = {The rapidly growing collection of public single-cell sequencing data has become a valuable resource for molecular, cellular, and microbial discovery. Previous studies mostly overlooked detecting pathogens in human single-cell sequencing data. Moreover, existing bioinformatics tools lack the scalability to deal with big public data. We introduce Vulture, a scalable cloud-based pipeline that performs microbial calling for single-cell RNA sequencing (scRNA-seq) data, enabling meta-analysis of host-microbial studies from the public domain. In our benchmarking experiments, Vulture is 66% to 88% faster than local tools (PathogenTrack and Venus) and 41% faster than the state-of-the-art cloud-based tool Cumulus, while achieving comparable microbial read identification. In terms of cost on cloud computing systems, Vulture also shows a cost reduction of 83% ($12 vs. $70). We applied Vulture to 2 coronavirus disease 2019, 3 hepatocellular carcinoma (HCC), and 2 gastric cancer human patient cohorts with public sequencing reads data from scRNA-seq experiments and discovered cell type-specific enrichment of severe acute respiratory syndrome coronavirus 2, hepatitis B virus (HBV), and Helicobacter pylori-positive cells, respectively. In the HCC analysis, all cohorts showed hepatocyte-only enrichment of HBV, with cell subtype-associated HBV enrichment based on inferred copy number variations. In summary, Vulture presents a scalable and economical framework to mine unknown host-microbial interactions from large-scale public scRNA-seq data. Vulture is available via an open-source license at https://github.com/holab-hku/Vulture.},
}
@article {pmid38192752,
year = {2024},
author = {Tan, X and Zhao, D and Wang, M and Wang, X and Wang, X and Liu, W and Ghobaei-Arani, M},
title = {A decision-making mechanism for task offloading using learning automata and deep learning in mobile edge networks.},
journal = {Heliyon},
volume = {10},
number = {1},
pages = {e23651},
pmid = {38192752},
issn = {2405-8440},
abstract = {The development of mobile networks has led to challenges such as high delays in storage, computing, and traffic management. To address these challenges, fifth-generation networks emphasize technologies such as mobile cloud computing and mobile edge computing. Mobile Edge Cloud Computing (MECC) is an emerging distributed computing model that provides access to cloud computing services at the edge of the network, near mobile users. By offloading tasks at the edge of the network instead of transferring them to a remote cloud, MECC can achieve flexibility and real-time processing. During computation offloading, the requirements of Internet of Things (IoT) applications may change at different stages, which existing works ignore. With this motivation, we propose a task offloading method that handles dynamic resource requirements during the use of IoT applications, focusing on the problem of workload fluctuations. The proposed method uses a learning-automata-based offload decision-maker to offload requests to the edge layer. An auto-scaling strategy is then developed using a long short-term memory network, which estimates the expected number of future requests. Finally, an Asynchronous Advantage Actor-Critic algorithm, a deep reinforcement learning approach, decides whether to scale down or scale up. The effectiveness of the proposed method has been confirmed through extensive experiments using the iFogSim simulator. The numerical results show that the proposed method has better scalability and performance in terms of delay and energy consumption than existing state-of-the-art methods.},
}
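The auto-scaling step above rests on a sliding-window workload forecaster. A minimal Keras sketch of such an LSTM estimator on a synthetic request trace; window size, trace shape, and hyperparameters are our assumptions, not the paper's:

```python
import numpy as np
import tensorflow as tf

WINDOW = 12  # past intervals used to predict the next one

# Toy workload trace: periodic request counts with noise.
t = np.arange(400, dtype=np.float32)
trace = 50 + 20 * np.sin(2 * np.pi * t / 48) + 3 * np.random.randn(400).astype(np.float32)

X = np.stack([trace[i:i + WINDOW] for i in range(len(trace) - WINDOW)])[..., None]
y = trace[WINDOW:]

model = tf.keras.Sequential([
    tf.keras.layers.Input(shape=(WINDOW, 1)),
    tf.keras.layers.LSTM(32),
    tf.keras.layers.Dense(1),  # expected request count in the next interval
])
model.compile(optimizer="adam", loss="mse")
model.fit(X, y, epochs=5, batch_size=32, verbose=0)

expected = model.predict(X[-1:], verbose=0)[0, 0]
print(f"expected requests next interval: {expected:.1f}")
# A scaling policy would compare `expected` with current capacity to scale up/down.
```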
@article {pmid38192482,
year = {2023},
author = {Alabadi, M and Habbal, A},
title = {Next-generation predictive maintenance: leveraging blockchain and dynamic deep learning in a domain-independent system.},
journal = {PeerJ. Computer science},
volume = {9},
number = {},
pages = {e1712},
pmid = {38192482},
issn = {2376-5992},
abstract = {The fourth industrial revolution, often referred to as Industry 4.0, has revolutionized the manufacturing sector by integrating emerging technologies such as artificial intelligence (AI), machine and deep learning, the Industrial Internet of Things (IIoT), cloud computing, cyber-physical systems (CPSs), and cognitive computing throughout the production life cycle. Predictive maintenance (PdM) emerges as a critical component, utilizing data analytics to track machine health and proactively detect machinery failures. Deep learning (DL) is pivotal in this context, offering superior prediction accuracy through neural networks' data processing capabilities. However, DL adoption in PdM faces challenges, including continuous model updates and domain dependence. Meanwhile, centralized DL models, prevalent in PdM, pose security risks such as central points of failure and unauthorized access. To address these issues, this study presents an innovative decentralized PdM system integrating DL, blockchain, and decentralized storage based on the InterPlanetary File System (IPFS) for accurately predicting Remaining Useful Lifetime (RUL). DL handles predictive tasks, while blockchain secures data orchestration. Decentralized storage safeguards model metadata and training data for dynamic models. The system features two synchronized DL pipelines for time-series data, encompassing prediction and training mechanisms. The detailed materials and methods of this research shed light on the system's development and validation processes. Rigorous validation confirms the system's accuracy, performance, and security through an experimental testbed. The results demonstrate the system's dynamic updating and domain independence. The prediction model surpasses state-of-the-art models in terms of root mean squared error (RMSE). Blockchain-based scalability was tested via smart-contract gas usage, and the analysis shows efficient performance across varying input and output data scales. A comprehensive CIA analysis highlights the system's robust security features, addressing confidentiality, integrity, and availability. The proposed decentralized PdM system, which incorporates DL, blockchain technology, and decentralized storage, has the potential to improve predictive accuracy and overcome significant security and scalability obstacles. Consequently, this system holds promising implications for the advancement of predictive maintenance in the context of Industry 4.0.},
}
@article {pmid38192461,
year = {2023},
author = {Xiao, J and Chang, C and Wu, P and Ma, Y},
title = {Attribute identification based IoT fog data security control and forwarding.},
journal = {PeerJ. Computer science},
volume = {9},
number = {},
pages = {e1747},
pmid = {38192461},
issn = {2376-5992},
abstract = {As Internet of Things (IoT) applications continue to proliferate, traditional cloud computing is increasingly unable to meet their low-latency demands. The IoT fog architecture addresses this limitation by introducing fog servers in the fog layer, closer to the IoT devices. However, this architecture lacks authentication mechanisms for information sources, security verification for information transmission, and reasonable allocation of fog nodes. To ensure the secure end-to-end transmission of information in the IoT fog architecture, an attribute-identification-based security control and forwarding method for IoT fog data (AISCF) is proposed. AISCF applies attribute signatures to the IoT fog architecture and uses a software-defined network (SDN) to control and forward fog-layer data flows. First, IoT devices add attribute identifiers to the data they send based on attribute features, and the ingress switch performs fine-grained access control on the data based on these attribute identifiers. Second, the SDN uses attribute features as flow-table matching items to achieve fine-grained, attribute-identifier-based control and forwarding of fog-layer data flows. Lastly, the egress switch dynamically samples data flows, and the controller verifies the attribute signatures of the sampled data packets. Experimental validation has demonstrated that AISCF can effectively detect attacks such as data tampering and forged matching items. Moreover, AISCF imposes minimal overhead on network throughput, CPU utilization and packet forwarding latency, and is practical in IoT fog architectures.},
}
@article {pmid38191935,
year = {2024},
author = {Renton, AI and Dao, TT and Johnstone, T and Civier, O and Sullivan, RP and White, DJ and Lyons, P and Slade, BM and Abbott, DF and Amos, TJ and Bollmann, S and Botting, A and Campbell, MEJ and Chang, J and Close, TG and Dörig, M and Eckstein, K and Egan, GF and Evas, S and Flandin, G and Garner, KG and Garrido, MI and Ghosh, SS and Grignard, M and Halchenko, YO and Hannan, AJ and Heinsfeld, AS and Huber, L and Hughes, ME and Kaczmarzyk, JR and Kasper, L and Kuhlmann, L and Lou, K and Mantilla-Ramos, YJ and Mattingley, JB and Meier, ML and Morris, J and Narayanan, A and Pestilli, F and Puce, A and Ribeiro, FL and Rogasch, NC and Rorden, C and Schira, MM and Shaw, TB and Sowman, PF and Spitz, G and Stewart, AW and Ye, X and Zhu, JD and Narayanan, A and Bollmann, S},
title = {Neurodesk: an accessible, flexible and portable data analysis environment for reproducible neuroimaging.},
journal = {Nature methods},
volume = {},
number = {},
pages = {},
pmid = {38191935},
issn = {1548-7105},
abstract = {Neuroimaging research requires purpose-built analysis software, which is challenging to install and may produce different results across computing environments. The community-oriented, open-source Neurodesk platform (https://www.neurodesk.org/) harnesses a comprehensive and growing suite of neuroimaging software containers. Neurodesk includes a browser-accessible virtual desktop, command-line interface and computational notebook compatibility, allowing for accessible, flexible, portable and fully reproducible neuroimaging analysis on personal workstations, high-performance computers and the cloud.},
}
@article {pmid38187735,
year = {2023},
author = {Moctezuma, L and Rivera, LB and van Nouhuijs, F and Orcales, F and Kim, A and Campbell, R and Fuse, M and Pennings, PS},
title = {Using a decision tree to predict COVID case numbers: a tutorial for beginners.},
journal = {bioRxiv : the preprint server for biology},
volume = {},
number = {},
pages = {},
pmid = {38187735},
support = {T32 GM142515/GM/NIGMS NIH HHS/United States ; T34 GM008574/GM/NIGMS NIH HHS/United States ; },
abstract = {Machine learning (ML) makes it possible to analyze large volumes of data and is an important tool in biomedical research. The use of ML methods can lead to improvements in diagnosis, treatment, and prevention of diseases. During the COVID pandemic, ML methods were used for predictions at the patient and community levels. Given the ubiquity of ML, it is important that future doctors, researchers and teachers get acquainted with ML and its contributions to research. Our goal is to make it easier for students and their professors to learn about ML. The learning module we present here is based on a small but relevant COVID dataset, videos, annotated code and the use of cloud computing platforms. The benefit of cloud computing platforms is that students don't have to set up a coding environment on their computer. This saves time and is also an important democratization factor - allowing students to use old or borrowed computers (e.g., from a library), tablets or Chromebooks. As a result, this will benefit colleges geared toward underserved populations with limited computing infrastructure. We developed a beginner-friendly module focused on learning the basics of decision trees by applying them to COVID tabular data. It introduces students to basic terminology used in supervised ML and its relevance to research. The module includes two Python notebooks with pre-written code, one with practice exercises and another with its solutions. Our experience with biology students at San Francisco State University suggests that the material increases interest in ML.},
}
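In the spirit of the teaching module above, a beginner-level decision-tree example takes only a few lines of scikit-learn. This sketch uses a synthetic stand-in for the module's COVID table (the actual notebooks and data are not reproduced here):

```python
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier, export_text

# Stand-in for a COVID table: rows = regions, columns = tabular features,
# label = e.g. whether weekly case numbers rose.
X, y = make_classification(n_samples=300, n_features=5, n_informative=3,
                           random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

# A shallow tree keeps the model small enough to read and discuss in class.
tree = DecisionTreeClassifier(max_depth=3, random_state=0).fit(X_train, y_train)
print(f"test accuracy: {tree.score(X_test, y_test):.2f}")
print(export_text(tree))  # human-readable splits, useful for teaching
```

Run in a hosted notebook (e.g., a free cloud notebook service), this requires no local setup, which is exactly the democratization argument the abstract makes.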
@article {pmid38183538,
year = {2024},
author = {Indraja, G and Aashi, A and Vema, VK},
title = {Spatial and temporal classification and prediction of LULC in Brahmani and Baitarni basin using integrated cellular automata models.},
journal = {Environmental monitoring and assessment},
volume = {196},
number = {2},
pages = {117},
pmid = {38183538},
issn = {1573-2959},
mesh = {*Cellular Automata ; *Ecosystem ; Environmental Monitoring ; Algorithms ; Agriculture ; },
abstract = {Monitoring the dynamics of land use and land cover (LULC) is imperative in the changing climate and evolving urbanization patterns worldwide. The shifts in land use have a significant impact on the hydrological response of watersheds across the globe. Several studies have applied machine learning (ML) algorithms using historical LULC maps along with elevation data and slope for predicting future LULC projections. However, the influence of other driving factors such as socio-economic and climatological factors has not been thoroughly explored. In the present study, a sensitivity analysis approach was adopted to understand the effect of both physical factors (elevation, slope, aspect, etc.) and socio-economic factors such as population density, distance to built-up, and distance to road and rail, as well as climatic factors (mean precipitation), on the accuracy of LULC prediction in the Brahmani and Baitarni (BB) basin of Eastern India. Additionally, in the absence of recent LULC maps of the basin, three ML algorithms, i.e., random forest (RF), classification and regression trees (CART), and support vector machine (SVM), were utilized for LULC classification for the years 2007, 2014, and 2021 on the Google Earth Engine (GEE) cloud computing platform. Among the three algorithms, RF performed best for classifying built-up areas along with all the other classes, as compared to CART and SVM. The prediction results revealed that proximity to built-up areas and population growth dominate in modeling LULC over physical factors such as elevation and slope. The analysis of historical data revealed an increase of 351% in built-up areas over the past years (2007-2021), with a corresponding decline in forest and water areas by 12% and 36%, respectively. While the future predictions highlighted an increase in the built-up class ranging from 11 to 38% during the years 2028-2070, the forested areas are anticipated to decline by 4 to 16%. The overall findings of the present study suggested that the BB basin, despite being primarily agricultural with a significant forest cover, is undergoing rapid expansion of built-up areas through the encroachment of agricultural and forested lands, which could have far-reaching implications for the region's ecosystem services and sustainability.},
}
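As a rough offline analogue of the study's classification step, a random-forest LULC classifier over a pixel table might look like the sketch below. The feature names and data are hypothetical; the study itself ran RF, CART, and SVM on Google Earth Engine rather than scikit-learn:

```python
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split

# Toy pixel table: spectral bands plus driving factors (elevation, slope,
# distance to built-up, population density); labels = LULC classes 0..3.
rng = np.random.default_rng(42)
X = rng.random((2000, 8))
y = rng.integers(0, 4, size=2000)

X_tr, X_te, y_tr, y_te = train_test_split(X, y, random_state=0)
rf = RandomForestClassifier(n_estimators=200, random_state=0).fit(X_tr, y_tr)
print(f"overall accuracy: {rf.score(X_te, y_te):.2f}")

# Feature importances give a quick sensitivity reading on the drivers,
# analogous in spirit to the paper's sensitivity analysis.
names = ["B2", "B3", "B4", "B8", "elev", "slope", "d_builtup", "popden"]
for name, imp in zip(names, rf.feature_importances_):
    print(f"{name:10s} {imp:.3f}")
```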
@article {pmid38179578,
year = {2023},
author = {Pelofske, E and Hahn, G and Djidjev, H},
title = {Initial State Encoding via Reverse Quantum Annealing and H-Gain Features.},
journal = {IEEE transactions on quantum engineering},
volume = {4},
number = {},
pages = {},
pmid = {38179578},
issn = {2689-1808},
support = {R01 AI154470/AI/NIAID NIH HHS/United States ; U01 HL089897/HL/NHLBI NIH HHS/United States ; R21 HD095228/HD/NICHD NIH HHS/United States ; P30 ES002109/ES/NIEHS NIH HHS/United States ; U01 HG008685/HG/NHGRI NIH HHS/United States ; P01 HL132825/HL/NHLBI NIH HHS/United States ; U01 HL089856/HL/NHLBI NIH HHS/United States ; P01 HL120839/HL/NHLBI NIH HHS/United States ; },
abstract = {Quantum annealing is a specialized type of quantum computation that aims to use quantum fluctuations in order to obtain global minimum solutions of combinatorial optimization problems. Programmable D-Wave quantum annealers are available as cloud computing resources, which allow users low-level access to quantum annealing control features. In this article, we are interested in improving the quality of the solutions returned by a quantum annealer by encoding an initial state into the annealing process. We explore two D-Wave features that allow one to encode such an initial state: the reverse annealing (RA) and the h-gain (HG) features. RA aims to refine a known solution following an anneal path starting with a classical state representing a good solution, going backward to a point where a transverse field is present, and then finishing the annealing process with a forward anneal. The HG feature allows one to put a time-dependent weighting scheme on linear (h) biases of the Hamiltonian, and we demonstrate that this feature likewise can be used to bias the annealing to start from an initial state. We also consider a hybrid method consisting of a backward phase resembling RA and a forward phase using the HG initial state encoding. Importantly, we investigate the idea of iteratively applying RA and HG to a problem, with the goal of monotonically improving on an initial state that is not optimal. The HG encoding technique is evaluated on a variety of input problems including the edge-weighted maximum cut problem and the vertex-weighted maximum clique problem, demonstrating that the HG technique is a viable alternative to RA for some problems. We also investigate how the iterative procedures perform for both RA and HG initial state encodings on random whole-chip spin glasses with the native hardware connectivity of the D-Wave Chimera and Pegasus chips.},
}
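Reverse annealing is exposed as solver parameters in D-Wave's Ocean SDK. A minimal sketch on one physical coupler, assuming configured Leap/QPU access; qubit indices are taken from the solver graph, the toy problem is ours, and parameter availability depends on the target solver:

```python
from dwave.system import DWaveSampler

qpu = DWaveSampler()                   # needs a configured D-Wave account
q0, q1 = next(iter(qpu.edgelist))      # any physically coupled qubit pair

h = {q0: 0.1, q1: -0.2}                # toy two-qubit Ising problem
J = {(q0, q1): -1.0}
initial = {q0: 1, q1: 1}               # known (possibly suboptimal) state

# Reverse anneal: start at s=1, go backward to s=0.45, then finish forward.
schedule = [[0.0, 1.0], [10.0, 0.45], [20.0, 1.0]]
sampleset = qpu.sample_ising(
    h, J,
    anneal_schedule=schedule,
    initial_state=initial,
    reinitialize_state=True,           # restart every read from `initial`
    num_reads=100,
)
print(sampleset.first.energy, dict(sampleset.first.sample))
```

Iterating this, feeding the best sample back in as the next `initial_state`, is the monotone-improvement loop the abstract investigates.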
@article {pmid38178510,
year = {2023},
author = {Xu, X and Lu, Y and Huang, Y and Zhou, X and Ma, R and Xiong, H and Li, M and Wu, Q and Xu, J},
title = {Frequency modulation of terahertz microcavity via strong coupling with plasmonic resonators.},
journal = {Optics express},
volume = {31},
number = {26},
pages = {44375-44384},
doi = {10.1364/OE.510365},
pmid = {38178510},
issn = {1094-4087},
abstract = {Tunable terahertz (THz) microcavities are crucial for the compact on-chip THz devices, aiming to future cloud-based computing, and artificial-intelligence technologies. However, the solutions to effectively modulate THz microcavities remain elusive. Strong coupling has been widely demonstrated in many configurations at different ambient conditions to date and may serve as a promising tool to modulate THz microcavities. Here, we schematically design a microcavity-plasmon hybrid system, and propose an effective approach to modulating the resonant frequencies of THz microcavities by the microcavity-resonator strong coupling. In this case, we observed the strongly coupling states, where the resultant two-polariton branches exhibit an anti-crossing splitting in the frequency domain, experimentally exhibiting a ∼6.2% frequency modulation to the microcavity compared to the uncoupled case. This work provides an efficient approach to modulating chip-scale THz microcavities, thereby facilitating the development and application of compact THz integrated devices, further empowering the evolution of future information processing and intelligent computing system.},
}
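The reported anti-crossing can be read through the standard two-coupled-oscillator model (our notation, not necessarily the authors' exact Hamiltonian): with bare cavity and plasmon resonances ω_c and ω_p and coupling strength g, the two polariton branches are

```latex
\[
\omega_{\pm} \;=\; \frac{\omega_c + \omega_p}{2} \;\pm\; \sqrt{g^{2} + \frac{\delta^{2}}{4}},
\qquad \delta \equiv \omega_c - \omega_p ,
\]
\[
\left.\left(\omega_{+} - \omega_{-}\right)\right|_{\delta = 0} = 2g .
\]
```

The minimum splitting 2g at zero detuning marks the strong-coupling regime when it exceeds the mean linewidth of the bare modes, and detuning shifts each branch away from the bare cavity frequency, one plausible reading of the ∼6.2% frequency modulation reported above.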
@article {pmid38167901,
year = {2024},
author = {DeWitt, PE and Rebull, MA and Bennett, TD},
title = {Open source and reproducible and inexpensive infrastructure for data challenges and education.},
journal = {Scientific data},
volume = {11},
number = {1},
pages = {8},
pmid = {38167901},
issn = {2052-4463},
support = {K23 HD074620/HD/NICHD NIH HHS/United States ; R03 HD094912/HD/NICHD NIH HHS/United States ; },
abstract = {Data sharing is necessary to maximize the actionable knowledge generated from research data. Data challenges can encourage secondary analyses of datasets. Data challenges in biomedicine often rely on advanced cloud-based computing infrastructure and expensive industry partnerships. Examples include challenges that use Google Cloud virtual machines and the Sage Bionetworks Dream Challenges platform. Such robust infrastructures can be financially prohibitive for investigators without substantial resources. Given the potential to develop scientific and clinical knowledge and the NIH emphasis on data sharing and reuse, there is a need for inexpensive and computationally lightweight methods for data sharing and hosting data challenges. To fill that gap, we developed a workflow that allows for reproducible model training, testing, and evaluation. We leveraged public GitHub repositories, open-source computational languages, and Docker technology. In addition, we conducted a data challenge using the infrastructure we developed. In this manuscript, we report on the infrastructure, workflow, and data challenge results. The infrastructure and workflow are likely to be useful for data challenges and education.},
}
@article {pmid38166081,
year = {2024},
author = {Tian, Z and Qiu, L and Wang, L},
title = {Drivers and influencers of blockchain and cloud-based business sustainability accounting in China: Enhancing practices and promoting adoption.},
journal = {PloS one},
volume = {19},
number = {1},
pages = {e0295802},
pmid = {38166081},
issn = {1932-6203},
abstract = {The field of sustainability accounting aims to integrate environmental, social, and governance factors into financial reporting. With the growing importance of sustainability practices, emerging technologies have the potential to revolutionize reporting methods. However, there is a lack of research on the factors influencing the adoption of blockchain and cloud-based sustainability accounting in China. This study employs a mixed-methods approach to examine the key drivers and barriers to technology adoption for sustainability reporting among Chinese businesses. Through a systematic literature review, gaps in knowledge were identified. Primary data was collected through an online survey of firms, followed by in-depth case studies. The findings of the study reveal a positive relationship between company size and reporting behaviors. However, size alone is not sufficient to predict outcomes accurately. The industry type also has significant but small effects, although its impact on reporting behaviors varies. The relationship between profitability and reporting behaviors is intricate and contingent, requiring contextual examination. The adoption of blockchain technology is positively associated with capabilities, resources, skills, and regulatory factors. On the other hand, cloud computing adoption is linked to resources, management support, and risk exposures. However, the specific impacts of industry on adoption remain inconclusive. This study aims to offer empirical validation of relationships, shedding light on the intricate nature of interactions that necessitate nuanced conceptualizations incorporating contextual moderators. The findings underscore the importance of providing customized support and adaptable guidance to accommodate the evolving practices in sustainability accounting. Moreover, the assimilation of technology and organizational changes highlights the need for multifaceted stakeholder cooperation to drive responsible innovation and address the challenges posed by digital transformations in this field.},
}
@article {pmid38166050,
year = {2024},
author = {Alourani, A and Khalid, A and Tahir, M and Sardaraz, M},
title = {Energy efficient virtual machines placement in cloud datacenters using genetic algorithm and adaptive thresholds.},
journal = {PloS one},
volume = {19},
number = {1},
pages = {e0296399},
pmid = {38166050},
issn = {1932-6203},
mesh = {*Conservation of Energy Resources ; *Algorithms ; Cloud Computing ; },
abstract = {Cloud computing platforms provide on-demand IT services to users and have advanced the technology. The purpose of virtualization is to improve the utilization of resources and reduce power consumption. Energy consumption is a major issue faced by data center management, and virtual machine placement is an effective technique for addressing it. Different algorithms have been proposed for virtual machine placement in cloud environments, considering different parameters. Since improving one parameter affects others, data centers need solutions that reduce energy consumption without degrading other parameters, while effectively utilizing cloud resources. In this article, we present an algorithm for Virtual Machine (VM) placement in cloud computing. The algorithm uses adaptive thresholding to identify overutilized and underutilized hosts, to reduce energy consumption and Service Level Agreement (SLA) violations. The algorithm is validated with simulations, and comparative results are presented.},
}
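The abstract does not give the exact thresholding rule. One common way to make the over/under-utilization cutoffs adaptive is to derive them from recent utilization statistics, for example median ± k·MAD; the sketch below illustrates that idea, with all names and constants our own:

```python
import numpy as np

def adaptive_bounds(cpu_history, k=2.5):
    """Adaptive lower/upper utilization thresholds from recent CPU history,
    using median +/- k * median absolute deviation (MAD)."""
    u = np.asarray(cpu_history)
    med = np.median(u)
    mad = np.median(np.abs(u - med))
    return max(0.0, med - k * mad), min(1.0, med + k * mad)

def classify_hosts(histories):
    """Split hosts into under-/normal/over-utilized to drive VM migration."""
    labels = {}
    for host, hist in histories.items():
        lo, hi = adaptive_bounds(hist)
        now = hist[-1]
        labels[host] = "over" if now > hi else "under" if now < lo else "normal"
    return labels

histories = {
    "host-a": [0.52, 0.55, 0.58, 0.61, 0.93],  # spiking -> migration source
    "host-b": [0.34, 0.31, 0.30, 0.29, 0.08],  # idle    -> consolidation target
}
print(classify_hosts(histories))  # {'host-a': 'over', 'host-b': 'under'}
```

Because the bounds track each host's own recent behavior, a steady high-load host is not flagged the way a sudden spike is, which is what reduces needless migrations and SLA violations.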
@article {pmid38161217,
year = {2024},
author = {Zhang, X and Dou, Z and Kim, SH and Upadhyay, G and Havert, D and Kang, S and Kazemi, K and Huang, KY and Aydin, O and Huang, R and Rahman, S and Ellis-Mohr, A and Noblet, HA and Lim, KH and Chung, HJ and Gritton, HJ and Saif, MTA and Kong, HJ and Beggs, JM and Gazzola, M},
title = {Mind In Vitro Platforms: Versatile, Scalable, Robust, and Open Solutions to Interfacing with Living Neurons.},
journal = {Advanced science (Weinheim, Baden-Wurttemberg, Germany)},
volume = {11},
number = {11},
pages = {e2306826},
pmid = {38161217},
issn = {2198-3844},
support = {2123781//National Science Foundation/ ; 1830881//National Science Foundation/ ; },
mesh = {Electrodes ; *Brain/physiology ; *Neurons/physiology ; Electric Stimulation ; Electrophysiological Phenomena/physiology ; },
abstract = {Motivated by the unexplored potential of in vitro neural systems for computing and by the corresponding need of versatile, scalable interfaces for multimodal interaction, an accurate, modular, fully customizable, and portable recording/stimulation solution that can be easily fabricated, robustly operated, and broadly disseminated is presented. This approach entails a reconfigurable platform that works across multiple industry standards and that enables a complete signal chain, from neural substrates sampled through micro-electrode arrays (MEAs) to data acquisition, downstream analysis, and cloud storage. Built-in modularity supports the seamless integration of electrical/optical stimulation and fluidic interfaces. Custom MEA fabrication leverages maskless photolithography, favoring the rapid prototyping of a variety of configurations, spatial topologies, and constitutive materials. Through a dedicated analysis and management software suite, the utility and robustness of this system are demonstrated across neural cultures and applications, including embryonic stem cell-derived and primary neurons, organotypic brain slices, 3D engineered tissue mimics, concurrent calcium imaging, and long-term recording. Overall, this technology, termed "mind in vitro" to underscore the computing inspiration, provides an end-to-end solution that can be widely deployed due to its affordable (>10× cost reduction) and open-source nature, catering to the expanding needs of both conventional and unconventional electrophysiology.},
}
@article {pmid38155856,
year = {2023},
author = {Lai, H and Chen, B and Yin, X and Wang, G and Wang, X and Yun, T and Lan, G and Wu, Z and Yang, C and Kou, W},
title = {Dry season temperature and rainy season precipitation significantly affect the spatio-temporal pattern of rubber plantation phenology in Yunnan province.},
journal = {Frontiers in plant science},
volume = {14},
number = {},
pages = {1283315},
pmid = {38155856},
issn = {1664-462X},
abstract = {The ongoing global warming trajectory poses extensive challenges to plant ecosystems, with rubber plantations particularly vulnerable because phenology influences not only the longevity of the growth cycle and rubber yield, but also the complex interplay of carbon, water, and energy exchanges between the forest canopy and atmosphere. However, the response mechanism of rubber plantation phenology to climate change remains unclear. This study concentrates on rubber plantations in sub-optimal environments in Yunnan province, Southwest China. Utilizing the Google Earth Engine (GEE) cloud platform, multi-source remote sensing images were synthesized at 8-day intervals with a spatial resolution of 30 m. The Normalized Difference Vegetation Index (NDVI) time series was reconstructed using the Savitzky-Golay (S-G) filter, coupled with the seasonal amplitude method to extract three crucial phenological indicators, namely the start of the growing season (SOS), the end of the growing season (EOS), and the length of the growing season (LOS). Linear regression, Pearson correlation coefficients, and multiple stepwise regression analysis were used to extract phenology trends and to relate SOS and EOS to climate factors. The findings demonstrated that 1) the phenology of rubber plantations has undergone dynamic changes over the past two decades. Specifically, the SOS advanced by 9.4 days per decade (R[2] = 0.42, p < 0.01), whereas the EOS was delayed by 3.8 days per decade (R[2] = 0.35, p < 0.01). Additionally, the LOS was extended by 13.2 days per decade (R[2] = 0.55, p < 0.01); 2) rubber phenology demonstrated a notable sensitivity to temperature fluctuations during the dry season and precipitation patterns during the rainy season. The SOS advanced 2.0 days (r = -0.19, p < 0.01) and the EOS advanced 2.8 days (r = -0.35, p < 0.01) for every 1°C increase in cool-dry season temperature. Whereas a 100 mm increase in rainy season precipitation caused the SOS to be delayed by 2.0 days (r = 0.24, p < 0.01), a 100 mm increase in hot-dry season precipitation caused the EOS to be advanced by 7.0 days (r = -0.28, p < 0.01); 3) rubber phenology displayed a legacy effect of preseason climate variations. Changes in temperature during the fourth preseason month and precipitation during the fourth and eleventh preseason months are predominantly responsible for the variation in SOS, while temperature changes during the second, fourth, and ninth preseason months are primarily responsible for the variation in EOS. This study aims to enhance our understanding of how rubber plantations respond to climate change in sub-optimal environments and to provide valuable insights for sustainable rubber production management in the face of changing environmental conditions.},
}
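The S-G filtering and seasonal-amplitude extraction described above can be sketched with SciPy on a synthetic NDVI series. The 20% amplitude threshold, window length, and all data here are illustrative assumptions, not the study's calibrated values:

```python
import numpy as np
from scipy.signal import savgol_filter

# Toy 8-day NDVI series for one pixel over a year (46 composites).
doy = np.arange(46) * 8 + 1
ndvi = 0.45 + 0.25 * np.sin((doy - 90) / 365 * 2 * np.pi) \
       + 0.02 * np.random.randn(46)

smooth = savgol_filter(ndvi, window_length=9, polyorder=2)  # S-G reconstruction

# Seasonal-amplitude method: SOS/EOS where the curve crosses a fixed fraction
# (here 20%) of the seasonal amplitude above the annual minimum.
threshold = smooth.min() + 0.2 * (smooth.max() - smooth.min())
above = smooth >= threshold
sos = doy[np.argmax(above)]                         # first day above threshold
eos = doy[len(above) - 1 - np.argmax(above[::-1])]  # last day above threshold
print(f"SOS: day {sos}, EOS: day {eos}, LOS: {eos - sos} days")
```

Repeating this per pixel and per year yields the SOS/EOS/LOS maps whose trends the study then regresses against climate factors.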
@article {pmid38151930,
year = {2023},
author = {Wang, X and Li, Q and Ma, C and Zhang, S and Lin, Y and Li, J and Liu, C},
title = {[Artificial intelligence in wearable electrocardiogram monitoring].},
journal = {Sheng wu yi xue gong cheng xue za zhi = Journal of biomedical engineering = Shengwu yixue gongchengxue zazhi},
volume = {40},
number = {6},
pages = {1084-1092},
pmid = {38151930},
issn = {1001-5515},
mesh = {Humans ; Artificial Intelligence ; Reproducibility of Results ; Electrocardiography ; *Cardiovascular Diseases ; *Wearable Electronic Devices ; },
abstract = {Electrocardiogram (ECG) monitoring has important clinical value in the diagnosis, prevention, and rehabilitation of cardiovascular disease (CVD). With the rapid development of the Internet of Things (IoT), big data, cloud computing, artificial intelligence (AI), and other advanced technologies, wearable ECG is playing an increasingly important role. As the population ages, upgrading the diagnostic mode for CVD becomes more and more urgent. Using AI to assist the clinical analysis of long-term ECGs, and thus to improve early detection and prediction of CVD, has become an important direction. Intelligent wearable ECG monitoring requires collaboration between edge and cloud computing. Meanwhile, clearly defined clinical scenarios are conducive to the precise implementation of wearable ECG monitoring. This paper first summarized the progress of AI-related ECG studies and the current technical orientation. Then three cases were described to illustrate how AI in wearable ECG cooperates with clinical practice. Finally, we discussed two core issues, the reliability and value of AI-related ECG technology, and looked ahead to future opportunities and challenges.},
}
@article {pmid38146308,
year = {2024},
author = {Singh, S and Hou, F and Wang, R},
title = {Real and synthetic Punjabi speech datasets for automatic speech recognition.},
journal = {Data in brief},
volume = {52},
number = {},
pages = {109865},
doi = {10.1016/j.dib.2023.109865},
pmid = {38146308},
issn = {2352-3409},
abstract = {Automatic speech recognition (ASR) has been an active area of research. Training with large annotated datasets is key to the development of robust ASR systems. However, most available datasets focus on high-resource languages like English, leaving a significant gap for low-resource languages. Among these languages is Punjabi; despite its large number of speakers, Punjabi lacks high-quality annotated datasets for accurate speech recognition. To address this gap, we introduce three labeled Punjabi speech datasets: Punjabi Speech (a real speech dataset) and Google-synth/CMU-synth (synthesized speech datasets). The Punjabi Speech dataset consists of read speech recordings captured in various environments, including both studio and open settings. In addition, the Google-synth dataset is synthesized using Google's Punjabi text-to-speech cloud services. Furthermore, the CMU-synth dataset is created using the Clustergen model available in the Festival speech synthesis system developed by CMU. These datasets aim to facilitate the development of accurate Punjabi speech recognition systems, bridging the resource gap for this important language.},
}
@article {pmid38140780,
year = {2023},
author = {Li, B and Du, K and Qu, G and Tang, N},
title = {Big data research in nursing: A bibliometric exploration of themes and publications.},
journal = {Journal of nursing scholarship : an official publication of Sigma Theta Tau International Honor Society of Nursing},
volume = {},
number = {},
pages = {},
doi = {10.1111/jnu.12954},
pmid = {38140780},
issn = {1547-5069},
support = {22A320067//the Key Research Project in Higher Education in Henan, China/ ; SBGJ202103076//Medical science and technology public relations project jointly built by Henan Health Commission/ ; HLKY2023002//Nursing research Special Fund of the First Affiliated Hospital of Zhengzhou University/ ; },
abstract = {AIMS: To comprehend the current research hotspots and emerging trends in big data research within the global nursing domain.
DESIGN: Bibliometric analysis.
METHODS: Quality articles for analysis, indexed in the Web of Science Core Collection, were obtained from the Web of Science database as of February 10, 2023. Descriptive analysis, visual analysis, and text mining were carried out with CiteSpace and VOSviewer.
RESULTS: The research on big data in the nursing field has experienced steady growth over the past decade. A total of 45 core authors and 17 core journals around the world have contributed to this field. The author's keyword analysis has revealed five distinct clusters of research focus. These encompass machine/deep learning and artificial intelligence, natural language processing, big data analytics and data science, IoT and cloud computing, and the development of prediction models through data mining. Furthermore, a comparative examination was conducted with data spanning from 1980 to 2016, and an extended analysis was performed covering the years from 1980 to 2019. This bibliometric mapping comparison allowed for the identification of prevailing research trends and the pinpointing of potential future research hotspots within the field.
CONCLUSIONS: The fusion of data mining and nursing research has steadily advanced and become more refined over time. Technologically, it has expanded from initial natural language processing to encompass machine learning, deep learning, artificial intelligence, and data mining approaches that amalgamate multiple technologies. Professionally, it has progressed from addressing patient safety and pressure ulcers to encompassing chronic diseases, critical care, emergency response, community and nursing home settings, and specific diseases (cardiovascular diseases, diabetes, stroke, etc.). The convergence of IoT, cloud computing, fog computing, and big data processing has opened new avenues for research in geriatric nursing management and community care. However, a global imbalance exists in utilizing big data in nursing research, emphasizing the need to enhance data science literacy among clinical staff worldwide to advance this field.
CLINICAL RELEVANCE: This study examined the thematic trends and evolution of big data research in nursing. It may also help researchers, journals, and countries around the world understand the field and identify potential collaborations to promote the development of big data in nursing science.},
}
@article {pmid38139731,
year = {2023},
author = {Yang, X and Fang, H and Gao, Y and Wang, X and Wang, K and Liu, Z},
title = {Computation Offloading and Resource Allocation Based on P-DQN in LEO Satellite Edge Networks.},
journal = {Sensors (Basel, Switzerland)},
volume = {23},
number = {24},
pages = {},
pmid = {38139731},
issn = {1424-8220},
support = {2020YFB1808003//National Key Research and Development Program of China/ ; 61801379//National Natural Science Foundation of China/ ; 2020JQ-647//Natural Science Foundation of Shaanxi Province of China/ ; },
abstract = {Traditional low Earth orbit (LEO) satellite networks are typically independent of terrestrial networks and have developed relatively slowly due to on-board capacity limitations. By integrating emerging mobile edge computing (MEC) with LEO satellite networks to form a business-oriented "end-edge-cloud" multi-level computing architecture, some computing-sensitive tasks can be offloaded by ground terminals to satellites, thereby satisfying more tasks in the network. Nevertheless, making computation offloading and resource allocation decisions in LEO satellite edge networks poses challenges in tracking network dynamics and handling sophisticated actions. For the discrete-continuous hybrid action space and time-varying networks, this work uses the parameterized deep Q-network (P-DQN) for joint computation offloading and resource allocation. First, the characteristics of time-varying channels are modeled, and both communication and computation models under three different offloading decisions are constructed. Second, the constraints on task offloading decisions, on remaining available computing resources, and on the power control of LEO satellites as well as the cloud server are formulated, followed by the long-run maximization of the number of satisfied tasks. Third, using the parameterized action Markov decision process (PAMDP) and P-DQN, joint computation offloading, resource allocation, and power control decisions are made in real time, to accommodate the dynamics of LEO satellite edge networks and to handle the discrete-continuous hybrid action space. Simulation results show that the proposed P-DQN method approaches the optimal control and outperforms other reinforcement learning (RL) methods designed for purely discrete or purely continuous action spaces, in terms of the long-term rate of satisfied tasks.},
}
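The hybrid action selection at the core of P-DQN can be sketched compactly: an actor emits continuous parameters (e.g., transmit power) for every discrete offloading choice (local/edge/cloud), and a Q-network scores each (choice, parameters) pair. A minimal PyTorch illustration with hypothetical dimensions; training losses, replay, and targets are omitted:

```python
import torch
import torch.nn as nn

STATE_DIM, N_ACTIONS, PARAM_DIM = 8, 3, 2  # hypothetical sizes

class ParamActor(nn.Module):
    """Outputs a continuous parameter vector for every discrete action."""
    def __init__(self):
        super().__init__()
        self.net = nn.Sequential(nn.Linear(STATE_DIM, 64), nn.ReLU(),
                                 nn.Linear(64, N_ACTIONS * PARAM_DIM), nn.Tanh())
    def forward(self, s):
        return self.net(s).view(-1, N_ACTIONS, PARAM_DIM)

class QNet(nn.Module):
    """Scores each discrete action given the state and that action's params."""
    def __init__(self):
        super().__init__()
        self.net = nn.Sequential(nn.Linear(STATE_DIM + PARAM_DIM, 64), nn.ReLU(),
                                 nn.Linear(64, 1))
    def forward(self, s, x):  # s: (B, STATE_DIM), x: (B, N_ACTIONS, PARAM_DIM)
        s_rep = s.unsqueeze(1).expand(-1, N_ACTIONS, -1)
        return self.net(torch.cat([s_rep, x], dim=-1)).squeeze(-1)  # (B, N_ACTIONS)

actor, critic = ParamActor(), QNet()
state = torch.randn(1, STATE_DIM)      # toy network observation
params = actor(state)                  # continuous part (e.g., normalized power)
q_values = critic(state, params)       # one Q-value per offloading choice
k = q_values.argmax(dim=-1).item()     # discrete part (local / edge / cloud)
print(k, params[0, k].tolist())
```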
@article {pmid38139716,
year = {2023},
author = {Aldaej, A and Ahanger, TA and Ullah, I},
title = {Deep Learning-Inspired IoT-IDS Mechanism for Edge Computing Environments.},
journal = {Sensors (Basel, Switzerland)},
volume = {23},
number = {24},
pages = {},
pmid = {38139716},
issn = {1424-8220},
support = {2022/01/21723//Prince Sattam Bin Abdulaziz University/ ; },
abstract = {The Internet of Things (IoT) field has seen substantial research into Deep Learning (DL) techniques for detecting cyberattacks. Critical Infrastructures (CIs) must be able to detect cyberattacks quickly, close to the edge devices, in order to prevent service interruptions. DL approaches outperform shallow machine learning techniques in attack detection, making them a viable option for intrusion detection. However, because of the massive amount of IoT data and the computational requirements of DL models, transmission overheads prevent the successful deployment of DL models close to the devices. Moreover, because they were not trained on pertinent IoT data, current Intrusion Detection Systems (IDS) either use conventional techniques or are not designed for distributed edge-cloud deployment. A new edge-cloud-based IoT IDS is suggested to address these issues. It uses distributed processing to separate the dataset into subsets appropriate to different attack classes and performs attribute selection on time-series IoT data. Next, DL is used to train an attack-detection model consisting of a Recurrent Neural Network (RNN) and a Bidirectional Long Short-Term Memory (Bi-LSTM) network. The high-dimensional BoT-IoT dataset, which replicates massive amounts of genuine IoT attack traffic, is used to test the proposed model. Despite an 85 percent reduction in dataset size made possible by attribute selection approaches, the attack detection capability was kept intact. The models built using the smaller dataset demonstrated a higher recall rate (98.25%), F1-measure (99.12%), accuracy (99.56%), and precision (99.45%), with no loss in class discrimination performance compared to models trained on the entire attribute set. With the smaller attribute space, neither the RNN nor the Bi-LSTM models experienced underfitting or overfitting. The proposed DL-based IoT intrusion detection solution can scale efficiently in the face of large volumes of IoT data, making it an ideal candidate for edge-cloud deployment.},
}
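A compact Keras sketch of an RNN-plus-Bi-LSTM detector in the spirit of the one described above; the window length, feature count after attribute selection, class count, and layer sizes are hypothetical, and the BoT-IoT preprocessing is omitted:

```python
import tensorflow as tf

TIMESTEPS, FEATURES, CLASSES = 10, 20, 5  # assumed post-attribute-selection shape

model = tf.keras.Sequential([
    tf.keras.layers.Input(shape=(TIMESTEPS, FEATURES)),
    tf.keras.layers.SimpleRNN(64, return_sequences=True),     # RNN stage
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64)),  # Bi-LSTM stage
    tf.keras.layers.Dense(CLASSES, activation="softmax"),     # attack classes
])
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy",
              metrics=["accuracy"])
model.summary()
# model.fit(X_train, y_train, validation_split=0.1, epochs=10)
# where X_train holds sliding windows of selected traffic attributes.
```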
@article {pmid38139704,
year = {2023},
author = {Peixoto, J and Sousa, J and Carvalho, R and Santos, G and Cardoso, R and Reis, A},
title = {End-to-End Solution for Analog Gauge Monitoring Using Computer Vision in an IoT Platform.},
journal = {Sensors (Basel, Switzerland)},
volume = {23},
number = {24},
pages = {},
pmid = {38139704},
issn = {1424-8220},
support = {POCI-01-0247-FEDER-047091-GRS: Glartek Retrofit Sensors//Fundo Europeu de Desenvolvimento Regional (FEDER)/ ; },
abstract = {The emergence of Industry 4.0 and 5.0 technologies has enabled the digital transformation of various processes and the integration of sensors with the internet. Despite these strides, many industrial sectors still rely on visual inspection of physical processes, especially those employing analog gauges. This method of monitoring introduces the risk of human error and inefficiency. Automating these processes has the potential not only to boost productivity for companies but also to reduce risks for workers. Therefore, this paper proposes an end-to-end solution to digitize analog gauges and monitor them using computer vision, integrating them into an IoT architecture to tackle these problems. Our prototype device has been designed to capture images of gauges and transmit them to a remote server, where computer vision algorithms analyze the images and obtain gauge readings. These algorithms achieved adequate robustness and accuracy for industrial environments, with an average relative error of 0.95%. In addition, the gauge data were seamlessly integrated into an IoT platform leveraging computer vision and cloud computing technologies. This integration empowers users to create custom dashboards for real-time gauge monitoring, while also enabling them to set thresholds, alarms, and warnings as needed. The proposed solution was tested and validated in a real-world industrial scenario, demonstrating its potential to be implemented at scale to serve workers, reduce costs, and increase productivity.},
}
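One plausible core of such a gauge reader uses classical OpenCV primitives: detect line segments, take the one anchored at the dial center as the needle, and map its angle linearly to a value. A simplified sketch that assumes the dial center and the angle-to-value calibration are already known (which a full pipeline would detect):

```python
import cv2
import numpy as np

def read_gauge(image_bgr, center, angle_min=-45.0, angle_max=225.0,
               value_min=0.0, value_max=10.0):
    """Estimate an analog gauge reading from the needle angle.
    `center` and the angle/value calibration are assumed to be known."""
    gray = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(gray, 50, 150)
    lines = cv2.HoughLinesP(edges, 1, np.pi / 180, threshold=40,
                            minLineLength=30, maxLineGap=5)
    if lines is None:
        return None
    cx, cy = center
    # Needle = detected segment whose nearest endpoint is closest to the center.
    best = min(lines[:, 0, :],
               key=lambda l: min(np.hypot(l[0] - cx, l[1] - cy),
                                 np.hypot(l[2] - cx, l[3] - cy)))
    x1, y1, x2, y2 = best
    tip = (x2, y2) if np.hypot(x2 - cx, y2 - cy) > np.hypot(x1 - cx, y1 - cy) \
          else (x1, y1)
    angle = np.degrees(np.arctan2(cy - tip[1], tip[0] - cx))  # image y points down
    frac = (angle_max - angle) / (angle_max - angle_min)
    return value_min + frac * (value_max - value_min)
```

The reading would then be published to the IoT platform, where thresholds and alarms are evaluated against it.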
@article {pmid38139612,
year = {2023},
author = {Ju, S and Park, Y},
title = {Provably Secure Lightweight Mutual Authentication and Key Agreement Scheme for Cloud-Based IoT Environments.},
journal = {Sensors (Basel, Switzerland)},
volume = {23},
number = {24},
pages = {},
pmid = {38139612},
issn = {1424-8220},
support = {2022//Keimyung University/ ; },
abstract = {A paradigm that combines cloud computing and the Internet of Things (IoT) allows more impressive services to be provided to users while addressing storage and computational resource issues in IoT environments. This cloud-based IoT environment has been used in various industries, including public services, for quite some time, and has been researched in academia. However, various security issues can arise during the communication between IoT devices and cloud servers, because communication between devices occurs over open channels. Moreover, issues such as theft of a user's IoT device or extraction of key parameters from the user's device in a remote location can arise. Researchers interested in these issues have proposed lightweight mutual authentication key agreement protocols that are safe and suitable for IoT environments. Recently, a lightweight authentication scheme between IoT devices and cloud servers was presented. However, we found that this scheme had various security vulnerabilities: it was vulnerable to insider, impersonation, verification-table-leakage, and privileged-insider attacks, and did not provide users with untraceability. To address these flaws, we propose a provably secure lightweight authentication scheme. The proposed scheme uses the user's biometric information and the cloud server's secret key to prevent the exposure of key parameters. Additionally, it ensures low computational cost, providing users with real-time, fast services using only exclusive-OR operations and hash functions in IoT environments. To analyze the safety of the proposed scheme, we use informal security analysis, Burrows-Abadi-Needham (BAN) logic, and a Real-or-Random (RoR) model. The analysis results confirm that our scheme is secure against insider attacks, impersonation attacks, stolen-verifier attacks, and so on; furthermore, it provides additional security elements. At the same time, its communication cost is verified to improve: the total message size is shortened to 3776 bits, almost 6% better than Wu et al.'s scheme. Therefore, we demonstrate that the proposed scheme is suitable for cloud-based IoT environments.},
}
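The flavor of a hash-and-XOR construction like the one described above can be shown in a few lines. This is an illustrative fragment under our own assumptions, not the paper's actual protocol; real schemes add identities, timestamps, and session-key derivation:

```python
import hashlib
import os

H = lambda *parts: hashlib.sha256(b"".join(parts)).digest()
xor = lambda a, b: bytes(x ^ y for x, y in zip(a, b))

# Long-term values (illustrative): server secret key and the user's biometric
# digest; the device stores only `masked`, never the raw authentication key.
server_key = os.urandom(32)
bio_digest = H(b"user-biometric-template")
auth_key = H(server_key, b"device-001")
masked = xor(auth_key, bio_digest)       # parameter kept on the device

# Login: the device reconstructs the key from the live biometric, then proves
# freshness with a nonce-bound hash (no raw secret crosses the channel).
nonce = os.urandom(16)
recovered = xor(masked, bio_digest)
proof = H(recovered, nonce)

# Server side: recomputes the same proof from its own secret key.
assert proof == H(H(server_key, b"device-001"), nonce)
print("mutual-authentication message verified")
```

Because only hashes and XORs are involved, both sides stay within the cheap-operation budget the scheme targets for resource-constrained IoT devices.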
@article {pmid38139476,
year = {2023},
author = {Zhang, T and Fan, Y},
title = {A 3D U-Net Based on a Vision Transformer for Radar Semantic Segmentation.},
journal = {Sensors (Basel, Switzerland)},
volume = {23},
number = {24},
pages = {},
pmid = {38139476},
issn = {1424-8220},
support = {61976033//National Natural Science Foundation of China/ ; 2022JH24/10200029//Pilot Base Construction and Pilot Verification Plan Program of Liaoning Province of China/ ; 2019JH8/10100100//Key Development Guidance Program of Liaoning Province of China/ ; 2022M710569//China Postdoctoral Science Foundation/ ; },
abstract = {Radar data can be presented in various forms, unlike visible data. In the field of radar target recognition, most current work involves point cloud data due to computing limitations, but this form of data lacks useful information. This paper proposes a semantic segmentation network to process high-dimensional data and enable automatic radar target recognition. Rather than relying on point cloud data, which is common in current radar automatic target recognition algorithms, the paper suggests using a radar heat map of high-dimensional data to increase the efficiency of radar data use. The radar heat map provides more complete information than point cloud data, leading to more accurate classification results. Additionally, this paper proposes a dimension collapse module based on a vision transformer for feature extraction between two modules with dimension differences during dimension changes in high-dimensional data. This module is easily extendable to other networks with high-dimensional data collapse requirements. The network's performance is verified using a real radar dataset, showing that the radar semantic segmentation network based on a vision transformer has better performance and fewer parameters compared to segmentation networks that use other dimensional collapse methods.},
}
@article {pmid38136978,
year = {2023},
author = {Song, Y and Zhong, S and Li, Y and Jiang, M and Wei, Q},
title = {Constructing an Interactive and Integrated Analysis and Identification Platform for Pathogenic Microorganisms to Support Surveillance Capacity.},
journal = {Genes},
volume = {14},
number = {12},
pages = {},
pmid = {38136978},
issn = {2073-4425},
support = {2022YFC2602200//Supported by National Key Research and Development Program of China/ ; },
mesh = {*Software ; *User-Computer Interface ; Genomics/methods ; Computational Biology/methods ; Genome ; },
abstract = {INTRODUCTION: Whole genome sequencing (WGS) holds significant promise for epidemiological inquiries, as it enables the identification and tracking of pathogenic origins and dissemination through comprehensive genome analysis. This method is widely preferred for investigating outbreaks and monitoring pathogen activity. However, the effective utilization of microbiome sequencing data remains a challenge for clinical and public health experts. Through the National Pathogen Resource Center, we have constructed a dynamic and interactive online analysis platform to facilitate the in-depth analysis and use of pathogen genomic data by public health and associated professionals, supporting the building of infectious disease surveillance frameworks and warning capacity.
METHOD: The platform was implemented using the Java programming language, and the front-end pages were developed using the VUE framework, following the MVC (Model-View-Controller) pattern to enable interactive service functionalities for front-end data collection and back-end data computation. Cloud computing services were employed to integrate biological information analysis tools for conducting fundamental analysis on sequencing data.
RESULT: The platform achieved the goal of non-programming analysis, providing an interactive visual interface that allows users to visually obtain results by setting parameters in web pages. Moreover, the platform allows users to export results in various formats to further support their research.
DISCUSSION: We have established a dynamic and interactive online platform for bioinformatics analysis. By encapsulating the complex background experiments and analysis processes in a cloud-based service platform, these processes are presented to the end user in a simple and interactive manner. The platform facilitates real-time data mining and analysis by allowing users to independently select parameters and generate analysis results at the click of a button, according to their needs and without requiring a programming background.},
}
@article {pmid38136521,
year = {2023},
author = {Xia, C and Jin, X and Xu, C and Zeng, P},
title = {Computational-Intelligence-Based Scheduling with Edge Computing in Cyber-Physical Production Systems.},
journal = {Entropy (Basel, Switzerland)},
volume = {25},
number = {12},
pages = {},
pmid = {38136521},
issn = {1099-4300},
support = {61903356//National Natural Science Foundation of China/ ; },
abstract = {Real-time performance and reliability are two critical indicators in cyber-physical production systems (CPPS). To meet strict requirements in terms of these indicators, it is necessary to solve complex job-shop scheduling problems (JSPs) and reserve considerable redundant resources for unexpected jobs before production. However, traditional job-shop methods are difficult to apply under dynamic conditions due to the uncertain time cost of transmission and computation. Edge computing offers an efficient solution to this issue. By deploying edge servers around the equipment, smart factories can achieve localized decisions based on computational intelligence (CI) methods offloaded from the cloud. Most works on edge computing have studied task offloading and dispatching scheduling based on CI. However, few of the existing methods can be used for behavior-level control due to the corresponding requirements for ultralow latency (10 ms) and ultrahigh reliability (99.9999% in wireless transmission), especially when unexpected computing jobs arise. Therefore, this paper proposes a dynamic resource prediction scheduling (DRPS) method based on CI to achieve real-time localized behavior-level control. The proposed DRPS method primarily focuses on the schedulability of unexpected computing jobs, and its core ideas are (1) to predict job arrival times based on a backpropagation neural network and (2) to perform real-time migration in the form of human-computer interaction based on the results of resource analysis. An experimental comparison with existing schemes shows that our DRPS method improves the acceptance ratio by 25.9% compared to the earliest deadline first scheme.},
}
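The first core idea above, forecasting job arrival times with a backpropagation-trained network, can be sketched with scikit-learn. Window size, the synthetic workload model, and the error metric are our assumptions, not the paper's setup:

```python
import numpy as np
from sklearn.neural_network import MLPRegressor

# Toy history of job inter-arrival times (seconds) on one edge server.
rng = np.random.default_rng(0)
arrivals = rng.exponential(scale=2.0, size=500)

WINDOW = 8  # predict the next inter-arrival time from the last 8
X = np.stack([arrivals[i:i + WINDOW] for i in range(len(arrivals) - WINDOW)])
y = arrivals[WINDOW:]

# Backpropagation-trained feedforward network (MLP), in the spirit of DRPS.
net = MLPRegressor(hidden_layer_sizes=(32, 16), max_iter=800, random_state=0)
net.fit(X[:-50], y[:-50])
pred = net.predict(X[-50:])
print(f"mean abs. error on held-out tail: {np.abs(pred - y[-50:]).mean():.2f} s")
# The predicted arrival time would feed the migration/admission decision step.
```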
@article {pmid38136475,
year = {2023},
author = {Kang, H and Liu, G and Wang, Q and Meng, L and Liu, J},
title = {Theory and Application of Zero Trust Security: A Brief Survey.},
journal = {Entropy (Basel, Switzerland)},
volume = {25},
number = {12},
pages = {},
pmid = {38136475},
issn = {1099-4300},
abstract = {As cross-border access becomes more frequent, traditional perimeter-based network security models can no longer cope with evolving security requirements. Zero trust is a novel paradigm for cybersecurity based on the core concept of "never trust, always verify". It attempts to protect against security risks related to internal threats by eliminating the demarcations between the internal and external network of traditional network perimeters. Nevertheless, research on the theory and application of zero trust is still in its infancy, and more extensive research is necessary to facilitate a deeper understanding of the paradigm in academia and the industry. In this paper, trust in cybersecurity is discussed, following which the origin, concepts, and principles related to zero trust are elaborated on. The characteristics, strengths, and weaknesses of the existing research are analysed in the context of zero trust achievements and their technical applications in Cloud and IoT environments. Finally, to support the development and application of zero trust in the future, the concept and its current challenges are analysed.},
}
@article {pmid38134209,
year = {2024},
author = {Wang, J and Hu, Y and Xiang, L and Morota, G and Brooks, SA and Wickens, CL and Miller-Cushon, EK and Yu, H},
title = {Technical note: ShinyAnimalCV: open-source cloud-based web application for object detection, segmentation, and three-dimensional visualization of animals using computer vision.},
journal = {Journal of animal science},
volume = {102},
number = {},
pages = {},
pmid = {38134209},
issn = {1525-3163},
mesh = {Animals ; *Cloud Computing ; *Imaging, Three-Dimensional/veterinary ; Software ; Computers ; Animal Husbandry ; Livestock ; },
abstract = {Computer vision (CV), a non-intrusive and cost-effective technology, has furthered the development of precision livestock farming by enabling optimized decision-making through timely and individualized animal care. The availability of affordable two- and three-dimensional camera sensors, combined with various machine learning and deep learning algorithms, has provided a valuable opportunity to improve livestock production systems. However, despite the availability of various CV tools in the public domain, applying these tools to animal data can be challenging, often requiring users to have programming and data analysis skills, as well as access to computing resources. Moreover, the rapid expansion of precision livestock farming is creating a growing need to educate and train animal science students in CV. This presents educators with the challenge of efficiently demonstrating the complex algorithms involved in CV. Thus, the objective of this study was to develop ShinyAnimalCV, an open-source cloud-based web application designed to facilitate CV teaching in animal science. This application provides a user-friendly interface for performing CV tasks, including object segmentation, detection, three-dimensional surface visualization, and extraction of two- and three-dimensional morphological features. Nine pre-trained CV models using top-view animal data are included in the application. ShinyAnimalCV has been deployed online using cloud computing platforms. The source code of ShinyAnimalCV is available on GitHub, along with detailed documentation on training CV models using custom data and deploying ShinyAnimalCV locally to allow users to fully leverage the capabilities of the application. ShinyAnimalCV can help to support the teaching of CV, thereby laying the groundwork to promote the adoption of CV in the animal science community.},
}
@article {pmid38133241,
year = {2023},
author = {Afonso, CL and Afonso, AM},
title = {Next-Generation Sequencing for the Detection of Microbial Agents in Avian Clinical Samples.},
journal = {Veterinary sciences},
volume = {10},
number = {12},
pages = {},
pmid = {38133241},
issn = {2306-7381},
abstract = {Direct-targeted next-generation sequencing (tNGS), with its undoubtedly superior diagnostic capacity over real-time PCR (RT-PCR), and direct-non-targeted NGS (ntNGS), with its higher capacity to identify and characterize multiple agents, are both likely to become diagnostic methods of choice in the future. tNGS is a rapid and sensitive method for precise characterization of suspected agents. ntNGS, also known as agnostic diagnosis, does not require a hypothesis and has been used to identify unsuspected infections in clinical samples. Implemented in the form of multiplexed total DNA metagenomics or as total RNA sequencing, the approach produces comprehensive and actionable reports that allow semi-quantitative identification of most of the agents present in respiratory, cloacal, and tissue samples. The diagnostic benefits of the use of direct tNGS and ntNGS are high specificity, compatibility with different types of clinical samples (fresh, frozen, FTA cards, and paraffin-embedded), production of nearly complete infection profiles (viruses, bacteria, fungi, and parasites), production of "semi-quantitative" information, direct agent genotyping, and infectious agent mutational information. The achievements of NGS in terms of diagnosing poultry problems are described here, along with future applications. Multiplexing, development of standard operating procedures, robotics, sequencing kits, automated bioinformatics, cloud computing, and artificial intelligence (AI) are disciplines converging toward the use of this technology for active surveillance in poultry farms. Other advances in human and veterinary NGS are likely to be adaptable to avian species in the future.},
}
@article {pmid38126383,
year = {2023},
author = {Fonseca, ELD and Santos, ECD and Figueiredo, AR and Simões, JC},
title = {The use of sentinel-2 imagery to generate vegetations maps for the Northern Antarctic peninsula and offshore islands.},
journal = {Anais da Academia Brasileira de Ciencias},
volume = {95},
number = {suppl 3},
pages = {e20230710},
doi = {10.1590/0001-3765202320230710},
pmid = {38126383},
issn = {1678-2690},
mesh = {Antarctic Regions ; *Plants ; *Bryophyta ; },
abstract = {We used a Sentinel-2 imagery time series to generate a vegetation map for the northern part of the Antarctic Peninsula and offshore islands, including the South Shetlands. The vegetation cover was identified in the NDVI maximum value composite image. The NDVI values were associated with the occurrence of algae (0.15 - 0.20), lichens (0.20 - 0.50), and mosses (0.50 - 0.80). The vegetation cover distribution map was validated using information from the literature. Generating a vegetation distribution map on an annual basis was not possible due to high cloud cover in the Antarctic region, especially in coastal areas, so optical images from 2016 to 2021 were necessary to map the vegetation distribution across the entire study area. The final map, analyzed in association with the weather data, shows the occurrence of a microenvironment over the western islands of the Antarctic Peninsula that provided conditions for vegetation growth. Sentinel-2 images with 10 m spatial resolution allow the assembly of accurate vegetation distribution maps for the Antarctic Peninsula and islands, with Google Earth Engine cloud computing being essential for processing the large number of satellite images required to produce these maps.},
}
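The compositing and thresholding workflow this abstract describes maps naturally onto the Google Earth Engine Python API. In the sketch below, the collection ID, date window, cloud filter, and rectangle are illustrative assumptions rather than the authors' exact processing chain; only the NDVI class thresholds come from the abstract.

    import ee
    ee.Initialize()

    # Assumed area of interest around the northern Antarctic Peninsula.
    region = ee.Geometry.Rectangle([-64.0, -65.5, -56.0, -61.0])

    def add_ndvi(img):
        # Sentinel-2 bands: B8 = near-infrared, B4 = red.
        return img.addBands(img.normalizedDifference(['B8', 'B4']).rename('NDVI'))

    s2 = (ee.ImageCollection('COPERNICUS/S2_SR')
          .filterBounds(region)
          .filterDate('2016-01-01', '2021-12-31')
          .filter(ee.Filter.lt('CLOUDY_PIXEL_PERCENTAGE', 20))
          .map(add_ndvi))

    # Maximum value composite of NDVI over the multi-year series.
    ndvi_max = s2.select('NDVI').max()

    # Classes per the abstract's thresholds:
    # 1 = algae (0.15-0.20), 2 = lichens (0.20-0.50), 3 = mosses (0.50-0.80).
    vegetation = (ee.Image(0)
                  .where(ndvi_max.gte(0.15).And(ndvi_max.lt(0.20)), 1)
                  .where(ndvi_max.gte(0.20).And(ndvi_max.lt(0.50)), 2)
                  .where(ndvi_max.gte(0.50).And(ndvi_max.lte(0.80)), 3)
                  .clip(region))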
@article {pmid38124874,
year = {2023},
author = {Computational Intelligence And Neuroscience},
title = {Retracted: Blockchain-Based Trust Management Framework for Cloud Computing-Based Internet of Medical Things (IoMT): A Systematic Review.},
journal = {Computational intelligence and neuroscience},
volume = {2023},
number = {},
pages = {9867976},
pmid = {38124874},
issn = {1687-5273},
abstract = {[This retracts the article DOI: 10.1155/2022/9766844.].},
}
@article {pmid38124577,
year = {2023},
author = {Niu, S and Liu, W and Yan, S and Liu, Q},
title = {Message sharing scheme based on edge computing in IoV.},
journal = {Mathematical biosciences and engineering : MBE},
volume = {20},
number = {12},
pages = {20809-20827},
doi = {10.3934/mbe.2023921},
pmid = {38124577},
issn = {1551-0018},
abstract = {With the rapid development of 5G wireless communication and sensing technology, the Internet of Vehicles (IoV) will establish a widespread network between vehicles and roadside infrastructure. The collected road information is transferred to the cloud server with the assistance of roadside infrastructure, where it is stored and made available to other vehicles as a resource. However, in an open cloud environment, message confidentiality and vehicle identity privacy are severely compromised, and current attribute-based encryption algorithms still burden vehicles with large computational costs. In order to resolve these issues, we propose a message-sharing scheme in IoV based on edge computing. First, we utilize attribute-based encryption techniques to protect the messages being delivered, and we introduce edge computing, in which the vehicle outsources some encryption and decryption operations to roadside units to reduce its computational load. Second, to guarantee the integrity of the message and the security of the vehicle identity, we utilize anonymous identity-based signature technology. At the same time, we can batch-verify messages, which further reduces the time and transmission cost of verifying a large number of message signatures. Based on the computational Diffie-Hellman problem, it is demonstrated that the proposed scheme is secure under the random oracle model. Finally, the performance analysis results show that our work is more computationally efficient compared to existing schemes and is more suitable for actual vehicle networking.},
}
@article {pmid38114166,
year = {2023},
author = {Ma, XR and Wang, BX and Zhao, WS and Cong, DG and Sun, W and Xiong, HS and Zhang, SN},
title = {[Application progress on data-driven technologies in intelligent manufacturing of traditional Chinese medicine extraction].},
journal = {Zhongguo Zhong yao za zhi = Zhongguo zhongyao zazhi = China journal of Chinese materia medica},
volume = {48},
number = {21},
pages = {5701-5706},
doi = {10.19540/j.cnki.cjcmm.20230824.601},
pmid = {38114166},
issn = {1001-5302},
mesh = {*Medicine, Chinese Traditional ; *Drugs, Chinese Herbal ; Quality Control ; Big Data ; Algorithms ; },
abstract = {The application of new-generation information technologies such as big data, the internet of things(IoT), and cloud computing in the traditional Chinese medicine(TCM)manufacturing industry is gradually deepening, driving the intelligent transformation and upgrading of the TCM industry. At the current stage, there are challenges in understanding the extraction process and its mechanisms in TCM. Online detection technology faces difficulties in making breakthroughs, and data throughout the entire production process is scattered, lacking valuable mining and utilization, which significantly hinders the intelligent upgrading of the TCM industry. Applying data-driven technologies in the process of TCM extraction can enhance the understanding of the extraction process, achieve precise control, and effectively improve the quality of TCM products. This article analyzed the technological bottlenecks in the production process of TCM extraction, summarized commonly used data-driven algorithms in the research and production control of extraction processes, and reviewed the progress in the application of data-driven technologies in the following five aspects: mechanism analysis of the extraction process, process development and optimization, online detection, process control, and production management. This article is expected to provide references for optimizing the extraction process and intelligent production of TCM.},
}
@article {pmid38113434,
year = {2024},
author = {Brown, C and Agarwal, A and Luque, A},
title = {pyCapsid: identifying dominant dynamics and quasi-rigid mechanical units in protein shells.},
journal = {Bioinformatics (Oxford, England)},
volume = {40},
number = {1},
pages = {},
pmid = {38113434},
issn = {1367-4811},
support = {1951678//National Science Foundation/ ; GBMF9871//Gordon and Betty Moore Foundation/ ; },
mesh = {*Software ; *Proteins ; Amino Acids ; Documentation ; },
abstract = {SUMMARY: pyCapsid is a Python package developed to facilitate the characterization of the dynamics and quasi-rigid mechanical units of protein shells and other protein complexes. The package was developed in response to the rapid increase of high-resolution structures, particularly capsids of viruses, requiring multiscale biophysical analyses. Given a protein shell, pyCapsid generates the collective vibrations of its amino-acid residues, identifies quasi-rigid mechanical regions associated with the disassembly of the structure, and maps the results back to the input proteins for interpretation. pyCapsid summarizes the main results in a report that includes publication-quality figures.
AVAILABILITY AND IMPLEMENTATION: pyCapsid's source code is available under MIT License on GitHub. It is compatible with Python 3.8-3.10 and has been deployed in two leading Python package-management systems, PIP and Conda. Installation instructions and tutorials are available in the online documentation and in pyCapsid's YouTube playlist. In addition, a cloud-based implementation of pyCapsid is available as a Google Colab notebook. pyCapsid Colab does not require installation and generates the same report and outputs as the installable version. Users can post issues regarding pyCapsid in the repository's issues section.},
}
@article {pmid38113067,
year = {2023},
author = {Faisal, S and Samoth, D and Aslam, Y and Patel, H and Park, S and Baby, B and Patel, T},
title = {Key Features of Smart Medication Adherence Products: Updated Scoping Review.},
journal = {JMIR aging},
volume = {6},
number = {},
pages = {e50990},
pmid = {38113067},
issn = {2561-7605},
abstract = {BACKGROUND: Older adults often face challenges in self-managing their medication owing to physical and cognitive limitations, complex medication regimens, and packaging of medications. Emerging smart medication dispensing and adherence products (SMAPs) offer the options of automated dispensing, tracking medication intake in real time, and reminders and notifications. A 2021 review identified 51 SMAPs; owing to the rapid influx of digital technology, an update to this review is required.
OBJECTIVE: This review aims to identify new products and summarize and compare the key features of SMAPs.
METHODS: Gray and published literature and videos were searched using Google, YouTube, PubMed, Embase, and Scopus. The first 10 pages of Google and the first 100 results of YouTube were screened using 4 and 5 keyword searches, respectively. SMAPs were included if they were able to store and allowed for the dispensation of medications, tracked real-time medication intake data, and could automatically analyze data. Products were excluded if they were stand-alone software applications, not marketed in English, not for in-home use, or only used in clinical trials. In total, 5 researchers independently screened and extracted the data.
RESULTS: This review identified 114 SMAPs, including 80 (70.2%) marketed and 34 (29.8%) prototypes, grouped into 15 types. Among the marketed products, 68% (54/80) were available for consumer purchase. Of these products, 26% (14/54) were available worldwide and 78% (42/54) were available in North America. There was variability in the hardware, software, data collection and management features, and cost of the products. Examples of hardware features include battery life, medication storage capacity, availability of types and number of alarms, locking features, and additional technology required for use of the product, whereas software features included reminder and notification capabilities and availability of manufacturer support. Data capture methods included the availability of sensors to record the use of the product and data-syncing capabilities with cloud storage with short-range communications. Data were accessible to users via mobile apps or web-based portals. Some SMAPs provided data security assurance with secure log-ins (use of personal identification numbers or facial recognition), whereas other SMAPs provided data through registered email addresses. Although some SMAPs were available at set prices or free of cost to end users, the cost of other products varied based on availability, shipping fees, and subscription fees.
CONCLUSIONS: An expanding market for SMAPs with features specific to at-home patient use is emerging. Health care professionals can use these features to select and suggest products that meet their patients' unique requirements.},
}
@article {pmid38107765,
year = {2023},
author = {Alam, AKMM and Chen, K},
title = {TEE-Graph: efficient privacy and ownership protection for cloud-based graph spectral analysis.},
journal = {Frontiers in big data},
volume = {6},
number = {},
pages = {1296469},
pmid = {38107765},
issn = {2624-909X},
abstract = {INTRODUCTION: Big graphs like social network user interactions and customer rating matrices require significant computing resources to maintain. Data owners are now using public cloud resources for storage and computing elasticity. However, existing solutions do not fully address the privacy and ownership protection needs of the key involved parties: data contributors and the data owner who collects data from contributors.
METHODS: We propose a Trusted Execution Environment (TEE) based solution: TEE-Graph for graph spectral analysis of outsourced graphs in the cloud. TEEs are new CPU features that can enable much more efficient confidential computing solutions than traditional software-based cryptographic ones. Our approach has several unique contributions compared to existing confidential graph analysis approaches. (1) It utilizes the unique TEE properties to ensure contributors' new privacy needs, e.g., the right of revocation for shared data. (2) It implements efficient access-pattern protection with a differentially private data encoding method. And (3) it implements TEE-based special analysis algorithms: the Lanczos method and the Nystrom method for efficiently handling big graphs and protecting confidentiality from compromised cloud providers.
RESULTS: The TEE-Graph approach is much more efficient than software crypto approaches and also immune to access-pattern-based attacks. Compared with the best-known software crypto approach for graph spectral analysis, PrivateGraph, we have seen that TEE-Graph has 10[3]-10[5] times lower computation, storage, and communication costs. Furthermore, the proposed access-pattern protection method incurs only about 10%-25% of the overall computation cost.
DISCUSSION: Our experimentation showed that TEE-Graph performs significantly better and has lower costs than typical software approaches. It also addresses the unique ownership and access-pattern issues that other TEE-related graph analytics approaches have not sufficiently studied. The proposed approach can be extended to other graph analytics problems with strong ownership and access-pattern protection.},
}
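The Lanczos method named in this abstract is a standard iteration for approximating the extreme eigenvalues of a large symmetric matrix, such as a graph adjacency matrix. The numpy sketch below shows the textbook algorithm only; the paper's TEE hardening, access-pattern protection, and Nystrom variant are not represented.

    import numpy as np

    def lanczos_eigvals(A, k, seed=0):
        # Build a k x k tridiagonal matrix T whose eigenvalues approximate
        # the extreme eigenvalues of the symmetric matrix A.
        n = A.shape[0]
        rng = np.random.default_rng(seed)
        Q = np.zeros((n, k + 1))
        alpha, beta = np.zeros(k), np.zeros(k)
        q = rng.standard_normal(n)
        Q[:, 0] = q / np.linalg.norm(q)
        for j in range(k):
            w = A @ Q[:, j]
            alpha[j] = Q[:, j] @ w
            w -= alpha[j] * Q[:, j]
            if j > 0:
                w -= beta[j - 1] * Q[:, j - 1]
            # Full re-orthogonalization keeps the basis numerically stable.
            w -= Q[:, :j + 1] @ (Q[:, :j + 1].T @ w)
            beta[j] = np.linalg.norm(w)
            if beta[j] < 1e-12:        # invariant subspace found early
                k = j + 1
                break
            Q[:, j + 1] = w / beta[j]
        T = np.diag(alpha[:k]) + np.diag(beta[:k - 1], 1) + np.diag(beta[:k - 1], -1)
        return np.linalg.eigvalsh(T)

    # Small sanity check against a dense eigensolver.
    A = np.array([[0, 1, 1, 0], [1, 0, 1, 0], [1, 1, 0, 1], [0, 0, 1, 0]], float)
    print(lanczos_eigvals(A, 4), np.linalg.eigvalsh(A))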
@article {pmid38093855,
year = {2024},
author = {Ortega Candel, JM and Mora Gimeno, FJ and Mora Mora, H},
title = {Generation of a dataset for DoW attack detection in serverless architectures.},
journal = {Data in brief},
volume = {52},
number = {},
pages = {109921},
pmid = {38093855},
issn = {2352-3409},
abstract = {Denial of Wallet (DoW) attacks refer to a type of cyberattack that aims to exploit and exhaust the financial resources of an organization by triggering excessive costs or charges within their cloud or serverless computing environment. These attacks are particularly relevant in the context of serverless architectures due to characteristics like the pay-as-you-go model, auto-scaling, limited control, and cost amplification. Serverless computing, often referred to as Function-as-a-Service (FaaS), is a cloud computing model that allows developers to build and run applications without the need to manage traditional server infrastructure. Serverless architectures have gained popularity in cloud computing due to their flexibility and ability to scale automatically based on demand. These architectures are based on executing functions without the need to manage the underlying infrastructure. However, the lack of realistic and representative datasets that simulate function invocations in serverless environments has been a challenge for research and development of solutions in this field. The aim is to create a dataset simulating function invocations in serverless architectures, which is a valuable practice for ensuring the reliability, efficiency, and security of serverless applications. Furthermore, we propose a methodology for the generation of the dataset, which involves generating synthetic data from traffic generated on cloud platforms and identifying the main characteristics of function invocations. These characteristics include SubmitTime, Invocation Delay, Response Delay, Function Duration, Active Functions at Request, and Active Functions at Response. By generating this dataset, we expect to facilitate the detection of Denial of Wallet (DoW) attacks using machine learning techniques and neural networks. In this way, this dataset, available in the Mendeley data repository, could provide other researchers and developers with a dataset to test and evaluate machine learning algorithms or other techniques based on the detection of attacks and anomalies in serverless environments.},
}
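A toy generator for the trace features the abstract enumerates can make the dataset's shape concrete. In the sketch below, only the six field names come from the abstract; the distributions, rates, seed, and file name are assumptions.

    import csv, random

    # Toy synthetic invocation trace; the exponential/lognormal choices
    # and all numeric parameters are illustrative assumptions.
    random.seed(42)
    rows, t, active = [], 0.0, 0
    for _ in range(1000):
        t += random.expovariate(5.0)                  # inter-arrival time (s)
        inv_delay = random.uniform(0.001, 0.05)       # platform scheduling delay
        duration = random.lognormvariate(-2.0, 0.8)   # function run time (s)
        resp_delay = inv_delay + duration
        active = max(0, active + random.choice([-1, 0, 1]))  # crude load proxy
        rows.append({
            'SubmitTime': round(t, 4),
            'InvocationDelay': round(inv_delay, 4),
            'ResponseDelay': round(resp_delay, 4),
            'FunctionDuration': round(duration, 4),
            'ActiveFunctionsAtRequest': active,
            'ActiveFunctionsAtResponse': active + 1,
        })

    with open('dow_trace.csv', 'w', newline='') as f:
        w = csv.DictWriter(f, fieldnames=rows[0].keys())
        w.writeheader()
        w.writerows(rows)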
@article {pmid38090001,
year = {2023},
author = {Quan, G and Yao, Z and Chen, L and Fang, Y and Zhu, W and Si, X and Li, M},
title = {A trusted medical data sharing framework for edge computing leveraging blockchain and outsourced computation.},
journal = {Heliyon},
volume = {9},
number = {12},
pages = {e22542},
pmid = {38090001},
issn = {2405-8440},
abstract = {Traditional cloud-centric approaches to medical data sharing pose risks related to real-time performance, security, and stability. Medical and healthcare data encounter challenges like data silos, privacy breaches, and transmission latency. In response to these challenges, this paper introduces a blockchain-based framework for trustworthy medical data sharing in edge computing environments. Leveraging healthcare consortium edge blockchains, this framework enables fine-grained access control to medical data. Specifically, it addresses the real-time, multi-attribute authorization challenge in CP-ABE through a Distributed Attribute Authorization strategy (DAA) based on blockchain. Furthermore, it tackles the key security issues in CP-ABE through a Distributed Key Generation protocol (DKG) based on blockchain. To address computational resource constraints in CP-ABE, we enhance a Distributed Modular Exponentiation Outsourcing algorithm (DME) and elevate its verifiable probability to "1". Theoretical analysis establishes the IND-CPA security of this framework in the Random Oracle Model. Experimental results demonstrate the effectiveness of our solution for resource-constrained end-user devices in edge computing environments.},
}
@article {pmid38082849,
year = {2023},
author = {Calo, J and Lo, B},
title = {IoT Federated Blockchain Learning at the Edge.},
journal = {Annual International Conference of the IEEE Engineering in Medicine and Biology Society. IEEE Engineering in Medicine and Biology Society. Annual International Conference},
volume = {2023},
number = {},
pages = {1-4},
doi = {10.1109/EMBC40787.2023.10339946},
pmid = {38082849},
issn = {2694-0604},
mesh = {Humans ; *Blockchain ; Hospitals ; Intelligence ; Machine Learning ; *Medicine ; },
abstract = {IoT devices are sorely underutilized in the medical field, especially within machine learning for medicine, yet they offer unrivaled benefits. IoT devices are low-cost, energy-efficient, small and intelligent devices [1]. In this paper, we propose a distributed federated learning framework for IoT devices, more specifically for IoMT (Internet of Medical Things), using blockchain to allow for a decentralized scheme improving privacy and efficiency over a centralized system; this allows us to move from the prevalent cloud-based architectures to the edge. The system is designed for three paradigms: 1) Training neural networks on IoT devices to allow for collaborative training of a shared model whilst decoupling the learning from the dataset [2] to ensure privacy [3]. Training is performed in an online manner simultaneously amongst all participants, allowing for training on actual data that may not have been present in a dataset collected in the traditional way and dynamically adapting the system whilst it is being trained. 2) Training of an IoMT system in a fully private manner such as to mitigate the issue with confidentiality of medical data and to build robust, and potentially bespoke [4], models where not much, if any, data exists. 3) Distribution of the actual network training, something federated learning itself does not do, to allow hospitals, for example, to utilize their spare computing resources to train network models.},
}
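The collaborative training that paradigm 1 describes rests on a federated aggregation step. The numpy sketch below shows plain FedAvg (size-weighted parameter averaging) under the assumption of flattened parameter vectors; the paper's blockchain transport and consensus layers are omitted.

    import numpy as np

    def fedavg(client_weights, client_sizes):
        # Size-weighted mean of per-client parameter vectors: clients with
        # more local data contribute proportionally more to the shared model.
        sizes = np.asarray(client_sizes, dtype=float)
        stacked = np.stack(client_weights)            # (n_clients, n_params)
        return (sizes[:, None] * stacked).sum(axis=0) / sizes.sum()

    # Toy round: three IoMT nodes with different amounts of local data.
    w_new = fedavg([np.array([0.1, 0.9]),
                    np.array([0.2, 0.8]),
                    np.array([0.4, 0.6])],
                   [100, 50, 10])
    print(w_new)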
@article {pmid38077560,
year = {2023},
author = {Wang, Z},
title = {An English course practice evaluation system based on multi-source mobile information and IoT technology.},
journal = {PeerJ. Computer science},
volume = {9},
number = {},
pages = {e1615},
pmid = {38077560},
issn = {2376-5992},
abstract = {With the increased use of online English courses, course quality directly determines their efficacy. Recently, various industries have widely adopted Internet of Things (IoT) technology, which offers considerable adaptability across scenarios. To better supervise the specific content of English courses, we discuss how to apply multi-source mobile IoT information technology to a practical evaluation system for English courses to improve the performance of English learning evaluation. By analyzing the problems of existing English course evaluation and the characteristics of multi-source mobile IoT information technology, this article designs a practical English course evaluation system based on multi-source data collection, processing, and analysis. The system collects student voice, behavior, and other data in real time through mobile devices, then analyzes the data using cloud computing and data mining technology to provide real-time learning progress and feedback. We demonstrate that the accuracy of the evaluation system reaches 80.23%, which can effectively improve the efficiency of English learning evaluation, provide a new method for English teaching evaluation, and further optimize English teaching content to meet the needs of the actual teaching environment.},
}
@article {pmid38077558,
year = {2023},
author = {Gu, H and Wang, J and Yu, J and Wang, D and Li, B and He, X and Yin, X},
title = {Towards virtual machine scheduling research based on multi-decision AHP method in the cloud computing platform.},
journal = {PeerJ. Computer science},
volume = {9},
number = {},
pages = {e1675},
pmid = {38077558},
issn = {2376-5992},
abstract = {Virtual machine scheduling and resource allocation during dynamic virtual machine consolidation are a promising means of alleviating the prominent energy consumption and service level agreement violations of cloud data centers while improving quality of service (QoS). In this article, we propose an efficient algorithm (AESVMP) based on the Analytic Hierarchy Process (AHP) for virtual machine scheduling. First, we consider three key criteria for the candidate host: power consumption, available resources, and the resource allocation balance ratio, where the ratio is calculated as the balance between the overall three-dimensional resource (CPU, RAM, BW) plane and the resource allocation plane once the newly migrated virtual machine (VM) consumes the target host's resources. The virtual machine placement decision is then determined by applying the multi-criteria decision-making technique AHP to the above three criteria. Extensive experimental results based on the CloudSim emulator using 10 PlanetLab workloads demonstrate that the proposed approach can reduce the cloud data center's number of migrations, service level agreement violations (SLAV), and the aggregate energy-consumption indicator (ESV) by an average of 51.76%, 67.4%, and 67.6%, respectively, compared with the cutting-edge method LBVMP, which validates its effectiveness.},
}
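The AHP step the abstract describes reduces to deriving criterion weights from the principal eigenvector of a pairwise comparison matrix and scoring candidate hosts with them. In the sketch below, the Saaty-style judgments and host numbers are illustrative assumptions, not the paper's calibration; only the three criteria come from the abstract.

    import numpy as np

    # Pairwise comparison of the three criteria (power consumption,
    # available resources, allocation balance ratio); values assumed.
    C = np.array([[1.0, 3.0, 5.0],
                  [1/3, 1.0, 2.0],
                  [1/5, 1/2, 1.0]])

    eigvals, eigvecs = np.linalg.eig(C)
    i = np.argmax(eigvals.real)
    weights = np.abs(eigvecs[:, i].real)
    weights /= weights.sum()                 # AHP criterion weights

    # Consistency check: CI = (lambda_max - n)/(n - 1), RI = 0.58 for n = 3;
    # the judgments are considered acceptable when CR = CI/RI < 0.1.
    cr = ((eigvals.real[i] - 3) / 2) / 0.58

    # Rank two candidate hosts: [power (W), free-resource share, balance ratio].
    hosts = np.array([[120.0, 0.6, 0.9],
                      [100.0, 0.4, 0.7]])
    benefit = hosts.copy()
    benefit[:, 0] = 1.0 / benefit[:, 0]      # lower power is better, so invert
    scores = (benefit / benefit.sum(axis=0)) @ weights
    print('place VM on host', int(np.argmax(scores)), '| CR =', round(cr, 3))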
@article {pmid38077531,
year = {2023},
author = {Eljack, S and Jemmali, M and Denden, M and Turki, S and Khedr, WM and Algashami, AM and ALsadig, M},
title = {A secure solution based on load-balancing algorithms between regions in the cloud environment.},
journal = {PeerJ. Computer science},
volume = {9},
number = {},
pages = {e1513},
pmid = {38077531},
issn = {2376-5992},
abstract = {The problem treated in this article is the storage of sensitive data in the cloud environment and how to choose regions and zones to minimize the number of file transfer events. Handling sensitive data across the global internet many times can increase risks and lower security levels. Our work consists of scheduling several files across different regions based on security and load-balancing parameters in the cloud. Each file is characterized by its size. If data is misplaced from the start, it will require a transfer from one region to another and sometimes from one zone to another. The objective is to find a schedule that assigns these files to the appropriate region while ensuring load balancing in each region, so as to guarantee the minimum number of migrations. This problem is NP-hard. A novel model for regional security and load balancing of files in the cloud environment is proposed in this article. The model is based on a component called the "Scheduler", which utilizes the proposed algorithms to solve the problem. It is a secure solution that guarantees an efficient dispersion of the stored files, avoiding concentrating most of the storage in one region; consequently, damage to one region does not cause a loss of big data. In addition, a novel method called the "Grouping method" is proposed, and several variants of this method are utilized to propose novel algorithms for solving the studied problem. Initially, seven algorithms are proposed in this article. The experimental results show that there is no dominance among these algorithms. Therefore, three combinations of these seven algorithms generate three other algorithms with better results. Based on the dominance rule, only six algorithms are selected to discuss the performance of the proposed algorithms. Four classes of instances are generated to measure and test the performance of the algorithms. In total, 1,360 instances are tested. Three metrics are used to assess the algorithms and compare them. The experimental results show that the best algorithm is the "Best-value of four algorithms" in 86.5% of cases, with an average gap of 0.021 and an average running time of 0.0018 s.},
}
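As a baseline for the file-to-region assignment problem studied here, a greedy longest-processing-time heuristic balances region loads by file size. This is a standard heuristic sketch, not the paper's "Grouping method" or any of its seven algorithms.

    import heapq

    def assign_files(sizes, n_regions):
        # Greedy LPT baseline: place each file (largest first) in the
        # currently least-loaded region, keeping loads near-balanced and
        # so reducing the chance of later cross-region transfers.
        heap = [(0.0, r) for r in range(n_regions)]   # (load, region)
        heapq.heapify(heap)
        placement = {}
        for i, s in sorted(enumerate(sizes), key=lambda x: -x[1]):
            load, r = heapq.heappop(heap)
            placement[i] = r
            heapq.heappush(heap, (load + s, r))
        return placement

    # Ten files over three regions.
    print(assign_files([70, 10, 55, 25, 40, 5, 60, 15, 30, 20], 3))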
@article {pmid38074363,
year = {2023},
author = {Computational Intelligence And Neuroscience},
title = {Retracted: The Rise of Cloud Computing: Data Protection, Privacy, and Open Research Challenges-A Systematic Literature Review (SLR).},
journal = {Computational intelligence and neuroscience},
volume = {2023},
number = {},
pages = {9838129},
pmid = {38074363},
issn = {1687-5273},
abstract = {[This retracts the article DOI: 10.1155/2022/8303504.].},
}
@article {pmid38074307,
year = {2023},
author = {Mangana, CM and Barraquer, A and Ferragut-Alegre, Á and Santolaria, G and Olivera, M and Barraquer, R},
title = {Detection of graft failure in post-keratoplasty patients by automated deep learning.},
journal = {Saudi journal of ophthalmology : official journal of the Saudi Ophthalmological Society},
volume = {37},
number = {3},
pages = {207-210},
pmid = {38074307},
issn = {1319-4534},
abstract = {PURPOSE: Detection of graft failure of post-penetrating keratoplasty (PKP) patients from the proprietary dataset using algorithms trained in Automated Deep Learning (AutoML).
METHODS: This was an observational cross-sectional study, for which AutoML algorithms were trained following the success/failure labeling strategy based on clinical notes, on a cohort corresponding to 220 images of post-keratoplasty anterior pole eyes. Once the image quality criteria were analyzed and the dataset was pseudo-anonymized, it was transferred to the Google Cloud Platform, where using the Vertex AI-AutoML API, cloud- and edge-based algorithms were trained, following expert recommendations on dataset splitting (80% training, 10% test, and 10% validation).
RESULTS: The metrics obtained with the cloud-based and edge-based models were similar, but we chose to analyze the edge model as it is exportable, lighter, and cheaper to train. The initial results of the model presented an accuracy of 95.83%, with a specificity of 91.67% and a sensitivity of 100%, yielding an F1 score of 95.996% and a precision of 92.30%. Other metrics, such as the area under the curve, confusion matrix, and activation maps, were also considered.
CONCLUSION: Initial results indicate the possibility of training algorithms in an automated fashion for the detection of graft failure in patients who underwent PKP. These algorithms are very lightweight tools easily integrated into mobile or desktop applications, potentially allowing every corneal transplant patient access to the best knowledge to enable the correct and timely diagnosis and treatment of graft failure. Although the results were good, the relatively small dataset means the model may have some tendency to overfit. AutoML opens the field of artificial intelligence for computer vision to professionals with little experience and knowledge of programming.},
}
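The reported F1 score follows directly from the quoted precision and sensitivity (recall) via F1 = 2PR/(P + R), which a two-line check confirms:

    # Consistency check of the abstract's reported metrics.
    precision, recall = 0.9230, 1.00
    f1 = 2 * precision * recall / (precision + recall)
    print(round(f1, 5))   # 0.95996, matching the reported 95.996%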
@article {pmid38072221,
year = {2024},
author = {Doo, FX and Kulkarni, P and Siegel, EL and Toland, M and Yi, PH and Carlos, RC and Parekh, VS},
title = {Economic and Environmental Costs of Cloud Technologies for Medical Imaging and Radiology Artificial Intelligence.},
journal = {Journal of the American College of Radiology : JACR},
volume = {21},
number = {2},
pages = {248-256},
doi = {10.1016/j.jacr.2023.11.011},
pmid = {38072221},
issn = {1558-349X},
mesh = {*Artificial Intelligence ; Cloud Computing ; *Radiology ; Costs and Cost Analysis ; Diagnostic Imaging ; },
abstract = {Radiology is on the verge of a technological revolution driven by artificial intelligence (including large language models), which requires robust computing and storage capabilities, often beyond the capacity of current non-cloud-based informatics systems. The cloud presents a potential solution for radiology, and we should weigh its economic and environmental implications. Recently, cloud technologies have become a cost-effective strategy by providing necessary infrastructure while reducing expenditures associated with hardware ownership, maintenance, and upgrades. Simultaneously, given the optimized energy consumption in modern cloud data centers, this transition is expected to reduce the environmental footprint of radiologic operations. The path to cloud integration comes with its own challenges, and radiology informatics leaders must consider elements such as cloud architectural choices, pricing, data security, uptime service agreements, user training and support, and broader interoperability. With the increasing importance of data-driven tools in radiology, understanding and navigating the cloud landscape will be essential for the future of radiology and its various stakeholders.},
}
@article {pmid38069903,
year = {2024},
author = {Mirchandani, CD and Shultz, AJ and Thomas, GWC and Smith, SJ and Baylis, M and Arnold, B and Corbett-Detig, R and Enbody, E and Sackton, TB},
title = {A Fast, Reproducible, High-throughput Variant Calling Workflow for Population Genomics.},
journal = {Molecular biology and evolution},
volume = {41},
number = {1},
pages = {},
pmid = {38069903},
issn = {1537-1719},
mesh = {Animals ; *Software ; *Metagenomics ; Workflow ; Genomics ; Sequence Analysis, DNA ; High-Throughput Nucleotide Sequencing ; },
abstract = {The increasing availability of genomic resequencing data sets and high-quality reference genomes across the tree of life present exciting opportunities for comparative population genomic studies. However, substantial challenges prevent the simple reuse of data across different studies and species, arising from variability in variant calling pipelines, data quality, and the need for computationally intensive reanalysis. Here, we present snpArcher, a flexible and highly efficient workflow designed for the analysis of genomic resequencing data in nonmodel organisms. snpArcher provides a standardized variant calling pipeline and includes modules for variant quality control, data visualization, variant filtering, and other downstream analyses. Implemented in Snakemake, snpArcher is user-friendly, reproducible, and designed to be compatible with high-performance computing clusters and cloud environments. To demonstrate the flexibility of this pipeline, we applied snpArcher to 26 public resequencing data sets from nonmammalian vertebrates. These variant data sets are hosted publicly to enable future comparative population genomic analyses. With its extensibility and the availability of public data sets, snpArcher will contribute to a broader understanding of genetic variation across species by facilitating the rapid use and reuse of large genomic data sets.},
}
@article {pmid38067890,
year = {2023},
author = {Kiarashi, Y and Saghafi, S and Das, B and Hegde, C and Madala, VSK and Nakum, A and Singh, R and Tweedy, R and Doiron, M and Rodriguez, AD and Levey, AI and Clifford, GD and Kwon, H},
title = {Graph Trilateration for Indoor Localization in Sparsely Distributed Edge Computing Devices in Complex Environments Using Bluetooth Technology.},
journal = {Sensors (Basel, Switzerland)},
volume = {23},
number = {23},
pages = {},
pmid = {38067890},
issn = {1424-8220},
support = {cox-emory2019//James M. Cox Foundation and Cox Enterprises, Inc/ ; },
mesh = {Humans ; *Cloud Computing ; Wireless Technology ; Health Status ; Movement ; *Spatial Navigation/physiology ; },
abstract = {Spatial navigation patterns in indoor space usage can reveal important cues about the cognitive health of participants. In this work, we present a low-cost, scalable, open-source edge computing system using Bluetooth low energy (BLE) beacons for tracking indoor movements in a large, 1700 m[2] facility used to carry out therapeutic activities for participants with mild cognitive impairment (MCI). The facility is instrumented with 39 edge computing systems, along with an on-premise fog server. Each participant carries a BLE beacon, whose signals are received and analyzed by the edge computing systems. The edge computing systems are sparsely distributed in the wide, complex indoor space, challenging the standard trilateration technique for localizing subjects, which assumes a dense installation of BLE beacons. We propose a graph trilateration approach that considers the temporal density of hits from the BLE beacon to surrounding edge devices to handle the inconsistent coverage of edge devices. This proposed method helps us tackle the varying signal strength, which leads to intermittent detection of beacons. The proposed method can pinpoint the positions of multiple participants with an average error of 4.4 m and over 85% accuracy in region-level localization across the entire study area. Our experimental results, evaluated in a clinical environment, suggest that an ordinary medical facility can be transformed into a smart space that enables automatic assessment of individuals' movements, which may reflect health status or response to treatment.},
}
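The idea of weighting by the temporal density of beacon hits can be reduced to a hit-count-weighted centroid over the receiving edge devices. The sketch below is that simplification, not the paper's full graph formulation; positions and counts are illustrative.

    import numpy as np

    def weighted_position(device_xy, hit_counts):
        # Estimate a beacon wearer's position as the hit-density-weighted
        # centroid of the edge devices that heard it in a time window.
        xy = np.asarray(device_xy, dtype=float)       # (n_devices, 2), meters
        w = np.asarray(hit_counts, dtype=float)
        return (w[:, None] * xy).sum(axis=0) / w.sum()

    # Three receivers; the device with the densest hits dominates.
    print(weighted_position([[0, 0], [10, 0], [0, 8]], [12, 3, 1]))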
@article {pmid38067868,
year = {2023},
author = {Garcia-Perez, A and Miñón, R and Torre-Bastida, AI and Zulueta-Guerrero, E},
title = {Analysing Edge Computing Devices for the Deployment of Embedded AI.},
journal = {Sensors (Basel, Switzerland)},
volume = {23},
number = {23},
pages = {},
pmid = {38067868},
issn = {1424-8220},
support = {SONETO project, ref. KK-2023/00038//Basque Government Elkartek program/ ; },
abstract = {In recent years, more and more devices are connected to the network, generating an overwhelming amount of data. This booming trend is known as the Internet of Things. In order to deal with these data close to the source, the term Edge Computing arises. The main objective is to address the limitations of cloud processing and satisfy the growing demand for applications and services that require low latency, greater efficiency and real-time response capabilities. Furthermore, it is essential to underscore the intrinsic connection between artificial intelligence and edge computing within the context of our study. This integral relationship not only addresses the challenges posed by data proliferation but also propels a transformative wave of innovation, shaping a new era of data processing capabilities at the network's edge. Edge devices can perform real-time data analysis and make autonomous decisions without relying on constant connectivity to the cloud. This article aims to analyse and compare Edge Computing devices when artificial intelligence algorithms are deployed on them. To this end, a detailed experiment involving various edge devices, models and metrics is conducted. In addition, we observe how artificial intelligence accelerators such as the Tensor Processing Unit (TPU) behave. This analysis seeks to inform the choice of the device that best suits the necessary AI requirements. In summary, the Jetson Nano provides the best performance when only the CPU is used, although utilising a TPU drastically enhances the results.},
}
@article {pmid38067859,
year = {2023},
author = {Balatsouras, CP and Karras, A and Karras, C and Karydis, I and Sioutas, S},
title = {WiCHORD+: A Scalable, Sustainable, and P2P Chord-Based Ecosystem for Smart Agriculture Applications.},
journal = {Sensors (Basel, Switzerland)},
volume = {23},
number = {23},
pages = {},
pmid = {38067859},
issn = {1424-8220},
support = {Save-Water//European Union and national funds of Greece and Albania under the Interreg IPA II Cross-border Cooperation Programme "Greece - Albania 2014-2020"/ ; },
abstract = {In the evolving landscape of Industry 4.0, the convergence of peer-to-peer (P2P) systems, LoRa-enabled wireless sensor networks (WSNs), and distributed hash tables (DHTs) represents a major advancement that enhances sustainability in the modern agriculture framework and its applications. In this study, we propose a P2P Chord-based ecosystem for sustainable and smart agriculture applications, inspired by the inner workings of the Chord protocol. The node-centric approach of WiCHORD+ is a standout feature, streamlining operations in WSNs and leading to more energy-efficient and straightforward system interactions. Instead of traditional key-centric methods, WiCHORD+ is a node-centric protocol that is compatible with the inherent characteristics of WSNs. This unique design integrates seamlessly with distributed hash tables (DHTs), providing an efficient mechanism to locate nodes and ensure robust data retrieval while reducing energy consumption. Additionally, by utilizing the MAC address of each node in data routing, WiCHORD+ offers a more direct and efficient data lookup mechanism, essential for the timely and energy-efficient operation of WSNs. While the increasing dependence of smart agriculture on cloud computing environments for data storage and machine learning techniques for real-time prediction and analytics continues, frameworks like the proposed WiCHORD+ appear promising for future IoT applications due to their compatibility with modern devices and peripherals. Ultimately, the proposed approach aims to effectively incorporate LoRa, WSNs, DHTs, cloud computing, and machine learning, by providing practical solutions to the ongoing challenges in the current smart agriculture landscape and IoT applications.},
}
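WiCHORD+ builds on the Chord protocol's hash-ring lookup: node identifiers (here, MAC addresses) are hashed onto a 2^m ring, and each key is served by its successor node. The sketch below shows that textbook rule only, not the node-centric WiCHORD+ extensions; the 16-bit ring size and names are illustrative.

    import bisect, hashlib

    def ring_id(name, bits=16):
        # Hash a node MAC address or key name onto a 2^bits identifier ring.
        digest = hashlib.sha1(name.encode()).digest()
        return int.from_bytes(digest, 'big') % (1 << bits)

    def successor(node_ids, key_id):
        # Chord rule: a key is served by the first node whose ID is
        # >= the key's ID, wrapping around the ring.
        ids = sorted(node_ids)
        i = bisect.bisect_left(ids, key_id)
        return ids[i % len(ids)]

    nodes = [ring_id(mac) for mac in ['aa:01', 'aa:02', 'aa:03', 'aa:04']]
    print(successor(nodes, ring_id('soil-moisture-7')))  # owning sensor node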
@article {pmid38067809,
year = {2023},
author = {Park, J and Jeong, J},
title = {An Autoscaling System Based on Predicting the Demand for Resources and Responding to Failure in Forecasting.},
journal = {Sensors (Basel, Switzerland)},
volume = {23},
number = {23},
pages = {},
pmid = {38067809},
issn = {1424-8220},
support = {2018R1A5A7023490//National Research Foundation of Korea/ ; 2021R1F1A1061514//National Research Foundation of Korea/ ; S-2022-G0001-00070//Dongguk University/ ; },
abstract = {In recent years, the convergence of edge computing and sensor technologies has become a pivotal frontier revolutionizing real-time data processing. In particular, the practice of data acquisition-which encompasses the collection of sensory information in the form of images and videos, followed by their transmission to a remote cloud infrastructure for subsequent analysis-has witnessed a notable surge in adoption. However, to ensure seamless real-time processing irrespective of the data volume being conveyed or the frequency of incoming requests, it is vital to proactively locate resources within the cloud infrastructure specifically tailored to data-processing tasks. Many studies have focused on the proactive prediction of resource demands through the use of deep learning algorithms, generating considerable interest in real-time data processing. Nonetheless, an inherent risk arises when relying solely on predictive resource allocation, as it can heighten the susceptibility to system failure. In this study, a framework that includes algorithms that periodically monitor resource requirements and dynamically adjust resource provisioning to match the actual demand is proposed. Under experimental conditions with the Bitbrains dataset, setting the network throughput to 300 kB/s and with a threshold of 80%, the proposed system provides a 99% performance improvement in terms of the autoscaling algorithm and requires only 0.43 ms of additional computational overhead compared to relying on a simple prediction model alone.},
}
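The paper's central idea, following a demand forecast but periodically reconciling it against measured demand, can be condensed into a small decision rule. The thresholds, tolerance, and names below are illustrative assumptions, not the tuned values of the proposed framework.

    def scale_decision(predicted_load, measured_load, capacity,
                       threshold=0.80, tolerance=0.20):
        # Hybrid autoscaling sketch: follow the forecast, but fall back to
        # the measured load when the forecast misses by more than
        # `tolerance`, mirroring a monitor-and-correct loop.
        forecast_ok = (abs(predicted_load - measured_load)
                       <= tolerance * max(measured_load, 1e-9))
        load = predicted_load if forecast_ok else measured_load
        utilization = load / capacity
        if utilization > threshold:
            return 'scale_out'
        if utilization < threshold / 2:
            return 'scale_in'
        return 'hold'

    # Forecast misses by >20%, so the measured load drives the decision.
    print(scale_decision(predicted_load=240, measured_load=400, capacity=400))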
@article {pmid38067758,
year = {2023},
author = {Khan, A and Khattak, KS and Khan, ZH and Gulliver, TA and Abdullah, },
title = {Edge Computing for Effective and Efficient Traffic Characterization.},
journal = {Sensors (Basel, Switzerland)},
volume = {23},
number = {23},
pages = {},
pmid = {38067758},
issn = {1424-8220},
support = {National Center for Big Data and Cloud Computing//Higher Education Commission/ ; },
abstract = {Traffic flow analysis is essential to develop smart urban mobility solutions. Although numerous tools have been proposed, they employ only a small number of parameters. To overcome this limitation, an edge computing solution is proposed based on nine traffic parameters, namely, vehicle count, direction, speed, and type, flow, peak hour factor, density, time headway, and distance headway. The proposed low-cost solution is easy to deploy and maintain. The sensor node is comprised of a Raspberry Pi 4, Pi camera, Intel Movidius Neural Compute Stick 2, Xiaomi MI Power Bank, and Zong 4G Bolt+. Pre-trained models from the OpenVINO Toolkit are employed for vehicle detection and classification, and a centroid tracking algorithm is used to estimate vehicle speed. The measured traffic parameters are transmitted to the ThingSpeak cloud platform via 4G. The proposed solution was field-tested for one week (7 h/day), with approximately 10,000 vehicles per day. The count, classification, and speed accuracies obtained were 79.8%, 93.2%, and 82.9%, respectively. The sensor node can operate for approximately 8 h with a 10,000 mAh power bank and the required data bandwidth is 1.5 MB/h. The proposed edge computing solution overcomes the limitations of existing traffic monitoring systems and can work in hostile environments.},
}
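Several of the nine parameters named in this abstract are simple functions of count and speed. The sketch below applies the standard traffic-flow relations (flow, density k = q/v, peak hour factor, time and distance headways) to illustrative numbers, not the paper's field data.

    # Standard traffic-flow relations behind several of the nine parameters.
    count_per_hour = 1200             # vehicle count over one hour
    peak_15min_count = 380            # busiest 15-minute count
    mean_speed_kmh = 45.0             # average spot speed

    flow = count_per_hour                          # q, veh/h
    density = flow / mean_speed_kmh                # k = q / v, veh/km
    phf = count_per_hour / (4 * peak_15min_count)  # peak hour factor
    time_headway = 3600.0 / flow                   # mean s between vehicles
    distance_headway = 1000.0 / density            # mean m between vehicles

    print(f"k={density:.1f} veh/km, PHF={phf:.2f}, "
          f"ht={time_headway:.1f} s, hd={distance_headway:.1f} m")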
@article {pmid38067756,
year = {2023},
author = {Aljebreen, M and Alohali, MA and Mahgoub, H and Aljameel, SS and Alsumayt, A and Sayed, A},
title = {Multi-Objective Seagull Optimization Algorithm with Deep Learning-Enabled Vulnerability Detection for Secure Cloud Environments.},
journal = {Sensors (Basel, Switzerland)},
volume = {23},
number = {23},
pages = {},
pmid = {38067756},
issn = {1424-8220},
support = {PNURSP2023R330//Princess Nourah bint Abdulrahman University/ ; RSP2023R459//King Saud University/ ; },
abstract = {Cloud computing (CC) is an internet-enabled environment that provides computing services such as networking, databases, and servers to clients and organizations in a cost-effective manner. Despite the benefits rendered by CC, its security remains a prominent concern to overcome. An intrusion detection system (IDS) is generally used to detect both normal and anomalous behavior in networks. The design of IDS using a machine learning (ML) technique comprises a series of methods that can learn patterns from data and forecast the outcomes consequently. In this background, the current study designs a novel multi-objective seagull optimization algorithm with a deep learning-enabled vulnerability detection (MOSOA-DLVD) technique to secure the cloud platform. The MOSOA-DLVD technique uses the feature selection (FS) method and hyperparameter tuning strategy to identify the presence of vulnerabilities or attacks in the cloud infrastructure. Primarily, the FS method is implemented using the MOSOA technique. Furthermore, the MOSOA-DLVD technique uses a deep belief network (DBN) method for intrusion detection and its classification. In order to improve the detection outcomes of the DBN algorithm, the sooty tern optimization algorithm (STOA) is applied for the hyperparameter tuning process. The performance of the proposed MOSOA-DLVD system was validated with extensive simulations upon a benchmark IDS dataset. The improved intrusion detection results of the MOSOA-DLVD approach with a maximum accuracy of 99.34% establish the proficiency of the model compared with recent methods.},
}
@article {pmid38067703,
year = {2023},
author = {Cicero, S and Guarascio, M and Guerrieri, A and Mungari, S},
title = {A Deep Anomaly Detection System for IoT-Based Smart Buildings.},
journal = {Sensors (Basel, Switzerland)},
volume = {23},
number = {23},
pages = {},
pmid = {38067703},
issn = {1424-8220},
abstract = {In recent years, technological advancements in sensor, communication, and data storage technologies have led to the increasingly widespread use of smart devices in different types of buildings, such as residential homes, offices, and industrial installations. The main benefit of using these devices is the possibility of enhancing different crucial aspects of life within these buildings, including energy efficiency, safety, health, and occupant comfort. In particular, the fast progress in the field of the Internet of Things has yielded exponential growth in the number of connected smart devices and, consequently, increased the volume of data generated and exchanged. However, traditional Cloud-Computing platforms have exhibited limitations in their capacity to handle and process the continuous data exchange, leading to the rise of new computing paradigms, such as Edge Computing and Fog Computing. In this new complex scenario, advanced Artificial Intelligence and Machine Learning can play a key role in analyzing the generated data and predicting unexpected or anomalous events, allowing for quickly setting up effective responses against these unexpected events. To the best of our knowledge, current literature lacks Deep-Learning-based approaches specifically devised for guaranteeing safety in IoT-Based Smart Buildings. For this reason, we adopt an unsupervised neural architecture for detecting anomalies, such as faults, fires, theft attempts, and more, in such contexts. In more detail, in our proposal, data from a sensor network are processed by a Sparse U-Net neural model. The proposed approach is lightweight, making it suitable for deployment on the edge nodes of the network, and it does not require a pre-labeled training dataset. Experimental results conducted on a real-world case study demonstrate the effectiveness of the developed solution.},
}
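The unsupervised detection scheme described here follows a common reconstruction-error pattern. The sketch below shows that generic rule, with a zero reconstruction standing in for a trained Sparse U-Net; the quantile threshold is an assumed choice.

    import numpy as np

    def flag_anomalies(windows, reconstructions, train_errors, q=0.99):
        # A sensor window is anomalous when its reconstruction error
        # exceeds a high quantile of the errors seen on normal data.
        err = np.mean((windows - reconstructions) ** 2, axis=1)
        return err > np.quantile(train_errors, q)

    # Toy run: 100 windows of 32 sensor readings, window 7 corrupted.
    rng = np.random.default_rng(0)
    x = rng.normal(0.0, 0.1, (100, 32))
    x[7] += 3.0
    train_err = np.mean(rng.normal(0.0, 0.1, (500, 32)) ** 2, axis=1)
    print(np.where(flag_anomalies(x, np.zeros_like(x), train_err))[0])
    # window 7 dominates; q trades detection rate against false alarms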
@article {pmid38067697,
year = {2023},
author = {Mehmood, KT and Atiq, S and Hussain, MM},
title = {Enhancing QoS of Telecom Networks through Server Load Management in Software-Defined Networking (SDN).},
journal = {Sensors (Basel, Switzerland)},
volume = {23},
number = {23},
pages = {},
pmid = {38067697},
issn = {1424-8220},
abstract = {In the modern era, with the emergence of the Internet of Things (IoT), big data applications, cloud computing, and the ever-increasing demand for high-speed internet with the aid of upgraded telecom network resources, users now require virtualization of the network for smart handling of modern-day challenges to obtain better services (in terms of security, reliability, scalability, etc.). These requirements can be fulfilled by using software-defined networking (SDN). This research article emphasizes one of the major aspects of the practical implementation of SDN to enhance the QoS of a virtual network through the load management of network servers. In an SDN-based network, several servers are available to fulfill users' hypertext transfer protocol (HTTP) requests to ensure dynamic routing under the influence of the SDN controller. However, if a large number of requests is directed to a specific server, the controller is bound to follow the user-programmed instructions, and the load on that server increases, which results in (a) an increase in end-to-end user delay, (b) a decrease in the data transfer rate, and (c) a decrease in the available bandwidth of the targeted server. All of the above-mentioned factors result in the degradation of network QoS. With the implementation of the proposed algorithm, dynamic active sensing server load management (DASLM), on the SDN controller, the load on the server is shared based on QoS control parameters (throughput, response time, round trip time, etc.). The overall delay is reduced, and bandwidth utilization along with throughput is also increased.},
}
@article {pmid38062043,
year = {2023},
author = {Stanimirova, R and Tarrio, K and Turlej, K and McAvoy, K and Stonebrook, S and Hu, KT and Arévalo, P and Bullock, EL and Zhang, Y and Woodcock, CE and Olofsson, P and Zhu, Z and Barber, CP and Souza, CM and Chen, S and Wang, JA and Mensah, F and Calderón-Loor, M and Hadjikakou, M and Bryan, BA and Graesser, J and Beyene, DL and Mutasha, B and Siame, S and Siampale, A and Friedl, MA},
title = {A global land cover training dataset from 1984 to 2020.},
journal = {Scientific data},
volume = {10},
number = {1},
pages = {879},
pmid = {38062043},
issn = {2052-4463},
support = {80NSSC18K0994//National Aeronautics and Space Administration (NASA)/ ; },
abstract = {State-of-the-art cloud computing platforms such as Google Earth Engine (GEE) enable regional-to-global land cover and land cover change mapping with machine learning algorithms. However, collection of high-quality training data, which is necessary for accurate land cover mapping, remains costly and labor-intensive. To address this need, we created a global database of nearly 2 million training units spanning the period from 1984 to 2020 for seven primary and nine secondary land cover classes. Our training data collection approach leveraged GEE and machine learning algorithms to ensure data quality and biogeographic representation. We sampled the spectral-temporal feature space from Landsat imagery to efficiently allocate training data across global ecoregions and incorporated publicly available and collaborator-provided datasets to our database. To reflect the underlying regional class distribution and post-disturbance landscapes, we strategically augmented the database. We used a machine learning-based cross-validation procedure to remove potentially mis-labeled training units. Our training database is relevant for a wide array of studies such as land cover change, agriculture, forestry, hydrology, urban development, among many others.},
}
@article {pmid38061141,
year = {2024},
author = {Long, K and Chen, Z and Zhang, H and Zhang, M},
title = {Spatiotemporal disturbances and attribution analysis of mangrove in southern China from 1986 to 2020 based on time-series Landsat imagery.},
journal = {The Science of the total environment},
volume = {912},
number = {},
pages = {169157},
doi = {10.1016/j.scitotenv.2023.169157},
pmid = {38061141},
issn = {1879-1026},
abstract = {As one of the most productive ecosystems in the world, mangrove plays a critical role in both natural ecosystems and human economy and society. However, two-thirds of the world's mangrove has been irreversibly damaged over the past 100 years as a result of ongoing human activities and climate change. In this paper, using Landsat imagery from the past 36 years as the data source, spatiotemporal changes of mangrove in southern China were detected on the Google Earth Engine (GEE) cloud platform using the LandTrendr algorithm. In addition, the attribution of mangrove disturbances was analyzed with a random forest algorithm. The results indicated that the area of mangrove recovery (5174.64 hm[2]) was much larger than the area of mangrove disturbance (1625.40 hm[2]) over the study period. Mangrove disturbances in southern China were dominated by low and low-to-medium-level disturbances, with an area of 1009.89 hm[2], accounting for 57.50% of the total disturbances. Mangrove recovery was also dominated by low and low-to-medium-level recovery, with an area of 3239.19 hm[2], accounting for 62.61% of the total recovery area. Human and natural factors interacted and influenced each other, together causing the spatiotemporal disturbances of mangrove in southern China during 1986-2020. Mangrove disturbances in Phase I (1986-2000) and Phase III (2011-2020) were dominated by human-induced factors (50.74% and 58.86%), such as the construction of roads and aquaculture ponds, whereas disturbances in Phase II (2001-2010) were dominated by natural factors (55.73%), such as tides, flooding, and species invasions. The area of mangrove recovery in southern China also increased dramatically from 1986 to 2020, owing to the promulgation and implementation of the Chinese government's mangrove protection policies as well as increased public awareness of mangrove wetland protection.},
}
@article {pmid38053971,
year = {2023},
author = {Bernardi, M and Cardarelli, F},
title = {Phasor identifier: A cloud-based analysis of phasor-FLIM data on Python notebooks.},
journal = {Biophysical reports},
volume = {3},
number = {4},
pages = {100135},
pmid = {38053971},
issn = {2667-0747},
abstract = {This paper introduces an innovative approach utilizing Google Colaboratory for the versatile analysis of phasor fluorescence lifetime imaging microscopy (FLIM) data collected from various samples (e.g., cuvettes, cells, tissues) and in various input file formats. In fact, the widespread adoption of phasor-FLIM has been hampered by complex instrumentation and data analysis requirements. We aim to make advanced FLIM analysis more accessible to researchers through a cloud-based solution that 1) harnesses robust computational resources, 2) eliminates hardware limitations, and 3) supports both CPU and GPU processing. We envision a paradigm shift in FLIM data accessibility and potential, aligning with the evolving field of artificial intelligence-driven FLIM analysis. This approach simplifies FLIM data handling and opens doors for diverse applications, from studying cellular metabolism to investigating drug encapsulation, benefiting researchers across multiple domains. A comparative analysis of freely distributed FLIM tools highlights the unique advantages of this approach in terms of adaptability, scalability, and open-source nature.},
}
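
The computational core of any phasor-FLIM tool is the phasor transform itself, which maps each pixel's decay to a point (g, s) from the first Fourier harmonic. A minimal numpy sketch, with synthetic mono-exponential decays and an assumed 80 MHz laser period, might look like this:

    import numpy as np

    n_bins, period = 256, 12.5e-9  # assumed TCSPC bins and laser period (80 MHz)
    t = np.linspace(0, period, n_bins, endpoint=False)
    omega = 2 * np.pi / period

    # Synthetic single-exponential decays for a 64x64 image, tau = 2.5 ns.
    tau = 2.5e-9
    image = np.tile(np.exp(-t / tau), (64, 64, 1))  # shape (64, 64, n_bins)

    # Phasor coordinates: first-harmonic cosine/sine moments of each decay.
    total = image.sum(axis=-1)
    g = (image * np.cos(omega * t)).sum(axis=-1) / total
    s = (image * np.sin(omega * t)).sum(axis=-1) / total

    # Mono-exponential decays fall (approximately, for a finite window) on the
    # universal semicircle: g = 1/(1+(w*tau)^2), s = w*tau/(1+(w*tau)^2).
    print(g[0, 0], s[0, 0])

Running this in a Colab notebook, as the paper advocates, requires no local installation beyond the ubiquitous numpy.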
@article {pmid38053860,
year = {2023},
author = {Moparthi, NR and Balakrishna, G and Chithaluru, P and Kolla, M and Kumar, M},
title = {An improved energy-efficient cloud-optimized load-balancing for IoT frameworks.},
journal = {Heliyon},
volume = {9},
number = {11},
pages = {e21947},
pmid = {38053860},
issn = {2405-8440},
abstract = {As wireless communication grows, so does the need for smart, simple, affordable solutions. This need has prompted academics to develop appropriate network solutions ranging from wireless sensor networks (WSNs) to the Internet of Things (IoT), and with each innovation the demand for enhancements to existing solutions has increased. Initially, network protocols were the focus of study and development. Regardless, IoT devices are already being employed in different industries, collecting massive amounts of data through complicated applications, which necessitates IoT load-balancing research. Several studies have tried to address the communication overheads produced by significant IoT network traffic, intending to control network loads by evenly spreading them across IoT nodes. Eventually, practitioners decided to migrate the IoT node data, and the applications processing it, to the cloud. The difficulty is thus to design a cloud-based load-balancer algorithm that meets the criteria of IoT network protocols. This work defines a unique method for controlling loads on cloud-integrated IoT networks. The suggested method analyses actual and virtual host machine needs in cloud computing environments. The purpose of the proposed model is to design a load balancer that improves network response time while reducing energy consumption. The proposed load-balancer algorithm may be easily integrated with peer existing IoT frameworks. Handling the load for cloud-based IoT architectures with the above-described methods significantly improves IoT network response time, by 60 %. The proposed scheme also shows lower energy consumption (31 %), shorter execution time (24 %), reduced node shutdown time (45 %), and lower infrastructure cost (48 %) in comparison to existing frameworks. Based on the simulation results, it is concluded that the proposed framework offers an improved solution for IoT-based cloud load-balancing issues.},
}
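
The abstract does not detail the balancing algorithm itself, so the following is only a generic least-utilization placement sketch of the kind such cloud load balancers build on; every name, weight and heuristic here is an illustrative assumption.

    from dataclasses import dataclass

    @dataclass
    class Host:
        name: str
        cpu_capacity: float     # abstract capacity units
        energy_per_unit: float  # relative energy cost per unit of load
        load: float = 0.0

        def score(self, demand: float) -> float:
            # Blend post-placement utilization with energy cost, so the balancer
            # trades response time against energy consumption.
            return 0.7 * (self.load + demand) / self.cpu_capacity + 0.3 * self.energy_per_unit

    def place(demand: float, hosts: list[Host]) -> Host:
        feasible = [h for h in hosts if h.load + demand <= h.cpu_capacity]
        best = min(feasible, key=lambda h: h.score(demand))
        best.load += demand
        return best

    hosts = [Host('vm-a', 8.0, 0.9), Host('vm-b', 16.0, 1.2), Host('edge-1', 4.0, 0.4)]
    for demand in (1.0, 2.5, 0.5, 3.0):
        print(demand, '->', place(demand, hosts).name)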
@article {pmid38053722,
year = {2023},
author = {Tang, R and Aridas, NK and Talip, MSA},
title = {Design of a data processing method for the farmland environmental monitoring based on improved Spark components.},
journal = {Frontiers in big data},
volume = {6},
number = {},
pages = {1282352},
pmid = {38053722},
issn = {2624-909X},
abstract = {With the popularization of big data technology, agricultural data processing systems have become more intelligent. In this study, a data processing method for farmland environmental monitoring based on improved Spark components is designed. It introduces the FAST-Join (Join critical filtering sampling partition optimization) algorithm into the Spark component for equi-join query optimization, to improve the operating efficiency of the Spark component and cluster. The experimental results show that the amount of data written and read in Shuffle by Spark optimized with the FAST-Join algorithm accounts for only 0.958 % and 1.384 % of the original data volume on average, and the calculation speed is 202.11 % faster than the original. The average data processing time and occupied memory size of the Spark cluster are reduced by 128.22 % and 76.75 % compared with the originals. The study also compared the cluster performance of the FAST-Join and Equi-join algorithms: the Spark cluster optimized with FAST-Join reduced processing time and occupied memory size by an average of 68.74 % and 37.80 % compared with Equi-join, which shows that the FAST-Join algorithm can effectively improve the efficiency of inter-table querying and cluster computing.},
}
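
The key-filtering idea behind FAST-Join, pruning rows that cannot match before the shuffle-heavy equi-join, can be approximated in stock PySpark by broadcasting the small side's distinct keys and semi-joining first. Table paths and column names below are hypothetical; this mimics the general idea, not the paper's exact partition-optimization algorithm.

    from pyspark.sql import SparkSession, functions as F

    spark = SparkSession.builder.appName('fastjoin-sketch').getOrCreate()

    readings = spark.read.parquet('hdfs:///farm/sensor_readings')  # large table
    stations = spark.read.parquet('hdfs:///farm/stations')         # small table

    # Broadcast the small side's distinct keys and semi-join: rows of the large
    # table with no matching station never enter the expensive shuffle.
    keys = stations.select('station_id').distinct()
    pruned = readings.join(F.broadcast(keys), 'station_id', 'left_semi')

    joined = pruned.join(stations, 'station_id')
    joined.groupBy('crop_type').agg(F.avg('soil_moisture')).show()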
@article {pmid38052579,
year = {2023},
author = {Yu, L and Zhang, Z and Lai, Y and Zhao, Y and Mo, F},
title = {Edge computing-based intelligent monitoring system for manhole cover.},
journal = {Mathematical biosciences and engineering : MBE},
volume = {20},
number = {10},
pages = {18792-18819},
doi = {10.3934/mbe.2023833},
pmid = {38052579},
issn = {1551-0018},
abstract = {Unusual states of manhole covers (MCs), such as being tilted, lost or flooded, can present substantial safety hazards and risks to pedestrians and vehicles on the roadway. Most MCs are still being managed through manual regular inspections and have limited information technology integration. This leads to time-consuming and labor-intensive identification with a lower level of accuracy. In this paper, we propose an edge computing-based intelligent monitoring system for manhole covers (EC-MCIMS). Sensors detect the MC and send status and positioning information via LoRa to the edge gateway located on the nearby wisdom pole. The edge gateway utilizes a lightweight machine learning model, trained on the edge impulse (EI) platform, which can predict the state of the MC. If an abnormality is detected, the display and voice device on the wisdom pole will respectively show and broadcast messages to alert pedestrians and vehicles. Simultaneously, the information is uploaded to the cloud platform, enabling remote maintenance personnel to promptly repair and restore it. Tests were performed on the EI platform and in Dongguan townships, demonstrating that the average response time for identifying MCs is 4.81 s. Higher responsiveness and lower power consumption were obtained compared to cloud computing models. Moreover, the system utilizes a lightweight model that better reduces read-only memory (ROM) and random-access memory (RAM), while maintaining an average identification accuracy of 94%.},
}
@article {pmid38049547,
year = {2023},
author = {Parashar, D and Kumar, A and Palni, S and Pandey, A and Singh, A and Singh, AP},
title = {Use of machine learning-based classification algorithms in the monitoring of Land Use and Land Cover practices in a hilly terrain.},
journal = {Environmental monitoring and assessment},
volume = {196},
number = {1},
pages = {8},
pmid = {38049547},
issn = {1573-2959},
mesh = {Cities ; *Environmental Monitoring/methods ; *Hot Temperature ; Algorithms ; Support Vector Machine ; },
abstract = {The current high rate of urbanization in developing countries and its consequences, like traffic congestion, slum development, scarcity of resources, and urban heat islands, raise a need for better Land Use Land Cover (LULC) classification mapping for improved planning. This study mainly addresses two objectives: 1) to explore the applicability of machine learning-based techniques, especially the Random Forest (RF) and Support Vector Machine (SVM) algorithms, as potential classifiers for LULC mapping under different scenarios, and 2) to prepare a better LULC classification model for mountain terrain by combining different indices with spectral bands. Due to differences in topography, shadows, spectral confusion from overlapping spectral signatures of different land cover types, and a lack of access for ground verification, classification in mountainous terrain is a more difficult task than classification of plain terrain. An enhanced LULC classification model has been designed using two popular machine learning (ML) classifier algorithms, SVM and RF, explicitly for mountainous terrains, taking as a study area Gopeshwer town in the Chamoli district of Uttarakhand state, India. The online cloud platform Google Earth Engine (GEE) was used for overall processing. Four classification models were built using Sentinel-2B satellite imagery at 20 m and 10 m resolutions. Two of these models (Model 'i' based on the RF algorithm and Model 'ii' based on the SVM algorithm) were designed using spectral bands of visible and infrared wavelengths, and the other two (Model 'iii' based on the RF algorithm and Model 'iv' based on the SVM algorithm) added indices to the spectral bands. Accuracy assessment was done using a confusion matrix based on the output results. The results highlight that the overall accuracies for models 'i' and 'ii' were 82 % and 86 % respectively, whereas they were 87.17 % and 87.2 % for models 'iii' and 'iv' respectively. Finally, the study compared the performance of each model on different accuracy metrics for better LULC mapping. It proposes an improved LULC classification model for mountainous terrains, which can contribute to better land management and planning in the study area.},
}
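
The GEE workflow the study describes (sample bands plus indices at labeled points, train a classifier, compute a confusion matrix) reduces to a few calls in the Python API. The training-point asset, band list, date range and parameters below are assumptions for illustration.

    import ee

    ee.Initialize()

    image = (ee.ImageCollection('COPERNICUS/S2_SR')
             .filterDate('2021-10-01', '2021-12-31')
             .filterBounds(ee.Geometry.Point([79.32, 30.41]))  # assumed Gopeshwer vicinity
             .median())

    ndvi = image.normalizedDifference(['B8', 'B4']).rename('NDVI')  # index added to bands
    stack = image.select(['B2', 'B3', 'B4', 'B8', 'B11']).addBands(ndvi)

    points = ee.FeatureCollection('users/example/gopeshwer_training')  # hypothetical asset
    samples = stack.sampleRegions(collection=points, properties=['lulc'], scale=10)

    split = samples.randomColumn()
    train = split.filter(ee.Filter.lt('random', 0.7))
    test = split.filter(ee.Filter.gte('random', 0.7))

    rf = ee.Classifier.smileRandomForest(100).train(train, 'lulc', stack.bandNames())
    matrix = test.classify(rf).errorMatrix('lulc', 'classification')
    print('overall accuracy:', matrix.accuracy().getInfo())

Swapping ee.Classifier.smileRandomForest(100) for ee.Classifier.libsvm() reproduces the RF-versus-SVM comparison at the heart of the four models.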
@article {pmid38046398,
year = {2023},
author = {Babar, M and Ahmad Jan, M and He, X and Usman Tariq, M and Mastorakis, S and Alturki, R},
title = {An Optimized IoT-enabled Big Data Analytics Architecture for Edge-Cloud Computing.},
journal = {IEEE internet of things journal},
volume = {10},
number = {5},
pages = {3995-4005},
pmid = {38046398},
issn = {2327-4662},
support = {P20 GM109090/GM/NIGMS NIH HHS/United States ; },
abstract = {Edge computing is attaining eminence and is widely acknowledged with the rise of the Internet of Things (IoT). Edge-enabled solutions offer efficient computing and control at the network edge to resolve scalability and latency-related concerns. However, it is challenging for edge computing to tackle the diverse applications of IoT, as they produce massive amounts of heterogeneous data. IoT-enabled frameworks for big data analytics face numerous challenges in their existing structural design, for instance, the high volume of data storage and processing, data heterogeneity, and processing time, among others. Moreover, existing proposals lack effective parallel data loading and robust mechanisms for handling communication overhead. To address these challenges, we propose an optimized IoT-enabled big data analytics architecture for edge-cloud computing using machine learning. In the proposed scheme, an edge intelligence module is introduced to process and store big data efficiently at the edges of the network with the integration of cloud technology. The proposed scheme is composed of two layers: IoT-edge and cloud-processing. Data ingestion and storage are carried out with an optimized MapReduce parallel algorithm, and optimized Yet Another Resource Negotiator (YARN) is used to manage the cluster efficiently. The proposed design is experimentally evaluated with an authentic dataset using Apache Spark, and a comparative analysis is conducted against existing proposals and traditional mechanisms. The results justify the efficiency of our proposed work.},
}
@article {pmid38043630,
year = {2024},
author = {Doo, FX and Parekh, VS and Kanhere, A and Savani, D and Tejani, AS and Sapkota, A and Yi, PH},
title = {Evaluation of Climate-Aware Metrics Tools for Radiology Informatics and Artificial Intelligence: Toward a Potential Radiology Ecolabel.},
journal = {Journal of the American College of Radiology : JACR},
volume = {21},
number = {2},
pages = {239-247},
doi = {10.1016/j.jacr.2023.11.019},
pmid = {38043630},
issn = {1558-349X},
mesh = {Humans ; Artificial Intelligence ; Radiography ; *Radiology ; *Medical Informatics ; Diagnostic Imaging ; },
abstract = {Radiology is a major contributor to health care's impact on climate change, in part due to its reliance on energy-intensive equipment as well as its growing technological reliance. Delivering modern patient care requires a robust informatics team to move images from the imaging equipment to the workstations and the health care system. Radiology informatics is the field that manages medical imaging IT. This involves the acquisition, storage, retrieval, and use of imaging information in health care to improve access and quality, which includes PACS, cloud services, and artificial intelligence. However, the electricity consumption of computing and the life cycle of various computer components expands the carbon footprint of health care. The authors provide a general framework to understand the environmental impact of clinical radiology informatics, which includes using the international Greenhouse Gas Protocol to draft a definition of scopes of emissions pertinent to radiology informatics, as well as exploring existing tools to measure and account for these emissions. A novel standard ecolabel for radiology informatics tools, such as the Energy Star label for consumer devices or Leadership in Energy and Environmental Design certification for buildings, should be developed to promote awareness and guide radiologists and radiology informatics leaders in making environmentally conscious decisions for their clinical practice. At this critical climate juncture, the radiology community has a unique and pressing obligation to consider our shared environmental responsibility in innovating clinical technology for patient care.},
}
@article {pmid38042609,
year = {2023},
author = {Shaikh, TA and Rasool, T and Verma, P},
title = {Machine intelligence and medical cyber-physical system architectures for smart healthcare: Taxonomy, challenges, opportunities, and possible solutions.},
journal = {Artificial intelligence in medicine},
volume = {146},
number = {},
pages = {102692},
doi = {10.1016/j.artmed.2023.102692},
pmid = {38042609},
issn = {1873-2860},
mesh = {Humans ; *Artificial Intelligence ; *Computer Security ; Delivery of Health Care ; Cloud Computing ; },
abstract = {Hospitals increasingly use medical cyber-physical systems (MCPS) to give patients quality continuous care. An MCPS is a life-critical, context-aware, networked system of medical equipment. It has been challenging to achieve high assurance in system software, interoperability, context-aware intelligence, autonomy, security and privacy, and device certifiability due to the necessity of creating complicated MCPS that are safe and efficient. The MCPS is presented in this paper as a newly developed application case study of artificial intelligence in healthcare. Applications of various CPS-based healthcare systems are discussed, such as telehealthcare systems for managing chronic diseases (cardiovascular diseases, epilepsy, hearing loss, and respiratory diseases), supporting medication intake management, and tele-homecare systems. The goal of this study is to provide a thorough overview of the essential components of the MCPS from several angles, including design, methodology, and important enabling technologies, including sensor networks, the Internet of Things (IoT), cloud computing, and multi-agent systems. Additionally, some significant applications are investigated, such as smart cities, which are regarded as one of the key applications that will offer new services for industrial systems, transportation networks, energy distribution, monitoring of environmental changes, business and commerce applications, emergency response, and other social and recreational activities. The four layers of an MCPS's general architecture (data collection, data aggregation, cloud processing, and action) are shown in this study. Different encryption techniques must be employed to ensure data privacy inside each layer due to the variations in hardware and communication capabilities of each layer. We compare established and new encryption techniques based on how well they support safe data exchange, secure computing, and secure storage. Our thorough experimental study of each method reveals that, although enabling innovative new features like secure sharing and safe computing, developing encryption approaches significantly increases computational and storage overhead. To increase the usability of newly developed encryption schemes in an MCPS and to provide a comprehensive list of tools and databases to assist other researchers, we conclude the paper with a list of opportunities and challenges for incorporating machine intelligence-based MCPS in healthcare applications.},
}
@article {pmid38039654,
year = {2024},
author = {Chen, X and Li, J and Chen, D and Zhou, Y and Tu, Z and Lin, M and Kang, T and Lin, J and Gong, T and Zhu, L and Zhou, J and Lin, OY and Guo, J and Dong, J and Guo, D and Qu, X},
title = {CloudBrain-MRS: An intelligent cloud computing platform for in vivo magnetic resonance spectroscopy preprocessing, quantification, and analysis.},
journal = {Journal of magnetic resonance (San Diego, Calif. : 1997)},
volume = {358},
number = {},
pages = {107601},
doi = {10.1016/j.jmr.2023.107601},
pmid = {38039654},
issn = {1096-0856},
mesh = {Humans ; *Cloud Computing ; *Artificial Intelligence ; Magnetic Resonance Spectroscopy/methods ; Magnetic Resonance Imaging/methods ; Software ; },
abstract = {Magnetic resonance spectroscopy (MRS) is an important clinical imaging method for the diagnosis of diseases. The MRS spectrum is used to observe the signal intensity of metabolites or further infer their concentrations. Although magnetic resonance vendors commonly provide basic functions for spectrum plotting and metabolite quantification, the spread of clinical MRS research is still limited by the lack of easy-to-use processing software or platforms. To address this issue, we have developed CloudBrain-MRS, a cloud-based online platform that provides powerful hardware and advanced algorithms. The platform can be accessed simply through a web browser, without the need for any program installation on the user side. CloudBrain-MRS integrates the classic LCModel and advanced artificial intelligence algorithms and supports batch preprocessing, quantification, and analysis of MRS data from different vendors. Additionally, the platform offers useful functions: (1) automatic statistical analysis to find biomarkers for diseases; (2) consistency verification between the classic and artificial intelligence quantification algorithms; and (3) colorful three-dimensional visualization for easy observation of individual metabolite spectra. Finally, data from both healthy subjects and patients with mild cognitive impairment are used to demonstrate the functions of the platform. To the best of our knowledge, this is the first cloud computing platform for in vivo MRS with artificial intelligence processing. We have shared our cloud platform at MRSHub, providing at least two years of free access and service. If you are interested, please visit https://mrshub.org/software_all/#CloudBrain-MRS or https://csrc.xmu.edu.cn/CloudBrain.html.},
}
@article {pmid38035280,
year = {2023},
author = {Zhao, K and Farrell, K and Mashiku, M and Abay, D and Tang, K and Oberste, MS and Burns, CC},
title = {A search-based geographic metadata curation pipeline to refine sequencing institution information and support public health.},
journal = {Frontiers in public health},
volume = {11},
number = {},
pages = {1254976},
pmid = {38035280},
issn = {2296-2565},
mesh = {*Metadata ; *Public Health ; High-Throughput Nucleotide Sequencing ; China ; United Kingdom ; },
abstract = {BACKGROUND: The National Center for Biotechnology Information (NCBI) Sequence Read Archive (SRA) has amassed a vast reservoir of genetic data since its inception in 2007. These public data hold immense potential for supporting pathogen surveillance and control. However, the lack of standardized metadata and inconsistent submission practices in SRA may impede the data's utility in public health.
METHODS: To address this issue, we introduce the Search-based Geographic Metadata Curation (SGMC) pipeline. SGMC utilized Python and web scraping to extract geographic data of sequencing institutions from NCBI SRA in the Cloud and its website. It then harnessed ChatGPT to refine the sequencing institution and location assignments. To illustrate the pipeline's utility, we examined the geographic distribution of the sequencing institutions and their countries relevant to polio eradication and categorized them.
RESULTS: SGMC successfully identified 7,649 sequencing institutions and their global locations from a random selection of 2,321,044 SRA accessions. These institutions were distributed across 97 countries, with strong representation in the United States, the United Kingdom and China. However, there was a lack of data from African, Central Asian, and Central American countries, indicating potential disparities in sequencing capabilities. Comparison with manually curated data for U.S. institutions reveals SGMC's accuracy rates of 94.8% for institutions, 93.1% for countries, and 74.5% for geographic coordinates.
CONCLUSION: SGMC may represent a novel approach using a generative AI model to enhance geographic data (country and institution assignments) for large numbers of samples within SRA datasets. This information can be utilized to bolster public health endeavors.},
}
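
A rough sketch of the two SGMC stages, pulling an SRA record's submitter metadata and asking a generative model to normalize the institution, is given below. The E-utilities parameters, the model name, the prompt and the UID are assumptions, not the pipeline's actual code.

    import requests
    from openai import OpenAI

    EUTILS = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esummary.fcgi'

    def fetch_submitter_blob(sra_uid: str) -> str:
        # The submitter institution is buried in the experiment XML summary;
        # returning the raw blob keeps this sketch simple.
        resp = requests.get(EUTILS, params={'db': 'sra', 'id': sra_uid, 'retmode': 'json'})
        resp.raise_for_status()
        return resp.json()['result'][sra_uid].get('expxml', '')

    def normalize_institution(raw: str) -> str:
        client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment
        chat = client.chat.completions.create(
            model='gpt-4o-mini',  # stand-in; the paper reports using ChatGPT
            messages=[{'role': 'user',
                       'content': f'Extract "institution; country" from: {raw}'}])
        return chat.choices[0].message.content

    print(normalize_institution(fetch_submitter_blob('some-sra-uid')))  # hypothetical UID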
@article {pmid38035195,
year = {2023},
author = {Olson, RH and Cohen Kalafut, N and Wang, D},
title = {MANGEM: A web app for multimodal analysis of neuronal gene expression, electrophysiology, and morphology.},
journal = {Patterns (New York, N.Y.)},
volume = {4},
number = {11},
pages = {100847},
pmid = {38035195},
issn = {2666-3899},
support = {P50 HD105353/HD/NICHD NIH HHS/United States ; R01 AG067025/AG/NIA NIH HHS/United States ; RF1 MH128695/MH/NIMH NIH HHS/United States ; },
abstract = {Single-cell techniques like Patch-seq have enabled the acquisition of multimodal data from individual neuronal cells, offering systematic insights into neuronal functions. However, these data can be heterogeneous and noisy. To address this, machine learning methods have been used to align cells from different modalities onto a low-dimensional latent space, revealing multimodal cell clusters. The use of those methods can be challenging without computational expertise or suitable computing infrastructure for computationally expensive methods. To address this, we developed a cloud-based web application, MANGEM (multimodal analysis of neuronal gene expression, electrophysiology, and morphology). MANGEM provides a step-by-step accessible and user-friendly interface to machine learning alignment methods of neuronal multimodal data. It can run asynchronously for large-scale data alignment, provide users with various downstream analyses of aligned cells, and visualize the analytic results. We demonstrated the usage of MANGEM by aligning multimodal data of neuronal cells in the mouse visual cortex.},
}
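
As a toy version of the alignment step MANGEM wraps, the sketch below projects two synthetic modalities of the same cells into a shared latent space with CCA and clusters the result; MANGEM itself offers several alignment methods, and all sizes here are arbitrary.

    import numpy as np
    from sklearn.cross_decomposition import CCA
    from sklearn.cluster import KMeans

    rng = np.random.default_rng(0)
    n_cells = 300
    latent = rng.normal(size=(n_cells, 2))  # shared structure across modalities
    expr = latent @ rng.normal(size=(2, 50)) + 0.5 * rng.normal(size=(n_cells, 50))
    ephys = latent @ rng.normal(size=(2, 12)) + 0.5 * rng.normal(size=(n_cells, 12))

    # Align the two views, then look for multimodal cell clusters.
    cca = CCA(n_components=2)
    expr_z, ephys_z = cca.fit_transform(expr, ephys)
    joint = (expr_z + ephys_z) / 2
    labels = KMeans(n_clusters=3, n_init='auto').fit_predict(joint)
    print(np.bincount(labels))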
@article {pmid38027905,
year = {2023},
author = {Ait Abdelmoula, I and Idrissi Kaitouni, S and Lamrini, N and Jbene, M and Ghennioui, A and Mehdary, A and El Aroussi, M},
title = {Towards a sustainable edge computing framework for condition monitoring in decentralized photovoltaic systems.},
journal = {Heliyon},
volume = {9},
number = {11},
pages = {e21475},
pmid = {38027905},
issn = {2405-8440},
abstract = {In recent times, rapid advancements in technology have led to a digital revolution in urban areas, and new computing frameworks are emerging to address current issues in monitoring and fault detection, particularly in the context of growing renewable decentralized energy systems. This research proposes a novel framework for monitoring the condition of decentralized photovoltaic systems within a smart city infrastructure. The approach uses edge computing to overcome the challenges associated with costly processing through remote cloud servers. By processing data at the edge of the network, this concept allows for significant gains in speed and savings in bandwidth consumption, making it suitable for a sustainable city environment. In the proposed edge-learning scheme, several machine learning models are compared to find the most suitable model achieving both high accuracy and low latency in detecting photovoltaic faults. Four light and rapid machine learning models, namely CBLOF, LOF, KNN, and ANN, were selected as the best performers and trained locally in decentralized edge nodes. The overall approach is deployed in a smart solar campus with multiple distributed PV units located in the R&D platform Green & Smart Building Park. Several experiments were conducted on different anomaly scenarios, and the models were evaluated based on their supervision method, f1-score, inference time, RAM usage, and model size. The paper also investigates the impact of the type of supervision and the class of the model on anomaly detection performance. The findings indicated that the supervised artificial neural network (ANN) had superior performance compared to the other models, obtaining an f1-score of 80 % even in the most unfavorable conditions. The findings also showed that KNN was the most suitable unsupervised model for the investigated experiments, achieving good f1-scores (100 %, 95 % and 92 %) in 3 out of 4 scenarios, making it a good candidate for similar anomaly detection tasks.},
}
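
Two of the unsupervised detectors compared above, LOF and a KNN-distance detector, take only a few lines with scikit-learn. The synthetic inverter features and the 97th-percentile threshold are assumptions.

    import numpy as np
    from sklearn.neighbors import LocalOutlierFactor, NearestNeighbors

    rng = np.random.default_rng(1)
    normal = rng.normal(loc=[5.0, 230.0], scale=[0.3, 2.0], size=(500, 2))  # [current A, voltage V]
    faults = rng.normal(loc=[1.0, 180.0], scale=[0.5, 5.0], size=(15, 2))   # e.g. a string outage
    X = np.vstack([normal, faults])

    # LOF compares each point's local density with that of its neighbors.
    lof_flags = LocalOutlierFactor(n_neighbors=20).fit_predict(X) == -1

    # KNN detector: distance to the k-th neighbor as the anomaly score.
    dist, _ = NearestNeighbors(n_neighbors=20).fit(X).kneighbors(X)
    knn_flags = dist[:, -1] > np.percentile(dist[:, -1], 97)

    print('LOF flagged:', lof_flags.sum(), 'KNN flagged:', knn_flags.sum())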
@article {pmid38027596,
year = {2023},
author = {Mohammed, MA and Lakhan, A and Abdulkareem, KH and Khanapi Abd Ghani, M and Abdulameer Marhoon, H and Nedoma, J and Martinek, R},
title = {Multi-objectives reinforcement federated learning blockchain enabled Internet of things and Fog-Cloud infrastructure for transport data.},
journal = {Heliyon},
volume = {9},
number = {11},
pages = {e21639},
doi = {10.1016/j.heliyon.2023.e21639},
pmid = {38027596},
issn = {2405-8440},
abstract = {For the past decade, there has been a significant increase in customer usage of public transport applications in smart cities. These applications rely on various services, such as communication and computation, provided by additional nodes within the smart city environment. However, these services are delivered by a diverse range of cloud computing-based servers that are widely spread and heterogeneous, leading to cybersecurity becoming a crucial challenge among these servers. Numerous machine-learning approaches have been proposed in the literature to address the cybersecurity challenges in heterogeneous transport applications within smart cities. However, the centralized security and scheduling strategies suggested so far have yet to produce optimal results for transport applications. This work aims to present a secure decentralized infrastructure for transporting data in fog cloud networks. This paper introduces Multi-Objectives Reinforcement Federated Learning Blockchain (MORFLB) for Transport Infrastructure. MORFLB aims to minimize processing and transfer delays while maximizing long-term rewards by identifying known and unknown attacks on remote sensing data in-vehicle applications. MORFLB incorporates multi-agent policies, proof-of-work hashing validation, and decentralized deep neural network training to achieve minimal processing and transfer delays. It comprises vehicle applications, decentralized fog, and cloud nodes based on blockchain reinforcement federated learning, which improves rewards through trial and error. The study formulates a combinatorial problem that minimizes and maximizes various factors for vehicle applications. The experimental results demonstrate that MORFLB effectively reduces processing and transfer delays while maximizing rewards compared to existing studies. It provides a promising solution to address the cybersecurity challenges in intelligent transport applications within smart cities. In conclusion, this paper presents MORFLB, a combination of different schemes that ensure the execution of transport data under their constraints and achieve optimal results with the suggested decentralized infrastructure based on blockchain technology.},
}
@article {pmid38027579,
year = {2023},
author = {Guo, LL and Calligan, M and Vettese, E and Cook, S and Gagnidze, G and Han, O and Inoue, J and Lemmon, J and Li, J and Roshdi, M and Sadovy, B and Wallace, S and Sung, L},
title = {Development and validation of the SickKids Enterprise-wide Data in Azure Repository (SEDAR).},
journal = {Heliyon},
volume = {9},
number = {11},
pages = {e21586},
pmid = {38027579},
issn = {2405-8440},
abstract = {OBJECTIVES: To describe the processes developed by The Hospital for Sick Children (SickKids) to enable utilization of electronic health record (EHR) data by creating sequentially transformed schemas for use across multiple user types.
METHODS: We used Microsoft Azure as the cloud service provider and named this effort the SickKids Enterprise-wide Data in Azure Repository (SEDAR). Epic Clarity data from on-premises was copied to a virtual network in Microsoft Azure. Three sequential schemas were developed. The Filtered Schema added a filter to retain only SickKids and valid patients. The Curated Schema created a data structure that was easier to navigate and query. Each table contained a logical unit such as patients, hospital encounters or laboratory tests. Data validation of randomly sampled observations in the Curated Schema was performed. The SK-OMOP Schema was designed to facilitate research and machine learning. Two individuals mapped medical elements to standard Observational Medical Outcomes Partnership (OMOP) concepts.
RESULTS: A copy of Clarity data was transferred to Microsoft Azure and updated each night using log shipping. The Filtered Schema and Curated Schema were implemented as stored procedures and executed each night with incremental updates or full loads. Data validation required up to 16 iterations for each Curated Schema table. OMOP concept mapping achieved at least 80 % coverage for each SK-OMOP table.
CONCLUSIONS: We described our experience in creating three sequential schemas to address different EHR data access requirements. Future work should consider replicating this approach at other institutions to determine whether approaches are generalizable.},
}
@article {pmid38022923,
year = {2023},
author = {Han, J and Sun, R and Zeeshan, M and Rehman, A and Ullah, I},
title = {The impact of digital transformation on green total factor productivity of heavily polluting enterprises.},
journal = {Frontiers in psychology},
volume = {14},
number = {},
pages = {1265391},
pmid = {38022923},
issn = {1664-1078},
abstract = {INTRODUCTION: Digital transformation has become an important engine for economic high-quality development and environment high-level protection. However, green total factor productivity (GTFP), as an indicator that comprehensively reflects economic and environmental benefits, there is a lack of studies that analyze the effect of digital transformation on heavily polluting enterprises' GTFP from a micro perspective, and its impact mechanism is still unclear. Therefore, we aim to study the impact of digital transformation on heavily polluting enterprises' GTFP and its mechanism, and explore the heterogeneity of its impact.
METHODS: We use Chinese A-share listed enterprises in the heavily polluting industry data from 2007 to 2019, measure enterprise digital transformation indicator using text analysis, and measure enterprise GTFP indicator using the GML index based on SBM directional distance function, to investigate the impact of digital transformation on heavily polluting enterprises' GTFP.
RESULTS: Digital transformation can significantly enhance heavily polluting enterprises' GTFP, and this finding still holds after considering the endogenous problem and conducting robustness tests. Digital transformation can enhance heavily polluting enterprises' GTFP by promoting green innovation, improving management efficiency, and reducing external transaction costs. The improvement role of digital transformation on heavily polluting enterprises' GTFP is more obvious in the samples of non-state-owned enterprises, non-high-tech industries, and the eastern region. Compared with blockchain technology, artificial intelligence technology, cloud computing technology, big data technology, and digital technology application can significantly improve heavily polluting enterprises' GTFP.
DISCUSSION: Our paper breaks through the limitations of existing research, which not only theoretically enriches the literature related to digital transformation and GTFP, but also practically provides policy implications for continuously promoting heavily polluting enterprises' digital transformation and facilitating their high-quality development.},
}
@article {pmid38020047,
year = {2023},
author = {Ko, HYK and Tripathi, NK and Mozumder, C and Muengtaweepongsa, S and Pal, I},
title = {Real-Time Remote Patient Monitoring and Alarming System for Noncommunicable Lifestyle Diseases.},
journal = {International journal of telemedicine and applications},
volume = {2023},
number = {},
pages = {9965226},
pmid = {38020047},
issn = {1687-6415},
abstract = {Telemedicine and remote patient monitoring (RPM) systems have been gaining interest and adoption in healthcare sectors since the COVID-19 pandemic due to their efficiency and capability to deliver timely healthcare services while containing COVID-19 transmission. These systems were developed using the latest technology in wireless sensors, medical devices, cloud computing, mobile computing, telecommunications, and machine learning. In this article, a real-time remote patient monitoring system is proposed with an accessible, compact, accurate, and low-cost design. The implemented system is designed as an end-to-end communication interface between medical practitioners and patients. The objective of this study is to provide remote healthcare services to patients who need ongoing care or who have been discharged from the hospital, without affecting their daily routines. The developed monitoring system was evaluated on 1177 records from the MIMIC-III clinical dataset (patients aged between 19 and 99 years). The performance analysis of the proposed system achieved 88.7% accuracy in generating alerts with a logistic regression classification algorithm. This result reflects positively on the quality and robustness of the proposed study. Since the processing time of the proposed system is less than 2 minutes, the system has a high computational speed and is convenient for real-time monitoring. Furthermore, the proposed system can help compensate for low doctor-to-patient ratios by monitoring patients in remote locations and elderly people who reside in their own residences.},
}
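
The alert classifier the entry reports is plain logistic regression over vital signs. A self-contained sketch on synthetic vitals (the alert rule and distributions are assumptions; the study trained on MIMIC-III records) follows:

    import numpy as np
    from sklearn.linear_model import LogisticRegression
    from sklearn.metrics import accuracy_score
    from sklearn.model_selection import train_test_split

    rng = np.random.default_rng(7)
    n = 1177                                         # matches the record count above
    hr = rng.normal(80, 15, n)                       # heart rate, bpm
    spo2 = np.clip(rng.normal(97, 2.5, n), 80, 100)  # oxygen saturation, %
    temp = rng.normal(36.8, 0.6, n)                  # body temperature, deg C
    X = np.column_stack([hr, spo2, temp])
    y = ((hr > 110) | (spo2 < 92) | (temp > 38.3)).astype(int)  # assumed alert rule

    X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.3, random_state=0)
    clf = LogisticRegression(max_iter=1000).fit(X_tr, y_tr)
    print('alert accuracy:', accuracy_score(y_te, clf.predict(X_te)))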
@article {pmid38006682,
year = {2024},
author = {Geroski, T and Gkaintes, O and Vulović, A and Ukaj, N and Barrasa-Fano, J and Perez-Boerema, F and Milićević, B and Atanasijević, A and Živković, J and Živić, A and Roumpi, M and Exarchos, T and Hellmich, C and Scheiner, S and Van Oosterwyck, H and Jakovljević, D and Ivanović, M and Filipović, N},
title = {SGABU computational platform for multiscale modeling: Bridging the gap between education and research.},
journal = {Computer methods and programs in biomedicine},
volume = {243},
number = {},
pages = {107935},
doi = {10.1016/j.cmpb.2023.107935},
pmid = {38006682},
issn = {1872-7565},
mesh = {*Software ; *User-Computer Interface ; Computer Simulation ; Language ; Workflow ; Computational Biology/methods ; },
abstract = {BACKGROUND AND OBJECTIVE: In accordance with the latest aspirations in the field of bioengineering, there is a need to create a web-accessible but powerful cloud computational platform that combines datasets and multiscale models related to bone modeling, cancer, cardiovascular diseases and tissue engineering. The SGABU platform may become a powerful information system for research and education that can integrate data, extract information, and facilitate knowledge exchange with the goal of creating and developing appropriate computing pipelines to provide accurate and comprehensive biological information from the molecular to organ level.
METHODS: The datasets integrated into the platform are obtained from experimental and/or clinical studies and are mainly in tabular or image file format, including metadata. The implementation of multiscale models is an ambitious effort of the platform to capture phenomena at different length scales, described using partial and ordinary differential equations, which are solved numerically on complex geometries with the use of the finite element method. The majority of the SGABU platform's simulation pipelines are provided as Common Workflow Language (CWL) workflows. Each of them requires creating a CWL implementation on the backend and a user-friendly interface using standard web technologies. The platform is available at https://sgabu-test.unic.kg.ac.rs/login.
RESULTS: The main dashboard of the SGABU platform is divided into sections for each field of research, each of which includes a subsection of datasets and multiscale models. The datasets can be presented in a simple form as tabular data, or using technologies such as Plotly.js for 2D plot interactivity and Kitware Paraview Glance for 3D viewing. Regarding the models, Docker containerization is used for packaging the individual tools, and CWL orchestration for describing inputs (with validation forms) and outputs (with tabular views for output visualization, interactive diagrams, 3D views and animations).
CONCLUSIONS: In practice, the structure of the SGABU platform means that any of the integrated workflows can work equally well on any other bioengineering platform. The key advantage of the SGABU platform over similar efforts is the versatility offered by its use of modern, modular, and extensible technology across the various levels of its architecture.},
}
@article {pmid38005614,
year = {2023},
author = {Zhang, T and Jin, X and Bai, S and Peng, Y and Li, Y and Zhang, J},
title = {Smart Public Transportation Sensing: Enhancing Perception and Data Management for Efficient and Safety Operations.},
journal = {Sensors (Basel, Switzerland)},
volume = {23},
number = {22},
pages = {},
pmid = {38005614},
issn = {1424-8220},
support = {No. KCXST20221021111201002//Science and Technology Innovation Committee of Shenzhen/ ; },
abstract = {The use of cloud computing, big data, IoT, and mobile applications in the public transportation industry has resulted in the generation of vast and complex data, whose large volume and variety have posed several obstacles to effective, high-efficiency data sensing and processing in a real-time data-driven public transportation management system. To overcome the above-mentioned challenges and to guarantee optimal data availability for data sensing and processing in public transportation perception, a public transportation sensing platform is proposed to collect, integrate, and organize diverse data from different data sources. The proposed data perception platform connects multiple data systems and edge intelligent perception devices to enable the collection of various types of data, including passengers' traveling information and smart-card transaction data. To enable the efficient extraction of precise and detailed traveling behavior, an efficient field-level data lineage exploration method, applied during logical plan generation, is proposed and seamlessly integrated into the FlinkSQL system. Furthermore, a row-level fine-grained permission control mechanism is adopted to support flexible data management. With these two techniques, the proposed data management system can support efficient processing of large amounts of data, and it conducts comprehensive analysis and application of business data from numerous sources to realize the value of the data with high data safety. Through operational testing in real environments, the proposed platform has proven highly efficient and effective in managing organizational operations, data assets, the data life cycle, offline development, and backend administration over large amounts of various types of public transportation traffic data.},
}
@article {pmid38005586,
year = {2023},
author = {Nugroho, AK and Shioda, S and Kim, T},
title = {Optimal Resource Provisioning and Task Offloading for Network-Aware and Federated Edge Computing.},
journal = {Sensors (Basel, Switzerland)},
volume = {23},
number = {22},
pages = {},
pmid = {38005586},
issn = {1424-8220},
support = {2021R1F1A1059109//National Research Foundation of Korea/ ; Research Grant, 2022//Pusan National University/ ; },
abstract = {Compared to cloud computing, mobile edge computing (MEC) is a promising solution for delay-sensitive applications due to its proximity to end users. Because of its ability to offload resource-intensive tasks to nearby edge servers, MEC allows a diverse range of compute- and storage-intensive applications to operate on resource-constrained devices. The optimal utilization of MEC can lead to enhanced responsiveness and quality of service, but it requires careful design from the perspective of user-base station association, virtualized resource provisioning, and task distribution. Moreover, the federation concept has seen limited exploration in the existing literature, and its impact on the allocation and management of resources remains not widely recognized. In this paper, we study the network and MEC resource scheduling problem, where some edge servers are federated, limiting resource expansion within the same federations. The integration of network and MEC is crucial, emphasizing the necessity of a joint approach. In this work, we present NAFEOS, a solution formulated as a two-stage algorithm that effectively integrates association optimization with vertical and horizontal scaling. The Stage-1 problem optimizes the user-base station association and federation assignment so that the edge servers can be utilized in a balanced manner. Stage-2 then dynamically schedules both vertical and horizontal scaling so that the fluctuating task-offloading demands from users are fulfilled. The extensive evaluations and comparison results show that the proposed approach can effectively achieve optimal resource utilization.},
}
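
A toy version of a Stage-1-style association step, greedily assigning each user to the least-utilized reachable server, is sketched below; capacities, demands and the heuristic itself are illustrative assumptions, not the NAFEOS formulation, which solves association and federation assignment jointly.

    from collections import defaultdict

    servers = {'s1': ('fed-A', 10.0), 's2': ('fed-A', 10.0), 's3': ('fed-B', 6.0)}
    load = defaultdict(float)  # current load per server

    def assign(user_demand: float, reachable: list[str]) -> str:
        # Pick the reachable server whose post-assignment utilization is lowest.
        best = min(reachable, key=lambda s: (load[s] + user_demand) / servers[s][1])
        load[best] += user_demand
        return best

    for demand, reach in [(1.5, ['s1', 's2']), (2.0, ['s2', 's3']), (0.5, ['s1', 's3'])]:
        print(demand, '->', assign(demand, reach))
    print(dict(load))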
@article {pmid38005558,
year = {2023},
author = {Oliveira, M and Chauhan, S and Pereira, F and Felgueiras, C and Carvalho, D},
title = {Blockchain Protocols and Edge Computing Targeting Industry 5.0 Needs.},
journal = {Sensors (Basel, Switzerland)},
volume = {23},
number = {22},
pages = {},
pmid = {38005558},
issn = {1424-8220},
abstract = {"Industry 5.0" is the latest industrial revolution. A variety of cutting-edge technologies, including artificial intelligence, the Internet of Things (IoT), and others, come together to form it. Billions of devices are connected for high-speed data transfer, especially in a 5G-enabled industrial environment for information collection and processing. Most of the issues, such as access control mechanism, time to fetch the data from different devices, and protocols used, may not be applicable in the future as these protocols are based upon a centralized mechanism. This centralized mechanism may have a single point of failure along with the computational overhead. Thus, there is a need for an efficient decentralized access control mechanism for device-to-device (D2D) communication in various industrial sectors, for example, sensors in different regions may collect and process the data for making intelligent decisions. In such an environment, reliability, security, and privacy are major concerns as most of the solutions are based upon a centralized control mechanism. To mitigate the aforementioned issues, this paper provides the opportunities for and highlights some of the most impressive initiatives that help to curve the future. This new era will bring about significant changes in the way businesses operate, allowing them to become more cost-effective, more efficient, and produce higher-quality goods and services. As sensors are getting more accurate, cheaper, and have lower time responses, 5G networks are being integrated, and more industrial equipment and machinery are becoming available; hence, various sectors, including the manufacturing sector, are going through a significant period of transition right now. Additionally, the emergence of the cloud enables modern production models that use the cloud (both internal and external services), networks, and systems to leverage the cloud's low cost, scalability, increased computational power, real-time communication, and data transfer capabilities to create much smarter and more autonomous systems. We discuss the ways in which decentralized networks that make use of protocols help to achieve decentralization and how network meshes can grow to make things more secure, reliable, and cohere with these technologies, which are not going away anytime soon. We emphasize the significance of new design in regard to cybersecurity, data integrity, and storage by using straightforward examples that have the potential to lead to the excellence of distributed systems. This groundbreaking paper delves deep into the world of industrial automation and explores the possibilities to adopt blockchain for developing solutions for smart cities, smart homes, healthcare, smart agriculture, autonomous vehicles, and supply chain management within Industry 5.0. With an in-depth examination of various consensus mechanisms, readers gain a comprehensive understanding of the latest developments in this field. The paper also explores the current issues and challenges associated with blockchain adaptation for industrial automation and provides a thorough comparison of the available consensus, enabling end customers to select the most suitable one based on its unique advantages. Case studies highlight how to enable the adoption of blockchain in Industry 5.0 solutions effectively and efficiently, offering valuable insights into the potential challenges that lie ahead, particularly for smart industrial applications.},
}
@article {pmid38004827,
year = {2023},
author = {Kim, J and Koh, H},
title = {MiTree: A Unified Web Cloud Analytic Platform for User-Friendly and Interpretable Microbiome Data Mining Using Tree-Based Methods.},
journal = {Microorganisms},
volume = {11},
number = {11},
pages = {},
pmid = {38004827},
issn = {2076-2607},
support = {2021R1C1C1013861//National Research Foundation of Korea/ ; },
abstract = {The advent of next-generation sequencing has greatly accelerated the field of human microbiome studies. Currently, investigators are seeking, struggling and competing to find new ways to diagnose, treat and prevent human diseases through the human microbiome. Machine learning is a promising approach to help such an effort, especially due to the high complexity of microbiome data. However, many of the current machine learning algorithms are in a "black box", i.e., they are difficult to understand and interpret. In addition, clinicians, public health practitioners and biologists are not usually skilled at computer programming, and they do not always have high-end computing devices. Thus, in this study, we introduce a unified web cloud analytic platform, named MiTree, for user-friendly and interpretable microbiome data mining. MiTree employs tree-based learning methods, including decision tree, random forest and gradient boosting, that are well understood and suited to human microbiome studies. We also stress that MiTree can address both classification and regression problems through covariate-adjusted or unadjusted analysis. MiTree should serve as an easy-to-use and interpretable data mining tool for microbiome-based disease prediction modeling, and should provide new insights into microbiome-based diagnostics, treatment and prevention. MiTree is an open-source software that is available on our web server.},
}
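
The three model families MiTree exposes, decision tree, random forest and gradient boosting, are shown below on a synthetic microbiome-like abundance table with scikit-learn; the data and hyperparameters are illustrative assumptions.

    import numpy as np
    from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
    from sklearn.model_selection import cross_val_score
    from sklearn.tree import DecisionTreeClassifier

    rng = np.random.default_rng(42)
    X = rng.dirichlet(np.ones(200), size=150)  # 150 samples x 200 taxa, compositional
    y = (X[:, 0] + X[:, 1] > np.median(X[:, 0] + X[:, 1])).astype(int)  # mock disease label

    for model in (DecisionTreeClassifier(max_depth=4),
                  RandomForestClassifier(n_estimators=300),
                  GradientBoostingClassifier()):
        print(type(model).__name__, round(cross_val_score(model, X, y, cv=5).mean(), 3))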
@article {pmid37987882,
year = {2023},
author = {Bahadur, FT and Shah, SR and Nidamanuri, RR},
title = {Applications of remote sensing vis-à-vis machine learning in air quality monitoring and modelling: a review.},
journal = {Environmental monitoring and assessment},
volume = {195},
number = {12},
pages = {1502},
pmid = {37987882},
issn = {1573-2959},
mesh = {*Artificial Intelligence ; Remote Sensing Technology ; Environmental Monitoring ; *Air Pollution ; Machine Learning ; },
abstract = {Environmental contamination, especially air pollution, is an exponentially growing menace requiring immediate attention, as it lingers on with associated risks of health, economic and ecological crises. The special focus of this study is on advances in Air Quality (AQ) monitoring using modern sensors, integrated monitoring systems, remote sensing, and the usage of Machine Learning (ML) and Deep Learning (DL) algorithms, artificial neural networks, recent computational techniques, hybridizing techniques and different platforms available for AQ modelling. The modern world is data-driven, where critical decisions are taken based on the available and accessible data. Today's data analytics is a consequence of the information explosion we have reached. Current research also tends to re-evaluate its scope with data analytics. The emergence of artificial intelligence and machine learning in the research scenario has radically changed the methodologies and approaches of modern research. The aim of this review is to assess the impact of data analytics such as ML/DL frameworks, data integration techniques, advanced statistical modelling, cloud computing platforms and constantly improving optimization algorithms on AQ research. The usage of remote sensing in AQ monitoring, along with providing enormous datasets, is constantly filling the spatial gaps of ground stations, as long-term air pollutant dynamics are best captured by the panoramic view of satellites. Remote sensing coupled with ML/DL techniques has had the greatest impact in shaping modern trends in AQ research. The current standing of research in this field, emerging trends and future scope are also discussed.},
}
@article {pmid37979853,
year = {2024},
author = {Wilkinson, R and Mleczko, MM and Brewin, RJW and Gaston, KJ and Mueller, M and Shutler, JD and Yan, X and Anderson, K},
title = {Environmental impacts of earth observation data in the constellation and cloud computing era.},
journal = {The Science of the total environment},
volume = {909},
number = {},
pages = {168584},
doi = {10.1016/j.scitotenv.2023.168584},
pmid = {37979853},
issn = {1879-1026},
abstract = {Numbers of Earth Observation (EO) satellites have increased exponentially over the past decade reaching the current population of 1193 (January 2023). Consequently, EO data volumes have mushroomed and data storage and processing have migrated to the cloud. Whilst attention has been given to the launch and in-orbit environmental impacts of satellites, EO data environmental footprints have been overlooked. These issues require urgent attention given data centre water and energy consumption, high carbon emissions for computer component manufacture, and difficulty of recycling computer components. Doing so is essential if the environmental good of EO is to withstand scrutiny. We provide the first assessment of the EO data life-cycle and estimate that the current size of the global EO data collection is ~807 PB, increasing by ~100 PB/year. Storage of this data volume generates annual CO2 equivalent emissions of 4101 t. Major state-funded EO providers use 57 of their own data centres globally, and a further 178 private cloud services, with considerable duplication of datasets across repositories. We explore scenarios for the environmental cost of performing EO functions on the cloud compared to desktop machines. A simple band arithmetic function applied to a Landsat 9 scene using Google Earth Engine (GEE) generated CO2 equivalent (e) emissions of 0.042-0.69 g CO2e (locally) and 0.13-0.45 g CO2e (European data centre; values multiply by nine for Australian data centre). Computation-based emissions scale rapidly for more intense processes and when testing code. When using cloud services such as GEE, users have no choice about the data centre used and we push for EO providers to be more transparent about the location-specific impacts of EO work, and to provide tools for measuring the environmental cost of cloud computation. The EO community as a whole needs to critically consider the broad suite of EO data life-cycle impacts.},
}
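
The abstract's headline figures support some useful back-of-envelope arithmetic; the derived rates below are simple ratios computed from the quoted numbers, not values from the paper itself.

    archive_pb = 807          # current global EO collection, PB
    growth_pb_per_year = 100  # annual growth, PB
    storage_tco2e = 4101      # annual CO2-equivalent of storing the archive, t

    t_per_pb = storage_tco2e / archive_pb
    print(f'storage intensity ~ {t_per_pb:.2f} t CO2e per PB per year')   # ~5.08
    # Marginal cost of one more year of archive growth, at the same intensity:
    print(f'extra ~ {t_per_pb * growth_pb_per_year:.0f} t CO2e per year')  # ~508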
@article {pmid37979340,
year = {2023},
author = {Tomassini, S and Falcionelli, N and Bruschi, G and Sbrollini, A and Marini, N and Sernani, P and Morettini, M and Müller, H and Dragoni, AF and Burattini, L},
title = {On-cloud decision-support system for non-small cell lung cancer histology characterization from thorax computed tomography scans.},
journal = {Computerized medical imaging and graphics : the official journal of the Computerized Medical Imaging Society},
volume = {110},
number = {},
pages = {102310},
doi = {10.1016/j.compmedimag.2023.102310},
pmid = {37979340},
issn = {1879-0771},
mesh = {Humans ; *Carcinoma, Non-Small-Cell Lung/diagnostic imaging/pathology ; *Lung Neoplasms/diagnostic imaging/pathology ; *Carcinoma, Squamous Cell/pathology ; Tomography, X-Ray Computed/methods ; ROC Curve ; },
abstract = {Non-Small Cell Lung Cancer (NSCLC) accounts for about 85% of all lung cancers. Developing non-invasive techniques for NSCLC histology characterization may not only help clinicians to make targeted therapeutic treatments but also prevent subjects from undergoing lung biopsy, which is challenging and could lead to clinical implications. The motivation behind the study presented here is to develop an advanced on-cloud decision-support system, named LUCY, for non-small cell LUng Cancer histologY characterization directly from thorax Computed Tomography (CT) scans. This aim was pursued by selecting thorax CT scans of 182 LUng ADenocarcinoma (LUAD) and 186 LUng Squamous Cell carcinoma (LUSC) subjects from four openly accessible data collections (NSCLC-Radiomics, NSCLC-Radiogenomics, NSCLC-Radiomics-Genomics and TCGA-LUAD), followed by the implementation and comparison of two end-to-end neural networks (whose core layer is a convolutional long short-term memory layer), the performance evaluation on a test dataset (NSCLC-Radiomics-Genomics) from a subject-level perspective in relation to NSCLC histological subtype location and grade, and the dynamic visual interpretation of the achieved results by producing and analyzing one heatmap video for each scan. LUCY reached test Area Under the receiver operating characteristic Curve (AUC) values above 77% in all NSCLC histological subtype location and grade groups, and a best AUC value of 97% on the entire dataset reserved for testing, proving high generalizability to heterogeneous data and robustness. Thus, LUCY is a clinically-useful decision-support system able to timely, non-invasively and reliably provide visually-understandable predictions on LUAD and LUSC subjects in relation to clinically-relevant information.},
}
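
The architectural core the entry names, a convolutional LSTM over a stack of CT slices feeding a two-class head, can be sketched in Keras as below; the input shape and layer sizes are assumptions, and the published networks are more elaborate.

    import tensorflow as tf

    model = tf.keras.Sequential([
        tf.keras.layers.Input(shape=(32, 64, 64, 1)),   # 32 slices of 64x64, 1 channel
        tf.keras.layers.ConvLSTM2D(16, kernel_size=3),  # aggregates the slice sequence
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.GlobalAveragePooling2D(),
        tf.keras.layers.Dense(2, activation='softmax')  # LUAD vs. LUSC
    ])
    model.compile(optimizer='adam', loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    model.summary()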
@article {pmid37961077,
year = {2023},
author = {Kwabla, W and Dinc, F and Oumimoun, K and Kockara, S and Halic, T and Demirel, D and Arikatla, S and Ahmadi, S},
title = {Evaluation of WebRTC in the Cloud for Surgical Simulations: A case study on Virtual Rotator Cuff Arthroscopic Skill Trainer (ViRCAST).},
journal = {Learning and collaboration technologies : 10th International Conference, LCT 2023, held as part of the 25th HCI International Conference, HCII 2023, Copenhagen, Denmark, July 23-28, 2023, proceedings. Part II. LCT (Conference) (10th : 2...},
volume = {14041},
number = {},
pages = {127-143},
pmid = {37961077},
support = {R01 EB005807/EB/NIBIB NIH HHS/United States ; R01 EB025241/EB/NIBIB NIH HHS/United States ; P20 GM103429/GM/NIGMS NIH HHS/United States ; R44 AR075481/AR/NIAMS NIH HHS/United States ; R01 EB033674/EB/NIBIB NIH HHS/United States ; },
abstract = {Web Real-Time Communication (WebRTC) is an open-source technology that enables remote peer-to-peer video and audio connections. It has quickly become the new standard for real-time communications over the web and is commonly used as a video conferencing platform. In this study, we present a different application domain which may greatly benefit from WebRTC technology: virtual reality (VR) based surgical simulations. The Virtual Rotator Cuff Arthroscopic Skill Trainer (ViRCAST) is our testing platform, for which we completed preliminary WebRTC feasibility studies. Since the elasticity of cloud computing provides the ability to meet possible future hardware/software requirements and demand growth, ViRCAST is deployed in a cloud environment. Additionally, in order to have plausible simulations and interactions, any VR-based surgery simulator must have haptic feedback. Therefore, we implemented an interface to WebRTC for integrating haptic devices. We tested ViRCAST on Google Cloud through haptic-integrated WebRTC at various client configurations. Our experiments showed that WebRTC with cloud and haptic integrations is a feasible solution for VR-based surgery simulators. In our experiments, the WebRTC-integrated simulation produced an average frame rate of 33 fps, and the hardware integration produced an average lag of 0.7 milliseconds in real time.},
}
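To ground the WebRTC mechanics the ViRCAST entry relies on, here is a minimal Python sketch using the aiortc library to create a peer connection and a local offer; the "haptics" data channel and its payload are hypothetical stand-ins for the paper's haptic interface, and a real deployment would exchange the SDP through a signaling server.
```python
# Hedged sketch: create a WebRTC peer connection and offer with aiortc.
import asyncio
from aiortc import RTCPeerConnection

async def make_offer():
    pc = RTCPeerConnection()
    haptics = pc.createDataChannel("haptics")  # hypothetical low-latency device channel

    @haptics.on("open")
    def on_open():
        haptics.send("stylus:0.7ms")  # placeholder haptic payload

    offer = await pc.createOffer()
    await pc.setLocalDescription(offer)
    print(pc.localDescription.sdp[:200])  # SDP to hand to a signaling server
    await pc.close()

asyncio.run(make_offer())
```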
@article {pmid37960657,
year = {2023},
author = {Farooq, MS and Abdullah, M and Riaz, S and Alvi, A and Rustam, F and Flores, MAL and Galán, JC and Samad, MA and Ashraf, I},
title = {A Survey on the Role of Industrial IoT in Manufacturing for Implementation of Smart Industry.},
journal = {Sensors (Basel, Switzerland)},
volume = {23},
number = {21},
pages = {},
pmid = {37960657},
issn = {1424-8220},
support = {N/A//the European University of the Atlantic/ ; },
abstract = {The Internet of Things (IoT) is an innovative technology that presents effective and attractive solutions to revolutionize various domains. Numerous solutions based on the IoT have been designed to automate industries, manufacturing units, and production houses to mitigate human involvement in hazardous operations. Owing to the large number of publications in the IoT paradigm, in particular those focusing on industrial IoT (IIoT), a comprehensive survey is critically important to provide insights into recent developments. This survey presents the workings of the IoT-based smart industry and its major components and reviews the state-of-the-art network infrastructure, including structured layers of IIoT architecture, IIoT network topologies, protocols, and devices. Furthermore, the relationship between IoT-based industries and key technologies is analyzed, including big data storage, cloud computing, and data analytics. A detailed discussion of IIoT-based application domains, smartphone application solutions, and sensor- and device-based IIoT applications developed for the management of the smart industry is also presented. In addition, IIoT-based security attacks and their relevant countermeasures are highlighted. By analyzing the essential components, their security risks, and available solutions, future research directions regarding the implementation of IIoT are outlined. Finally, a comprehensive discussion of open research challenges and issues related to the smart industry is also presented.}
}
@article {pmid37960612,
year = {2023},
author = {Leng, J and Chen, X and Zhao, J and Wang, C and Zhu, J and Yan, Y and Zhao, J and Shi, W and Zhu, Z and Jiang, X and Lou, Y and Feng, C and Yang, Q and Xu, F},
title = {A Light Vehicle License-Plate-Recognition System Based on Hybrid Edge-Cloud Computing.},
journal = {Sensors (Basel, Switzerland)},
volume = {23},
number = {21},
pages = {},
pmid = {37960612},
issn = {1424-8220},
support = {No.ZR2022MF289//Shandong Provincial Natural Science Foundation/ ; ZR2019MA037//Shandong Provincial Natural Science Foundation/ ; No.62271293//National Natural Science Foundation of China/ ; No.2021GXRC071//2021 Jinan City "20 New Universities" Support Project/ ; No.2021yb08//Qilu University of Technology 2021 Campus General Teaching Reform Project/ ; No. P202204//Qilu University of Technology 2022 Talent Training and Teaching Reform Project/ ; },
abstract = {With the world moving towards low-carbon and environmentally friendly development, the rapid growth of new-energy vehicles is evident. The utilization of deep-learning-based license-plate-recognition (LPR) algorithms has become widespread. However, existing LPR systems have difficulty achieving timely, effective, and energy-saving recognition due to their inherent limitations such as high latency and energy consumption. An innovative Edge-LPR system that leverages edge computing and lightweight network models is proposed in this paper. With the help of this technology, excessive reliance on cloud computational capacity and the uneven distribution of cloud computing resources can be successfully mitigated. The system is a deliberately lightweight LPR pipeline: channel pruning was used to reconstruct the backbone layer, reduce the network model parameters, and effectively reduce the GPU resource consumption. By utilizing the computing resources of the Intel second-generation computing stick, the network models were deployed on edge gateways to detect license plates directly. The reliability and effectiveness of the Edge-LPR system were validated through the experimental analysis of the CCPD standard dataset and real-time monitoring dataset from charging stations. The experimental results from the CCPD common dataset demonstrated that the network's total number of parameters was only 0.606 MB, with an impressive accuracy rate of 97%.}
}
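The Edge-LPR entry above reduces model size by channel pruning of the backbone. The sketch below shows the general one-shot L1-norm channel-pruning technique in PyTorch; the keep ratio and layer shapes are illustrative, not the paper's configuration.
```python
# Hedged sketch: prune a conv layer's output channels by L1 filter norm.
import torch
import torch.nn as nn

def prune_conv_channels(conv: nn.Conv2d, keep_ratio: float = 0.5) -> nn.Conv2d:
    # Rank output channels by the L1 norm of their filters, keep the top-k.
    norms = conv.weight.detach().abs().sum(dim=(1, 2, 3))
    k = max(1, int(conv.out_channels * keep_ratio))
    keep = torch.topk(norms, k).indices.sort().values

    pruned = nn.Conv2d(conv.in_channels, k, conv.kernel_size,
                       stride=conv.stride, padding=conv.padding,
                       bias=conv.bias is not None)
    with torch.no_grad():
        pruned.weight.copy_(conv.weight[keep])
        if conv.bias is not None:
            pruned.bias.copy_(conv.bias[keep])
    return pruned

conv = nn.Conv2d(3, 32, 3, padding=1)
slim = prune_conv_channels(conv, keep_ratio=0.25)
print(conv.weight.shape, "->", slim.weight.shape)  # (32,3,3,3) -> (8,3,3,3)
```
In practice the slimmed layer replaces the original in the backbone and the network is fine-tuned to recover accuracy.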
@article {pmid37960584,
year = {2023},
author = {Younas, MI and Iqbal, MJ and Aziz, A and Sodhro, AH},
title = {Toward QoS Monitoring in IoT Edge Devices Driven Healthcare-A Systematic Literature Review.},
journal = {Sensors (Basel, Switzerland)},
volume = {23},
number = {21},
pages = {},
pmid = {37960584},
issn = {1424-8220},
support = {2020VBC0002//PIFI 2020 (2020VBC0002), China/ ; },
mesh = {Humans ; *Artificial Intelligence ; Cloud Computing ; *Disasters ; Industry ; Delivery of Health Care ; },
abstract = {Smart healthcare is altering the delivery of healthcare by combining the benefits of IoT, mobile, and cloud computing. Cloud computing has tremendously helped the health industry connect healthcare facilities, caregivers, and patients for information sharing. The main drivers for implementing effective healthcare systems are low latency and faster response times. Thus, quick responses among healthcare organizations are important in general, but in an emergency, significant latency at different stakeholders might result in disastrous situations. Hence, cutting-edge approaches like edge computing and artificial intelligence (AI) can deal with such problems. A packet cannot be sent from one location to another unless the "quality of service" (QoS) specifications are met. The term QoS refers to how well a service works for users. QoS parameters like throughput, bandwidth, transmission delay, availability, jitter, latency, and packet loss are crucial in this regard. Our focus is on the individual devices present at different levels of the smart healthcare infrastructure and the QoS requirements of the healthcare system as a whole. The contribution of this paper is five-fold: first, a novel pre-SLR method for comprehensive keyword research on subject-related themes for mining pertinent research papers for quality SLR; second, an SLR on QoS improvement in smart healthcare apps; third, a review of several QoS techniques used in current smart healthcare apps; fourth, the examination of the most important QoS measures in contemporary smart healthcare apps; fifth, offering solutions to the problems encountered in delivering QoS in smart healthcare IoT applications to improve healthcare services.}
}
@article {pmid37960453,
year = {2023},
author = {Abbas, Q and Ahmad, G and Alyas, T and Alghamdi, T and Alsaawy, Y and Alzahrani, A},
title = {Revolutionizing Urban Mobility: IoT-Enhanced Autonomous Parking Solutions with Transfer Learning for Smart Cities.},
journal = {Sensors (Basel, Switzerland)},
volume = {23},
number = {21},
pages = {},
pmid = {37960453},
issn = {1424-8220},
abstract = {Smart cities have emerged as a specialized domain encompassing various technologies, transitioning from civil engineering to technology-driven solutions. The accelerated development of technologies, such as the Internet of Things (IoT), software-defined networks (SDN), 5G, artificial intelligence, cognitive science, and analytics, has played a crucial role in providing solutions for smart cities. Smart cities heavily rely on devices, ad hoc networks, and cloud computing to integrate and streamline various activities towards common goals. However, the complexity arising from multiple cloud service providers offering myriad services necessitates a stable and coherent platform for sustainable operations. The Smart City Operational Platform Ecology (SCOPE) model has been developed to address the growing demands, and incorporates machine learning, cognitive correlates, ecosystem management, and security. SCOPE provides an ecosystem that establishes a balance for achieving sustainability and progress. In the context of smart cities, Internet of Things (IoT) devices play a significant role in enabling automation and data capture. This research paper focuses on a specific module of SCOPE, which deals with data processing and learning mechanisms for object identification in smart cities. Specifically, it presents a car parking system that utilizes smart identification techniques to identify vacant slots. The learning controller in SCOPE employs a two-tier approach, and utilizes two different models, namely AlexNet and YOLO, to ensure procedural stability and improvement.}
}
@article {pmid37954389,
year = {2023},
author = {Biswas, J and Jobaer, MA and Haque, SF and Islam Shozib, MS and Limon, ZA},
title = {Mapping and monitoring land use land cover dynamics employing Google Earth Engine and machine learning algorithms on Chattogram, Bangladesh.},
journal = {Heliyon},
volume = {9},
number = {11},
pages = {e21245},
pmid = {37954389},
issn = {2405-8440},
abstract = {Land use land cover change (LULC) significantly impacts urban sustainability, urban planning, climate change, natural resource management, and biodiversity. The Chattogram Metropolitan Area (CMA) has been going through rapid urbanization, which has impacted the LULC transformation and accelerated the growth of urban sprawl and unplanned development. To map those urban sprawls and natural resources depletion, this study aims to monitor the LULC change using Landsat satellite imagery from 2003 to 2023 in the cloud-based remote sensing platform Google Earth Engine (GEE). LULC has been classified into five distinct classes: waterbody, built-up, bare land, dense vegetation, and cropland, employing four machine learning algorithms (random forest, gradient tree boost, classification & regression tree, and support vector machine) in the GEE platform. The overall accuracy (kappa statistics) and the receiver operating characteristic (ROC) curve have demonstrated satisfactory results. The results indicate that the CART model outperforms other LULC models when considering efficiency and accuracy in the designated study region. The analysis of LULC conversions revealed notable trends, patterns, and magnitudes across all periods: 2003-2013, 2013-2023, and 2003-2023. The expansion of unregulated built-up areas and the decline of croplands emerged as primary concerns. However, there was a positive indication of a significant increase in dense vegetation within the study area over the 20 years.}
}
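The entry above classifies Landsat composites into five LULC classes inside Google Earth Engine. A minimal sketch of that workflow with the Earth Engine Python API follows; it assumes an authenticated session, and the asset path "users/example/chattogram_labels" with a 'landcover' property is a hypothetical training FeatureCollection, not the study's data.
```python
# Hedged sketch: median Landsat composite -> sample labels -> CART classify.
import ee
ee.Initialize()  # assumes prior `earthengine authenticate`

bands = ["SR_B2", "SR_B3", "SR_B4", "SR_B5", "SR_B6", "SR_B7"]
composite = (ee.ImageCollection("LANDSAT/LC08/C02/T1_L2")
             .filterDate("2023-01-01", "2023-12-31")
             .median()
             .select(bands))

training_points = ee.FeatureCollection("users/example/chattogram_labels")  # hypothetical
samples = composite.sampleRegions(collection=training_points,
                                  properties=["landcover"], scale=30)

cart = ee.Classifier.smileCart().train(features=samples,
                                       classProperty="landcover",
                                       inputProperties=bands)
classified = composite.classify(cart)  # 5-class LULC map (water, built-up, ...)
print(classified.getInfo()["bands"][0]["id"])
```
Swapping `smileCart()` for `smileRandomForest(100)` reproduces the random forest variant the study compares against.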
@article {pmid37946898,
year = {2023},
author = {Healthcare Engineering, JO},
title = {Retracted: Sports Training Teaching Device Based on Big Data and Cloud Computing.},
journal = {Journal of healthcare engineering},
volume = {2023},
number = {},
pages = {9795604},
pmid = {37946898},
issn = {2040-2309},
abstract = {[This retracts the article DOI: 10.1155/2021/7339486.].},
}
@article {pmid37946860,
year = {2023},
author = {Intelligence And Neuroscience, C},
title = {Retracted: Real-Time Detection of Body Nutrition in Sports Training Based on Cloud Computing and Somatosensory Network.},
journal = {Computational intelligence and neuroscience},
volume = {2023},
number = {},
pages = {9784817},
pmid = {37946860},
issn = {1687-5273},
abstract = {[This retracts the article DOI: 10.1155/2022/9911905.].},
}
@article {pmid37942151,
year = {2023},
author = {Faruqui, N and Yousuf, MA and Kateb, FA and Abdul Hamid, M and Monowar, MM},
title = {Healthcare As a Service (HAAS): CNN-based cloud computing model for ubiquitous access to lung cancer diagnosis.},
journal = {Heliyon},
volume = {9},
number = {11},
pages = {e21520},
pmid = {37942151},
issn = {2405-8440},
abstract = {The field of automated lung cancer diagnosis using Computed Tomography (CT) scans has been significantly advanced by the precise predictions offered by Convolutional Neural Network (CNN)-based classifiers. Critical areas of study include improving image quality, optimizing learning algorithms, and enhancing diagnostic accuracy. To facilitate a seamless transition from research laboratories to real-world applications, it is crucial to improve the technology's usability, a factor often neglected in current state-of-the-art research. This paper introduces Healthcare-As-A-Service (HAAS), an innovative concept inspired by Software-As-A-Service (SAAS) within the cloud computing paradigm. As a comprehensive lung cancer diagnosis service system, HAAS has the potential to reduce lung cancer mortality rates by providing early diagnosis opportunities to everyone. We present HAASNet, a cloud-compatible CNN that boasts an accuracy rate of 96.07%. By integrating HAASNet predictions with physio-symptomatic data from the Internet of Medical Things (IoMT), the proposed HAAS model generates accurate and reliable lung cancer diagnosis reports. Leveraging IoMT and cloud technology, the proposed service is globally accessible via the Internet, transcending geographic boundaries. This groundbreaking lung cancer diagnosis service achieves average precision, recall, and F1-scores of 96.47%, 95.39%, and 94.81%, respectively.}
}
@article {pmid37941779,
year = {2023},
author = {Wang, C and Dai, W},
title = {Lung nodule segmentation via semi-residual multi-resolution neural networks.},
journal = {Open life sciences},
volume = {18},
number = {1},
pages = {20220727},
pmid = {37941779},
issn = {2391-5412},
abstract = {The integration of deep neural networks and cloud computing has become increasingly prevalent within the domain of medical image processing, facilitated by the recent strides in neural network theory and the advent of the internet of things (IoTs). This juncture has led to the emergence of numerous image segmentation networks and innovative solutions that facilitate medical practitioners in diagnosing lung cancer. Within the contours of this study, we present an end-to-end neural network model, christened as the "semi-residual Multi-resolution Convolutional Neural Network" (semi-residual MCNN), devised to engender precise lung nodule segmentation maps within the milieu of cloud computing. Central to the architecture are three pivotal features, each coalescing to effectuate a notable enhancement in predictive accuracy: the incorporation of semi-residual building blocks, the deployment of group normalization techniques, and the orchestration of multi-resolution output heads. This innovative model is systematically subjected to rigorous training and testing regimes, using the LIDC-IDRI dataset - a widely embraced and accessible repository - comprising a diverse ensemble of 1,018 distinct lung CT images tailored to the realm of lung nodule segmentation.},
}
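The segmentation entry above names three ingredients: semi-residual blocks, group normalization, and multi-resolution output heads. The sketch below shows one plausible reading of a "semi-residual" block, with the identity shortcut applied to only half of the channels; this interpretation and all sizes are our assumptions, not the paper's confirmed definition.
```python
# Hedged sketch: a "semi-residual" block with group normalization, where the
# skip connection covers only the first half of the channels.
import torch
import torch.nn as nn

class SemiResidualBlock(nn.Module):
    def __init__(self, channels: int, groups: int = 8):
        super().__init__()
        self.conv = nn.Conv2d(channels, channels, 3, padding=1)
        self.norm = nn.GroupNorm(groups, channels)
        self.act = nn.ReLU(inplace=True)

    def forward(self, x):
        y = self.act(self.norm(self.conv(x)))
        half = x.shape[1] // 2
        # Residual shortcut on the first half of the channels only.
        return torch.cat([y[:, :half] + x[:, :half], y[:, half:]], dim=1)

block = SemiResidualBlock(32)
print(block(torch.randn(1, 32, 64, 64)).shape)  # torch.Size([1, 32, 64, 64])
```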
@article {pmid37937074,
year = {2023},
author = {Wadford, DA and Baumrind, N and Baylis, EF and Bell, JM and Bouchard, EL and Crumpler, M and Foote, EM and Gilliam, S and Glaser, CA and Hacker, JK and Ledin, K and Messenger, SL and Morales, C and Smith, EA and Sevinsky, JR and Corbett-Detig, RB and DeRisi, J and Jacobson, K},
title = {Implementation of California COVIDNet - a multi-sector collaboration for statewide SARS-CoV-2 genomic surveillance.},
journal = {Frontiers in public health},
volume = {11},
number = {},
pages = {1249614},
pmid = {37937074},
issn = {2296-2565},
support = {U01 CK000539/CK/NCEZID CDC HHS/United States ; U01CK000539/ACL/ACL HHS/United States ; },
mesh = {Humans ; *SARS-CoV-2/genetics ; *COVID-19/epidemiology ; Genomics ; California/epidemiology ; Data Management ; },
abstract = {INTRODUCTION: The SARS-CoV-2 pandemic represented a formidable scientific and technological challenge to public health due to its rapid spread and evolution. To meet these challenges and to characterize the virus over time, the State of California established the California SARS-CoV-2 Whole Genome Sequencing (WGS) Initiative, or "California COVIDNet". This initiative constituted an unprecedented multi-sector collaborative effort to achieve large-scale genomic surveillance of SARS-CoV-2 across California to monitor the spread of variants within the state, to detect new and emerging variants, and to characterize outbreaks in congregate, workplace, and other settings.
METHODS: California COVIDNet consists of 50 laboratory partners that include public health laboratories, private clinical diagnostic laboratories, and academic sequencing facilities as well as expert advisors, scientists, consultants, and contractors. Data management, sample sourcing and processing, and computational infrastructure were major challenges that had to be resolved in the midst of the pandemic chaos in order to conduct SARS-CoV-2 genomic surveillance. Data management, storage, and analytics needs were addressed with both conventional database applications and newer cloud-based data solutions, which also fulfilled computational requirements.
RESULTS: Representative and randomly selected samples were sourced from state-sponsored community testing sites. Since March of 2021, California COVIDNet partners have contributed more than 450,000 SARS-CoV-2 genomes sequenced from remnant samples from both molecular and antigen tests. Combined with genomes from CDC-contracted WGS labs, there are currently nearly 800,000 genomes from all 61 local health jurisdictions (LHJs) in California in the COVIDNet sequence database. More than 5% of all reported positive tests in the state have been sequenced, with similar rates of sequencing across 5 major geographic regions in the state.
DISCUSSION: Implementation of California COVIDNet revealed challenges and limitations in the public health system. These were overcome by engaging in novel partnerships that established a successful genomic surveillance program which provided valuable data to inform the COVID-19 public health response in California. Significantly, California COVIDNet has provided a foundational data framework and computational infrastructure needed to respond to future public health crises.},
}
@article {pmid37933859,
year = {2024},
author = {Varadi, M and Bertoni, D and Magana, P and Paramval, U and Pidruchna, I and Radhakrishnan, M and Tsenkov, M and Nair, S and Mirdita, M and Yeo, J and Kovalevskiy, O and Tunyasuvunakool, K and Laydon, A and Žídek, A and Tomlinson, H and Hariharan, D and Abrahamson, J and Green, T and Jumper, J and Birney, E and Steinegger, M and Hassabis, D and Velankar, S},
title = {AlphaFold Protein Structure Database in 2024: providing structure coverage for over 214 million protein sequences.},
journal = {Nucleic acids research},
volume = {52},
number = {D1},
pages = {D368-D375},
pmid = {37933859},
issn = {1362-4962},
support = {//Google DeepMind/ ; 2019R1A6A1A10073437//National Research Foundation of Korea/ ; //Samsung DS Research Fund/ ; //Seoul National University/ ; RS-2023-00250470//National Research Foundation of Korea/ ; },
mesh = {Amino Acid Sequence ; *Artificial Intelligence ; Databases, Protein ; *Proteome ; Search Engine ; Proteins/chemistry ; *Protein Structure, Secondary ; },
abstract = {The AlphaFold Protein Structure Database (AlphaFold DB, https://alphafold.ebi.ac.uk) has significantly impacted structural biology by amassing over 214 million predicted protein structures, expanding from the initial 300k structures released in 2021. Enabled by the groundbreaking AlphaFold2 artificial intelligence (AI) system, the predictions archived in AlphaFold DB have been integrated into primary data resources such as PDB, UniProt, Ensembl, InterPro and MobiDB. Our manuscript details subsequent enhancements in data archiving, covering successive releases encompassing model organisms, global health proteomes, Swiss-Prot integration, and a host of curated protein datasets. We detail the data access mechanisms of AlphaFold DB, from direct file access via FTP to advanced queries using Google Cloud Public Datasets and the programmatic access endpoints of the database. We also discuss the improvements and services added since its initial release, including enhancements to the Predicted Aligned Error viewer, customisation options for the 3D viewer, and improvements in the search engine of AlphaFold DB.}
}
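The entry above mentions programmatic access endpoints. As a hedged example, the Python snippet below queries the public prediction endpoint by UniProt accession (P69905, human hemoglobin alpha); the endpoint path and response fields reflect the public API at the time of writing and should be verified against the database's documentation.
```python
# Hedged sketch: fetch an AlphaFold DB prediction record and its PDB file.
import requests

accession = "P69905"
resp = requests.get(f"https://alphafold.ebi.ac.uk/api/prediction/{accession}",
                    timeout=30)
resp.raise_for_status()
entry = resp.json()[0]                    # one record per model
print(entry["entryId"])                   # e.g. AF-P69905-F1
pdb = requests.get(entry["pdbUrl"], timeout=30).text
print(pdb.splitlines()[0])                # first PDB record line
```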
@article {pmid37932347,
year = {2023},
author = {Bao, J and Wu, C and Lin, Y and Zhong, L and Chen, X and Yin, R},
title = {A scalable approach to optimize traffic signal control with federated reinforcement learning.},
journal = {Scientific reports},
volume = {13},
number = {1},
pages = {19184},
pmid = {37932347},
issn = {2045-2322},
abstract = {Intelligent Transportation has seen significant advancements with Deep Learning and the Internet of Things, making Traffic Signal Control (TSC) research crucial for reducing congestion, travel time, emissions, and energy consumption. Reinforcement Learning (RL) has emerged as the primary method for TSC, but centralized learning poses communication and computing challenges, while distributed learning struggles to adapt across intersections. This paper presents a novel approach using Federated Learning (FL)-based RL for TSC. FL integrates knowledge from local agents into a global model, overcoming intersection variations with a unified agent state structure. To endow the model with the capacity to globally represent the TSC task while preserving the distinctive feature information inherent to each intersection, a segment of the RL neural network is aggregated to the cloud, and the remaining layers undergo fine-tuning upon convergence of the model training process. Extensive experiments demonstrate reduced queuing and waiting times globally, and the successful scalability of the proposed model is validated on a real-world traffic network in Monaco, showing its potential for new intersections.},
}
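The federated TSC entry above aggregates only a segment of the RL network in the cloud while the remaining layers stay local. The sketch below illustrates that partial federated-averaging idea in plain PyTorch; the "encoder."/"head." parameter names and shapes are illustrative, not the paper's network.
```python
# Hedged sketch: FedAvg restricted to a shared parameter subset.
from typing import Dict, List
import torch

def fedavg_partial(local_states: List[Dict[str, torch.Tensor]],
                   shared_prefix: str = "encoder.") -> Dict[str, torch.Tensor]:
    """Average only parameters whose names start with `shared_prefix`."""
    shared_keys = [k for k in local_states[0] if k.startswith(shared_prefix)]
    return {k: torch.stack([s[k] for s in local_states]).mean(dim=0)
            for k in shared_keys}

# Three intersections with a shared encoder and a local policy head.
agents = [{"encoder.w": torch.randn(8, 4), "head.w": torch.randn(2, 8)}
          for _ in range(3)]
global_update = fedavg_partial(agents)
for state in agents:                 # push the averaged segment back
    state.update(global_update)      # each local "head.w" is left untouched
print(sorted(global_update))         # ['encoder.w']
```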
@article {pmid37932308,
year = {2023},
author = {Mangalampalli, S and Karri, GR and Mohanty, SN and Ali, S and Khan, MI and Abduvalieva, D and Awwad, FA and Ismail, EAA},
title = {Fault tolerant trust based task scheduler using Harris Hawks optimization and deep reinforcement learning in multi cloud environment.},
journal = {Scientific reports},
volume = {13},
number = {1},
pages = {19179},
pmid = {37932308},
issn = {2045-2322},
abstract = {The cloud computing model provides on-demand delivery of seamless services to customers around the world, yet single points of failure can occur in the cloud when tasks are assigned to unsuitable virtual machines, increasing the failure rate and degrading the SLA-based trust parameters (availability, success rate, turnaround efficiency) on which customer trust in the cloud provider depends. In this paper, we propose a task scheduling algorithm which captures the priorities of all tasks and virtual resources from the task manager on the cloud application console and feeds them to a task scheduler, which takes scheduling decisions based on a hybridization of Harris hawks optimization and ML-based reinforcement learning to enhance the scheduling process. Task scheduling in this research is performed in two phases, i.e. a task selection phase and a task mapping phase. In the task selection phase, all incoming task and VM priorities are captured and schedules are generated using Harris hawks optimization. In the task mapping phase, the generated schedules are optimized using a DQN model based on deep reinforcement learning. In this research, we used a multi cloud environment to ensure the availability of VMs when the number of incoming tasks increases dynamically, migrating tasks from one cloud to another to mitigate migration time. Extensive simulations are conducted in Cloudsim with workloads generated from fabricated datasets and real-time synthetic workloads from NASA and HPC2N to check the efficacy of our proposed scheduler (FTTHDRL). It is compared against existing task schedulers, i.e. MOABCQ, RATS-HM, and AINN-BPSO approaches, and our proposed FTTHDRL outperforms existing mechanisms by minimizing the rate of failures and resource cost, and improving SLA-based trust parameters.}
}
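To make the scheduling objective in the entry above concrete, the sketch below minimizes makespan for a task-to-VM mapping using simple random search; this stands in for, and is far simpler than, the paper's two-phase Harris hawks plus DQN pipeline, and all task/VM numbers are invented.
```python
# Hedged sketch: the makespan objective a cloud task scheduler optimizes.
import random

def makespan(assignment, task_len, vm_speed):
    load = [0.0] * len(vm_speed)
    for task, vm in enumerate(assignment):
        load[vm] += task_len[task] / vm_speed[vm]
    return max(load)  # finish time of the busiest VM

random.seed(0)
task_len = [random.uniform(5, 50) for _ in range(40)]
vm_speed = [1.0, 2.0, 4.0]

best = [random.randrange(len(vm_speed)) for _ in task_len]
for _ in range(2000):  # random mutations stand in for metaheuristic search
    cand = best[:]
    cand[random.randrange(len(cand))] = random.randrange(len(vm_speed))
    if makespan(cand, task_len, vm_speed) < makespan(best, task_len, vm_speed):
        best = cand
print(f"best makespan: {makespan(best, task_len, vm_speed):.2f}")
```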
@article {pmid37928198,
year = {2023},
author = {Mee, L and Barribeau, SM},
title = {Influence of social lifestyles on host-microbe symbioses in the bees.},
journal = {Ecology and evolution},
volume = {13},
number = {11},
pages = {e10679},
pmid = {37928198},
issn = {2045-7758},
abstract = {Microbiomes are increasingly recognised as critical for the health of an organism. In eusocial insect societies, frequent social interactions allow for high-fidelity transmission of microbes across generations, leading to closer host-microbe coevolution. The microbial communities of bees with other social lifestyles are less studied, and few comparisons have been made between taxa that vary in social structure. To address this gap, we leveraged a cloud-computing resource and publicly available transcriptomic data to conduct a survey of microbial diversity in bee samples from a variety of social lifestyles and taxa. We consistently recover the core microbes of well-studied corbiculate bees, supporting this method's ability to accurately characterise microbial communities. We find that the bacterial communities of bees are influenced by host location, phylogeny and social lifestyle, although no clear effect was found for fungal or viral microbial communities. Bee genera with more complex societies tend to harbour more diverse microbes, with Wolbachia detected more commonly in solitary tribes. We present a description of the microbiota of Euglossine bees and find that they do not share the "corbiculate core" microbiome. Notably, we find that bacteria with known anti-pathogenic properties are present across social bee genera, suggesting that symbioses that enhance host immunity become more important with higher sociality. Our approach provides an inexpensive means of exploring the microbiome of a given taxon and identifying avenues for further research. These findings contribute to our understanding of the relationships between bees and their associated microbial communities, highlighting the importance of considering microbiome dynamics in investigations of bee health.}
}
@article {pmid37917778,
year = {2023},
author = {Qian, J and She, Q},
title = {The impact of corporate digital transformation on the export product quality: Evidence from Chinese enterprises.},
journal = {PloS one},
volume = {18},
number = {11},
pages = {e0293461},
pmid = {37917778},
issn = {1932-6203},
abstract = {The digital economy has become a driving force in the rapid development of the global economy and the promotion of export trade. Pivotal in its advent, the digital transformation of enterprises utilizes cloud computing, big data, artificial intelligence, and other digital technologies to provide an impetus for evolution and transformation in various industries and fields. This has been critical for enhancing both quality and efficiency in enterprises based in the People's Republic of China. Through the available data on its listed enterprises, this paper measures their digital transformation through a textual analysis and examines how this transformation influences their export product quality. We then explore the possible mechanisms at work in this influence from the perspective of enterprise heterogeneity. The results show that: (1) Digital transformation significantly enhances the export product quality of an enterprise, and the empirical findings still hold after a series of robustness tests; (2) Further mechanism analysis reveals that the digital transformation can positively affect export product quality through the two mechanisms of process productivity (φ), the ability to produce output using fewer variable inputs, and product productivity (ξ), the ability to produce quality with fewer fixed outlays; (3) In terms of enterprise heterogeneity, the impact of digital transformation on export product quality is significant for enterprises engaged in general trade or high-tech industries and those with strong corporate governance. In terms of heterogeneity in the digital transformation of enterprises and the regional digital infrastructure level, the higher the level of digital transformation and regional digital infrastructure, the greater the impact of digital transformation on export product quality. This paper has practical implications for public policies that offer vital aid to enterprises as they seek digital transformation to remain in sync with the digital economy, upgrade their product quality, and drive the sustainable, high-quality, and healthy development of their nation's economy.}
}
@article {pmid37905003,
year = {2023},
author = {Copeland, CJ and Roddy, JW and Schmidt, AK and Secor, PR and Wheeler, TJ},
title = {VIBES: A Workflow for Annotating and Visualizing Viral Sequences Integrated into Bacterial Genomes.},
journal = {bioRxiv : the preprint server for biology},
volume = {},
number = {},
pages = {},
pmid = {37905003},
support = {R01 AI138981/AI/NIAID NIH HHS/United States ; R01 GM132600/GM/NIGMS NIH HHS/United States ; },
abstract = {Bacteriophages are viruses that infect bacteria. Many bacteriophages integrate their genomes into the bacterial chromosome and become prophages. Prophages may substantially burden or benefit host bacteria fitness, acting in some cases as parasites and in others as mutualists, and have been demonstrated to increase host virulence. The increasing ease of bacterial genome sequencing provides an opportunity to deeply explore prophage prevalence and insertion sites. Here we present VIBES, a workflow intended to automate prophage annotation in complete bacterial genome sequences. VIBES provides additional context to prophage annotations by annotating bacterial genes and viral proteins in user-provided bacterial and viral genomes. The VIBES pipeline is implemented as a Nextflow-driven workflow, providing a simple, unified interface for execution on local, cluster, and cloud computing environments. For each step of the pipeline, a container including all necessary software dependencies is provided. VIBES produces results in simple tab separated format and generates intuitive and interactive visualizations for data exploration. Despite VIBES' primary emphasis on prophage annotation, its generic alignment-based design allows it to be deployed as a general-purpose sequence similarity search manager. We demonstrate the utility of the VIBES prophage annotation workflow by searching for 178 Pf phage genomes across 1,072 Pseudomonas spp. genomes. VIBES software is available at https://github.com/TravisWheelerLab/VIBES.},
}
@article {pmid37899771,
year = {2023},
author = {Cai, T and Herner, K and Yang, T and Wang, M and Acosta Flechas, M and Harris, P and Holzman, B and Pedro, K and Tran, N},
title = {Accelerating Machine Learning Inference with GPUs in ProtoDUNE Data Processing.},
journal = {Computing and software for big science},
volume = {7},
number = {1},
pages = {11},
pmid = {37899771},
issn = {2510-2044},
abstract = {We study the performance of a cloud-based GPU-accelerated inference server to speed up event reconstruction in neutrino data batch jobs. Using detector data from the ProtoDUNE experiment and employing the standard DUNE grid job submission tools, we attempt to reprocess the data by running several thousand concurrent grid jobs, a rate we expect to be typical of current and future neutrino physics experiments. We process most of the dataset with the GPU version of our processing algorithm and the remainder with the CPU version for timing comparisons. We find that a 100-GPU cloud-based server is able to easily meet the processing demand, and that using the GPU version of the event processing algorithm is two times faster than processing these data with the CPU version when comparing to the newest CPUs in our sample. The amount of data transferred to the inference server during the GPU runs can overwhelm even the highest-bandwidth network switches, however, unless care is taken to observe network facility limits or otherwise distribute the jobs to multiple sites. We discuss the lessons learned from this processing campaign and several avenues for future improvements.},
}
@article {pmid37898096,
year = {2023},
author = {Horsley, JJ and Thomas, RH and Chowdhury, FA and Diehl, B and McEvoy, AW and Miserocchi, A and de Tisi, J and Vos, SB and Walker, MC and Winston, GP and Duncan, JS and Wang, Y and Taylor, PN},
title = {Complementary structural and functional abnormalities to localise epileptogenic tissue.},
journal = {EBioMedicine},
volume = {97},
number = {},
pages = {104848},
pmid = {37898096},
issn = {2352-3964},
support = {/WT_/Wellcome Trust/United Kingdom ; MR/T04294X/1/MRC_/Medical Research Council/United Kingdom ; U01 NS090407/NS/NINDS NIH HHS/United States ; },
mesh = {Humans ; Retrospective Studies ; *Epilepsy/diagnostic imaging/surgery ; Electroencephalography/methods ; Electrocorticography ; *Drug Resistant Epilepsy/surgery ; Seizures ; },
abstract = {BACKGROUND: When investigating suitability for epilepsy surgery, people with drug-refractory focal epilepsy may have intracranial EEG (iEEG) electrodes implanted to localise seizure onset. Diffusion-weighted magnetic resonance imaging (dMRI) may be acquired to identify key white matter tracts for surgical avoidance. Here, we investigate whether structural connectivity abnormalities, inferred from dMRI, may be used in conjunction with functional iEEG abnormalities to aid localisation of the epileptogenic zone (EZ), improving surgical outcomes in epilepsy.
METHODS: We retrospectively investigated data from 43 patients (42% female) with epilepsy who had surgery following iEEG. Twenty-five patients (58%) were free from disabling seizures (ILAE 1 or 2) at one year. Interictal iEEG functional, and dMRI structural connectivity abnormalities were quantified by comparison to a normative map and healthy controls. We explored whether the resection of maximal abnormalities related to improved surgical outcomes, in both modalities individually and concurrently. Additionally, we suggest how connectivity abnormalities may inform the placement of iEEG electrodes pre-surgically using a patient case study.
FINDINGS: Seizure freedom was 15 times more likely in patients with resection of maximal connectivity and iEEG abnormalities (p = 0.008). Both modalities separately distinguished patient surgical outcome groups and when used simultaneously, a decision tree correctly separated 36 of 43 (84%) patients.
INTERPRETATION: Our results suggest that both connectivity and iEEG abnormalities may localise epileptogenic tissue, and that these two modalities may provide complementary information in pre-surgical evaluations.
FUNDING: This research was funded by UKRI, CDT in Cloud Computing for Big Data, NIH, MRC, Wellcome Trust and Epilepsy Research UK.},
}
@article {pmid37896735,
year = {2023},
author = {Ramzan, M and Shoaib, M and Altaf, A and Arshad, S and Iqbal, F and Castilla, ÁK and Ashraf, I},
title = {Distributed Denial of Service Attack Detection in Network Traffic Using Deep Learning Algorithm.},
journal = {Sensors (Basel, Switzerland)},
volume = {23},
number = {20},
pages = {},
pmid = {37896735},
issn = {1424-8220},
support = {N/A//the European University of Atlantic/ ; },
abstract = {Internet security is a major concern these days due to the increasing demand for information technology (IT)-based platforms and cloud computing. With its expansion, the Internet has been facing various types of attacks. Viruses, denial of service (DoS) attacks, distributed DoS (DDoS) attacks, code injection attacks, and spoofing are the most common types of attacks in the modern era. Due to the expansion of IT, the volume and severity of network attacks have been increasing lately. DoS and DDoS are the most frequently reported network traffic attacks. Traditional solutions such as intrusion detection systems and firewalls cannot detect complex DDoS and DoS attacks. With the integration of artificial intelligence-based machine learning and deep learning methods, several novel approaches have been presented for DoS and DDoS detection. In particular, deep learning models have played a crucial role in detecting DDoS attacks due to their exceptional performance. This study adopts deep learning models including recurrent neural network (RNN), long short-term memory (LSTM), and gated recurrent unit (GRU) to detect DDoS attacks on the most recent dataset, CICDDoS2019, and a comparative analysis is conducted with the CICIDS2017 dataset. The comparative analysis contributes to the development of a competent and accurate method for detecting DDoS attacks with reduced execution time and complexity. The experimental results demonstrate that models perform equally well on the CICDDoS2019 dataset with an accuracy score of 0.99, but there is a difference in execution time, with GRU showing less execution time than those of RNN and LSTM.}
}
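As a hedged illustration of the GRU variant compared in the entry above, the sketch below trains a tiny GRU-based binary flow classifier; the window length and feature count are placeholders, and real use would load CICDDoS2019 flow features instead of random arrays.
```python
# Hedged sketch: a GRU classifier for benign-vs-DDoS flow windows.
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers, models

timesteps, features = 10, 20            # packets per window x features per packet
model = models.Sequential([
    layers.Input(shape=(timesteps, features)),
    layers.GRU(64),                      # gated recurrent unit encoder
    layers.Dense(32, activation="relu"),
    layers.Dense(1, activation="sigmoid"),   # benign vs DDoS
])
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])

X = np.random.rand(256, timesteps, features).astype("float32")  # stand-in data
y = np.random.randint(0, 2, size=(256, 1))
model.fit(X, y, epochs=1, batch_size=32, verbose=0)
print(model.evaluate(X, y, verbose=0))   # [loss, accuracy]
```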
@article {pmid37896596,
year = {2023},
author = {Sheu, RK and Lin, YC and Pardeshi, MS and Huang, CY and Pai, KC and Chen, LC and Huang, CC},
title = {Adaptive Autonomous Protocol for Secured Remote Healthcare Using Fully Homomorphic Encryption (AutoPro-RHC).},
journal = {Sensors (Basel, Switzerland)},
volume = {23},
number = {20},
pages = {},
pmid = {37896596},
issn = {1424-8220},
support = {Grant MOST 111-2321-B-075A-001//Ministry of Science and Technology/ ; },
mesh = {Humans ; *Blood Glucose Self-Monitoring ; *Computer Security ; Blood Glucose ; Confidentiality ; Privacy ; Delivery of Health Care ; },
abstract = {The outreach of healthcare services is a challenge to remote areas with affected populations. Fortunately, remote health monitoring (RHM) has improved the hospital service quality and has proved its sustainable growth. However, the absence of security may breach the Health Insurance Portability and Accountability Act (HIPAA), which has an exclusive set of rules for the privacy of medical data. Therefore, the goal of this work is to design and implement the adaptive Autonomous Protocol (AutoPro) on the patient's remote healthcare (RHC) monitoring data for the hospital using fully homomorphic encryption (FHE). The aim is to perform adaptive autonomous FHE computations on recent RHM data for providing health status reporting and maintaining the confidentiality of every patient. The autonomous protocol works independently within the group of prime hospital servers without dependency on a third-party system. The adaptiveness of the protocol modes is based on the patient's affected level of slight, medium, and severe cases. Related applications are given as glucose monitoring for diabetes, digital blood pressure for stroke, pulse oximeter for COVID-19, electrocardiogram (ECG) for cardiac arrest, etc. The design for this work consists of an autonomous protocol, hospital servers combining multiple prime/local hospitals, and an algorithm based on the fast fully homomorphic encryption over the torus (TFHE) library with a ring-variant by the Gentry, Sahai, and Waters (GSW) scheme. The concrete-ML model used within this work is trained using an open heart disease dataset from the UCI machine learning repository. Preprocessing is performed to recover the lost and incomplete data in the dataset. The concrete-ML model is evaluated both on the workstation and cloud server. Also, the FHE protocol is implemented on the AWS cloud network with performance details. The advantages entail providing confidentiality to the patient's data/report while saving the travel and waiting time for the hospital services. The patient's data remain completely confidential, and the patient can receive emergency services immediately. The FHE results show that the highest accuracy is achieved by support vector classification (SVC) of 88% and linear regression (LR) of 86% with the area under curve (AUC) of 91% and 90%, respectively. Ultimately, the FHE-based protocol presents a novel system that is successfully demonstrated on the cloud network.}
}
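The entry above builds on concrete-ML-style encrypted inference. The sketch below follows the general fit, compile, predict-under-FHE pattern that library documents; argument names vary across concrete-ml releases, and the features and labels here are synthetic stand-ins, so treat this as an assumed shape rather than exact API.
```python
# Hedged sketch: train, compile to an FHE circuit, then run encrypted inference.
import numpy as np
from concrete.ml.sklearn import LogisticRegression

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 6)).astype("float32")   # stand-in vitals features
y = (X[:, 0] + X[:, 1] > 0).astype(int)           # stand-in risk label

model = LogisticRegression()
model.fit(X, y)
model.compile(X)                                  # build the FHE circuit
enc_pred = model.predict(X[:5], fhe="execute")    # encrypted inference
print(enc_pred)
```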
@article {pmid37896541,
year = {2023},
author = {Ramachandran, D and Naqi, SM and Perumal, G and Abbas, Q},
title = {DLTN-LOSP: A Novel Deep-Linear-Transition-Network-Based Resource Allocation Model with the Logic Overhead Security Protocol for Cloud Systems.},
journal = {Sensors (Basel, Switzerland)},
volume = {23},
number = {20},
pages = {},
pmid = {37896541},
issn = {1424-8220},
support = {IMSIU-RP23067//Deanship of Scientific Research at Imam Mohammad Ibn Saud Islamic University (IMSIU)/ ; },
abstract = {Cloud organizations now face a challenge in managing the enormous volume of data and various resources in the cloud due to the rapid growth of the virtualized environment with many service users, ranging from small business owners to large corporations. The performance of cloud computing may suffer from ineffective resource management. As a result, resources must be distributed fairly among various stakeholders without sacrificing the organization's profitability or the satisfaction of its customers. A customer's request cannot be put on hold indefinitely just because the necessary resources are not available on the board. Therefore, a novel cloud resource allocation model incorporating security management is developed in this paper. Here, the Deep Linear Transition Network (DLTN) mechanism is developed for effectively allocating resources to cloud systems. Then, an Adaptive Mongoose Optimization Algorithm (AMOA) is deployed to compute the beamforming solution for reward prediction, which supports the process of resource allocation. Moreover, the Logic Overhead Security Protocol (LOSP) is implemented to ensure secured resource management in the cloud system, where Burrows-Abadi-Needham (BAN) logic is used to predict the agreement logic. During the results analysis, the performance of the proposed DLTN-LOSP model is validated and compared using different metrics such as makespan, processing time, and utilization rate. For system validation and testing, 100 to 500 resources are used in this study, and the results achieved a makespan improvement of 2.3% and a utilization rate of 13%. Moreover, the obtained results confirm the superiority of the proposed framework, with better performance outcomes.}
}
@article {pmid37896525,
year = {2023},
author = {Pierleoni, P and Concetti, R and Belli, A and Palma, L and Marzorati, S and Esposito, M},
title = {A Cloud-IoT Architecture for Latency-Aware Localization in Earthquake Early Warning.},
journal = {Sensors (Basel, Switzerland)},
volume = {23},
number = {20},
pages = {},
pmid = {37896525},
issn = {1424-8220},
abstract = {An effective earthquake early warning system requires rapid and reliable earthquake source detection. Despite the numerous proposed epicenter localization solutions in recent years, their utilization within the Internet of Things (IoT) framework and integration with IoT-oriented cloud platforms remain underexplored. This paper proposes a complete IoT architecture for earthquake detection, localization, and event notification. The architecture, which has been designed, deployed, and tested on a standard cloud platform, introduces an innovative approach by implementing P-wave "picking" directly on IoT devices, deviating from traditional regional earthquake early warning (EEW) approaches. Pick association, source localization, event declaration, and user notification functionalities are also deployed on the cloud. The cloud integration simplifies the integration of other services in the architecture, such as data storage and device management. Moreover, a localization algorithm based on the hyperbola method is proposed, applying the time-difference-of-arrival multilateration often used in wireless sensor network applications. The results show that the proposed end-to-end architecture is able to provide a quick estimate of the earthquake epicenter location with acceptable errors for an EEW system scenario. Rigorous testing against the standard of reference in Italy for regional EEW showed an overall 3.39 s gain in the system localization speed, thus offering a tangible metric of the efficiency and potential of the proposed system as an EEW solution.}
}
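On-device P-wave "picking", as in the entry above, is classically done with an STA/LTA trigger. The sketch below implements that textbook technique on a synthetic trace; the paper's actual picker and thresholds are not specified here, so window lengths and the trigger level are our assumptions.
```python
# Hedged sketch: short-term/long-term average (STA/LTA) onset trigger.
import numpy as np

def sta_lta(trace, sta_len, lta_len):
    """Ratio of short-term to long-term average energy, per sample."""
    energy = trace.astype(float) ** 2
    csum = np.cumsum(energy)
    sta = (csum[sta_len:] - csum[:-sta_len]) / sta_len
    lta = (csum[lta_len:] - csum[:-lta_len]) / lta_len
    n = min(len(sta), len(lta))           # align the two series at the end
    return sta[-n:] / np.maximum(lta[-n:], 1e-12)

fs = 100                                   # sampling rate, Hz
trace = np.random.default_rng(1).normal(0, 0.1, 10 * fs)   # background noise
trace[600:] += np.sin(np.linspace(0, 60, 400))             # synthetic P onset
ratio = sta_lta(trace, sta_len=fs // 2, lta_len=5 * fs)
trigger = np.argmax(ratio > 4.0)           # index of first sample above threshold
print(f"trigger index: {trigger}, ratio: {ratio[trigger]:.1f}")
```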
@article {pmid37895480,
year = {2023},
author = {Lorenzo-Villegas, DL and Gohil, NV and Lamo, P and Gurajala, S and Bagiu, IC and Vulcanescu, DD and Horhat, FG and Sorop, VB and Diaconu, M and Sorop, MI and Oprisoni, A and Horhat, RM and Susan, M and MohanaSundaram, A},
title = {Innovative Biosensing Approaches for Swift Identification of Candida Species, Intrusive Pathogenic Organisms.},
journal = {Life (Basel, Switzerland)},
volume = {13},
number = {10},
pages = {},
pmid = {37895480},
issn = {2075-1729},
abstract = {Candida is the largest genus of medically significant fungi. Although most of its members are commensals, residing harmlessly in human bodies, some are opportunistic and dangerously invasive. These have the ability to cause severe nosocomial candidiasis and candidemia that affect the viscera and bloodstream. A prompt diagnosis will lead to a successful treatment modality. The smart solution of biosensing technologies for rapid and precise detection of Candida species has made remarkable progress. The development of point-of-care (POC) biosensor devices involves sensor precision down to pico-/femtogram level, cost-effectiveness, portability, rapidity, and user-friendliness. However, futuristic diagnostics will depend on exploiting technologies such as multiplexing for high-throughput screening, CRISPR, artificial intelligence (AI), neural networks, the Internet of Things (IoT), and cloud computing of medical databases. This review gives an insight into different biosensor technologies designed for the detection of medically significant Candida species, especially Candida albicans and C. auris, and their applications in the medical setting.},
}
@article {pmid37893978,
year = {2023},
author = {Dineva, K and Atanasova, T},
title = {Health Status Classification for Cows Using Machine Learning and Data Management on AWS Cloud.},
journal = {Animals : an open access journal from MDPI},
volume = {13},
number = {20},
pages = {},
pmid = {37893978},
issn = {2076-2615},
support = {Д01-62/18.03.2021///Ministry of Education and Science of the Republic Bulgaria/ ; },
abstract = {The health and welfare of livestock are significant for ensuring the sustainability and profitability of the agricultural industry. Addressing efficient ways to monitor and report the health status of individual cows is critical to prevent outbreaks and maintain herd productivity. The purpose of the study is to develop a machine learning (ML) model to classify the health status of milk cows into three categories. In this research, data are collected from existing non-invasive IoT devices and tools in a dairy farm, monitoring the micro- and macroenvironment of the cow in combination with particular information on age, days in milk, lactation, and more. A workflow of various data-processing methods is systematized and presented to create a complete, efficient, and reusable roadmap for data processing, modeling, and real-world integration. Following the proposed workflow, the data were treated, and five different ML algorithms were trained and tested to select the most descriptive one to monitor the health status of individual cows. The highest result for health status assessment is obtained by random forest classifier (RFC) with an accuracy of 0.959, recall of 0.954, and precision of 0.97. To increase the security, speed, and reliability of the work process, a cloud architecture of services is presented to integrate the trained model as an additional functionality in the Amazon Web Services (AWS) environment. The classification results of the ML model are visualized in a newly created interface in the client application.},
}
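The entry above reports a three-class random forest health classifier. As a hedged illustration, the sketch below trains one in scikit-learn; the feature columns are examples of the kinds of variables the abstract mentions (age, days in milk, environment) and the labels are synthetic, not the study's schema or data.
```python
# Hedged sketch: three-class cow health status with a random forest.
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split

rng = np.random.default_rng(42)
n = 600
X = np.column_stack([
    rng.uniform(2, 10, n),       # age, years
    rng.uniform(0, 305, n),      # days in milk
    rng.uniform(1, 6, n),        # lactation number
    rng.normal(21, 4, n),        # barn temperature, deg C
])
y = rng.integers(0, 3, n)        # 0=healthy, 1=at risk, 2=ill (stand-in labels)

X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.2, random_state=0)
clf = RandomForestClassifier(n_estimators=200, random_state=0).fit(X_tr, y_tr)
print(classification_report(y_te, clf.predict(X_te), digits=3))
```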
@article {pmid37886380,
year = {2023},
author = {Intelligence And Neuroscience, C},
title = {Retracted: Cloud Computing Load Balancing Mechanism Taking into Account Load Balancing Ant Colony Optimization Algorithm.},
journal = {Computational intelligence and neuroscience},
volume = {2023},
number = {},
pages = {9831926},
pmid = {37886380},
issn = {1687-5273},
abstract = {[This retracts the article DOI: 10.1155/2022/3120883.].},
}
@article {pmid37885760,
year = {2023},
author = {Hachisuca, AMM and de Souza, EG and Oliveira, WKM and Bazzi, CL and Donato, DG and Mendes, IS and Abdala, MC and Mercante, E},
title = {AgDataBox-IoT - application development for agrometeorological stations in smart.},
journal = {MethodsX},
volume = {11},
number = {},
pages = {102419},
pmid = {37885760},
issn = {2215-0161},
abstract = {Currently, Brazil is one of the world's largest grain producers and exporters. Agriculture entered its 4.0 version, also known as digital agriculture, in 2017, after industry entered its 4.0 era in 2011. This new paradigm uses Internet of Things (IoT) techniques, sensors installed in the field, networks of interconnected sensors in the plot, drones for crop monitoring, multispectral cameras, storage and processing of data in Cloud Computing, and Big Data techniques to process the large volumes of generated data. One of the practical options for implementing precision agriculture is the segmentation of the plot into management zones, aiming at maximizing profits according to the productive potential of each zone, being economically viable even for small producers. Considering that climate factors directly influence yield, this study describes the development of a sensor network for climate monitoring of management zones (microclimates), allowing the identification of climate factors that influence yield at each of its stages.•Application of the internet of things to assist in decision making in the agricultural production system.•AgDataBox (ADB-IoT) web platform has an Application Programming Interface (API).•An agrometeorological station capable of monitoring all meteorological parameters was developed (Kate 3.0).}
}
@article {pmid37879464,
year = {2024},
author = {Dube, T and Dube, T and Dalu, T and Gxokwe, S and Marambanyika, T},
title = {Assessment of land use and land cover, water nutrient and metal concentration related to illegal mining activities in an Austral semi-arid river system: A remote sensing and multivariate analysis approach.},
journal = {The Science of the total environment},
volume = {907},
number = {},
pages = {167919},
doi = {10.1016/j.scitotenv.2023.167919},
pmid = {37879464},
issn = {1879-1026},
abstract = {The mining sector in various countries, particularly in the sub-Saharan African region, faces significant impact from the emergence of small-scale unlicensed artisanal mines. This trend is influenced by the rising demand and prices for minerals, along with prevalent poverty levels. Thus, the detrimental impacts of these artisanal mines on the natural environment (i.e., rivers) have remained poorly understood, particularly in the Zimbabwean context. To understand the consequences of this situation, a study was conducted in the Umzingwane Catchment, located in southern Zimbabwe, focusing on the variations in water nutrient and metal concentrations in rivers affected by illegal mining activities along their riparian zones. Using multi-year Sentinel-2 composite data and the random forest machine learning algorithm on the Google Earth Engine cloud-computing platform, we mapped the spatial distribution of illegal mines in the affected regions and identified seven distinct land use classes, including artisanal mines, bare surfaces, settlements, official mines, croplands, and natural vegetation, with acceptable overall and class accuracies of around 70%. Artisanal mines were found to be located along rivers, a pattern attributed to the large water requirements of the mining process. The water quality analysis revealed elevated nutrient concentrations, such as ammonium and nitrate (range 0.10-20.0 mg L[-1]), which could be attributed to mine drainage from the use of ammonium nitrate explosives during mining activities. Additionally, the prevalence of croplands in the area may have contributed to increased nutrient concentrations. The principal component analysis and hierarchical cluster analysis revealed three clusters, with one of these clusters showing parameters like Ca, Mg, K, Hg and Na, which are usually associated with mineral gypsum found in the drainage of artisanal mines in the selected rivers. Cluster 2 consisted of B, Cu, Fe, Pb, and Mn, which are likely from the natural environment; finally, cluster 3 contained As, Cd, Cr, and Zn, which were likely associated with both legal and illegal mining operations. These findings provide essential insights into the health of the studied river system and the impacts of human activities in the region. They further serve as a foundation for developing and implementing regulatory measures aimed at protecting riverine systems, in line with sustainable development goal 15.1 which focuses on preserving and conserving terrestrial and inland freshwater ecosystems, including rivers. By acting on this information, authorities can work towards safeguarding these vital natural resources and promoting sustainable development in the area.}
}
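The entry above pairs PCA with hierarchical clustering to group water-chemistry parameters. The sketch below reproduces that multivariate step on synthetic data: the element list matches the abstract, but the sample values, site count, and cluster count are illustrative assumptions.
```python
# Hedged sketch: standardize water chemistry, run PCA, Ward-cluster parameters.
import numpy as np
from scipy.cluster.hierarchy import fcluster, linkage
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler

params = ["Ca", "Mg", "K", "Hg", "Na", "B", "Cu", "Fe", "Pb", "Mn",
          "As", "Cd", "Cr", "Zn"]
rng = np.random.default_rng(7)
samples = rng.lognormal(mean=0.0, sigma=1.0, size=(30, len(params)))  # 30 sites

Z = StandardScaler().fit_transform(samples)
pca = PCA(n_components=3).fit(Z)
print("explained variance:", np.round(pca.explained_variance_ratio_, 2))

# Cluster the parameters (columns), as in grouping Ca/Mg/K/Hg/Na together.
link = linkage(Z.T, method="ward")
labels = fcluster(link, t=3, criterion="maxclust")
for cluster in sorted(set(labels)):
    print(cluster, [p for p, l in zip(params, labels) if l == cluster])
```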
@article {pmid37869808,
year = {2023},
author = {Gal-Nadasan, N and Stoicu-Tivadar, V and Gal-Nadasan, E and Dinu, AR},
title = {Robotic Process Automation Based Data Extraction from Handwritten Medical Forms.},
journal = {Studies in health technology and informatics},
volume = {309},
number = {},
pages = {68-72},
doi = {10.3233/SHTI230741},
pmid = {37869808},
issn = {1879-8365},
mesh = {*Robotics ; *Robotic Surgical Procedures ; Software ; Automation ; Machine Learning ; },
abstract = {This paper proposes to create an RPA (robotic process automation) based software robot that can digitalize and extract data from handwritten medical forms. The RPA robot uses a taxonomy that is specific to the medical form and associates the extracted data with the taxonomy. This is accomplished using UiPath Studio to create the robot, Google Cloud Vision OCR (optical character recognition) to create the DOM (digital object model) file, and the UiPath machine learning (ML) API to extract the data from the medical form. Because the medical form is in a non-standard format, a data extraction template had to be applied. After the extraction process, the data can be saved into databases or spreadsheets.}
}
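The OCR step in the entry above uses Google Cloud Vision's document text detection, which handles handwriting. A minimal Python sketch of that call follows; it assumes configured Vision API credentials, and "form.jpg" is a placeholder scan of a handwritten form.
```python
# Hedged sketch: document (handwriting-capable) OCR with Google Cloud Vision.
from google.cloud import vision

client = vision.ImageAnnotatorClient()   # reads GOOGLE_APPLICATION_CREDENTIALS
with open("form.jpg", "rb") as f:
    image = vision.Image(content=f.read())

response = client.document_text_detection(image=image)
if response.error.message:
    raise RuntimeError(response.error.message)
print(response.full_text_annotation.text[:500])  # extracted form text
```
The response also carries the page/block/paragraph hierarchy from which a DOM-like layout file, as described above, can be derived.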
@article {pmid37867911,
year = {2023},
author = {Eneh, AH and Udanor, CN and Ossai, NI and Aneke, SO and Ugwoke, PO and Obayi, AA and Ugwuishiwu, CH and Okereke, GE},
title = {Towards an improved internet of things sensors data quality for a smart aquaponics system yield prediction.},
journal = {MethodsX},
volume = {11},
number = {},
pages = {102436},
pmid = {37867911},
issn = {2215-0161},
abstract = {The mobile aquaponics system is a sustainable integrated aquaculture-crop production system in which wastewater from fish ponds is utilized in crop production, filtered, and returned for aquaculture uses. This process ensures the optimization of water and nutrients as well as the simultaneous production of fish and crops in portable homestead models. The lack of datasets and documentation on monitoring growth parameters in Sub-Saharan Africa hampers the effective management and prediction of yields. Water quality impacts the fish growth rate, feed consumption, and general well-being irrespective of the system. This research presents an improvement on the IoT water quality sensor system developed in an earlier study carried out in conjunction with two local catfish farmers. The improved system produced datasets that, when trained using several machine learning algorithms, achieved a test RMSE score of 0.6140 against 1.0128 from the old system for fish length prediction using the Decision Tree Regressor. Further testing with the XGBoost Regressor achieved a test RMSE score of 7.0192 for fish weight prediction from the initial IoT dataset and 0.7793 from the improved IoT dataset. Both systems achieved a prediction accuracy of 99%. These evaluations clearly show that the improved system outperformed the initial one.•The discovery and use of improved IoT pond water quality sensors.•Development of machine learning models to evaluate the methods.•Testing of the datasets from the two methods using the machine learning models.}
}
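The entry above compares fish-size regressors by test RMSE. The sketch below mirrors that evaluation with a decision tree regressor on synthetic stand-in data; the water-quality features and the length relationship are invented for illustration, not the study's pond datasets.
```python
# Hedged sketch: RMSE evaluation of a fish-length regressor.
import numpy as np
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor

rng = np.random.default_rng(3)
n = 400
X = np.column_stack([
    rng.normal(27, 2, n),      # water temperature, deg C
    rng.normal(6.8, 0.4, n),   # pH
    rng.normal(5.5, 1.0, n),   # dissolved oxygen, mg/L
])
length = 10 + 0.8 * X[:, 0] + 2.0 * X[:, 2] + rng.normal(0, 1, n)  # cm

X_tr, X_te, y_tr, y_te = train_test_split(X, length, random_state=0)
tree = DecisionTreeRegressor(max_depth=5, random_state=0).fit(X_tr, y_tr)
rmse = float(np.sqrt(mean_squared_error(y_te, tree.predict(X_te))))
print(f"test RMSE: {rmse:.3f} cm")
```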
@article {pmid37864543,
year = {2023},
author = {Patel, M and Dayan, I and Fishman, EK and Flores, M and Gilbert, FJ and Guindy, M and Koay, EJ and Rosenthal, M and Roth, HR and Linguraru, MG},
title = {Accelerating artificial intelligence: How federated learning can protect privacy, facilitate collaboration, and improve outcomes.},
journal = {Health informatics journal},
volume = {29},
number = {4},
pages = {14604582231207744},
doi = {10.1177/14604582231207744},
pmid = {37864543},
issn = {1741-2811},
mesh = {Humans ; *Artificial Intelligence ; Privacy ; Learning ; *Pancreatic Neoplasms ; },
abstract = {Cross-institution collaborations are constrained by data-sharing challenges. These challenges hamper innovation, particularly in artificial intelligence, where models require diverse data to ensure strong performance. Federated learning (FL) solves data-sharing challenges. In typical collaborations, data is sent to a central repository where models are trained. With FL, models are sent to participating sites, trained locally, and the model weights are aggregated to create a master model with improved performance. At the 2021 Radiological Society of North America (RSNA) conference, a panel was conducted titled "Accelerating AI: How Federated Learning Can Protect Privacy, Facilitate Collaboration and Improve Outcomes." Two groups shared insights: researchers from the EXAM study (EMR CXR AI Model) and members of the National Cancer Institute's Early Detection Research Network (EDRN) pancreatic cancer working group. EXAM brought together 20 institutions to create a model to predict the oxygen requirements of patients seen in the emergency department with COVID-19 symptoms. The EDRN collaboration is focused on improving outcomes for pancreatic cancer patients through earlier detection. This paper describes major insights from the panel, including direct quotes. The panelists described the impetus for FL, the long-term potential vision of FL, challenges faced in FL, and the immediate path forward for FL.},
}
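The aggregation step described above ("model weights are aggregated to create a master model") is commonly implemented as FedAvg; the numpy sketch below is a generic illustration under that assumption, not the EXAM study's code.

import numpy as np

def fedavg(site_weights, site_sizes):
    # Weighted average of per-layer weights, proportional to each site's data size.
    total = float(sum(site_sizes))
    return [
        sum(w[k] * (n / total) for w, n in zip(site_weights, site_sizes))
        for k in range(len(site_weights[0]))
    ]

# Two sites, one weight matrix each; the first site holds twice the data.
w_global = fedavg([[np.ones((2, 2))], [np.zeros((2, 2))]], site_sizes=[200, 100])
print(w_global[0])  # every entry is 2/3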
@article {pmid37863925,
year = {2023},
author = {Naboureh, A and Li, A and Bian, J and Lei, G and Nan, X},
title = {Land cover dataset of the China Central-Asia West-Asia Economic Corridor from 1993 to 2018.},
journal = {Scientific data},
volume = {10},
number = {1},
pages = {728},
pmid = {37863925},
issn = {2052-4463},
support = {42090015 and 41801370//National Natural Science Foundation of China (National Science Foundation of China)/ ; 42090015 and 41801370//National Natural Science Foundation of China (National Science Foundation of China)/ ; 2019365//Youth Innovation Promotion Association of the Chinese Academy of Sciences (Youth Innovation Promotion Association CAS)/ ; },
abstract = {Land Cover (LC) maps offer vital knowledge for various studies, ranging from sustainable development to climate change. The China Central-Asia West-Asia Economic Corridor region, as a core component of the Belt and Road initiative program, has been experiencing some of the most severe LC change tragedies, such as the Aral Sea crisis and Lake Urmia shrinkage, in recent decades. Therefore, there is a high demand for producing a fine-resolution, spatially-explicit, and long-term LC dataset for this region. However, except China, such dataset for the rest of the region (Kyrgyzstan, Turkmenistan, Kazakhstan, Uzbekistan, Tajikistan, Turkey, and Iran) is currently lacking. Here, we constructed a historical set of six 30-m resolution LC maps between 1993 and 2018 at 5-year time intervals for the seven countries where nearly 200,000 Landsat scenes were classified into nine LC types within Google Earth Engine cloud computing platform. The generated LC maps displayed high accuracies. This publicly available dataset has the potential to be broadly applied in environmental policy and management.},
}
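A minimal sketch of the kind of Landsat classification workflow described above, using the Google Earth Engine Python API; the training-point asset and class property are hypothetical names, and the study's actual methodology is more involved.

import ee
ee.Initialize()  # recent GEE versions may also require a cloud project argument

# Median Landsat 5 surface-reflectance composite for one epoch.
composite = (ee.ImageCollection("LANDSAT/LT05/C02/T1_L2")
             .filterDate("1993-01-01", "1993-12-31")
             .median())
bands = ["SR_B1", "SR_B2", "SR_B3", "SR_B4", "SR_B5", "SR_B7"]

training = ee.FeatureCollection("users/example/lc_points")  # hypothetical asset
samples = composite.select(bands).sampleRegions(
    collection=training, properties=["lc_class"], scale=30)

classifier = ee.Classifier.smileRandomForest(100).train(
    features=samples, classProperty="lc_class", inputProperties=bands)
lc_map = composite.select(bands).classify(classifier)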
@article {pmid37860633,
year = {2023},
author = {Muratore, L and Tsagarakis, N},
title = {XBot2D: towards a robotics hybrid cloud architecture for field robotics.},
journal = {Frontiers in robotics and AI},
volume = {10},
number = {},
pages = {1168694},
pmid = {37860633},
issn = {2296-9144},
abstract = {Nowadays, robotics applications requiring the execution of complex tasks in real-world scenarios still face many challenges related to highly unstructured and dynamic environments, in domains such as emergency response and search and rescue, where robots must operate for prolonged periods while trading off computational performance against power autonomy. In particular, there is a crucial need for robots capable of adapting to such settings while at the same time providing robustness and extended power autonomy. A possible approach to reconciling the conflicting demands of a computationally performant system with long power autonomy is cloud robotics, which can boost the computational capabilities of the robot while reducing energy consumption by offloading resources to the cloud. Nevertheless, the communication constraints due to the limited bandwidth, latency, and connectivity typical of field robotics make cloud-enabled robotics solutions challenging to deploy in real-world applications. In this context, we designed and realized the XBot2D software architecture, which provides a hybrid cloud manager capable of dynamically and seamlessly allocating robotics skills to perform a distributed computation based on the current network condition, the required latency, and the computational/energy resources of the robot in use. The proposed framework leverages the two dimensions, i.e., 2D (local and cloud), in a way that is transparent to the user, providing support for real-time (RT) skill execution on the local robot, as well as machine learning and AI resources on the cloud, with the possibility to automatically relocate the above based on the required performance and communication quality. The XBot2D implementation and its functionalities are presented and validated in realistic tasks involving the CENTAURO robot and the Amazon Web Services Elastic Compute Cloud (AWS EC2) infrastructure under different network conditions.},
}
@article {pmid37860604,
year = {2023},
author = {Post, AR and Ho, N and Rasmussen, E and Post, I and Cho, A and Hofer, J and Maness, AT and Parnell, T and Nix, DA},
title = {Hypermedia-based software architecture enables Test-Driven Development.},
journal = {JAMIA open},
volume = {6},
number = {4},
pages = {ooad089},
pmid = {37860604},
issn = {2574-2531},
support = {P30 CA042014/CA/NCI NIH HHS/United States ; },
abstract = {OBJECTIVES: Using agile software development practices, develop and evaluate an architecture and implementation for reliable and user-friendly self-service management of bioinformatic data stored in the cloud.
MATERIALS AND METHODS: Comprehensive Oncology Research Environment (CORE) Browser is a new open-source web application for cancer researchers to manage sequencing data organized in a flexible format in Amazon Simple Storage Service (S3) buckets. It has a microservices- and hypermedia-based architecture, which we integrated with Test-Driven Development (TDD), the iterative writing of computable specifications for how software should work prior to development. Relying on repeating patterns found in hypermedia-based architectures, we hypothesized that hypermedia would permit developing test "templates" that can be parameterized and executed for each microservice, maximizing code coverage while minimizing effort.
RESULTS: After one-and-a-half years of development, the CORE Browser backend had 121 test templates and 875 custom tests that were parameterized and executed 3031 times, providing 78% code coverage.
DISCUSSION: Architecting to permit test reuse through a hypermedia approach was a key success factor for our testing efforts. CORE Browser's application of hypermedia and TDD illustrates one way to integrate software engineering methods into data-intensive networked applications. Separating bioinformatic data management from analysis distinguishes this platform from others in bioinformatics and may provide stable data management while permitting analysis methods to advance more rapidly.
CONCLUSION: Software engineering practices are underutilized in informatics. Similar informatics projects will more likely succeed through application of good architecture and automated testing. Our approach is broadly applicable to data management tools involving cloud data storage.},
}
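The parameterized "test template" idea above can be sketched generically with pytest; the service root, resource names, and HAL-style link fields below are assumptions for illustration, not CORE Browser's actual API.

import pytest
import requests

BASE = "https://api.example.org"  # hypothetical service root

# One template, executed once per microservice resource: every hypermedia
# collection must expose a self link and an embedded item list.
@pytest.mark.parametrize("resource", ["projects", "samples", "files"])
def test_collection_is_hypermedia(resource):
    resp = requests.get(f"{BASE}/{resource}", timeout=10)
    assert resp.status_code == 200
    body = resp.json()
    assert body["_links"]["self"]["href"].endswith(resource)
    assert isinstance(body["_embedded"]["items"], list)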
@article {pmid37860463,
year = {2023},
author = {Healthcare Engineering, JO},
title = {Retracted: Application of Cloud Computing in the Prediction of Exercise Improvement of Cardiovascular and Digestive Systems in Obese Patients.},
journal = {Journal of healthcare engineering},
volume = {2023},
number = {},
pages = {9872648},
pmid = {37860463},
issn = {2040-2309},
abstract = {[This retracts the article DOI: 10.1155/2021/4695722.].},
}
@article {pmid37860366,
year = {2023},
author = {Healthcare Engineering, JO},
title = {Retracted: Medical Cloud Computing Data Processing to Optimize the Effect of Drugs.},
journal = {Journal of healthcare engineering},
volume = {2023},
number = {},
pages = {9869843},
pmid = {37860366},
issn = {2040-2309},
abstract = {[This retracts the article DOI: 10.1155/2021/5560691.].},
}
@article {pmid37860340,
year = {2023},
author = {Healthcare Engineering, JO},
title = {Retracted: Cloud Computing into Respiratory Rehabilitation Training-Assisted Treatment of Patients with Pneumonia.},
journal = {Journal of healthcare engineering},
volume = {2023},
number = {},
pages = {9795658},
pmid = {37860340},
issn = {2040-2309},
abstract = {[This retracts the article DOI: 10.1155/2021/5884174.].},
}
@article {pmid37859937,
year = {2023},
author = {Hornik, J and Rachamim, M and Graguer, S},
title = {Fog computing: a platform for big-data marketing analytics.},
journal = {Frontiers in artificial intelligence},
volume = {6},
number = {},
pages = {1242574},
pmid = {37859937},
issn = {2624-8212},
abstract = {Marketing science embraces a wide variety of data types and measurement tools necessary for strategy, research, and applied decision making. Managing the marketing data generated by internet of things (IoT) sensors and actuators is one of the biggest challenges faced by marketing managers when deploying an IoT system. This short note shows how traditional cloud-based IoT systems are challenged by the large scale, heterogeneity, and high latency witnessed in some cloud ecosystems. It introduces researchers to one recent breakthrough, fog computing, an emerging concept that decentralizes applications, strategies, and data analytics into the network itself using a distributed and federated computing model. It transforms the centralized cloud into a distributed fog by bringing storage and computation closer to the user end. Fog computing is considered a novel marketplace phenomenon which can support AI and management strategies, especially for the design of "smart marketing".},
}
@article {pmid37856442,
year = {2023},
author = {Uhlrich, SD and Falisse, A and Kidziński, Ł and Muccini, J and Ko, M and Chaudhari, AS and Hicks, JL and Delp, SL},
title = {OpenCap: Human movement dynamics from smartphone videos.},
journal = {PLoS computational biology},
volume = {19},
number = {10},
pages = {e1011462},
pmid = {37856442},
issn = {1553-7358},
support = {P41 EB027060/EB/NIBIB NIH HHS/United States ; R01 AR077604/AR/NIAMS NIH HHS/United States ; },
mesh = {Humans ; *Smartphone ; *Models, Biological ; Muscles/physiology ; Software ; Biomechanical Phenomena ; Movement/physiology ; },
abstract = {Measures of human movement dynamics can predict outcomes like injury risk or musculoskeletal disease progression. However, these measures are rarely quantified in large-scale research studies or clinical practice due to the prohibitive cost, time, and expertise required. Here we present and validate OpenCap, an open-source platform for computing both the kinematics (i.e., motion) and dynamics (i.e., forces) of human movement using videos captured from two or more smartphones. OpenCap leverages pose estimation algorithms to identify body landmarks from videos; deep learning and biomechanical models to estimate three-dimensional kinematics; and physics-based simulations to estimate muscle activations and musculoskeletal dynamics. OpenCap's web application enables users to collect synchronous videos and visualize movement data that is automatically processed in the cloud, thereby eliminating the need for specialized hardware, software, and expertise. We show that OpenCap accurately predicts dynamic measures, like muscle activations, joint loads, and joint moments, which can be used to screen for disease risk, evaluate intervention efficacy, assess between-group movement differences, and inform rehabilitation decisions. Additionally, we demonstrate OpenCap's practical utility through a 100-subject field study, where a clinician using OpenCap estimated musculoskeletal dynamics 25 times faster than a laboratory-based approach at less than 1% of the cost. By democratizing access to human movement analysis, OpenCap can accelerate the incorporation of biomechanical metrics into large-scale research studies, clinical trials, and clinical practice.},
}
@article {pmid37854642,
year = {2023},
author = {Zhang, M},
title = {Optimization Strategy of College Students' Education Management Based on Smart Cloud Platform Teaching.},
journal = {Computational intelligence and neuroscience},
volume = {2023},
number = {},
pages = {5642142},
pmid = {37854642},
issn = {1687-5273},
mesh = {Humans ; *Artificial Intelligence ; *Cloud Computing ; Students ; Big Data ; Commerce ; },
abstract = {With the passage of time and social changes, the form of education is also changing step by step. In just a few decades, information technology has developed by leaps and bounds, yet digital education has not yet been widely promoted. Intelligent education cloud platforms based on big data, the Internet of Things, cloud computing, and artificial intelligence have begun to emerge. Research on the "smart campus" cloud platform is conducive to improving the utilization rate of existing hardware equipment in colleges and universities and to improving the level of teaching software deployment. At the same time, this research also provides a new idea for research in the field of cloud security. While cloud computing brings convenience to teaching work, it also brings new problems to system security. At present, virtualization technology is still in the ascendant stage in the construction of the "smart campus" in colleges and universities and is gradually being applied to cloud computing service products. There are many existing cases of teaching resource platform construction, but most of them are modified from early resource management systems, which suffer from the strong coupling of a single system; insufficient functions for collecting, processing, searching, sharing, and reusing resources; and weak application support for related business systems. Against this social background, this paper studies a teaching process management system for the intelligent classroom.},
}
@article {pmid37853124,
year = {2023},
author = {Wang, Y and Hollingsworth, PM and Zhai, D and West, CD and Green, JMH and Chen, H and Hurni, K and Su, Y and Warren-Thomas, E and Xu, J and Ahrends, A},
title = {High-resolution maps show that rubber causes substantial deforestation.},
journal = {Nature},
volume = {623},
number = {7986},
pages = {340-346},
pmid = {37853124},
issn = {1476-4687},
mesh = {Asia, Southeastern ; Biodiversity ; Cloud Computing ; *Conservation of Natural Resources/statistics & numerical data/trends ; *Forests ; *Geographic Mapping ; *Rubber ; *Satellite Imagery ; },
abstract = {Understanding the effects of cash crop expansion on natural forest is of fundamental importance. However, for most crops there are no remotely sensed global maps[1], and global deforestation impacts are estimated using models and extrapolations. Natural rubber is an example of a principal commodity for which deforestation impacts have been highly uncertain, with estimates differing more than fivefold[1-4]. Here we harnessed Earth observation satellite data and cloud computing[5] to produce high-resolution maps of rubber (10 m pixel size) and associated deforestation (30 m pixel size) for Southeast Asia. Our maps indicate that rubber-related forest loss has been substantially underestimated in policy, by the public and in recent reports[6-8]. Our direct remotely sensed observations show that deforestation for rubber is at least twofold to threefold higher than suggested by figures now widely used for setting policy[4]. With more than 4 million hectares of forest loss for rubber since 1993 (at least 2 million hectares since 2000) and more than 1 million hectares of rubber plantations established in Key Biodiversity Areas, the effects of rubber on biodiversity and ecosystem services in Southeast Asia could be extensive. Thus, rubber deserves more attention in domestic policy, within trade agreements and in incoming due-diligence legislation.},
}
@article {pmid37850120,
year = {2023},
author = {Teng, Z and Chen, J and Wang, J and Wu, S and Chen, R and Lin, Y and Shen, L and Jackson, R and Zhou, J and Yang, C},
title = {Panicle-Cloud: An Open and AI-Powered Cloud Computing Platform for Quantifying Rice Panicles from Drone-Collected Imagery to Enable the Classification of Yield Production in Rice.},
journal = {Plant phenomics (Washington, D.C.)},
volume = {5},
number = {},
pages = {0105},
pmid = {37850120},
issn = {2643-6515},
abstract = {Rice (Oryza sativa) is an essential staple food for many rice-consuming nations in the world, and it is therefore important to improve its yield production under global climate change. To evaluate different rice varieties' yield performance, yield-related traits such as panicle number per unit area (PNpM[2]) are key indicators, which have attracted much attention from many plant research groups. Nevertheless, it is still challenging to conduct large-scale screening of rice panicles to quantify the PNpM[2] trait due to complex field conditions, the large variation among rice cultivars, and their panicle morphological features. Here, we present Panicle-Cloud, an open and artificial intelligence (AI)-powered cloud computing platform that is capable of quantifying rice panicles from drone-collected imagery. To facilitate the development of AI-powered detection models, we first established an open and diverse rice panicle detection dataset that was annotated by a group of rice specialists; then, we integrated several state-of-the-art deep learning models (including a preferred model called Panicle-AI) into the Panicle-Cloud platform, so that nonexpert users could select a pretrained model to detect rice panicles from their own aerial images. We trialed the AI models with images collected at different altitudes and growth stages, through which the right timing and preferred image resolutions for phenotyping rice panicles in the field were identified. Then, we applied the platform in a two-season rice breeding trial to validate its biological relevance and classified yield production using the platform-derived PNpM[2] trait from hundreds of rice varieties. Through correlation analysis between the computational analysis and manual scoring, we found that the platform could quantify the PNpM[2] trait reliably, based on which yield production was classified with high accuracy. Hence, we trust that our work demonstrates a valuable advance in phenotyping the PNpM[2] trait in rice, which provides a useful toolkit to enable rice breeders to screen and select desired rice varieties under field conditions.},
}
@article {pmid37848896,
year = {2023},
author = {Kline, JA and Reed, B and Frost, A and Alanis, N and Barshay, M and Melzer, A and Galbraith, JW and Budd, A and Winn, A and Pun, E and Camargo, CA},
title = {Database derived from an electronic medical record-based surveillance network of US emergency department patients with acute respiratory illness.},
journal = {BMC medical informatics and decision making},
volume = {23},
number = {1},
pages = {224},
pmid = {37848896},
issn = {1472-6947},
mesh = {Humans ; *Electronic Health Records ; Emergency Service, Hospital ; *Respiratory Tract Infections/diagnosis/epidemiology ; Laboratories ; Public Health ; },
abstract = {BACKGROUND: For surveillance of episodic illness, the emergency department (ED) represents one of the largest interfaces for generalizable data about segments of the US public experiencing a need for unscheduled care. This protocol manuscript describes the development and operation of a national network linking symptom, clinical, laboratory and disposition data that provides a public database dedicated to the surveillance of acute respiratory infections (ARIs) in EDs.
METHODS: The Respiratory Virus Laboratory Emergency Department Network Surveillance (RESP-LENS) network includes 26 academic investigators, from 24 sites, with 91 hospitals, and the Centers for Disease Control and Prevention (CDC) to survey viral infections. All data originate from electronic medical records (EMRs) accessed by structured query language (SQL) coding. Each Tuesday, data are imported into the standard data form for ARI visits that occurred the prior week (termed the index file); outcomes at 30 days and ED volume are also recorded. Up to 325 data fields can be populated for each case. Data are transferred from sites into an encrypted Google Cloud Platform, then programmatically checked for compliance, parsed, and aggregated into a central database housed on a second cloud platform prior to transfer to CDC.
RESULTS: As of August, 2023, the network has reported data on over 870,000 ARI cases selected from approximately 5.2 million ED encounters. Post-contracting challenges to network execution have included local shifts in testing policies and platforms, delays in ICD-10 coding to detect ARI cases, and site-level personnel turnover. The network is addressing these challenges and is poised to begin streaming weekly data for dissemination.
CONCLUSIONS: The RESP-LENS network provides a weekly updated database that is a public health resource to survey the epidemiology, viral causes, and outcomes of ED patients with acute respiratory infections.},
}
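A hedged sketch of the weekly extraction pattern the protocol describes (query the prior week's ARI visits from an EMR); the table and column names are invented, and sqlite3 stands in for each site's actual database engine.

import datetime as dt
import sqlite3  # stand-in for the sites' real EMR back ends

def prior_week(today=None):
    today = today or dt.date.today()
    end = today - dt.timedelta(days=today.weekday() + 1)  # most recent Sunday
    return end - dt.timedelta(days=6), end                # Monday..Sunday window

start, end = prior_week()
query = """
    SELECT encounter_id, arrival_time, icd10_codes, viral_test_result
    FROM ed_encounters
    WHERE date(arrival_time) BETWEEN ? AND ? AND is_ari_case = 1
"""
with sqlite3.connect("emr_extract.db") as conn:  # hypothetical local extract
    rows = conn.execute(query, (start.isoformat(), end.isoformat())).fetchall()
print(len(rows), "ARI visits between", start, "and", end)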
@article {pmid37848573,
year = {2023},
author = {Atchyuth, BAS and Swain, R and Das, P},
title = {Near real-time flood inundation and hazard mapping of Baitarani River Basin using Google Earth Engine and SAR imagery.},
journal = {Environmental monitoring and assessment},
volume = {195},
number = {11},
pages = {1331},
pmid = {37848573},
issn = {1573-2959},
mesh = {*Floods ; *Rivers ; Search Engine ; Environmental Monitoring/methods ; Water ; },
abstract = {Flood inundation mapping and satellite imagery monitoring are critical and effective responses during flood events. Mapping of a flood using optical data is limited by the unavailability of cloud-free images. Because of its capacity to penetrate clouds and operate in all kinds of weather, synthetic aperture radar is preferred for water inundation mapping. Flood mapping in Eastern India's Baitarani River Basin for 2018, 2019, 2020, 2021, and 2022 was performed in this study using Sentinel-1 imagery and Google Earth Engine with Otsu's algorithm. Different machine-learning algorithms were used to map the LULC of the study region. Dual polarizations VH and VV and their combinations VV×VH, VV+VH, VH-VV, VV-VH, VV/VH, and VH/VV were examined to identify non-water and water bodies. The normalized difference water index (NDWI) map derived from Sentinel-2 data validated the surface water inundation with 80% accuracy. The total inundated areas were identified as 440.3 km[2] in 2018, 268.58 km[2] in 2019, 178.40 km[2] in 2020, 203.79 km[2] in 2021, and 321.33 km[2] in 2022. Overlaying the flood maps on the LULC map indicated that flooding highly affected agricultural and urban areas in these years. This approach, using near-real-time Sentinel-1 SAR imagery and the GEE platform, can be operationalized for periodic flood mapping, helps develop flood control measures, and enhances flood management. The generated annual flood inundation maps are also useful for policy development, agricultural yield estimation, crop insurance framing, etc.},
}
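Two computational ingredients named above, Otsu's threshold on SAR backscatter and NDWI for validation, can be sketched with numpy and scikit-image; the arrays below are random placeholders for real Sentinel scenes.

import numpy as np
from skimage.filters import threshold_otsu

vh_db = np.random.normal(-18, 4, size=(512, 512))  # placeholder VH backscatter (dB)
water_mask = vh_db < threshold_otsu(vh_db)         # low backscatter -> open water

# NDWI = (green - NIR) / (green + NIR), from Sentinel-2 bands B3 and B8.
green, nir = np.random.rand(512, 512), np.random.rand(512, 512)
ndwi = (green - nir) / (green + nir + 1e-9)
ndwi_water = ndwi > 0                              # common heuristic cutoff

print(f"mask agreement: {np.mean(water_mask == ndwi_water):.2%}")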
@article {pmid37841693,
year = {2023},
author = {Familiar, AM and Mahtabfar, A and Fathi Kazerooni, A and Kiani, M and Vossough, A and Viaene, A and Storm, PB and Resnick, AC and Nabavizadeh, A},
title = {Radio-pathomic approaches in pediatric neuro-oncology: Opportunities and challenges.},
journal = {Neuro-oncology advances},
volume = {5},
number = {1},
pages = {vdad119},
pmid = {37841693},
issn = {2632-2498},
support = {75N91019D00024/CA/NCI NIH HHS/United States ; },
abstract = {With medical software platforms moving to cloud environments with scalable storage and computing, the translation of predictive artificial intelligence (AI) models to aid in clinical decision-making and facilitate personalized medicine for cancer patients is becoming a reality. Medical imaging, namely radiologic and histologic images, has immense analytical potential in neuro-oncology, and models utilizing integrated radiomic and pathomic data may yield a synergistic effect and provide a new modality for precision medicine. At the same time, the ability to harness multi-modal data is met with challenges in aggregating data across medical departments and institutions, as well as significant complexity in modeling the phenotypic and genotypic heterogeneity of pediatric brain tumors. In this paper, we review recent pathomic and integrated pathomic, radiomic, and genomic studies with clinical applications. We discuss current challenges limiting translational research on pediatric brain tumors and outline technical and analytical solutions. Overall, we propose that to empower the potential residing in radio-pathomics, systemic changes in cross-discipline data management and end-to-end software platforms to handle multi-modal data sets are needed, in addition to embracing modern AI-powered approaches. These changes can improve the performance of predictive models, and ultimately the ability to advance brain cancer treatments and patient outcomes through the development of such models.},
}
@article {pmid37840574,
year = {2023},
author = {Jang, H and Park, S and Koh, H},
title = {Comprehensive microbiome causal mediation analysis using MiMed on user-friendly web interfaces.},
journal = {Biology methods & protocols},
volume = {8},
number = {1},
pages = {bpad023},
pmid = {37840574},
issn = {2396-8923},
abstract = {It is a central goal of human microbiome studies to see the roles of the microbiome as a mediator that transmits environmental, behavioral, or medical exposures to health or disease outcomes. Yet, mediation analysis is not used as much as it should be. One reason is the lack of carefully planned routines, compilers, and automated computing systems for microbiome mediation analysis (MiMed) to perform a series of data processing, diversity calculation, data normalization, downstream data analysis, and visualizations. Many researchers in various disciplines (e.g. clinicians, public health practitioners, and biologists) are also unfamiliar with the related statistical methods and programming languages on command-line interfaces. Thus, in this article, we introduce a web cloud computing platform, named MiMed, that enables comprehensive MiMed on user-friendly web interfaces. The main features of MiMed are as follows. First, MiMed can survey the microbiome in various spheres (i) as a whole microbial ecosystem using different ecological measures (e.g. alpha- and beta-diversity indices) or (ii) as individual microbial taxa (e.g. phyla, classes, orders, families, genera, and species) using different data normalization methods. Second, MiMed enables covariate-adjusted analysis to control for potential confounding factors (e.g. age and gender), which is essential to enhance the causality of the results, especially for observational studies. Third, MiMed enables a breadth of statistical inferences in both mediation effect estimation and significance testing. Fourth, MiMed provides flexible and easy-to-use data processing and analytic modules and creates nice graphical representations. Finally, MiMed employs ChatGPT to search for what has been known about the microbial taxa that are found to be significant mediators, using artificial intelligence technologies. For demonstration purposes, we applied MiMed to a study on the mediating roles of the oral microbiome in subgingival niches between e-cigarette smoking and gingival inflammation. MiMed is freely available on our web server (http://mimed.micloud.kr).},
}
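The central estimand such a platform automates, the indirect (mediated) effect, is often estimated as the product of two regression slopes with a bootstrap interval; the numpy sketch below illustrates that generic idea on simulated data, not MiMed's actual routines.

import numpy as np

rng = np.random.default_rng(0)
n = 300
x = rng.normal(size=n)                       # exposure (e.g., e-cigarette use)
m = 0.5 * x + rng.normal(size=n)             # mediator (e.g., a diversity index)
y = 0.4 * m + 0.1 * x + rng.normal(size=n)   # outcome (e.g., inflammation)

def indirect_effect(x, m, y):
    a = np.polyfit(x, m, 1)[0]                        # slope of M ~ X
    design = np.column_stack([m, x, np.ones_like(y)])
    b = np.linalg.lstsq(design, y, rcond=None)[0][0]  # slope of Y ~ M given X
    return a * b

boot = []
for _ in range(1000):
    idx = rng.integers(0, n, n)
    boot.append(indirect_effect(x[idx], m[idx], y[idx]))
lo, hi = np.percentile(boot, [2.5, 97.5])
print(f"indirect effect = {indirect_effect(x, m, y):.3f}, 95% CI [{lo:.3f}, {hi:.3f}]")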
@article {pmid37838111,
year = {2024},
author = {Li, W and Li, SM and Kang, MC and Xiong, X and Wang, P and Tao, LQ},
title = {Multi-characteristic tannic acid-reinforced polyacrylamide/sodium carboxymethyl cellulose ionic hydrogel strain sensor for human-machine interaction.},
journal = {International journal of biological macromolecules},
volume = {254},
number = {Pt 2},
pages = {127434},
doi = {10.1016/j.ijbiomac.2023.127434},
pmid = {37838111},
issn = {1879-0003},
mesh = {Humans ; *Carboxymethylcellulose Sodium ; Ions ; *Hydrogels ; Electric Conductivity ; },
abstract = {Big data and cloud computing are propelling research in human-computer interface within academia. However, the potential of wearable human-machine interaction (HMI) devices utilizing multiperformance ionic hydrogels remains largely unexplored. Here, we present a motion recognition-based HMI system that enhances movement training. We engineered dual-network PAM/CMC/TA (PCT) hydrogels by reinforcing polyacrylamide (PAM) and sodium carboxymethyl cellulose (CMC) polymers with tannic acid (TA). These hydrogels possess exceptional transparency, adhesion, and remodelling features. By combining an elastic PAM backbone with tunable amounts of CMC and TA, the PCT hydrogels achieve optimal electromechanical performance. As strain sensors, they demonstrate higher sensitivity (GF = 4.03), low detection limit (0.5 %), and good linearity (0.997). Furthermore, we developed a highly accurate (97.85 %) motion recognition system using machine learning and hydrogel-based wearable sensors. This system enables contactless real-time training monitoring and wireless control of trolley operations. Our research underscores the effectiveness of PCT hydrogels for real-time HMI, thus advancing next-generation HMI systems.},
}
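For context on the sensing figures quoted above: the gauge factor (GF) of a resistive strain sensor is conventionally defined as the relative resistance change per unit strain (standard background, not a formula reproduced from the paper),

\mathrm{GF} = \frac{\Delta R / R_{0}}{\varepsilon}

so GF = 4.03 means the relative resistance change is roughly four times the applied strain, and the 0.5 % figure is the smallest strain the sensor resolves.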
@article {pmid37837127,
year = {2023},
author = {Al-Bazzaz, H and Azam, M and Amayri, M and Bouguila, N},
title = {Unsupervised Mixture Models on the Edge for Smart Energy Consumption Segmentation with Feature Saliency.},
journal = {Sensors (Basel, Switzerland)},
volume = {23},
number = {19},
pages = {},
pmid = {37837127},
issn = {1424-8220},
abstract = {Smart meter datasets have recently transitioned from monthly intervals to one-second granularity, yielding invaluable insights for diverse metering functions. Clustering analysis, a fundamental data mining technique, is extensively applied to discern unique energy consumption patterns. However, the advent of high-resolution smart meter data brings forth formidable challenges, including non-Gaussian data distributions, unknown cluster counts, and varying feature importance within high-dimensional spaces. This article introduces an innovative learning framework integrating the expectation-maximization algorithm with the minimum message length criterion. This unified approach enables concurrent feature and model selection, finely tuned for the proposed bounded asymmetric generalized Gaussian mixture model with feature saliency. Our experiments aim to replicate an efficient smart meter data analysis scenario by incorporating three distinct feature extraction methods. We rigorously validate the clustering efficacy of our proposed algorithm against several state-of-the-art approaches, employing diverse performance metrics across synthetic and real smart meter datasets. The clusters that we identify effectively highlight variations in residential energy consumption, furnishing utility companies with actionable insights for targeted demand reduction efforts. Moreover, we demonstrate our method's robustness and real-world applicability by harnessing Concordia's High-Performance Computing infrastructure. This facilitates efficient energy pattern characterization, particularly within smart meter environments involving edge cloud computing. Finally, we emphasize that our proposed mixture model outperforms three other models in this paper's comparative study. We achieve superior performance compared to the non-bounded variant of the proposed mixture model by an average percentage improvement of 7.828%.},
}
@article {pmid37832430,
year = {2023},
author = {Schacherer, DP and Herrmann, MD and Clunie, DA and Höfener, H and Clifford, W and Longabaugh, WJR and Pieper, S and Kikinis, R and Fedorov, A and Homeyer, A},
title = {The NCI Imaging Data Commons as a platform for reproducible research in computational pathology.},
journal = {Computer methods and programs in biomedicine},
volume = {242},
number = {},
pages = {107839},
pmid = {37832430},
issn = {1872-7565},
support = {HHSN261201500003C/CA/NCI NIH HHS/United States ; HHSN261201500003I/CA/NCI NIH HHS/United States ; },
mesh = {Humans ; *Software ; Reproducibility of Results ; Cloud Computing ; Diagnostic Imaging ; *Lung Neoplasms/diagnostic imaging ; },
abstract = {BACKGROUND AND OBJECTIVES: Reproducibility is a major challenge in developing machine learning (ML)-based solutions in computational pathology (CompPath). The NCI Imaging Data Commons (IDC) provides >120 cancer image collections according to the FAIR principles and is designed to be used with cloud ML services. Here, we explore its potential to facilitate reproducibility in CompPath research.
METHODS: Using the IDC, we implemented two experiments in which a representative ML-based method for classifying lung tumor tissue was trained and/or evaluated on different datasets. To assess reproducibility, the experiments were run multiple times with separate but identically configured instances of common ML services.
RESULTS: The results of different runs of the same experiment were reproducible to a large extent. However, we observed occasional, small variations in AUC values, indicating a practical limit to reproducibility.
CONCLUSIONS: We conclude that the IDC facilitates approaching the reproducibility limit of CompPath research (i) by enabling researchers to reuse exactly the same datasets and (ii) by integrating with cloud ML services so that experiments can be run in identically configured computing environments.},
}
@article {pmid37831665,
year = {2023},
author = {Saif, Y and Yusof, Y and Rus, AZM and Ghaleb, AM and Mejjaouli, S and Al-Alimi, S and Didane, DH and Latif, K and Abdul Kadir, AZ and Alshalabi, H and Sadeq, S},
title = {Implementing circularity measurements in industry 4.0-based manufacturing metrology using MQTT protocol and Open CV: A case study.},
journal = {PloS one},
volume = {18},
number = {10},
pages = {e0292814},
pmid = {37831665},
issn = {1932-6203},
mesh = {*Commerce ; *Industry ; Algorithms ; Cloud Computing ; Communication ; },
abstract = {In the context of Industry 4.0, manufacturing metrology is crucial for inspecting and measuring machines. Internet of Things (IoT) technology enables seamless communication between advanced industrial devices through local and cloud computing servers. This study investigates the use of the MQTT protocol to enhance the performance of circularity measurement data transmission between cloud servers and round-hole data sources through OpenCV. Accurate inspection of circular characteristics, particularly roundness errors, is vital for lubricant distribution, assemblies, and rotational force innovation. Circularity measurement techniques employ algorithms like the minimum zone circle tolerance algorithm. Vision inspection systems, utilizing image processing techniques, can promptly and accurately detect quality concerns by analyzing the model's surface through circular dimension analysis. This involves sending the model's image to a computer, which employs techniques such as the Hough transform, edge detection, and contour analysis to identify circular features and extract relevant parameters. This method is utilized in the camera industry and component assembly. To assess performance, a comparative experiment was conducted between the non-contact-based 3SMVI system and the contact-based CMM system widely used in various industries for roundness evaluation. The CMM technique is known for its high precision but is time-consuming. Experimental results indicated a variation of 5 to 9.6 micrometers between the two methods. It is suggested that using a high-resolution camera and appropriate lighting conditions can further enhance result precision.},
}
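A compact sketch of the pipeline outlined above, detecting circular features with OpenCV and publishing the measured parameters over MQTT; the image file, broker address, topic, and Hough parameters are all assumptions.

import json
import cv2
import paho.mqtt.client as mqtt  # assumes `pip install opencv-python paho-mqtt`

img = cv2.imread("part.png", cv2.IMREAD_GRAYSCALE)  # hypothetical inspection image
img = cv2.medianBlur(img, 5)
circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, dp=1.2, minDist=40,
                           param1=100, param2=30, minRadius=5, maxRadius=80)

client = mqtt.Client()  # paho-mqtt 1.x style; 2.x also takes a CallbackAPIVersion
client.connect("broker.example.org", 1883)  # hypothetical broker
if circles is not None:
    for cx, cy, r in circles[0]:
        payload = json.dumps({"cx": float(cx), "cy": float(cy), "radius": float(r)})
        client.publish("metrology/circularity", payload)  # assumed topic
client.disconnect()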
@article {pmid37829921,
year = {2023},
author = {Intelligence And Neuroscience, C},
title = {Retracted: An Optimized Decision Method for Smart Teaching Effect Based on Cloud Computing and Deep Learning.},
journal = {Computational intelligence and neuroscience},
volume = {2023},
number = {},
pages = {9862737},
pmid = {37829921},
issn = {1687-5273},
abstract = {[This retracts the article DOI: 10.1155/2022/6907172.].},
}
@article {pmid37829877,
year = {2023},
author = {Intelligence And Neuroscience, C},
title = {Retracted: The Construction of Big Data Computational Intelligence System for E-Government in Cloud Computing Environment and Its Development Impact.},
journal = {Computational intelligence and neuroscience},
volume = {2023},
number = {},
pages = {9873976},
pmid = {37829877},
issn = {1687-5273},
abstract = {[This retracts the article DOI: 10.1155/2022/7295060.].},
}
@article {pmid37829372,
year = {2023},
author = {Healthcare Engineering, JO},
title = {Retracted: Construction of a Health Management Model for Early Identification of Ischaemic Stroke in Cloud Computing.},
journal = {Journal of healthcare engineering},
volume = {2023},
number = {},
pages = {9820647},
pmid = {37829372},
issn = {2040-2309},
abstract = {[This retracts the article DOI: 10.1155/2022/1018056.].},
}
@article {pmid37819909,
year = {2023},
author = {Wang, TY and Cui, J and Fan, Y},
title = {A wearable-based sports health monitoring system using CNN and LSTM with self-attentions.},
journal = {PloS one},
volume = {18},
number = {10},
pages = {e0292012},
pmid = {37819909},
issn = {1932-6203},
mesh = {Humans ; Athletes ; *Athletic Performance ; *Cell Phone ; Neural Networks, Computer ; *Wearable Electronic Devices ; },
abstract = {Sports performance and health monitoring are essential for athletes to maintain peak performance and avoid potential injuries. In this paper, we propose a sports health monitoring system that utilizes wearable devices, cloud computing, and deep learning to monitor the health status of sports persons. The system consists of a wearable device that collects various physiological parameters and a cloud server that contains a deep learning model to predict the sportsperson's health status. The proposed model combines a Convolutional Neural Network (CNN), Long Short-Term Memory (LSTM), and self-attention mechanisms. The model is trained on a large dataset of sports persons' physiological data and achieves an accuracy of 93%, specificity of 94%, precision of 95%, and an F1 score of 92%. The sports person can access the cloud server using their mobile phone to receive a report of their health status, which can be used to monitor their performance and make any necessary adjustments to their training or competition schedule.},
}
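A toy Keras sketch of the CNN + LSTM + self-attention composition the abstract names; the window length, channel count, and layer sizes are assumptions, not the paper's architecture.

import tensorflow as tf
from tensorflow.keras import layers, models

T, C = 128, 6  # assumed window length and number of physiological channels

inp = layers.Input(shape=(T, C))
x = layers.Conv1D(32, 5, padding="same", activation="relu")(inp)  # local features
x = layers.MaxPooling1D(2)(x)
x = layers.LSTM(64, return_sequences=True)(x)                     # temporal dynamics
x = layers.Attention()([x, x])                                    # self-attention
x = layers.GlobalAveragePooling1D()(x)
out = layers.Dense(1, activation="sigmoid")(x)                    # health status

model = models.Model(inp, out)
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
model.summary()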
@article {pmid37819832,
year = {2023},
author = {Ruiz-Zafra, A and Precioso, D and Salvador, B and Lubian-Lopez, SP and Jimenez, J and Benavente-Fernandez, I and Pigueiras, J and Gomez-Ullate, D and Gontard, LC},
title = {NeoCam: An Edge-Cloud Platform for Non-Invasive Real-Time Monitoring in Neonatal Intensive Care Units.},
journal = {IEEE journal of biomedical and health informatics},
volume = {27},
number = {6},
pages = {2614-2624},
doi = {10.1109/JBHI.2023.3240245},
pmid = {37819832},
issn = {2168-2208},
mesh = {Infant, Newborn ; Infant ; Humans ; *Intensive Care Units, Neonatal ; *Cloud Computing ; Infant, Premature ; Software ; Algorithms ; },
abstract = {In this work we introduce NeoCam, an open-source hardware-software platform for video-based monitoring of preterm infants in Neonatal Intensive Care Units (NICUs). NeoCam includes an edge computing device that performs video acquisition and processing in real time. Compared to other proposed solutions, it has the advantage of handling data more efficiently by performing most of the processing on the device, including proper anonymisation for better compliance with privacy regulations. In addition, it allows various video analysis tasks of clinical interest to be performed in parallel at speeds of between 20 and 30 frames per second. We introduce algorithms to measure, without contact, the breathing rate, motor activity, body pose and emotional status of the infants. For breathing rate, our system shows good agreement with existing methods provided there is sufficient light and proper imaging conditions. Models for motor activity and stress detection are, to the best of our knowledge, new. NeoCam has been tested on preterm infants in the NICU of the University Hospital Puerta del Mar (Cádiz, Spain), and we report the lessons learned from this trial.},
}
@article {pmid37819321,
year = {2023},
author = {Machado, IA and Lacerda, MAS and Martinez-Blanco, MDR and Serrano, A and García-Baonza, R and Ortiz-Rodriguez, JM},
title = {Chameleon: a cloud computing Industry 4.0 neutron spectrum unfolding code.},
journal = {Radiation protection dosimetry},
volume = {199},
number = {15-16},
pages = {1877-1882},
doi = {10.1093/rpd/ncac298},
pmid = {37819321},
issn = {1742-3406},
support = {APQ-01018-21//Fundação de Amparo à Pesquisa do Estado de Minas Gerais/ ; //Conselho Nacional de Desenvolvimento Científico e Tecnológico/ ; //OMADS Co./ ; },
mesh = {*Cloud Computing ; *Algorithms ; Neural Networks, Computer ; Internet ; Neutrons ; },
abstract = {This work presents Chameleon, a cloud computing (CC) Industry 4.0 (I4) neutron spectrum unfolding code. The code was written in the Python programming language using the Streamlit® framework, and it is executed in the cloud, as an I4 CC technology, over the internet from mobile devices with internet connectivity and a web browser. In its first version, as a proof of concept, the SPUNIT algorithm was implemented. The main functionalities and the preliminary tests performed to validate the code are presented. Chameleon solves the neutron spectrum unfolding problem and is easy, friendly, and intuitive to use. It can be applied with success in various workplaces. More validation tests are in progress. Future implementations will include improving the graphical user interface, inserting other algorithms, such as GRAVEL, MAXED, and neural networks, and implementing an algorithm to estimate uncertainties in the calculated integral quantities.},
}
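A minimal Streamlit sketch of the interaction pattern described (upload detector readings in a browser, compute in the cloud, plot a spectrum); the flat normalization below is a placeholder, not the SPUNIT algorithm.

# Run with: streamlit run chameleon_sketch.py
import numpy as np
import pandas as pd
import streamlit as st

st.title("Neutron spectrum unfolding (sketch)")
uploaded = st.file_uploader("Detector count rates (CSV)", type="csv")
if uploaded is not None:
    counts = pd.read_csv(uploaded).iloc[:, 0].to_numpy(dtype=float)
    st.write(f"{counts.size} detector readings loaded")
    energies = np.logspace(-9, 2, counts.size)  # assumed energy binning (MeV)
    spectrum = counts / counts.sum()            # placeholder for real unfolding
    st.line_chart(pd.DataFrame({"fluence (a.u.)": spectrum}, index=energies))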
@article {pmid37816030,
year = {2023},
author = {, },
title = {Retraction: Relationship between employees' career maturity and career planning of edge computing and cloud collaboration from the perspective of organizational behavior.},
journal = {PloS one},
volume = {18},
number = {10},
pages = {e0292209},
pmid = {37816030},
issn = {1932-6203},
}
@article {pmid37809681,
year = {2023},
author = {Chen, C and Yang, X and Jiang, S and Liu, Z},
title = {Mapping and spatiotemporal dynamics of land-use and land-cover change based on the Google Earth Engine cloud platform from Landsat imagery: A case study of Zhoushan Island, China.},
journal = {Heliyon},
volume = {9},
number = {9},
pages = {e19654},
pmid = {37809681},
issn = {2405-8440},
abstract = {Land resources are an essential foundation for socioeconomic development. Island land resources are limited, their type changes are particularly frequent, and the environment is fragile. Therefore, large-scale, long-term, and high-accuracy land-use classification and spatiotemporal characteristic analysis are of great significance for the sustainable development of islands. Based on the advantages of remote sensing indices and principal component analysis in accurate classification, and taking the Zhoushan Archipelago, China, as the study area, long-term satellite remote sensing data were used in this work to perform land-use classification and spatiotemporal characteristic analysis. The classification results showed that the land-use types could be accurately classified, with an overall accuracy greater than 94% and a Kappa coefficient greater than 0.93. The results of the spatiotemporal characteristic analysis showed that the built-up land and forest land areas increased by 90.00 km[2] and 36.83 km[2], respectively, while the area of cropland/grassland decreased by 69.77 km[2]. The areas of the water bodies, tidal flats, and bare land exhibited slight change trends. The spatial coverage of Zhoushan Island continuously expanded toward the coast, encroaching on nearby sea areas and tidal flats. Cropland/grassland was the largest transferred-out type, at up to 108.94 km[2], and built-up land was the largest transferred-in type, at up to 73.31 km[2]. This study provides a data basis and technical support for the scientific management of land resources.},
}
@article {pmid37804778,
year = {2023},
author = {Lakhan, A and Mohammed, MA and Abdulkareem, KH and Hamouda, H and Alyahya, S},
title = {Autism Spectrum Disorder detection framework for children based on federated learning integrated CNN-LSTM.},
journal = {Computers in biology and medicine},
volume = {166},
number = {},
pages = {107539},
doi = {10.1016/j.compbiomed.2023.107539},
pmid = {37804778},
issn = {1879-0534},
abstract = {The incidence of Autism Spectrum Disorder (ASD) among children, attributed to genetic and environmental factors, has been increasing daily. ASD is a non-curable neurodevelopmental disorder that affects children's communication, behavior, social interaction, and learning skills. While machine learning has been employed for ASD detection in children, existing ASD frameworks offer limited services to monitor and improve the health of ASD patients. This paper presents a complex and efficient ASD framework with comprehensive services to enhance the results of existing ASD frameworks. Our proposed approach is the Federated Learning-enabled CNN-LSTM (FCNN-LSTM) scheme, designed for ASD detection in children using multimodal datasets. The ASD framework is built in a distributed computing environment where different ASD laboratories are connected to the central hospital. The FCNN-LSTM scheme enables local laboratories to train and validate different datasets, including the Ages and Stages Questionnaires (ASQ), Communication and Symbolic Behavior Scales (CSBS), Parents' Evaluation of Developmental Status (PEDS), Modified Checklist for Autism in Toddlers (M-CHAT), and Screening Tool for Autism in Toddlers and Children (STAT) datasets, on different computing laboratories. To ensure the security of patient data, we have implemented a security mechanism based on the Advanced Encryption Standard (AES) within the federated learning environment. This mechanism allows all laboratories to offload and download data securely. We integrate all trained datasets into the aggregated nodes and make the final decision for ASD patients based on the decision process tree. Additionally, we have designed various Internet of Things (IoT) applications to improve the efficiency of ASD patients and achieve more optimal learning results. Simulation results demonstrate that our proposed framework achieves an ASD detection accuracy of approximately 99% compared to all existing ASD frameworks.},
}
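The AES-based offload protection mentioned above can be sketched with the cryptography package's AES-GCM primitive; key distribution and transport are out of scope here, and the model update is a placeholder array.

import os
import numpy as np
from cryptography.hazmat.primitives.ciphers.aead import AESGCM  # pip install cryptography

key = AESGCM.generate_key(bit_length=256)  # assumed pre-shared between lab and hub
aead = AESGCM(key)

update = np.random.rand(10).astype(np.float32).tobytes()  # placeholder weights
nonce = os.urandom(12)                                    # 96-bit nonce, never reused
ciphertext = aead.encrypt(nonce, update, b"lab-7")        # lab ID as associated data

# Aggregator side: authenticate, decrypt, and restore the weight vector.
weights = np.frombuffer(aead.decrypt(nonce, ciphertext, b"lab-7"), dtype=np.float32)
print(weights.shape)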
@article {pmid37794709,
year = {2024},
author = {Lee, J and Kim, H and Kron, F},
title = {Virtual education strategies in the context of sustainable health care and medical education: A topic modelling analysis of four decades of research.},
journal = {Medical education},
volume = {58},
number = {1},
pages = {47-62},
doi = {10.1111/medu.15202},
pmid = {37794709},
issn = {1365-2923},
support = {NRF-2021R1F1A1056465//Ministry of Science & ICT/ ; },
mesh = {Humans ; Artificial Intelligence ; *Education, Medical ; Delivery of Health Care ; Learning ; *Virtual Reality ; },
abstract = {BACKGROUND: The growing importance of sustainability has led to the current literature being saturated with studies on the necessity of, and suggested topics for, education for sustainable health care (ESH). Even so, ESH implementation has been hindered by educator unpreparedness and resource scarcity. A potential resolution lies in virtual education. However, research on the strategies needed for successfully implementing virtual education in the context of sustainable health care and medical education is sparse; this study aims to fill the gap.
METHODS: Topic modelling, a computational text-mining method for analysing recurring patterns of co-occurring word clusters to reveal key topics prevalent across the texts, was used to examine how sustainability was addressed in research in medicine, medical education, and virtual education. A total of 17 631 studies, retrieved from Web of Science, Scopus and PubMed, were analysed.
RESULTS: Sustainability-related topics within health care, medical education and virtual education provided systematic implications for Sustainable Virtual Medical Education (SVME)-ESH via virtual platforms in a sustainable way. Analyses of keywords, phrases, topics and their associated networks indicate that SVME should address the three pillars of environmental, social and economic sustainability and the medical practices to uphold them; employ different technologies and methods including simulations, virtual reality (VR), artificial intelligence (AI), cloud computing, and distance learning; and implement strategies for collaborative development, persuasive diffusion and quality assurance.
CONCLUSIONS: This research suggests that sustainable strategies in virtual education for ESH require a systems approach, encompassing components such as learning content and objectives, evaluation, targeted learners, media, methods and strategies. The advancement of SVME necessitates that medical educators and researchers play a central and bridging role, guiding both the fields of sustainable health care and medical education in the development and implementation of SVME. In this way, they can prepare future physicians to address sustainability issues that impact patient care.},
}
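A compact sklearn sketch of the topic-modelling step described above (LDA over a document-term matrix); the three-document corpus and topic count are placeholders for the study's 17,631 abstracts.

from sklearn.feature_extraction.text import CountVectorizer
from sklearn.decomposition import LatentDirichletAllocation

docs = [
    "virtual reality simulation for sustainable healthcare education",
    "carbon footprint of hospital supply chains and waste",
    "distance learning platforms and cloud computing in medical schools",
]  # placeholder corpus

vec = CountVectorizer(stop_words="english")
X = vec.fit_transform(docs)
lda = LatentDirichletAllocation(n_components=2, random_state=0).fit(X)

terms = vec.get_feature_names_out()
for k, weights in enumerate(lda.components_):
    top = [terms[i] for i in weights.argsort()[-5:][::-1]]  # top words per topic
    print(f"topic {k}:", ", ".join(top))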
@article {pmid37773456,
year = {2023},
author = {Buyukcavus, MH and Aydogan Akgun, F and Solak, S and Ucar, MHB and Fındık, Y and Baykul, T},
title = {Facial recognition by cloud-based APIs following surgically assisted rapid maxillary expansion.},
journal = {Journal of orofacial orthopedics = Fortschritte der Kieferorthopadie : Organ/official journal Deutsche Gesellschaft fur Kieferorthopadie},
volume = {},
number = {},
pages = {},
pmid = {37773456},
issn = {1615-6714},
abstract = {INTRODUCTION: This study aimed to investigate whether the facial soft tissue changes of individuals who had undergone surgically assisted rapid maxillary expansion (SARME) would be detected by three different well-known facial biometric recognition applications.
METHODS: To calculate similarity scores, the pre- and postsurgical photographs of 22 patients who had undergone SARME treatment were examined using three prominent cloud computing-based facial recognition application programming interfaces (APIs): AWS Rekognition (Amazon Web Services, Seattle, WA, USA), Microsoft Azure Cognitive (Microsoft, Redmond, WA, USA), and Face++ (Megvii, Beijing, China). The pre- and post-SARME photographs of the patients (relaxed, smiling, profile, and semiprofile) were used to calculate similarity scores using the APIs. Friedman's two-way analysis of variance and the Wilcoxon signed-rank test were used to compare the similarity scores obtained from the photographs of the different aspects of the face before and after surgery using the different programs. The relationship between measurements on lateral and posteroanterior cephalograms and the similarity scores was evaluated using the Spearman rank correlation.
RESULTS: The similarity scores were found to be lower with the Face++ program. When looking at the photo types, it was observed that the similarity scores were higher in the smiling photos. A statistically significant difference in the similarity scores (P < 0.05) was found between the relaxed and smiling photographs using the different programs. The correlation between the cephalometric and posteroanterior measurements and the similarity scores was not significant (P > 0.05).
CONCLUSION: SARME treatment caused a significant change in the similarity scores calculated with the help of three different facial recognition programs. The highest similarity scores were found in the smiling photographs, whereas the lowest scores were found in the profile photographs.},
}
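The paired pre/post comparison above maps onto the Wilcoxon signed-rank test; this scipy sketch uses fabricated similarity scores for 22 patients, not the study's data.

import numpy as np
from scipy.stats import wilcoxon

rng = np.random.default_rng(1)
pre = rng.uniform(85, 99, size=22)         # fabricated pre-SARME similarity scores
post = pre - rng.uniform(0.5, 8, size=22)  # fabricated post-SARME scores

stat, p = wilcoxon(pre, post)
print(f"Wilcoxon W = {stat:.1f}, p = {p:.4g}")
if p < 0.05:
    print("similarity scores differ significantly before vs. after surgery")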
@article {pmid37766066,
year = {2023},
author = {Mangalampalli, S and Karri, GR and Gupta, A and Chakrabarti, T and Nallamala, SH and Chakrabarti, P and Unhelkar, B and Margala, M},
title = {Fault-Tolerant Trust-Based Task Scheduling Algorithm Using Harris Hawks Optimization in Cloud Computing.},
journal = {Sensors (Basel, Switzerland)},
volume = {23},
number = {18},
pages = {},
pmid = {37766066},
issn = {1424-8220},
abstract = {Cloud computing is a distributed computing model which renders services to cloud users around the world. These services need to be rendered to customers with high availability and fault tolerance, but there are still chances of single-point failures in the cloud paradigm, and one challenge for cloud providers is scheduling tasks effectively to avoid failures and to earn users' trust in their cloud services. This research proposes a fault-tolerant trust-based task scheduling algorithm in which we carefully schedule tasks within precise virtual machines by calculating priorities for tasks and VMs. Harris hawks optimization was used as the methodology to design our scheduler. We used CloudSim as the simulation tool for our entire experiment. For the entire simulation, we used synthetically fabricated data with different distributions and real-time supercomputer worklogs. Finally, we evaluated the proposed approach (FTTATS) against state-of-the-art approaches, i.e., ACO, PSO, and GA. From the simulation results, our proposed FTTATS greatly reduces the makespan relative to the ACO, PSO, and GA algorithms, by 24.3%, 33.31%, and 29.03%, respectively. The rate of failures was reduced relative to ACO, PSO, and GA by 65.31%, 65.4%, and 60.44%, respectively. Trust-based SLA parameters also improved: availability improved over ACO, PSO, and GA by 33.38%, 35.71%, and 28.24%, respectively; the success rate improved over ACO, PSO, and GA by 52.69%, 39.41%, and 38.45%, respectively; and turnaround efficiency improved over ACO, PSO, and GA by 51.8%, 47.2%, and 33.6%, respectively.},
}
@article {pmid37765972,
year = {2023},
author = {Emish, M and Kelani, Z and Hassani, M and Young, SD},
title = {A Mobile Health Application Using Geolocation for Behavioral Activity Tracking.},
journal = {Sensors (Basel, Switzerland)},
volume = {23},
number = {18},
pages = {},
pmid = {37765972},
issn = {1424-8220},
support = {n/a/DA/NIDA NIH HHS/United States ; n/a/AT/NCCIH NIH HHS/United States ; },
mesh = {*Mobile Applications ; Smartphone ; Advertising ; Algorithms ; *Blockchain ; },
abstract = {The increasing popularity of mHealth presents an opportunity for collecting rich datasets using mobile phone applications (apps). Our health-monitoring mobile application uses motion detection to track an individual's physical activity and location. The data collected are used to improve health outcomes, such as reducing the risk of chronic diseases and promoting healthier lifestyles, by analyzing physical activity patterns. Using smartphone motion detection sensors and GPS receivers, we implemented an energy-efficient tracking algorithm that captures user locations whenever they are in motion. To ensure security and efficiency in data collection and storage, encryption algorithms are used with a serverless and scalable cloud storage design. The database schema is designed around the Mobile Advertising ID (MAID) as a unique identifier for each device, allowing for accurate tracking and high data quality. Our application uses Google's Activity Recognition Application Programming Interface (API) on Android OS, or geofencing and motion sensors on iOS, to track most smartphones available. In addition, our app leverages blockchain and traditional payments to streamline compensation and has an intuitive user interface to encourage participation in research. The mobile tracking app was tested for 20 days on an iPhone 14 Pro Max; it accurately captured location during movement, promptly resumed tracking after inactivity periods, and consumed a low percentage of battery life while running in the background.},
}
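As a rough illustration of the energy-saving pattern this abstract describes (sample GPS only while the accelerometer reports motion), here is a hedged sketch with simulated sensors; a real app would call the platform APIs (Activity Recognition on Android, geofencing and motion sensors on iOS) instead of these stand-ins.

```python
import random

GRAVITY = 9.81
MOTION_THRESHOLD = 1.5      # m/s^2 deviation from gravity that counts as motion
IDLE_LIMIT = 5              # consecutive still readings before tracking pauses

def read_accel_magnitude():
    """Stand-in for a platform sensor API; jitter simulates movement."""
    return GRAVITY + random.uniform(-3, 3)

def read_gps():
    """Stand-in for a platform location API."""
    return (37.0 + random.random() * 0.01, -122.0 + random.random() * 0.01)

def track(n_ticks=50):
    recorded, idle, tracking = [], 0, False
    for _ in range(n_ticks):
        moving = abs(read_accel_magnitude() - GRAVITY) > MOTION_THRESHOLD
        if moving:
            idle, tracking = 0, True
        else:
            idle += 1
            if idle >= IDLE_LIMIT:
                tracking = False   # sleep the GPS to save battery
        if tracking:
            recorded.append(read_gps())
    return recorded

print(len(track()), "location fixes recorded")
```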
@article {pmid37765912,
year = {2023},
author = {Lilhore, UK and Manoharan, P and Simaiya, S and Alroobaea, R and Alsafyani, M and Baqasah, AM and Dalal, S and Sharma, A and Raahemifar, K},
title = {HIDM: Hybrid Intrusion Detection Model for Industry 4.0 Networks Using an Optimized CNN-LSTM with Transfer Learning.},
journal = {Sensors (Basel, Switzerland)},
volume = {23},
number = {18},
pages = {},
pmid = {37765912},
issn = {1424-8220},
abstract = {Industrial automation systems are undergoing a revolutionary change with the use of Internet-connected operating equipment and the adoption of cutting-edge technologies such as AI, IoT, cloud computing, and deep learning within business organizations. These innovative solutions are facilitating Industry 4.0. However, these technological advances and the quality solutions they enable also introduce unique security challenges whose consequences need to be identified. This research presents a hybrid intrusion detection model (HIDM) that uses OCNN-LSTM and transfer learning (TL) for Industry 4.0. The proposed model utilizes an optimized CNN (OCNN), whose parameters are fine-tuned via the grey wolf optimizer (GWO) method, which helps to improve the model's prediction accuracy. The transfer learning model helps to train the model and transfers the knowledge to the OCNN-LSTM model. The TL method enhances the training process by acquiring the necessary knowledge from the OCNN-LSTM model and utilizing it in each subsequent cycle, which helps to improve detection accuracy. To measure the performance of the proposed model, we conducted a multi-class classification analysis on various online industrial IDS datasets, i.e., ToN-IoT and UNSW-NB15. We conducted two experiments for these two datasets, and various performance-measuring parameters, i.e., precision, F-measure, recall, accuracy, and detection rate, were calculated for the OCNN-LSTM model with and without TL, and also for the CNN and LSTM models. For the ToN-IoT dataset, the OCNN-LSTM with TL model achieved a precision of 92.7%; for the UNSW-NB15 dataset, the precision was 94.25%, which is higher than OCNN-LSTM without TL.},
}
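A minimal CNN-LSTM skeleton in Keras may help make the hybrid architecture concrete. This is not the paper's OCNN-LSTM: layer sizes are illustrative, the grey wolf optimizer tuning and transfer learning steps are omitted, and the data below are synthetic stand-ins for ToN-IoT or UNSW-NB15 records.

```python
import numpy as np
from tensorflow import keras
from tensorflow.keras import layers

n_features, n_classes = 42, 10       # e.g., flow features, attack classes

model = keras.Sequential([
    keras.Input(shape=(n_features, 1)),
    layers.Conv1D(64, kernel_size=3, activation="relu"),   # local patterns
    layers.MaxPooling1D(2),
    layers.Conv1D(128, kernel_size=3, activation="relu"),
    layers.MaxPooling1D(2),
    layers.LSTM(64),                                       # sequence context
    layers.Dense(n_classes, activation="softmax"),
])
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy",
              metrics=["accuracy"])

# Synthetic stand-in data; in practice load ToN-IoT or UNSW-NB15 records.
X = np.random.rand(256, n_features, 1).astype("float32")
y = np.random.randint(0, n_classes, size=256)
model.fit(X, y, epochs=1, batch_size=32, verbose=0)
```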
@article {pmid37765893,
year = {2023},
author = {Li, M and Zhang, J and Lin, J and Chen, Z and Zheng, X},
title = {FireFace: Leveraging Internal Function Features for Configuration of Functions on Serverless Edge Platforms.},
journal = {Sensors (Basel, Switzerland)},
volume = {23},
number = {18},
pages = {},
pmid = {37765893},
issn = {1424-8220},
support = {62072108//the National Natural Science Foundation of China/ ; 83021094//the Funds for Scientific Research of Fujian Provincial Department of Finance/ ; },
abstract = {The emerging serverless computing has become a captivating paradigm for deploying cloud applications, alleviating developers' concerns about infrastructure resource management by configuring necessary parameters such as latency and memory constraints. Existing resource configuration solutions for cloud-based serverless applications can be broadly classified into modeling based on historical data or a combination of sparse measurements and interpolation/modeling. In pursuit of service response and conserving network bandwidth, platforms have progressively expanded from the traditional cloud to the edge. Compared to cloud platforms, serverless edge platforms often lead to more running overhead due to their limited resources, resulting in undesirable financial costs for developers when using the existing solutions. Meanwhile, it is extremely challenging to handle the heterogeneity of edge platforms, characterized by distinct pricing owing to their varying resource preferences. To tackle these challenges, we propose an adaptive and efficient approach called FireFace, consisting of prediction and decision modules. The prediction module extracts the internal features of all functions within the serverless application and uses this information to predict the execution time of the functions under specific configuration schemes. Based on the prediction module, the decision module analyzes the environment information and uses the Adaptive Particle Swarm Optimization algorithm and Genetic Algorithm Operator (APSO-GA) algorithm to select the most suitable configuration plan for each function, including CPU, memory, and edge platforms. In this way, it is possible to effectively minimize the financial overhead while fulfilling the Service Level Objectives (SLOs). Extensive experimental results show that our prediction model obtains optimal results under all three metrics, and the prediction error rate for real-world serverless applications is in the range of 4.25∼9.51%. Our approach can find the optimal resource configuration scheme for each application, which saves 7.2∼44.8% on average compared to other classic algorithms. Moreover, FireFace exhibits rapid adaptability, efficiently adjusting resource allocation schemes in response to dynamic environments.},
}
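The decision step can be pictured as a constrained search over configuration tuples. The sketch below uses a stub runtime predictor and exhaustive search instead of FireFace's learned model and APSO-GA; all prices, resource grids, and the SLO are invented.

```python
# Toy decision step: pick the cheapest (CPU, memory, platform) combination
# whose predicted runtime meets the SLO. Exhaustive search is feasible only
# for small configuration spaces; the paper uses APSO-GA instead.
from itertools import product

CPUS = [0.5, 1.0, 2.0]                   # vCPUs
MEMS = [128, 256, 512, 1024]             # MB
EDGES = {"edge-a": 1.0, "edge-b": 1.6}   # per-unit price factors (illustrative)

def predicted_runtime(cpu, mem):
    """Stub for the learned predictor: more resources -> faster, with diminishing returns."""
    return 2.0 / cpu + 150.0 / mem

def price(cpu, mem, platform, runtime):
    return EDGES[platform] * (cpu + mem / 1024) * runtime

def configure(slo_seconds):
    feasible = []
    for cpu, mem, plat in product(CPUS, MEMS, EDGES):
        rt = predicted_runtime(cpu, mem)
        if rt <= slo_seconds:
            feasible.append((price(cpu, mem, plat, rt), cpu, mem, plat))
    return min(feasible) if feasible else None

print(configure(slo_seconds=3.0))
```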
@article {pmid37765859,
year = {2023},
author = {Yang, D and Liu, Z and Wei, S},
title = {Interactive Learning for Network Anomaly Monitoring and Detection with Human Guidance in the Loop.},
journal = {Sensors (Basel, Switzerland)},
volume = {23},
number = {18},
pages = {},
pmid = {37765859},
issn = {1424-8220},
abstract = {With the advancement of big data and cloud computing technology, we have witnessed tremendous developments in applying intelligent techniques to network operation and management. However, learning- and data-based solutions for network operation and maintenance cannot, on their own, effectively adapt to the dynamic security situation or satisfy administrators' expectations. Anomaly detection in time-series monitoring indicators has been a major challenge for network administrative personnel. Monitored indicators in network operations are characterized by multiple instances with high dimensions and fluctuating time-series features, and they depend on system resource deployment and variations in the business environment. Hence, there is a growing consensus that conducting anomaly detection with machine intelligence under the guidance of operation and maintenance personnel is more effective than relying solely on learning and modeling. This paper models the anomaly detection task as a Markov Decision Process and adopts the Double Deep Q-Network algorithm to train an anomaly detection agent, in which a multidimensional temporal convolution network serves as the principal structure of the Q network, and interactive guidance information from the operation and maintenance personnel is introduced into the procedure to facilitate model convergence. Experimental results on the SMD dataset indicate that the proposed modeling and detection method achieves higher precision and recall rates than other learning-based methods. Our method achieves model optimization through continuous human-computer interaction, which guarantees a faster and more consistent model training procedure and convergence.},
}
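The core of this approach is the Double-DQN target, which selects the next action with the online network but evaluates it with the target network. A minimal PyTorch update step is sketched below, with a small MLP standing in for the paper's multidimensional temporal convolution Q-network and synthetic tensors standing in for SMD monitoring windows.

```python
import torch
import torch.nn as nn

obs_dim, n_actions, gamma = 16, 2, 0.99   # binary "anomaly / normal" actions

q_online = nn.Sequential(nn.Linear(obs_dim, 64), nn.ReLU(), nn.Linear(64, n_actions))
q_target = nn.Sequential(nn.Linear(obs_dim, 64), nn.ReLU(), nn.Linear(64, n_actions))
q_target.load_state_dict(q_online.state_dict())
opt = torch.optim.Adam(q_online.parameters(), lr=1e-3)

def ddqn_update(s, a, r, s_next, done):
    with torch.no_grad():
        best_next = q_online(s_next).argmax(dim=1, keepdim=True)     # select: online net
        target_q = q_target(s_next).gather(1, best_next).squeeze(1)  # evaluate: target net
        y = r + gamma * (1 - done) * target_q
    q = q_online(s).gather(1, a.unsqueeze(1)).squeeze(1)
    loss = nn.functional.smooth_l1_loss(q, y)
    opt.zero_grad(); loss.backward(); opt.step()
    return loss.item()

# One synthetic batch; real rewards would mix detection reward with the
# corrections supplied by operations staff in the loop.
B = 32
loss = ddqn_update(torch.randn(B, obs_dim), torch.randint(0, n_actions, (B,)),
                   torch.randn(B), torch.randn(B, obs_dim), torch.zeros(B))
print(f"batch loss: {loss:.4f}")
```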
@article {pmid37765801,
year = {2023},
author = {Canonico, M and Desimoni, F and Ferrero, A and Grassi, PA and Irwin, C and Campani, D and Dal Molin, A and Panella, M and Magistrelli, L},
title = {Gait Monitoring and Analysis: A Mathematical Approach.},
journal = {Sensors (Basel, Switzerland)},
volume = {23},
number = {18},
pages = {},
pmid = {37765801},
issn = {1424-8220},
abstract = {Gait abnormalities are common in the elderly and in individuals diagnosed with Parkinson's disease, often leading to reduced mobility and increased fall risk. Monitoring and assessing gait patterns in these populations play a crucial role in understanding disease progression, detecting motor impairments early, and developing personalized rehabilitation strategies. In particular, by identifying gait irregularities at an early stage, healthcare professionals can implement timely interventions and personalized therapeutic approaches, potentially delaying the onset of severe motor symptoms and improving overall patient outcomes. In this paper, we studied older adults affected by chronic diseases and/or Parkinson's disease by monitoring their gait with wearable devices that can accurately detect a person's movements. About 50 people were involved in the trial (20 with Parkinson's disease and 30 with chronic diseases), each wearing our device for at least 6 months. During the experiment, each device collected 25 accelerometer samples per second. By analyzing those data, we propose a metric for "gait quality" based on an entropy measure obtained by applying the Fourier transform.},
}
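One plausible concrete reading of the entropy-of-Fourier-transform metric is the normalized Shannon entropy of the signal's power spectrum, sketched below; the study's exact windowing and normalization may differ. The 25 Hz rate matches the sampling described in the abstract.

```python
import numpy as np

def spectral_entropy(signal):
    """Shannon entropy of the normalized power spectrum; lower = more regular gait."""
    spectrum = np.abs(np.fft.rfft(signal - np.mean(signal))) ** 2
    p = spectrum / spectrum.sum()
    p = p[p > 0]
    return float(-(p * np.log2(p)).sum() / np.log2(len(spectrum)))  # scaled to [0, 1]

fs = 25                                   # 25 accelerometer samples per second
t = np.arange(0, 10, 1 / fs)
regular = np.sin(2 * np.pi * 1.8 * t)     # steady ~1.8 Hz cadence
irregular = regular + 0.8 * np.random.randn(t.size)   # noisy, erratic gait
print(spectral_entropy(regular), spectral_entropy(irregular))
```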
@article {pmid37765790,
year = {2023},
author = {Wu, YL and Wang, CS and Weng, WC and Lin, YC},
title = {Development of a Cloud-Based Image Processing Health Checkup System for Multi-Item Urine Analysis.},
journal = {Sensors (Basel, Switzerland)},
volume = {23},
number = {18},
pages = {},
pmid = {37765790},
issn = {1424-8220},
abstract = {With the busy pace of modern life, an increasing number of people are afflicted by lifestyle diseases. Going directly to the hospital for medical checks is not only time-consuming but also costly. Fortunately, the emergence of rapid tests has alleviated this burden. Accurately interpreting test results is extremely important; misinterpreting the results of rapid tests could lead to delayed medical treatment. Given that the URS-10 is a rapid test capable of detecting 10 distinct parameters in urine samples, assessing these parameters can offer insight into the subject's physiological condition. These parameters encompass aspects such as metabolism, renal function, diabetes, urinary tract disorders, hemolytic diseases, and acid-base balance, among others. Although the operational procedure is straightforward, the varied color changes exhibited in the outcomes of individual parameters make it challenging for lay users to deduce causal factors solely from color variations, and potential misinterpretations could arise from visual discrepancies. In this study, we successfully developed a cloud-based health checkup system for use in indoor environments. The system is used by placing a URS-10 test strip on a colorimetric board developed for this study and then using a smartphone application to take images, which are uploaded to a server for cloud computing. Finally, the interpretation results are stored in the cloud and sent back to the smartphone to be checked by the user. Furthermore, to confirm whether the color calibration technology could eliminate color differences between cameras, and whether the colorimetric board and urine test strips could be compared correctly under different light intensities, indoor environments simulating specific light intensities were established for testing purposes. When the experimental results were compared to real test strips, only two groups failed to reach an identification success rate of 100%, and in both cases the success rate reached 95%. The experimental results confirmed that the system developed in this study was able to eliminate color differences between camera devices and could be used without special technical requirements or training.},
}
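Conceptually, the server-side interpretation reduces to color calibration followed by nearest-reference-color classification. The sketch below illustrates that pipeline with invented reference values and a simple white-patch correction; the actual system's calibrated chart and color model are not given in the abstract.

```python
import math

REFERENCE = {                      # pad -> {result label: calibrated RGB} (invented)
    "glucose": {"negative": (95, 180, 160), "positive": (120, 90, 60)},
    "ph":      {"acidic":   (230, 130, 50), "alkaline": (40, 100, 170)},
}

def white_balance(rgb, white_measured, white_true=(255, 255, 255)):
    """Scale channels so the board's white patch maps to true white."""
    return tuple(min(255, c * wt / max(wm, 1))
                 for c, wm, wt in zip(rgb, white_measured, white_true))

def classify(pad, rgb, white_measured):
    """Nearest reference color in RGB space after calibration."""
    corrected = white_balance(rgb, white_measured)
    return min(REFERENCE[pad],
               key=lambda label: math.dist(REFERENCE[pad][label], corrected))

print(classify("glucose", (110, 85, 55), white_measured=(240, 235, 230)))
```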
@article {pmid37757612,
year = {2023},
author = {Palmer, GA and Tomkin, G and Martín-Alcalá, HE and Mendizabal-Ruiz, G and Cohen, J},
title = {The Internet of Things in assisted reproduction.},
journal = {Reproductive biomedicine online},
volume = {47},
number = {5},
pages = {103338},
doi = {10.1016/j.rbmo.2023.103338},
pmid = {37757612},
issn = {1472-6491},
mesh = {Humans ; *Internet of Things ; Internet ; Automation ; Laboratories ; Reproduction ; },
abstract = {The Internet of Things (IoT) is a network connecting physical objects with sensors, software and internet connectivity for data exchange. Integrating the IoT with medical devices shows promise in healthcare, particularly in IVF laboratories. By leveraging telecommunications, cybersecurity, data management and intelligent systems, the IoT can enable a data-driven laboratory with automation, improved conditions, personalized treatment and efficient workflows. The integration of 5G technology ensures fast and reliable connectivity for real-time data transmission, while blockchain technology secures patient data. Fog computing reduces latency and enables real-time analytics. Microelectromechanical systems enable wearable IoT and miniaturized monitoring devices for tracking IVF processes. However, challenges such as security risks and network issues must be addressed through cybersecurity measures and networking advancements. Clinical embryologists should maintain their expertise and knowledge for safety and oversight, even with IoT in the IVF laboratory.},
}
@article {pmid37746608,
year = {2023},
author = {Baghdadi, A and Guo, E and Lama, S and Singh, R and Chow, M and Sutherland, GR},
title = {Force Profile as Surgeon-Specific Signature.},
journal = {Annals of surgery open : perspectives of surgical history, education, and clinical approaches},
volume = {4},
number = {3},
pages = {e326},
pmid = {37746608},
issn = {2691-3593},
abstract = {OBJECTIVE: To investigate the notion that a surgeon's force profile can be the signature of their identity and performance.
SUMMARY BACKGROUND DATA: Surgeon performance in the operating room is an understudied topic. The advent of deep learning methods paired with a sensorized surgical device presents an opportunity to incorporate quantitative insight into surgical performance and processes. Using a device called the SmartForceps System and through automated analytics, we have previously reported surgeon force profile, surgical skill, and task classification. However, an investigation of whether an individual surgeon can be identified by surgical technique has yet to be studied.
METHODS: In this study, we investigate multiple neural network architectures to identify the surgeon associated with their time-series tool-tissue forces using bipolar forceps data. The surgeon associated with each 10-second window of force data was labeled, and the data were randomly split into 80% for model training and validation (10% validation) and 20% for testing. Data imbalance was mitigated through subsampling from more populated classes with a random size adjustment based on 0.1% of sample counts in the respective class. An exploratory analysis of force segments was performed to investigate underlying patterns differentiating individual surgical techniques.
RESULTS: In a dataset of 2819 ten-second time segments from 89 neurosurgical cases, the best-performing model achieved a micro-average area under the curve of 0.97, a testing F1-score of 0.82, a sensitivity of 82%, and a precision of 82%. This model was a time-series ResNet model to extract features from the time-series data followed by a linearized output into the XGBoost algorithm. Furthermore, we found that convolutional neural networks outperformed long short-term memory networks in performance and speed. Using a weighted average approach, an ensemble model was able to identify an expert surgeon with 83.8% accuracy using a validation dataset.
CONCLUSIONS: Our results demonstrate that each surgeon has a unique force profile amenable to identification using deep learning methods. We anticipate our models will enable a quantitative framework to provide bespoke feedback to surgeons and to track their skill progression longitudinally. Furthermore, the ability to recognize individual surgeons introduces the mechanism of correlating outcome to surgeon performance.},
}
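The data preparation described in the methods (slicing per-case force traces into labeled 10-second windows) can be sketched as follows; the sampling rate is an assumption not stated in the abstract, and the ResNet feature extractor and XGBoost classifier are omitted.

```python
import numpy as np

FS = 100                       # assumed samples per second (not given in the abstract)
WIN = 10 * FS                  # 10-second windows, as described in the study

def windows(force_trace, surgeon_id):
    """Slice one case's force trace into labeled, non-overlapping windows."""
    segs = [force_trace[i:i + WIN]
            for i in range(0, len(force_trace) - WIN + 1, WIN)]
    return [(np.asarray(s), surgeon_id) for s in segs]

trace = np.abs(np.random.randn(6500))      # synthetic forceps force trace
data = windows(trace, surgeon_id=3)
print(len(data), "windows of", WIN, "samples each")
```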
@article {pmid37745890,
year = {2023},
author = {Habib, W and Connolly, J},
title = {A national-scale assessment of land use change in peatlands between 1989 and 2020 using Landsat data and Google Earth Engine-a case study of Ireland.},
journal = {Regional environmental change},
volume = {23},
number = {4},
pages = {124},
pmid = {37745890},
issn = {1436-3798},
abstract = {Over the centuries, anthropogenic pressure has severely impacted peatlands on the European continent. Peatlands cover ~ 21% (1.46 Mha) of Ireland's land surface, but 85% have been degraded by management activities (land use). Ireland needs to meet its 2030 climate and energy framework targets for greenhouse gas (GHG) emissions from land use, land use change and forestry, including wetlands. Despite Ireland's voluntary decision to include peatlands in this system in 2020, information on land use activities and associated GHG emissions from peatlands is lacking. This study strives to fill this information gap by using Landsat (5, 8) data with Google Earth Engine and machine learning to examine and quantify land use on Irish peatlands across three time periods: 1990, 2005 and 2019. Four peatland land use classes were mapped and assessed: industrial peat extraction, forestry, grassland and residual peatland. The overall accuracy of the classification was 86% and 85% for the 2005 and 2019 maps, respectively. The accuracy of the 1990 dataset could not be assessed due to the unavailability of high-resolution reference data. The results indicate that extensive management activities have taken place in peatlands over the past three decades, which may have negative impacts on their ecological integrity and the many ecosystem services they provide. By utilising cloud computing, temporal mosaicking and Landsat data, this study developed a robust methodology that overcomes cloud contamination and produces the first peatland land use maps of Ireland with wall-to-wall coverage. This has the potential for regional and global applications, providing maps that could help in understanding unsustainable management practices on peatlands and their impact on GHG emissions.},
}
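A condensed Earth Engine (Python API) version of such a workflow is sketched below: build a cloud-masked median Landsat 5 composite for one epoch and classify it with a random forest. The region, date window, band choice, and training asset path are illustrative placeholders, not the study's actual inputs, and running it requires an authenticated Earth Engine account.

```python
import ee
ee.Initialize()

roi = ee.Geometry.Rectangle([-10.5, 51.4, -5.4, 55.4])   # approx. Ireland

def mask_landsat_sr(img):
    """Mask clouds and shadows using Collection 2 QA_PIXEL bits 3 and 4."""
    qa = img.select('QA_PIXEL')
    clear = qa.bitwiseAnd(1 << 3).eq(0).And(qa.bitwiseAnd(1 << 4).eq(0))
    return img.updateMask(clear)

composite = (ee.ImageCollection('LANDSAT/LT05/C02/T1_L2')
             .filterBounds(roi)
             .filterDate('2004-01-01', '2006-12-31')     # 2005 epoch mosaic
             .map(mask_landsat_sr)
             .median()
             .clip(roi))

labels = ee.FeatureCollection('users/example/peatland_training')  # hypothetical asset
bands = ['SR_B1', 'SR_B2', 'SR_B3', 'SR_B4', 'SR_B5', 'SR_B7']
training = composite.select(bands).sampleRegions(
    collection=labels, properties=['class'], scale=30)
classifier = ee.Classifier.smileRandomForest(100).train(
    features=training, classProperty='class', inputProperties=bands)
classified = composite.select(bands).classify(classifier)
```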
@article {pmid37745873,
year = {2023},
author = {Computational Intelligence and Neuroscience},
title = {Retracted: The Reform of University Education Teaching Based on Cloud Computing and Big Data Background.},
journal = {Computational intelligence and neuroscience},
volume = {2023},
number = {},
pages = {9893153},
pmid = {37745873},
issn = {1687-5273},
abstract = {[This retracts the article DOI: 10.1155/2022/8169938.].},
}
@article {pmid37744210,
year = {2023},
author = {Verner, E and Petropoulos, H and Baker, B and Bockholt, HJ and Fries, J and Bohsali, A and Raja, R and Trinh, DH and Calhoun, V},
title = {BrainForge: an online data analysis platform for integrative neuroimaging acquisition, analysis, and sharing.},
journal = {Concurrency and computation : practice & experience},
volume = {35},
number = {18},
pages = {},
pmid = {37744210},
issn = {1532-0626},
support = {R01 MH118695/MH/NIMH NIH HHS/United States ; R01 MH123610/MH/NIMH NIH HHS/United States ; R41 MH122201/MH/NIMH NIH HHS/United States ; R41 MH100070/MH/NIMH NIH HHS/United States ; R01 EB020407/EB/NIBIB NIH HHS/United States ; },
abstract = {BrainForge is a cloud-enabled, web-based analysis platform for neuroimaging research. This website allows users to archive data from a study and effortlessly process data on a high-performance computing cluster. After analyses are completed, results can be quickly shared with colleagues. BrainForge solves multiple problems for researchers who want to analyze neuroimaging data, including issues related to software, reproducibility, computational resources, and data sharing. BrainForge can currently process structural, functional, diffusion, and arterial spin labeling MRI modalities, including preprocessing and group level analyses. Additional pipelines are currently being added, and the pipelines can accept the BIDS format. Analyses are conducted completely inside of Singularity containers and utilize popular software packages including Nipype, Statistical Parametric Mapping, the Group ICA of fMRI Toolbox, and FreeSurfer. BrainForge also features several interfaces for group analysis, including a fully automated adaptive ICA approach.},
}
@article {pmid37738400,
year = {2023},
author = {Lim, HG and Fann, YC and Lee, YG},
title = {COWID: an efficient cloud-based genomics workflow for scalable identification of SARS-COV-2.},
journal = {Briefings in bioinformatics},
volume = {24},
number = {5},
pages = {},
pmid = {37738400},
issn = {1477-4054},
support = {HHSN261201400008C/NH/NIH HHS/United States ; ZIC NS009443/ImNIH/Intramural NIH HHS/United States ; HHSN261201500003I/CA/NCI NIH HHS/United States ; 75N91019D00024/CA/NCI NIH HHS/United States ; },
mesh = {Humans ; *COVID-19/diagnosis ; Cloud Computing ; SARS-CoV-2/genetics ; Workflow ; Genomics ; },
abstract = {Implementing a specific cloud resource to analyze extensive genomic data on severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2) poses a challenge when resources are limited. To overcome this, we repurposed a cloud platform initially designed for use in research on cancer genomics (https://cgc.sbgenomics.com) to enable its use in research on SARS-CoV-2 to build Cloud Workflow for Viral and Variant Identification (COWID). COWID is a workflow based on the Common Workflow Language that realizes the full potential of sequencing technology for use in reliable SARS-CoV-2 identification and leverages cloud computing to achieve efficient parallelization. COWID outperformed other contemporary methods for identification by offering scalable identification and reliable variant findings with no false-positive results. COWID typically processed each sample of raw sequencing data within 5 min at a cost of only US$0.01. The COWID source code is publicly available (https://github.com/hendrick0403/COWID) and can be accessed on any computer with Internet access. COWID is designed to be user-friendly; it can be implemented without prior programming knowledge. Therefore, COWID is a time-efficient tool that can be used during a pandemic.},
}
@article {pmid37732291,
year = {2023},
author = {Pessin, VZ and Santos, CAS and Yamane, LH and Siman, RR and Baldam, RL and Júnior, VL},
title = {A method of Mapping Process for scientific production using the Smart Bibliometrics.},
journal = {MethodsX},
volume = {11},
number = {},
pages = {102367},
pmid = {37732291},
issn = {2215-0161},
abstract = {Big data has launched a modern way of producing science and research around the world. Owing to the explosion of data available in scientific databases, combined with recent advances in information technology, researchers have at their disposal new methods and technologies that facilitate scientific development. Considering the challenges of producing science in a dynamic and complex scenario, the main objective of this article is to present a method, aligned with recently developed tools, to support scientific production, based on steps and technologies that help researchers materialize their objectives efficiently and effectively. Applying this method, researchers can carry out science mapping and bibliometric techniques with agility, taking advantage of an easy-to-use solution with cloud computing capabilities. By applying the "Scientific Mapping Process", researchers will be able to generate strategic information for result-oriented scientific production, assertively moving through the main steps of research and boosting scientific discovery in the most diverse fields of investigation. •The Scientific Mapping Process provides a method and a system to boost scientific development.•It automates science mapping and bibliometric analysis of scientific datasets.•It facilitates the researcher's work, increasing assertiveness in scientific production.},
}
@article {pmid37729405,
year = {2023},
author = {Willett, DS and Brannock, J and Dissen, J and Keown, P and Szura, K and Brown, OB and Simonson, A},
title = {NOAA Open Data Dissemination: Petabyte-scale Earth system data in the cloud.},
journal = {Science advances},
volume = {9},
number = {38},
pages = {eadh0032},
pmid = {37729405},
issn = {2375-2548},
abstract = {NOAA Open Data Dissemination (NODD) makes NOAA environmental data publicly and freely available on Amazon Web Services (AWS), Microsoft Azure (Azure), and Google Cloud Platform (GCP). These data can be accessed by anyone with an internet connection and span key datasets across the Earth system including satellite imagery, radar, weather models and observations, ocean databases, and climate data records. Since its inception, NODD has grown to provide public access to more than 24 PB of NOAA data and can support billions of requests and petabytes of access daily. Stakeholders routinely access more than 5 PB of NODD data every month. NODD continues to grow to support open petabyte-scale Earth system data science in the cloud by onboarding additional NOAA data and exploring performant data formats. Here, we document how this program works with a focus on provenance, key datasets, and use. We also highlight how to access these data with the goal of accelerating use of NOAA resources in the cloud.},
}
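Because NODD buckets are public, they can be read without AWS credentials using an unsigned client. The snippet below lists a few GOES-16 objects as an example; any NODD bucket works the same way, and the prefix shown is just one hour of one product.

```python
import boto3
from botocore import UNSIGNED
from botocore.config import Config

# Anonymous (unsigned) client: public NODD buckets require no credentials.
s3 = boto3.client("s3", config=Config(signature_version=UNSIGNED))
resp = s3.list_objects_v2(Bucket="noaa-goes16",
                          Prefix="ABI-L2-CMIPC/2023/001/00/", MaxKeys=5)
for obj in resp.get("Contents", []):
    print(obj["Key"], obj["Size"])
```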
@article {pmid37718323,
year = {2023},
author = {Namazi, F and Ezoji, M and Parmehr, EG},
title = {Paddy Rice mapping in fragmented lands by improved phenology curve and correlation measurements on Sentinel-2 imagery in Google earth engine.},
journal = {Environmental monitoring and assessment},
volume = {195},
number = {10},
pages = {1220},
doi = {10.1007/s10661-023-11808-3},
pmid = {37718323},
issn = {1573-2959},
mesh = {*Oryza ; Search Engine ; Environmental Monitoring ; Algorithms ; Water ; },
abstract = {Accurate and timely rice crop mapping is important for addressing the challenges of food security, water management, disease transmission, and land use change. However, accurate rice crop mapping is difficult due to the presence of mixed pixels in small and fragmented rice fields as well as cloud cover. In this paper, a phenology-based method using Sentinel-2 time series images is presented to solve these problems. First, an improved rice phenology curve is extracted from Normalized Difference Vegetation Index and Land Surface Water Index time series data of rice fields. Then, the correlation between the rice phenology curve and the time series data of each pixel is computed. The correlation result of each pixel shows the similarity of its time series behavior to the proposed rice phenology curve. In the next step, the maximum correlation value and its occurrence time are used as the feature vector of each pixel for classification. Since correlation measurement provides data with better separability than its input data, the classifier can be trained with fewer samples and the classification is more accurate. The proposed correlation-based algorithm can be implemented in parallel. All processing was performed on the Google Earth Engine cloud platform on Sentinel-2 time series images. The results demonstrate the high accuracy of this method.},
}
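The correlation step lends itself to a compact sketch: slide the reference phenology curve along each pixel's index time series and keep the maximum correlation and the lag at which it occurs as the two classification features. The arrays below are synthetic stand-ins for Sentinel-2 derived series.

```python
import numpy as np

def phenology_features(pixel_series, reference):
    """Max sliding-window correlation with the reference curve and its timing."""
    n, m = len(pixel_series), len(reference)
    best_r, best_t = -1.0, 0
    for t in range(n - m + 1):
        window = pixel_series[t:t + m]
        r = np.corrcoef(window, reference)[0, 1]
        if r > best_r:
            best_r, best_t = r, t
    return best_r, best_t

steps = np.linspace(0, np.pi, 12)
reference = np.sin(steps)          # idealized transplanting-to-harvest NDVI curve
rice_pixel = np.concatenate([np.full(6, 0.2), 0.2 + 0.6 * np.sin(steps)])
print(phenology_features(rice_pixel, reference))
```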
@article {pmid37705635,
year = {2023},
author = {Yang, J and Han, J and Wan, Q and Xing, S and Chen, F},
title = {A novel similarity measurement for triangular cloud models based on dual consideration of shape and distance.},
journal = {PeerJ. Computer science},
volume = {9},
number = {},
pages = {e1506},
pmid = {37705635},
issn = {2376-5992},
abstract = {It is important to be able to measure the similarity between two uncertain concepts for many real-life AI applications, such as image retrieval, collaborative filtering, risk assessment, and data clustering. Cloud models are important cognitive computing models that show promise in measuring the similarity of uncertain concepts. Here, we aim to address the shortcomings of existing cloud model similarity measurement algorithms, such as poor discrimination ability and unstable measurement results. We propose an EPTCM algorithm based on the triangular fuzzy number EW-type closeness and cloud drop variance, considering the shape and distance similarities of existing cloud models. The experimental results show that the EPTCM algorithm has good recognition and classification accuracy and is more accurate than the existing Likeness comparing method (LICM), overlap-based expectation curve (OECM), fuzzy distance-based similarity (FDCM) and multidimensional similarity cloud model (MSCM) methods. The experimental results also demonstrate that the EPTCM algorithm has successfully overcome the shortcomings of existing algorithms. In summary, the EPTCM method proposed here is effective and feasible to implement.},
}
@article {pmid37702950,
year = {2024},
author = {Pribec, I and Hachinger, S and Hayek, M and Pringle, GJ and Brüchle, H and Jamitzky, F and Mathias, G},
title = {Efficient and Reliable Data Management for Biomedical Applications.},
journal = {Methods in molecular biology (Clifton, N.J.)},
volume = {2716},
number = {},
pages = {383-403},
pmid = {37702950},
issn = {1940-6029},
mesh = {*Data Management ; *Big Data ; Cloud Computing ; Documentation ; Movement ; },
abstract = {This chapter discusses the challenges and requirements of modern Research Data Management (RDM), particularly for biomedical applications in the context of high-performance computing (HPC). The FAIR data principles (Findable, Accessible, Interoperable, Reusable) are of special importance. Data formats, publication platforms, annotation schemata, automated data management and staging, the data infrastructure in HPC centers, file transfer and staging methods in HPC, and the EUDAT components are discussed. Tools and approaches for automated data movement and replication in cross-center workflows are explained, as well as the development of ontologies for structuring and quality-checking of metadata in computational biomedicine. The CompBioMed project is used as a real-world example of implementing these principles and tools in practice. The LEXIS project has built a workflow-execution and data management platform that follows the paradigm of HPC-Cloud convergence for demanding Big Data applications. It is used for orchestrating workflows with YORC, utilizing the data documentation initiative (DDI) and distributed computing resources (DCI). The platform is accessed by a user-friendly LEXIS portal for workflow and data management, making HPC and Cloud Computing significantly more accessible. Checkpointing, duplicate runs, and spare images of the data are used to create resilient workflows. The CompBioMed project is completing the implementation of such a workflow, using data replication and brokering, which will enable urgent computing on exascale platforms.},
}
@article {pmid37702940,
year = {2024},
author = {Bonde, B},
title = {Edge, Fog, and Cloud Against Disease: The Potential of High-Performance Cloud Computing for Pharma Drug Discovery.},
journal = {Methods in molecular biology (Clifton, N.J.)},
volume = {2716},
number = {},
pages = {181-202},
pmid = {37702940},
issn = {1940-6029},
mesh = {Algorithms ; *Artificial Intelligence ; *Cloud Computing ; Drug Discovery ; Software ; },
abstract = {High-performance computing (HPC) platforms for large-scale drug discovery simulation demand significant investment in specialty hardware, maintenance, resource management, and running costs. The rapid growth in computing hardware has made it possible to provide cost-effective, robust, secure, and scalable alternatives to on-premise (on-prem) HPC via Cloud, Fog, and Edge computing. It has enabled recent state-of-the-art machine learning (ML) and artificial intelligence (AI)-based tools for drug discovery, such as BERT, BARD, AlphaFold2, and GPT. This chapter attempts to overview types of software architectures for developing scientific software or applications with deployment-agnostic (on-prem to cloud and hybrid) use cases. Furthermore, the chapter aims to outline how this innovation is disrupting the orthodox mindset of monolithic software running on on-prem HPC and to map the paradigm shift toward microservices-driven application programming interface (API)-based and message passing interface (MPI)-based scientific computing across distributed, highly available infrastructure. This is coupled with agile DevOps, good coding practices, and low-code and no-code application development frameworks for cost-efficient, secure, automated, and robust scientific application life cycle management.},
}
@article {pmid37693890,
year = {2023},
author = {Zhang, W and Zhang, C and Cao, L and Liang, F and Xie, W and Tao, L and Chen, C and Yang, M and Zhong, L},
title = {Application of digital-intelligence technology in the processing of Chinese materia medica.},
journal = {Frontiers in pharmacology},
volume = {14},
number = {},
pages = {1208055},
pmid = {37693890},
issn = {1663-9812},
abstract = {Processing of Chinese Materia Medica (PCMM) is the concentrated embodiment and core of China's unique traditional pharmaceutical technology. Processing includes preparation steps such as cleansing, cutting and stir-frying, which have definite impacts on the quality and efficacy of Chinese botanical drugs. The rapid development of new computer and digital technologies, such as big data analysis, the Internet of Things (IoT), blockchain, cloud computing and artificial intelligence, has promoted the rapid digital and intelligent development of the traditional pharmaceutical manufacturing industry. In this review, the application of digital-intelligence technology in PCMM is analyzed and discussed, with the aim of promoting the standardization of the process and securing the quality of botanical drug decoction pieces. Through the intellectualization and digitization of production, the safety and effectiveness of the clinical use of traditional Chinese medicine (TCM) decoction pieces are ensured. This review also provides a theoretical basis for further technical upgrading and high-quality development of the TCM industry.},
}
@article {pmid37693367,
year = {2023},
author = {Griffin, AC and Khairat, S and Bailey, SC and Chung, AE},
title = {A chatbot for hypertension self-management support: user-centered design, development, and usability testing.},
journal = {JAMIA open},
volume = {6},
number = {3},
pages = {ooad073},
pmid = {37693367},
issn = {2574-2531},
support = {UM1 TR004406/TR/NCATS NIH HHS/United States ; },
abstract = {OBJECTIVES: Health-related chatbots have demonstrated early promise for improving self-management behaviors but have seldomly been utilized for hypertension. This research focused on the design, development, and usability evaluation of a chatbot for hypertension self-management, called "Medicagent."
MATERIALS AND METHODS: A user-centered design process was used to iteratively design and develop a text-based chatbot using Google Cloud's Dialogflow natural language understanding platform. Then, usability testing sessions were conducted among patients with hypertension. Each session comprised: (1) background questionnaires, (2) 10 representative tasks within Medicagent, (3) the System Usability Scale (SUS) questionnaire, and (4) a brief semi-structured interview. Sessions were video and audio recorded using Zoom. Qualitative and quantitative analyses were used to assess the effectiveness, efficiency, and satisfaction of the chatbot.
RESULTS: Participants (n = 10) completed nearly all tasks (98%, 98/100) and spent an average of 18 min (SD = 10 min) interacting with Medicagent. Only 11 (8.6%) utterances were not successfully mapped to an intent. Medicagent achieved a mean SUS score of 78.8/100, which demonstrated acceptable usability. Several participants had difficulties navigating the conversational interface without menu and back buttons, felt additional information would be useful for redirection when utterances were not recognized, and desired a health professional persona within the chatbot.
DISCUSSION: The text-based chatbot was viewed favorably for assisting with blood pressure and medication-related tasks and had good usability.
CONCLUSION: Flexibility of interaction styles, handling unrecognized utterances gracefully, and having a credible persona were highlighted as design components that may further enrich the user experience of chatbots for hypertension self-management.},
}
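For readers unfamiliar with Dialogflow, a minimal round trip to an agent of the kind Medicagent is built on looks like the following; the project ID, session ID, and utterance are placeholders, while the calls follow the published google-cloud-dialogflow v2 Python client.

```python
from google.cloud import dialogflow

def detect_intent(project_id, session_id, text):
    """Send one utterance to a Dialogflow agent and return its intent and reply."""
    client = dialogflow.SessionsClient()   # needs Google Cloud credentials configured
    session = client.session_path(project_id, session_id)
    query_input = dialogflow.QueryInput(
        text=dialogflow.TextInput(text=text, language_code="en-US"))
    response = client.detect_intent(
        request={"session": session, "query_input": query_input})
    result = response.query_result
    return result.intent.display_name, result.fulfillment_text

intent, reply = detect_intent("my-gcp-project", "user-123",
                              "When should I take my blood pressure medication?")
print(intent, "->", reply)
```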
@article {pmid37692531,
year = {2023},
author = {Angelidis, E},
title = {A perspective on large-scale simulation as an enabler for novel biorobotics applications.},
journal = {Frontiers in robotics and AI},
volume = {10},
number = {},
pages = {1102286},
pmid = {37692531},
issn = {2296-9144},
abstract = {Our understanding of the complex mechanisms that power biological intelligence has been greatly enhanced through the explosive growth of large-scale neuroscience and robotics simulation tools that are used by the research community to perform previously infeasible experiments, such as the simulation of the neocortex's circuitry. Nevertheless, simulation falls far from being directly applicable to biorobots due to the large discrepancy between the simulated and the real world. A possible solution for this problem is the further enhancement of existing simulation tools for robotics, AI and neuroscience with multi-physics capabilities. Previously infeasible or difficult to simulate scenarios, such as robots swimming on the water surface, interacting with soft materials, walking on granular materials etc., would be rendered possible within a multi-physics simulation environment designed for robotics. In combination with multi-physics simulation, large-scale simulation tools that integrate multiple simulation modules in a closed-loop manner help address fundamental questions around the organization of neural circuits and the interplay between the brain, body and environment. We analyze existing designs for large-scale simulation running on cloud and HPC infrastructure as well as their shortcomings. Based on this analysis we propose a next-gen modular architecture design based on multi-physics engines, that we believe would greatly benefit biorobotics and AI.},
}
@article {pmid37688118,
year = {2023},
author = {Urblik, L and Kajati, E and Papcun, P and Zolotova, I},
title = {A Modular Framework for Data Processing at the Edge: Design and Implementation.},
journal = {Sensors (Basel, Switzerland)},
volume = {23},
number = {17},
pages = {},
pmid = {37688118},
issn = {1424-8220},
support = {APVV-20-0247//Slovak Research and Development Agency/ ; },
abstract = {There is a rapid increase in the number of edge devices in IoT solutions, generating vast amounts of data that need to be processed and analyzed efficiently. Traditional cloud-based architectures can face latency, bandwidth, and privacy challenges when dealing with this data flood. There is currently no unified approach to the creation of edge computing solutions. This work addresses this problem by exploring containerization for data processing solutions at the network's edge. The current approach involves creating a specialized application compatible with the device used. Another approach involves using containerization for deployment and monitoring. The heterogeneity of edge environments would greatly benefit from a universal modular platform. Our proposed edge computing-based framework implements a streaming extract, transform, and load pipeline for data processing and analysis using ZeroMQ as the communication backbone and containerization for scalable deployment. Results demonstrate the effectiveness of the proposed framework, making it suitable for time-sensitive IoT applications.},
}
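A toy version of the streaming ETL backbone can be built with pyzmq's PUSH/PULL sockets, as below; in the framework each stage would run in its own container, and the endpoint, payloads, and the stdout "load" stage here are illustrative.

```python
import json
import threading
import zmq

def extract(ctx, n=5):
    """Extract stage: push simulated sensor readings downstream."""
    push = ctx.socket(zmq.PUSH)
    push.bind("tcp://127.0.0.1:5557")
    for i in range(n):
        push.send_json({"sensor": "temp-1", "reading": 20.0 + i})
    push.send_json({"stop": True})

def transform_load(ctx):
    """Transform + load stage: convert units, then emit (stdout stands in for storage)."""
    pull = ctx.socket(zmq.PULL)
    pull.connect("tcp://127.0.0.1:5557")
    while True:
        msg = pull.recv_json()
        if msg.get("stop"):
            break
        msg["fahrenheit"] = msg["reading"] * 9 / 5 + 32
        print(json.dumps(msg))

ctx = zmq.Context()
worker = threading.Thread(target=transform_load, args=(ctx,))
worker.start()
extract(ctx)
worker.join()
```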
@article {pmid37688051,
year = {2023},
author = {Shi, W and Chen, L and Zhu, X},
title = {Task Offloading Decision-Making Algorithm for Vehicular Edge Computing: A Deep-Reinforcement-Learning-Based Approach.},
journal = {Sensors (Basel, Switzerland)},
volume = {23},
number = {17},
pages = {},
pmid = {37688051},
issn = {1424-8220},
support = {2022YFB3305500//National Key Research and Development Program of China/ ; 62273089//National Natural Science Foundation of China/ ; 62102080//National Natural Science Foundation of China/ ; BK20210204//Natural Science Foundation of Jiangsu Province/ ; },
abstract = {Efficient task offloading decisions are a crucial technology in vehicular edge computing, which aims to fulfill the computational performance demands of complex vehicular tasks with respect to delay and energy consumption while minimizing network resource competition and consumption. Conventional distributed task offloading decisions rely solely on the local state of the vehicle, failing to utilize the server's resources to their fullest potential; in addition, the mobility of vehicles is often neglected in these decisions. In this paper, a cloud-edge-vehicle three-tier vehicular edge computing (VEC) system is proposed, where vehicles partially offload their computing tasks to edge or cloud servers while keeping the remaining tasks local to the vehicle terminals. Under the restrictions of vehicle mobility and discrete variables, task scheduling and the task offloading proportion are jointly optimized with the objective of minimizing the total system cost. Considering the non-convexity and the high-dimensional complex state and continuous action space requirements of the optimization problem, we propose a task offloading decision-making algorithm based on the deep deterministic policy gradient (TODM_DDPG). The TODM_DDPG algorithm adopts the actor-critic framework, in which the actor network outputs floating point numbers to represent a deterministic policy, while the critic network evaluates the actions output by the actor network and adjusts its evaluation policy according to rewards from the environment so as to maximize the long-term reward. To explore the algorithm's performance, we conducted parameter-setting experiments to tune the core hyper-parameters and select the optimal combination of parameters. In addition, to verify the algorithm's performance, we also carried out a series of comparative experiments with baseline algorithms. The results demonstrate that, in terms of reducing system costs, the proposed algorithm outperforms the compared baseline algorithms, such as the deep Q network (DQN) and the actor-critic (AC), with performance improved by about 13% on average.},
}
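The actor-critic pair at the heart of a DDPG-style scheduler can be sketched compactly: the actor maps the system state to a continuous offloading proportion, and the critic scores state-action pairs. The PyTorch fragment below shows only the deterministic-policy-gradient actor update; the dimensions are invented, and the target networks, critic update, and replay buffer are omitted.

```python
import torch
import torch.nn as nn

state_dim = 12   # e.g., queue lengths, channel state, vehicle position (invented)

actor = nn.Sequential(nn.Linear(state_dim, 64), nn.ReLU(),
                      nn.Linear(64, 1), nn.Sigmoid())        # offload fraction in [0, 1]
critic = nn.Sequential(nn.Linear(state_dim + 1, 64), nn.ReLU(),
                       nn.Linear(64, 1))                     # Q(s, a)
actor_opt = torch.optim.Adam(actor.parameters(), lr=1e-4)

def actor_update(states):
    """Deterministic policy gradient: raise Q for the actor's own actions."""
    actions = actor(states)
    q = critic(torch.cat([states, actions], dim=1))
    loss = -q.mean()
    actor_opt.zero_grad(); loss.backward(); actor_opt.step()
    return loss.item()

print(actor_update(torch.randn(32, state_dim)))
```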
@article {pmid37687890,
year = {2023},
author = {Zhou, W and Qian, Z and Ni, X and Tang, Y and Guo, H and Zhuang, S},
title = {Dense Convolutional Neural Network for Identification of Raman Spectra.},
journal = {Sensors (Basel, Switzerland)},
volume = {23},
number = {17},
pages = {},
pmid = {37687890},
issn = {1424-8220},
support = {21010502900//Science and Technology Commission of Shanghai Municipality/ ; },
abstract = {The rapid development of cloud computing and deep learning has made intelligent modes of application widespread in various fields. The identification of Raman spectra can be realized in the cloud thanks to its powerful computing, abundant spectral databases and advanced algorithms, reducing the dependence on the performance of terminal instruments. However, the complexity of the detection environment can cause great interference, which may significantly decrease the identification accuracy of algorithms. In this paper, a deep learning algorithm based on the Dense network is proposed to realize this vision. The proposed Dense convolutional neural network has a very deep structure of over 40 layers and plenty of parameters to adjust the weights of different wavebands. In the kernel Dense blocks of the network, each layer is connected in a feed-forward fashion to every other layer, which alleviates gradient vanishing or explosion problems, strengthens feature propagation, encourages feature reuse and enhances training efficiency. The network's special architecture mitigates noise interference and ensures precise identification. The Dense network shows greater accuracy and robustness than other CNN-based algorithms. We set up a database of 1600 Raman spectra consisting of 32 different types of liquid chemicals, detected under different postures as examples of interference-affected Raman spectra. In 50 repeated training and testing runs, the Dense network achieves a weighted accuracy of 99.99%. We also tested the RRUFF database, on which the Dense network performs well. The proposed approach advances cloud-enabled Raman spectra identification, offering improved accuracy and adaptability for diverse identification tasks.},
}
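The dense-connectivity trick the abstract describes (each layer's output concatenated with its inputs) is easy to show in a 1-D Keras skeleton; the depth, widths, and 1024-point input below are far smaller than, and not identical to, the 40+ layer network in the paper.

```python
from tensorflow import keras
from tensorflow.keras import layers

def dense_block(x, n_layers=4, growth=16):
    """Each layer sees all earlier feature maps via channel concatenation."""
    for _ in range(n_layers):
        y = layers.Conv1D(growth, 3, padding="same", activation="relu")(x)
        x = layers.Concatenate()([x, y])        # feed-forward feature reuse
    return x

inputs = keras.Input(shape=(1024, 1))           # 1024-point spectrum (illustrative)
x = layers.Conv1D(32, 7, strides=2, padding="same", activation="relu")(inputs)
x = dense_block(x)
x = layers.GlobalAveragePooling1D()(x)
outputs = layers.Dense(32, activation="softmax")(x)   # 32 chemical classes
model = keras.Model(inputs, outputs)
model.summary()
```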
@article {pmid37687870,
year = {2023},
author = {Sangaiah, AK and Javadpour, A and Pinto, P and Chiroma, H and Gabralla, LA},
title = {Cost-Effective Resources for Computing Approximation Queries in Mobile Cloud Computing Infrastructure.},
journal = {Sensors (Basel, Switzerland)},
volume = {23},
number = {17},
pages = {},
pmid = {37687870},
issn = {1424-8220},
abstract = {Answering a query through a peer-to-peer database is one of the greatest challenges in such systems, because obtaining a comprehensive response is both costly and time-consuming; consequently, these systems were primarily designed to handle approximation queries. In our research, the primary objective was to develop an intelligent system capable of responding to approximate set-value inquiries. This paper explores the use of particle optimization to enhance the system's intelligence. In contrast to previous studies, our proposed method avoids the use of sampling: even with the best sampling methods, a possibility of error remains, making it difficult to guarantee accuracy, and various factors influence the accuracy of sampling procedures, while achieving a certain degree of accuracy is crucial in handling approximate queries. Our research evaluated several methods, including flood algorithms, parallel diffusion algorithms, and ISM algorithms. The results of our studies indicate that the suggested method improves on the number of queries issued, the number of peers examined, and execution time, running significantly faster than the flood approach and exhibiting superior cost-effectiveness in query transmission.},
}
@article {pmid37687784,
year = {2023},
author = {Alsemmeari, RA and Dahab, MY and Alturki, B and Alsulami, AA and Alsini, R},
title = {Towards an Effective Service Allocation in Fog Computing.},
journal = {Sensors (Basel, Switzerland)},
volume = {23},
number = {17},
pages = {},
pmid = {37687784},
issn = {1424-8220},
support = {IFPIP: 1033-611-1443//Deanship of Scientific Research (DSR) at King Abdulaziz University, Jeddah/ ; },
abstract = {The Internet of Things (IoT) generates a large volume of data whenever devices are interconnected and exchange data across a network. Consequently, a variety of services with diverse needs arises, including capacity requirements, data quality, and latency demands. These services operate on fog computing devices, which are limited in power and bandwidth compared to the cloud. The primary challenge lies in determining the optimal location for service implementation: in the fog, in the cloud, or in a hybrid setup. This paper introduces an efficient allocation technique that moves processing closer to the fog side of the network. It explores the optimal allocation of devices and services while maintaining resource utilization within an IoT architecture. The paper also examines the significance of allocating services to devices and optimizing resource utilization in fog computing. In IoT scenarios, where a wide range of services and devices coexist, it becomes crucial to assign services to devices effectively. We propose priority-based service allocation (PSA) and sort-based service allocation (SSA) techniques, which determine the optimal order in which devices perform the different services. Experimental results demonstrate that our proposed technique reduces data communication over the network by 88%, achieved by allocating most services locally in the fog. We increased the distribution of services to fog devices by 96%, while simultaneously minimizing the wastage of fog resources.},
}
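The gist of priority-based service allocation (PSA) can be sketched in a few lines: rank services, place each on a fog device that can still satisfy it, and fall back to the cloud only when no fog device fits. The priorities, demands, and capacities below are invented.

```python
def psa(services, fog_devices):
    """Greedy priority-first placement onto fog devices, cloud as fallback."""
    placements = {}
    for svc in sorted(services, key=lambda s: s["priority"], reverse=True):
        for dev in fog_devices:
            if dev["free"] >= svc["demand"]:
                dev["free"] -= svc["demand"]
                placements[svc["name"]] = dev["name"]
                break
        else:
            placements[svc["name"]] = "cloud"   # no fog device fits
    return placements

services = [{"name": "ecg-alert", "priority": 9, "demand": 2},
            {"name": "video-archive", "priority": 2, "demand": 8},
            {"name": "telemetry", "priority": 5, "demand": 3}]
fogs = [{"name": "fog-1", "free": 4}, {"name": "fog-2", "free": 3}]
print(psa(services, fogs))
```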
@article {pmid37679146,
year = {2023},
author = {Tian, L and Shang, F and Gan, C},
title = {Optimal control analysis of malware propagation in cloud environments.},
journal = {Mathematical biosciences and engineering : MBE},
volume = {20},
number = {8},
pages = {14502-14517},
doi = {10.3934/mbe.2023649},
pmid = {37679146},
issn = {1551-0018},
abstract = {Cloud computing has become a widespread technology that delivers a broad range of services across various industries globally. One of the crucial features of cloud infrastructure is virtual machine (VM) migration, which plays a pivotal role in resource allocation flexibility and reducing energy consumption, but it also provides convenience for the fast propagation of malware. To tackle the challenge of curtailing the proliferation of malware in the cloud, this paper proposes an effective strategy based on optimal dynamic immunization using a controlled dynamical model. The objective of the research is to identify the most efficient way of dynamically immunizing the cloud to minimize the spread of malware. To achieve this, we define the control strategy and loss and give the corresponding optimal control problem. The optimal control analysis of the controlled dynamical model is examined theoretically and experimentally. Finally, the theoretical and experimental results both demonstrate that the optimal strategy can minimize the incidence of infections at a reasonable loss.},
}
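A generic form of the controlled-propagation problem studied above can be written as follows; the notation is illustrative (an SIR-style model of susceptible, infected, and immunized VMs with immunization rate u(t)), not the paper's exact system.

```latex
% Illustrative optimal-control formulation, not the paper's exact model:
% choose an immunization rate u(t) minimizing infections plus control effort.
\begin{aligned}
\min_{0 \le u(t) \le u_{\max}} \; J(u) &= \int_0^T \Bigl( I(t) + \frac{c}{2}\,u(t)^2 \Bigr)\,dt \\
\text{subject to}\quad \dot S(t) &= -\beta\, S(t)\, I(t) - u(t)\, S(t), \\
\dot I(t) &= \beta\, S(t)\, I(t) - \delta\, I(t), \\
\dot R(t) &= \delta\, I(t) + u(t)\, S(t), \qquad S(0)=S_0,\; I(0)=I_0,\; R(0)=0 .
\end{aligned}
```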
@article {pmid37676890,
year = {2023},
author = {Niu, S and Dong, R and Fang, L},
title = {Certificateless broadcast signcryption scheme supporting equality test in smart grid.},
journal = {PloS one},
volume = {18},
number = {9},
pages = {e0290666},
pmid = {37676890},
issn = {1932-6203},
mesh = {*Algorithms ; *Cloud Computing ; Internet ; Privacy ; Trust ; },
abstract = {With the development of cloud computing and the application of the Internet of Things (IoT) in the smart grid, a massive amount of sensitive data is produced by terminal equipment. This vast amount of data is subject to various attacks during transmission, from which users must be protected. However, most existing schemes require a large amount of network bandwidth and cannot ensure the receiver's anonymity. To address these shortcomings, we construct a certificateless broadcast signcryption scheme supporting equality tests. The scheme employs a symmetric encryption algorithm to improve encryption and transmission efficiency; the Lagrange interpolation theorem is used to encrypt users' identities so as to preserve the privacy of terminal devices; and a trusted third party eliminates duplicated ciphertexts of identical messages using an equality test, resulting in efficient network bandwidth utilization. Experimental analysis shows that our work has clear advantages for practical broadcast services.},
}
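The identity-hiding ingredient named in the abstract, Lagrange interpolation, is shown below over a toy prime field: enough (x, y) points on a polynomial recover its constant term, which is how a hidden value can be made available to exactly the authorized receivers. The field size and points are toy values, not the scheme's parameters.

```python
P = 2_147_483_647   # a Mersenne prime; real schemes use much larger fields

def lagrange_at_zero(points):
    """Reconstruct f(0) mod P from enough (x, y) samples of the polynomial."""
    total = 0
    for i, (xi, yi) in enumerate(points):
        num, den = 1, 1
        for j, (xj, _) in enumerate(points):
            if i != j:
                num = num * (-xj) % P
                den = den * (xi - xj) % P
        total = (total + yi * num * pow(den, -1, P)) % P   # modular inverse of den
    return total

# f(x) = 12345 + 7x + 3x^2 (mod P); any three points recover f(0) = 12345.
f = lambda x: (12345 + 7 * x + 3 * x * x) % P
print(lagrange_at_zero([(1, f(1)), (5, f(5)), (9, f(9))]))
```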
@article {pmid37672552,
year = {2023},
author = {, },
title = {Retraction: Construction and optimization of inventory management system via cloud-edge collaborative computing in supply chain environment in the Internet of Things era.},
journal = {PloS one},
volume = {18},
number = {9},
pages = {e0291318},
pmid = {37672552},
issn = {1932-6203},
}
@article {pmid37669969,
year = {2023},
author = {Zhao, Y and Ye, H},
title = {Power system low delay resource scheduling model based on edge computing node.},
journal = {Scientific reports},
volume = {13},
number = {1},
pages = {14634},
pmid = {37669969},
issn = {2045-2322},
abstract = {As more and more intelligent devices are deployed in power systems, the number of connected nodes in the power network is increasing exponentially. Against the background of smart grid cooperation across power areas and voltage levels, effectively processing the massive data generated by the smart grid has become a difficult problem for ensuring the stable operation of the power system. Existing approaches cannot minimize the operation time of complex power system calculations or improve their execution efficiency. Therefore, this paper proposes a two-phase heuristic algorithm based on edge computing. In solving the virtual machine ordering problem, the critical path algorithm is used to sort the virtual machines of the main partition and the coordination partition so as to minimize computing time, while for the other sub-partitions, the minimum cut algorithm is used to reduce the traffic interaction between sub-partitions. In the second stage, the virtual machine placement process uses an improved best-fit algorithm to avoid poor placements that spread virtual machines across physical machine configurations and increase computing time. Experiments on a test system show that calculation efficiency improves when the coordinated partition calculation belongs to the target partition. Because edge computing is closer to the data source, it saves more data transmission time than cloud computing. This paper provides an effective algorithm for virtual machine configuration in edge computing for power system distributed computing, which can effectively reduce the computing time of the power system and improve the efficiency of system resource utilization.},
}
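The placement stage's best-fit idea is shown in miniature below: each virtual machine goes to the physical machine whose remaining capacity it fills most tightly. The VM demands and host capacities are invented.

```python
def best_fit(vms, hosts):
    """Place each VM on the host with the least leftover capacity that still fits it."""
    placement = {}
    for vm, need in vms:
        candidates = [(cap - need, host) for host, cap in hosts.items() if cap >= need]
        if not candidates:
            placement[vm] = None          # would trigger provisioning a new host
            continue
        slack, host = min(candidates)     # tightest fit = smallest slack
        hosts[host] -= need
        placement[vm] = host
    return placement

vms = [("vm-main", 8), ("vm-coord", 4), ("vm-sub1", 2), ("vm-sub2", 2)]
hosts = {"pm-1": 10, "pm-2": 6}
print(best_fit(vms, hosts))
```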
@article {pmid37662679,
year = {2023},
author = {Journal of Healthcare Engineering},
title = {Retracted: Construction and Clinical Application Effect of General Surgery Patient-Oriented Nursing Information Platform Using Cloud Computing.},
journal = {Journal of healthcare engineering},
volume = {2023},
number = {},
pages = {9784736},
pmid = {37662679},
issn = {2040-2309},
abstract = {[This retracts the article DOI: 10.1155/2022/8273701.].},
}
@article {pmid37649809,
year = {2023},
author = {Zainudin, H and Koufos, K and Lee, G and Jiang, L and Dianati, M},
title = {Impact analysis of cooperative perception on the performance of automated driving in unsignalized roundabouts.},
journal = {Frontiers in robotics and AI},
volume = {10},
number = {},
pages = {1164950},
pmid = {37649809},
issn = {2296-9144},
abstract = {This paper reports the implementation and results of a simulation-based analysis of the impact of cloud/edge-enabled cooperative perception on the performance of automated driving in unsignalized roundabouts. This is achieved by comparing the performance of automated driving assisted by cooperative perception to that of a baseline system, where the automated vehicle relies only on its onboard sensing and perception for motion planning and control. The paper first provides the descriptions of the implemented simulation model, which integrates the SUMO road traffic generator and CARLA simulator. This includes descriptions of both the baseline and cooperative perception-assisted automated driving systems. We then define a set of relevant key performance indicators for traffic efficiency, safety, and ride comfort, as well as simulation scenarios to collect relevant data for our analysis. This is followed by the description of simulation scenarios, presentation of the results, and discussions of the insights learned from the results.},
}
@article {pmid37631822,
year = {2023},
author = {Almudayni, Z and Soh, B and Li, A},
title = {Enhancing Energy Efficiency and Fast Decision Making for Medical Sensors in Healthcare Systems: An Overview and Novel Proposal.},
journal = {Sensors (Basel, Switzerland)},
volume = {23},
number = {16},
pages = {},
pmid = {37631822},
issn = {1424-8220},
mesh = {*Conservation of Energy Resources ; Physical Phenomena ; *Algorithms ; Industry ; Decision Making ; },
abstract = {In the realm of the Internet of Things (IoT), a network of sensors and actuators collaborates to fulfill specific tasks. As the demand for IoT networks continues to rise, it becomes crucial to ensure the stability of this technology and adapt it for further expansion. Through an analysis of related works, including the feedback-based optimized fuzzy scheduling approach (FOFSA) algorithm, the adaptive task allocation technique (ATAT), and the osmosis load balancing algorithm (OLB), we identify their limitations in achieving optimal energy efficiency and fast decision making. To address these limitations, this research introduces a novel approach to enhance the processing time and energy efficiency of IoT networks. The proposed approach achieves this by efficiently allocating IoT data resources in the Mist layer during the early stages. We apply the approach to our proposed system known as the Mist-based fuzzy healthcare system (MFHS) that demonstrates promising potential to overcome the existing challenges and pave the way for the efficient industrial Internet of healthcare things (IIoHT) of the future.},
}
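As a rough illustration of fuzzy, Mist-first task allocation of the kind MFHS targets, the sketch below scores three tiers with triangular membership functions. The membership shapes, rule set, and tier names are invented for illustration and do not reproduce the MFHS design.

```python
# Fuzzy tier selection for a medical-sensor task: urgent, lightweight
# readings are handled in the Mist layer; the rest drift toward fog/cloud.
# Membership functions and the scoring rule are illustrative assumptions.

def tri(x, a, b, c):
    """Triangular membership function peaking at b."""
    if x <= a or x >= c:
        return 0.0
    return (x - a) / (b - a) if x <= b else (c - x) / (c - b)

def select_tier(urgency, data_kb):
    """Fuzzy rules: urgent + small payload -> Mist; moderate -> Fog; else Cloud."""
    urgent = tri(urgency, 0.4, 1.0, 1.6)        # urgency in [0, 1]
    small = tri(data_kb, -1, 0, 256)            # payload size in KB
    scores = {
        "mist": min(urgent, small),
        "fog": min(tri(urgency, 0.1, 0.5, 0.9), tri(data_kb, 64, 512, 2048)),
        "cloud": 1.0 - urgent,
    }
    return max(scores, key=scores.get), scores

tier, scores = select_tier(urgency=0.9, data_kb=40)
print(tier, scores)   # expected: 'mist' for an urgent, lightweight reading
```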
@article {pmid37631769,
year = {2023},
author = {Hamzei, M and Khandagh, S and Jafari Navimipour, N},
title = {A Quality-of-Service-Aware Service Composition Method in the Internet of Things Using a Multi-Objective Fuzzy-Based Hybrid Algorithm.},
journal = {Sensors (Basel, Switzerland)},
volume = {23},
number = {16},
pages = {},
pmid = {37631769},
issn = {1424-8220},
abstract = {The Internet of Things (IoT) represents a cutting-edge technical domain, encompassing billions of intelligent objects capable of bridging the physical and virtual worlds across various locations. IoT services are responsible for delivering essential functionalities. In this dynamic and interconnected IoT landscape, providing high-quality services is paramount to enhancing user experiences and optimizing system efficiency. Service composition techniques address user requests in IoT applications by allowing various IoT services to collaborate seamlessly. Given the resource limitations of IoT devices, they often leverage cloud infrastructures to overcome technological constraints, benefiting from virtually unlimited resources and capabilities. Moreover, fog computing has gained prominence, allowing IoT applications to be processed in edge networks closer to IoT sensors and effectively reducing the delays inherent in cloud data centers. In this context, our study proposes a cloud-/fog-based service composition method for IoT, introducing a novel fuzzy-based hybrid algorithm that combines Ant Colony Optimization (ACO) and Artificial Bee Colony (ABC) optimization while taking energy consumption and Quality of Service (QoS) factors into account during service selection. The fuzzy component supports intelligent decision making during composition and targets high user satisfaction. Our experimental results demonstrate the effectiveness of the proposed strategy in fulfilling service composition requests by identifying suitable services. Compared to recently introduced methods, the hybrid approach reduces energy consumption by 17.11% on average, enhances availability and reliability by 8.27% and 4.52%, respectively, and reduces the average cost by 21.56%.},
}
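A single-colony, pheromone-guided selection loop conveys the flavor of such QoS-aware composition. The sketch below is a heavy simplification: the QoS tuples and weights are invented, and the ABC phase and fuzzy weighting of the paper's hybrid are omitted.

```python
import random

# Pheromone-guided selection of one concrete service per abstract task.
# candidates[t] = list of (energy, availability, reliability, cost) tuples.
candidates = {
    0: [(2.0, 0.99, 0.97, 5.0), (1.5, 0.95, 0.96, 4.0)],
    1: [(3.0, 0.98, 0.99, 6.0), (2.5, 0.97, 0.95, 3.5)],
}

def fitness(plan):
    """Lower is better: energy + cost, penalized by QoS shortfall."""
    e = sum(candidates[t][i][0] for t, i in plan.items())
    c = sum(candidates[t][i][3] for t, i in plan.items())
    avail = min(candidates[t][i][1] for t, i in plan.items())
    rel = min(candidates[t][i][2] for t, i in plan.items())
    return e + c + 10 * ((1 - avail) + (1 - rel))

pheromone = {t: [1.0] * len(cs) for t, cs in candidates.items()}
best, best_f = None, float("inf")
for _ in range(50):                       # ants x iterations, flattened
    plan = {t: random.choices(range(len(cs)), weights=pheromone[t])[0]
            for t, cs in candidates.items()}
    f = fitness(plan)
    if f < best_f:
        best, best_f = plan, f
    for t, i in plan.items():             # evaporate, then deposit
        pheromone[t] = [0.9 * p for p in pheromone[t]]
        pheromone[t][i] += 1.0 / f
print(best, best_f)
```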
@article {pmid37631746,
year = {2023},
author = {Alasmari, MK and Alwakeel, SS and Alohali, YA},
title = {A Multi-Classifiers Based Algorithm for Energy Efficient Tasks Offloading in Fog Computing.},
journal = {Sensors (Basel, Switzerland)},
volume = {23},
number = {16},
pages = {},
pmid = {37631746},
issn = {1424-8220},
abstract = {The IoT has connected a vast number of devices on a massive internet scale. With the rapid increase in devices and data, offloading tasks from IoT devices to remote Cloud data centers becomes unproductive and costly. Optimizing energy consumption in IoT devices while meeting deadlines and data constraints is challenging. Fog Computing, being close to the end nodes, supports efficient IoT task processing with lower service delay, but because its resources are limited compared to the remote Cloud, tasks are still frequently offloaded to the Cloud; this calls for improved techniques for accurately categorizing and distributing IoT device task offloading in a hybrid IoT, Fog, and Cloud paradigm. This article reviews relevant offloading strategies in Fog Computing and proposes MCEETO, an intelligent energy-aware allocation strategy that uses a multi-classifier-based algorithm for efficient task offloading by selecting optimal Fog Devices (FDs) for module placement. MCEETO's decision parameters include task attributes, Fog node characteristics, network latency, and bandwidth. The method is evaluated using the iFogSim simulator and compared with edge-ward and Cloud-only strategies. The proposed solution is more energy-efficient, saving around 11.36% compared to the Cloud-only strategy and approximately 9.30% compared to the edge-ward strategy. Additionally, the MCEETO algorithm achieved 67% and 96% reductions in network usage relative to the two baseline strategies.},
}
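A multi-classifier offloading decision of this kind can be prototyped with a voting ensemble. In the sketch below, the features (task size, deadline, latency, bandwidth), the placement labels, and the classifier mix are assumptions for illustration, not MCEETO's exact design.

```python
import numpy as np
from sklearn.ensemble import VotingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.tree import DecisionTreeClassifier

# Features: (task MI, deadline ms, link latency ms, bandwidth Mbps).
# Labels: 0/1 = fog devices, 2 = cloud. Synthetic data for illustration.
rng = np.random.default_rng(0)
X = rng.uniform([100, 10, 1, 1], [5000, 500, 80, 100], size=(300, 4))
# Heuristic ground truth for the sketch: big, slack tasks go to the cloud.
y = np.where((X[:, 0] > 3000) & (X[:, 1] > 200), 2, (X[:, 2] > 40).astype(int))

clf = VotingClassifier(
    estimators=[
        ("dt", DecisionTreeClassifier(max_depth=5)),
        ("knn", KNeighborsClassifier(5)),
        ("lr", make_pipeline(StandardScaler(), LogisticRegression(max_iter=1000))),
    ],
    voting="hard",
)
clf.fit(X, y)
task = [[1200, 50, 10, 20]]   # a small, latency-sensitive task
print("placement:", clf.predict(task))   # expect a fog device, not cloud
```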
@article {pmid37631678,
year = {2023},
author = {Ashraf, M and Shiraz, M and Abbasi, A and Alqahtani, O and Badshah, G and Lasisi, A},
title = {Microservice Application Scheduling in Multi-Tiered Fog-Computing-Enabled IoT.},
journal = {Sensors (Basel, Switzerland)},
volume = {23},
number = {16},
pages = {},
pmid = {37631678},
issn = {1424-8220},
support = {Grant number will be provided later.//Funder details will be provided later./ ; RGP2/394/44//Deanship of Scientific Research at King Khalid University/ ; },
abstract = {Fog computing extends mobile cloud computing facilities to the network edge, yielding low-latency application execution. To supplement cloud services, computationally intensive applications can be distributed over resource-constrained mobile devices by leveraging underutilized nearby resources, so as to meet the latency and bandwidth requirements of application execution. Building on this premise, it is necessary to exploit idle or underutilized resources at the edge of the network. Using a microservice architecture for IoT application development, with its finer-grained service breakdown, provides opportunities for improved scalability, maintainability, and extensibility. In this research, the proposed scheduler meets application latency requirements by identifying suitable upward migrations of microservices within a multi-tiered fog computing infrastructure, enabling optimal utilization of network edge resources. Experimental validation is performed using the iFogSim2 simulator, and the results are compared with existing baselines. Compared to the edge-ward approach, our proposed technique significantly improves application execution latency, network usage, and energy consumption by 66.92%, 69.83%, and 4.16%, respectively.},
}
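The upward-migration idea reads naturally as a first-fit search from the nearest tier outward, as in the minimal sketch below; the tier latencies, capacities, and microservice demands are illustrative assumptions.

```python
# Minimal sketch of "upward migration" placement across fog tiers: try the
# lowest (closest) tier first and move a microservice up only when the tier
# lacks capacity. All numbers are illustrative.

tiers = [  # ordered from the edge outward
    {"name": "edge-gateway", "latency_ms": 5, "free_mips": 600},
    {"name": "fog-node", "latency_ms": 20, "free_mips": 2000},
    {"name": "cloud", "latency_ms": 120, "free_mips": float("inf")},
]

def place(microservice):
    """Return the first tier (edge outward) that satisfies both the
    deadline and the remaining capacity, migrating upward otherwise."""
    for tier in tiers:
        meets_deadline = tier["latency_ms"] <= microservice["deadline_ms"]
        if meets_deadline and tier["free_mips"] >= microservice["mips"]:
            tier["free_mips"] -= microservice["mips"]
            return tier["name"]
    return None  # infeasible: no tier satisfies the deadline

app = [{"id": "sense", "mips": 400, "deadline_ms": 10},
       {"id": "filter", "mips": 400, "deadline_ms": 30},
       {"id": "analytics", "mips": 1500, "deadline_ms": 200}]
print({m["id"]: place(m) for m in app})
# sense stays on the gateway; filter and analytics migrate up to the fog node
```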
@article {pmid37631666,
year = {2023},
author = {Xiong, H and Yu, B and Yi, Q and He, C},
title = {End-Cloud Collaboration Navigation Planning Method for Unmanned Aerial Vehicles Used in Small Areas.},
journal = {Sensors (Basel, Switzerland)},
volume = {23},
number = {16},
pages = {},
pmid = {37631666},
issn = {1424-8220},
abstract = {Unmanned aerial vehicle (UAV) collaboration has become the main means of carrying out tasks such as indoor and outdoor area search and railway patrol, and navigation planning is one of its key, albeit difficult, technologies. The purpose of UAV navigation planning is to plan reasonable trajectories that let UAVs avoid obstacles and reach the task area. Essentially, it is a complex optimization problem that requires navigation planning algorithms to search for path-point solutions meeting the requirements, under the guidance of objective functions and constraints. At present, UAVs either navigate autonomously using airborne sensors or are controlled from ground control stations (GCSs). However, given the limited computing power of airborne processors and the command-and-control communication delay to the back end, a navigation planning method that balances accuracy and timeliness is needed. First, an end-cloud collaboration architecture for UAV navigation planning was designed. Then, the cloud-side navigation planning algorithm was designed based on improved particle swarm optimization (PSO). Next, the navigation control algorithm for the UAV terminals was designed based on a multi-objective hybrid swarm intelligence optimization algorithm. Finally, computer simulations and an actual indoor flight test with small rotor UAVs were designed and conducted. The results show that the proposed method is correct and feasible, and that it can improve both the effectiveness and the efficiency of UAV navigation planning.},
}
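A bare-bones PSO over 2D waypoints illustrates the cloud-side planner described here. The cost function (path length plus a circular-obstacle penalty), swarm size, and coefficients are illustrative assumptions, not the paper's improved PSO.

```python
import numpy as np

# PSO searches for waypoints that shorten the path while clearing an obstacle.
rng = np.random.default_rng(1)
start, goal = np.array([0.0, 0.0]), np.array([10.0, 10.0])
obstacle, radius = np.array([5.0, 5.0]), 2.0
n_particles, n_waypoints, dims = 30, 3, 2

def cost(flat):
    """Path length plus a penalty for entering the obstacle disc."""
    pts = np.vstack([start, flat.reshape(n_waypoints, dims), goal])
    length = np.sum(np.linalg.norm(np.diff(pts, axis=0), axis=1))
    clearance = np.linalg.norm(pts - obstacle, axis=1)
    penalty = np.sum(np.maximum(0.0, radius - clearance)) * 50.0
    return length + penalty

x = rng.uniform(0, 10, (n_particles, n_waypoints * dims))
v = np.zeros_like(x)
pbest, pbest_f = x.copy(), np.array([cost(p) for p in x])
gbest = pbest[np.argmin(pbest_f)]
for _ in range(200):
    r1, r2 = rng.random(x.shape), rng.random(x.shape)
    v = 0.7 * v + 1.5 * r1 * (pbest - x) + 1.5 * r2 * (gbest - x)
    x = x + v
    f = np.array([cost(p) for p in x])
    improved = f < pbest_f
    pbest[improved], pbest_f[improved] = x[improved], f[improved]
    gbest = pbest[np.argmin(pbest_f)]
print("best path cost:", cost(gbest))
```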
@article {pmid37630088,
year = {2023},
author = {Chen, J and Qiu, L and Zhu, Z and Sun, N and Huang, H and Ip, WH and Yung, KL},
title = {An Adaptive Infrared Small-Target-Detection Fusion Algorithm Based on Multiscale Local Gradient Contrast for Remote Sensing.},
journal = {Micromachines},
volume = {14},
number = {8},
pages = {},
pmid = {37630088},
issn = {2072-666X},
support = {2022296//Youth Innovation Promotion Association of the Chinese Academy of Sciences/ ; },
abstract = {Space vehicles such as missiles and aircraft are tracked at relatively long distances, so infrared (IR) detectors see them as small, point-like targets lacking contour, shape, and texture information. Bright cloud edges and heavy noise in the complex backgrounds of sky and ground environments further hinder small-target detection. Traditional template-based filtering and local-contrast methods do not distinguish between different complex background environments; they apply a uniform small-target template or rely on absolute contrast differences, and therefore tend to produce high false alarm rates. Detection and tracking methods suited to complex backgrounds and low signal-to-clutter ratios (SCRs) are thus needed. We use background complexity differences as a prior for detection against thick clouds and bright ground buildings, then obtain salient regions with a joint algorithm combining spatial-domain filtering and improved local contrast. We also give a new definition of gradient uniformity, via an improvement to the local gradient method, which further enhances target contrast and helps distinguish small targets from highlighted background edges and noise. Furthermore, the method lends itself to parallel computing. Compared with traditional spatial filtering or local contrast algorithms, the flexible fusion strategy achieves rapid small-target detection with a higher signal-to-clutter ratio gain (SCRG) and background suppression factor (BSF).},
}
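A simplified multiscale local-contrast map captures the gist of contrast-based small-target detection discussed above: a pixel stands out when it dominates the mean of its surrounding window at some scale. The scales and the exact score are assumptions, not the paper's fused measure.

```python
import numpy as np

# Simplified multiscale local-contrast map: for each scale, score a pixel by
# its squared value over the mean of the surrounding window (excluding the
# pixel itself), and keep the maximum across scales.

def local_contrast_map(img, scales=(3, 5, 9)):
    h, w = img.shape
    out = np.zeros((h, w))
    for k in scales:
        r = k // 2
        for y in range(r, h - r):
            for x in range(r, w - r):
                window = img[y - r:y + r + 1, x - r:x + r + 1].astype(float)
                center = window[r, r]
                window[r, r] = np.nan            # exclude the pixel itself
                background = np.nanmean(window)
                out[y, x] = max(out[y, x], center * center / (background + 1e-6))
    return out

rng = np.random.default_rng(2)
img = np.full((32, 32), 20.0) + rng.normal(0, 2, (32, 32))
img[16, 16] = 120.0                              # a dim point target
cmap = local_contrast_map(img)
print("detected target at:", np.unravel_index(np.argmax(cmap), cmap.shape))
```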
@article {pmid37627775,
year = {2023},
author = {Choi, W and Choi, T and Heo, S},
title = {A Comparative Study of Automated Machine Learning Platforms for Exercise Anthropometry-Based Typology Analysis: Performance Evaluation of AWS SageMaker, GCP VertexAI, and MS Azure.},
journal = {Bioengineering (Basel, Switzerland)},
volume = {10},
number = {8},
pages = {},
pmid = {37627775},
issn = {2306-5354},
support = {INNO-2022-01//National Research Foundation of Korea/ ; },
abstract = {The increasing prevalence of machine learning (ML) and automated machine learning (AutoML) applications across diverse industries necessitates rigorous comparative evaluations of their predictive accuracies under various computational environments. The purpose of this research was to compare and analyze the predictive accuracy of several machine learning algorithms, including RNNs, LSTMs, GRUs, XGBoost, and LightGBM, when implemented on different platforms such as Google Colab Pro, AWS SageMaker, GCP Vertex AI, and MS Azure. The predictive performance of each model within its respective environment was assessed using performance metrics such as accuracy, precision, recall, F1-score, and log loss. All algorithms were trained on the same dataset and implemented on their specified platforms to ensure consistent comparisons. The dataset used in this study comprised fitness images, encompassing 41 exercise types and totaling 6 million