<?xml version="1.0" encoding="UTF-8"?>
<rdf:RDF xmlns="http://purl.org/rss/1.0/"
 xmlns:dc="http://purl.org/dc/elements/1.1/"
 xmlns:dcterms="http://purl.org/dc/terms/"
 xmlns:cc="http://web.resource.org/cc/"
 xmlns:prism="http://prismstandard.org/namespaces/basic/2.0/"
 xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
 xmlns:admin="http://webns.net/mvcb/"
 xmlns:content="http://purl.org/rss/1.0/modules/content/">
    <channel rdf:about="https://www.mdpi.com/rss/journal/computers">
		<title>Computers</title>
		<description>Latest open access articles published in Computers at https://www.mdpi.com/journal/computers</description>
		<link>https://www.mdpi.com/journal/computers</link>
		<admin:generatorAgent rdf:resource="https://www.mdpi.com/journal/computers"/>
		<admin:errorReportsTo rdf:resource="mailto:support@mdpi.com"/>
		<dc:publisher>MDPI</dc:publisher>
		<dc:language>en</dc:language>
		<dc:rights>Creative Commons Attribution (CC-BY)</dc:rights>
						<prism:copyright>MDPI</prism:copyright>
		<prism:rightsAgent>support@mdpi.com</prism:rightsAgent>
		<image rdf:resource="https://pub.mdpi-res.com/img/design/mdpi-pub-logo.png?13cf3b5bd783e021&amp;1776065297"/>
				<items>
			<rdf:Seq>
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/241" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/240" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/239" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/238" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/237" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/236" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/235" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/234" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/233" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/232" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/231" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/230" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/229" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/228" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/227" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/226" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/225" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/224" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/223" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/222" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/221" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/220" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/219" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/217" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/218" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/216" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/215" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/214" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/213" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/212" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/211" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/210" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/209" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/208" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/207" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/206" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/205" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/204" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/203" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/202" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/201" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/3/200" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/3/199" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/3/198" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/3/197" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/3/196" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/3/195" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/3/194" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/3/193" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/3/192" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/3/191" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/3/190" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/3/189" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/3/188" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/3/187" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/3/186" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/3/185" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/3/184" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/3/183" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/3/182" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/3/181" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/3/180" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/3/179" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/3/178" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/3/177" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/3/176" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/3/175" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/3/174" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/3/172" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/3/173" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/3/171" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/3/170" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/3/169" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/3/168" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/3/167" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/3/166" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/3/165" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/3/164" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/3/163" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/3/162" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/3/161" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/3/160" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/3/159" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/3/158" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/3/157" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/3/156" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/3/155" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/3/154" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/3/153" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/3/152" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/3/151" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/3/150" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/3/149" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/3/148" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/3/147" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/3/146" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/3/144" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/3/145" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/3/143" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/3/142" />
                    	</rdf:Seq>
		</items>
				<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/" />
	</channel>

        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/241">

	<title>Computers, Vol. 15, Pages 241: Edge Computing Approach to AI-Based Gesture for Human&amp;ndash;Robot Interaction and Control</title>
	<link>https://www.mdpi.com/2073-431X/15/4/241</link>
	<description>This paper presents an edge-deployable vision-based framework for human&amp;ndash;robot interaction using a xArm collaborative robot and a single RGB camera mounted on the robot wrist, and lightweight AI-based perception modules. The system enables intuitive, contact-free control by combining hand understanding and object detection within a unified perception&amp;ndash;decision&amp;ndash;control pipeline. Hand landmarks are extracted using MediaPipe Hands, from which continuous hand trajectories, static gestures, and dynamic gestures are derived. Task objects are detected using a YOLO-based model, and both hand and object observations are mapped into the robot workspace using ArUco-based planar calibration. To ensure stable robot motion, the hand control signal is smoothed using low-pass and Kalman filtering, while dynamic gestures such as waving are recognized using a lightweight LSTM classifier. The complete pipeline runs locally on edge hardware, specifically NVIDIA Jetson Orin Nano and Raspberry Pi 5 with a Hailo AI accelerator. Experimental evaluation includes trajectory stability, gesture recognition reliability, and runtime performance on both platforms. Results show that filtering significantly reduces hand-tracking jitter, gesture recognition provides stable command states for control, and both edge devices support real-time operation, with Jetson achieving consistently lower runtime than Raspberry Pi. The proposed system demonstrates the feasibility of low-cost edge AI solutions for responsive and practical human&amp;ndash;robot interaction in collaborative industrial environments.</description>
	<pubDate>2026-04-14</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 241: Edge Computing Approach to AI-Based Gesture for Human&ndash;Robot Interaction and Control</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/241">doi: 10.3390/computers15040241</a></p>
	<p>Authors:
		Nikola Ivačko
		Ivan Ćirić
		Miloš Simonović
		</p>
	<p>This paper presents an edge-deployable vision-based framework for human&ndash;robot interaction using a xArm collaborative robot and a single RGB camera mounted on the robot wrist, and lightweight AI-based perception modules. The system enables intuitive, contact-free control by combining hand understanding and object detection within a unified perception&ndash;decision&ndash;control pipeline. Hand landmarks are extracted using MediaPipe Hands, from which continuous hand trajectories, static gestures, and dynamic gestures are derived. Task objects are detected using a YOLO-based model, and both hand and object observations are mapped into the robot workspace using ArUco-based planar calibration. To ensure stable robot motion, the hand control signal is smoothed using low-pass and Kalman filtering, while dynamic gestures such as waving are recognized using a lightweight LSTM classifier. The complete pipeline runs locally on edge hardware, specifically NVIDIA Jetson Orin Nano and Raspberry Pi 5 with a Hailo AI accelerator. Experimental evaluation includes trajectory stability, gesture recognition reliability, and runtime performance on both platforms. Results show that filtering significantly reduces hand-tracking jitter, gesture recognition provides stable command states for control, and both edge devices support real-time operation, with Jetson achieving consistently lower runtime than Raspberry Pi. The proposed system demonstrates the feasibility of low-cost edge AI solutions for responsive and practical human&ndash;robot interaction in collaborative industrial environments.</p>
	]]></content:encoded>

	<dc:title>Edge Computing Approach to AI-Based Gesture for Human&amp;ndash;Robot Interaction and Control</dc:title>
			<dc:creator>Nikola Ivačko</dc:creator>
			<dc:creator>Ivan Ćirić</dc:creator>
			<dc:creator>Miloš Simonović</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040241</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-14</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-14</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>241</prism:startingPage>
		<prism:doi>10.3390/computers15040241</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/241</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/240">

	<title>Computers, Vol. 15, Pages 240: Quantum-Safe Blockchain: Mapping Research Fronts in Post-Quantum Cryptography, Quantum Threat Models, and QKD Integration</title>
	<link>https://www.mdpi.com/2073-431X/15/4/240</link>
	<description>Quantum computing challenges the long-term security assumptions of blockchain systems that rely on classical public-key cryptography, motivating the adoption of post-quantum cryptography and quantum key distribution (QKD). This review maps research fronts at the intersection of blockchain and quantum-safe security, linking threat assumptions to post-quantum mechanisms, blockchain layers, and QKD positioning. Records were retrieved from Scopus and Web of Science using a two-block query and filtered through a PRISMA-guided workflow for bibliometric mapping. The final corpus comprises 648 journal articles and shows accelerated publication growth after 2023, with scientific production concentrated in a small set of leading countries. Keyword structures indicate that IoT-centric deployments dominate the semantic backbone, where authentication and intelligent methods co-occur with blockchain security primitives, while post-quantum and privacy-preserving constructs form a cohesive technical stream. QKD appears as a distinct but more specialized theme, typically discussed at the system level and shaped by infrastructure and scalability constraints. Overall, the literature is moving from conceptual risk articulation toward engineering integration; however, progress is limited by inconsistent reporting of threat models, post-quantum parameter sets, and ledger-level cost trade-offs, highlighting the need for auditable and reproducible evaluation.</description>
	<pubDate>2026-04-14</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 240: Quantum-Safe Blockchain: Mapping Research Fronts in Post-Quantum Cryptography, Quantum Threat Models, and QKD Integration</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/240">doi: 10.3390/computers15040240</a></p>
	<p>Authors:
		Félix Díaz
		Nhell Cerna
		Rafael Liza
		Bryan Motta
		</p>
	<p>Quantum computing challenges the long-term security assumptions of blockchain systems that rely on classical public-key cryptography, motivating the adoption of post-quantum cryptography and quantum key distribution (QKD). This review maps research fronts at the intersection of blockchain and quantum-safe security, linking threat assumptions to post-quantum mechanisms, blockchain layers, and QKD positioning. Records were retrieved from Scopus and Web of Science using a two-block query and filtered through a PRISMA-guided workflow for bibliometric mapping. The final corpus comprises 648 journal articles and shows accelerated publication growth after 2023, with scientific production concentrated in a small set of leading countries. Keyword structures indicate that IoT-centric deployments dominate the semantic backbone, where authentication and intelligent methods co-occur with blockchain security primitives, while post-quantum and privacy-preserving constructs form a cohesive technical stream. QKD appears as a distinct but more specialized theme, typically discussed at the system level and shaped by infrastructure and scalability constraints. Overall, the literature is moving from conceptual risk articulation toward engineering integration; however, progress is limited by inconsistent reporting of threat models, post-quantum parameter sets, and ledger-level cost trade-offs, highlighting the need for auditable and reproducible evaluation.</p>
	]]></content:encoded>

	<dc:title>Quantum-Safe Blockchain: Mapping Research Fronts in Post-Quantum Cryptography, Quantum Threat Models, and QKD Integration</dc:title>
			<dc:creator>Félix Díaz</dc:creator>
			<dc:creator>Nhell Cerna</dc:creator>
			<dc:creator>Rafael Liza</dc:creator>
			<dc:creator>Bryan Motta</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040240</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-14</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-14</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>240</prism:startingPage>
		<prism:doi>10.3390/computers15040240</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/240</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/239">

	<title>Computers, Vol. 15, Pages 239: A Blockchain-Based Model for Managing Infectious Disease Data</title>
	<link>https://www.mdpi.com/2073-431X/15/4/239</link>
	<description>Infectious disease outbreaks continue to pose a significant threat to global health, underscoring the importance of timely detection and reliable reporting for effective interventions. Traditional reporting systems often rely on hierarchical data flows, which introduce delays, inconsistencies, and vulnerabilities, as highlighted during the COVID-19 pandemic. Blockchain, a disruptive technology, offers a promising solution. This study proposes a blockchain-based infectious disease reporting system built on Hyperledger Fabric that supports multi-level reporting and governance across national health systems. The architecture preserves hierarchical structures while enabling real-time reporting across authorized health stakeholders. It separates public test results from sensitive patient information, with private data secured via Private Data Collections and anchored using cryptographic hashes. Smart contracts enforce role-based access and validation, ensuring data integrity and controlled oversight. The system prototype was deployed within Docker containers and evaluated using illustrative COVID-19 case data. Network performance was benchmarked using Hyperledger Caliper, measuring throughput, latency, and resource utilization. The results demonstrate proper system functioning and stable transaction processing under the tested experimental conditions, supporting the feasibility of the proposed architecture for privacy-preserving multi-level infectious disease reporting systems.</description>
	<pubDate>2026-04-13</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 239: A Blockchain-Based Model for Managing Infectious Disease Data</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/239">doi: 10.3390/computers15040239</a></p>
	<p>Authors:
		Touria Jdid
		Mohammed Benbrahim
		Mohammed Nabil Kabbaj
		Adil Najdi
		</p>
	<p>Infectious disease outbreaks continue to pose a significant threat to global health, underscoring the importance of timely detection and reliable reporting for effective interventions. Traditional reporting systems often rely on hierarchical data flows, which introduce delays, inconsistencies, and vulnerabilities, as highlighted during the COVID-19 pandemic. Blockchain, a disruptive technology, offers a promising solution. This study proposes a blockchain-based infectious disease reporting system built on Hyperledger Fabric that supports multi-level reporting and governance across national health systems. The architecture preserves hierarchical structures while enabling real-time reporting across authorized health stakeholders. It separates public test results from sensitive patient information, with private data secured via Private Data Collections and anchored using cryptographic hashes. Smart contracts enforce role-based access and validation, ensuring data integrity and controlled oversight. The system prototype was deployed within Docker containers and evaluated using illustrative COVID-19 case data. Network performance was benchmarked using Hyperledger Caliper, measuring throughput, latency, and resource utilization. The results demonstrate proper system functioning and stable transaction processing under the tested experimental conditions, supporting the feasibility of the proposed architecture for privacy-preserving multi-level infectious disease reporting systems.</p>
	]]></content:encoded>

	<dc:title>A Blockchain-Based Model for Managing Infectious Disease Data</dc:title>
			<dc:creator>Touria Jdid</dc:creator>
			<dc:creator>Mohammed Benbrahim</dc:creator>
			<dc:creator>Mohammed Nabil Kabbaj</dc:creator>
			<dc:creator>Adil Najdi</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040239</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-13</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-13</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>239</prism:startingPage>
		<prism:doi>10.3390/computers15040239</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/239</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/238">

	<title>Computers, Vol. 15, Pages 238: Design Behaviour and Interface Consistency in Generative No-Code Tools: A Systematic Literature Review</title>
	<link>https://www.mdpi.com/2073-431X/15/4/238</link>
	<description>Generative no-code development tools enable users to create applications directly from natural-language prompts, shifting interface design from manual construction to AI-mediated generation. However, identical prompts frequently produce substantially different user interface (UI) outcomes across tools and even across repeated executions within the same tool. This paper presents a systematic literature review examining how generative no-code systems make design and aesthetic decisions with respect to layout structure, visual consistency, usability, accessibility, and reproducibility. Twenty peer-reviewed studies (2021&amp;ndash;2025) were analyzed following a structured review protocol. Existing research predominantly evaluates usability and accessibility in isolation while providing limited insight into aesthetic coherence, design variability, and prompt-to-output stability. Across studies, generative tools exhibit implicit design priors and stochastic behavior that lead to inconsistent visual outcomes and partial misalignment with human-centered design principles. These findings indicate that generative no-code tools do not act as deterministic translators of user intent but instead introduce their own stylistic tendencies. The paper identifies critical evaluation gaps and outlines requirements for future systems, including reproducible generation, transparent design reasoning, and user-directed control, to support reliable and predictable interface development.</description>
	<pubDate>2026-04-12</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 238: Design Behaviour and Interface Consistency in Generative No-Code Tools: A Systematic Literature Review</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/238">doi: 10.3390/computers15040238</a></p>
	<p>Authors:
		Gizem Irmak
		Qusay H. Mahmoud
		</p>
	<p>Generative no-code development tools enable users to create applications directly from natural-language prompts, shifting interface design from manual construction to AI-mediated generation. However, identical prompts frequently produce substantially different user interface (UI) outcomes across tools and even across repeated executions within the same tool. This paper presents a systematic literature review examining how generative no-code systems make design and aesthetic decisions with respect to layout structure, visual consistency, usability, accessibility, and reproducibility. Twenty peer-reviewed studies (2021&ndash;2025) were analyzed following a structured review protocol. Existing research predominantly evaluates usability and accessibility in isolation while providing limited insight into aesthetic coherence, design variability, and prompt-to-output stability. Across studies, generative tools exhibit implicit design priors and stochastic behavior that lead to inconsistent visual outcomes and partial misalignment with human-centered design principles. These findings indicate that generative no-code tools do not act as deterministic translators of user intent but instead introduce their own stylistic tendencies. The paper identifies critical evaluation gaps and outlines requirements for future systems, including reproducible generation, transparent design reasoning, and user-directed control, to support reliable and predictable interface development.</p>
	]]></content:encoded>

	<dc:title>Design Behaviour and Interface Consistency in Generative No-Code Tools: A Systematic Literature Review</dc:title>
			<dc:creator>Gizem Irmak</dc:creator>
			<dc:creator>Qusay H. Mahmoud</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040238</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-12</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-12</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Systematic Review</prism:section>
	<prism:startingPage>238</prism:startingPage>
		<prism:doi>10.3390/computers15040238</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/238</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/237">

	<title>Computers, Vol. 15, Pages 237: Instructional Mediation for Equitable Computational Thinking in STEAM Learning Across Diverse School Contexts</title>
	<link>https://www.mdpi.com/2073-431X/15/4/237</link>
	<description>Guaranteeing equitable access to computational thinking (CT) remains a persistent challenge in computing education, particularly across socioeconomically diverse school contexts. Although prior research has demonstrated the effectiveness of block-based and physical computing environments, limited empirical evidence has examined whether structured instructional mediation can compensate for contextual disparities. This quasi-experimental pre&amp;ndash;post study addresses this gap by analyzing CT development in three socioeconomically diverse primary schools in Chile (N=88, third grade), including private urban, public urban, and rural public institutions. Students engaged in scaffolded Scratch programming and Arduino simulation activities designed to explicitly support abstraction, sequencing, and debugging processes. These activities were framed within a broader STEAM learning approach, integrating computational thinking with problem-solving, experimentation, and interdisciplinary reasoning. Statistical analysis revealed significant differences in instructional time across contexts (F(2,85)=14.62, p&amp;lt;0.001, &amp;eta;2=0.26), indicating structural disparities in pacing. However, no statistically significant differences were observed in CT gains (F(2,85)=0.31, p=0.74), suggesting that structured pedagogical scaffolding buffered contextual inequalities. These findings provide empirical evidence from a Latin American non-WEIRD context and advance the conceptualization of instructional mediation as a compensatory mechanism for equity in early computing education. This study contributes to digital equity research by demonstrating that instructional design quality may play a more decisive role than infrastructural availability in enabling computational thinking development for all learners.</description>
	<pubDate>2026-04-12</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 237: Instructional Mediation for Equitable Computational Thinking in STEAM Learning Across Diverse School Contexts</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/237">doi: 10.3390/computers15040237</a></p>
	<p>Authors:
		Jesennia Cárdenas-Cobo
		Moyra Castro-Paredes
		Rodrigo Saens-Navarrete
		Claudia de la Fuente-Burdiles
		Cristian Vidal-Silva
		</p>
	<p>Guaranteeing equitable access to computational thinking (CT) remains a persistent challenge in computing education, particularly across socioeconomically diverse school contexts. Although prior research has demonstrated the effectiveness of block-based and physical computing environments, limited empirical evidence has examined whether structured instructional mediation can compensate for contextual disparities. This quasi-experimental pre&ndash;post study addresses this gap by analyzing CT development in three socioeconomically diverse primary schools in Chile (N=88, third grade), including private urban, public urban, and rural public institutions. Students engaged in scaffolded Scratch programming and Arduino simulation activities designed to explicitly support abstraction, sequencing, and debugging processes. These activities were framed within a broader STEAM learning approach, integrating computational thinking with problem-solving, experimentation, and interdisciplinary reasoning. Statistical analysis revealed significant differences in instructional time across contexts (F(2,85)=14.62, p&lt;0.001, &eta;2=0.26), indicating structural disparities in pacing. However, no statistically significant differences were observed in CT gains (F(2,85)=0.31, p=0.74), suggesting that structured pedagogical scaffolding buffered contextual inequalities. These findings provide empirical evidence from a Latin American non-WEIRD context and advance the conceptualization of instructional mediation as a compensatory mechanism for equity in early computing education. This study contributes to digital equity research by demonstrating that instructional design quality may play a more decisive role than infrastructural availability in enabling computational thinking development for all learners.</p>
	]]></content:encoded>

	<dc:title>Instructional Mediation for Equitable Computational Thinking in STEAM Learning Across Diverse School Contexts</dc:title>
			<dc:creator>Jesennia Cárdenas-Cobo</dc:creator>
			<dc:creator>Moyra Castro-Paredes</dc:creator>
			<dc:creator>Rodrigo Saens-Navarrete</dc:creator>
			<dc:creator>Claudia de la Fuente-Burdiles</dc:creator>
			<dc:creator>Cristian Vidal-Silva</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040237</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-12</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-12</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>237</prism:startingPage>
		<prism:doi>10.3390/computers15040237</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/237</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/236">

	<title>Computers, Vol. 15, Pages 236: An Integrated Information Security Governance Model for Hyperconnected IoT Ecosystems; Unified Resilient Security Governance Model (URSGM)</title>
	<link>https://www.mdpi.com/2073-431X/15/4/236</link>
	<description>Hyperconnected IoT ecosystems have become crucial for organizational operations; yet, existing governance structures remain fragmented, are technology-centric, and not well-equipped to manage the risks, compliance pressures, and resilience needs of IoT. This paper presents an integrated, theory-based information security governance model that is tailored for IoT-driven organizations. A conceptual synthesis is performed through integrating five theoretical anchors: governance theory, socio-technical systems theory, risk governance theory, institutional/compliance theory, and resilience/adaptive capacity theory. These theoretical lenses are used to derive essential governance constructs and to develop a modular architecture tailored to IoT security needs. The model’s validity is grounded in theoretical integration rather than empirical testing, consistent with the nature of conceptual research. The integrated model provides six interdependent governance dimensions: strategic governance, operational governance, technical oversight, compliance alignment, risk governance, and resilience/adaptation, anchored by an ecosystem coordination layer. It provides structured decision rights, continuous risk monitoring, regulatory legitimacy, and native adaptive capabilities toward dynamic cyber-physical threats. This research addresses a known gap in the literature on IoT governance by providing an integrated, theoretically validated governance model that systematically connects the rationale and operational mechanisms of governance for resilient, future-proof IoT adoption. The model is further operationalized through a five-level maturity structure, enabling organizations to assess and progressively enhance governance capabilities.</description>
	<pubDate>2026-04-10</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 236: An Integrated Information Security Governance Model for Hyperconnected IoT Ecosystems; Unified Resilient Security Governance Model (URSGM)</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/236">doi: 10.3390/computers15040236</a></p>
	<p>Authors:
		Hamed Taherdoost
		Chin-Shiuh Shieh
		Shashi Kant Gupta
		</p>
	<p>Hyperconnected IoT ecosystems have become crucial for organizational operations; yet, existing governance structures remain fragmented, are technology-centric, and not well-equipped to manage the risks, compliance pressures, and resilience needs of IoT. This paper presents an integrated, theory-based information security governance model that is tailored for IoT-driven organizations. A conceptual synthesis is performed through integrating five theoretical anchors: governance theory, socio-technical systems theory, risk governance theory, institutional/compliance theory, and resilience/adaptive capacity theory. These theoretical lenses are used to derive essential governance constructs and to develop a modular architecture tailored to IoT security needs. The model’s validity is grounded in theoretical integration rather than empirical testing, consistent with the nature of conceptual research. The integrated model provides six interdependent governance dimensions: strategic governance, operational governance, technical oversight, compliance alignment, risk governance, and resilience/adaptation, anchored by an ecosystem coordination layer. It provides structured decision rights, continuous risk monitoring, regulatory legitimacy, and native adaptive capabilities toward dynamic cyber-physical threats. This research addresses a known gap in the literature on IoT governance by providing an integrated, theoretically validated governance model that systematically connects the rationale and operational mechanisms of governance for resilient, future-proof IoT adoption. The model is further operationalized through a five-level maturity structure, enabling organizations to assess and progressively enhance governance capabilities.</p>
	]]></content:encoded>

	<dc:title>An Integrated Information Security Governance Model for Hyperconnected IoT Ecosystems; Unified Resilient Security Governance Model (URSGM)</dc:title>
			<dc:creator>Hamed Taherdoost</dc:creator>
			<dc:creator>Chin-Shiuh Shieh</dc:creator>
			<dc:creator>Shashi Kant Gupta</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040236</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-10</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-10</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>236</prism:startingPage>
		<prism:doi>10.3390/computers15040236</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/236</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/235">

	<title>Computers, Vol. 15, Pages 235: Adaptive Architectures for Gamified Learning in Software Engineering: A Systematic Review</title>
	<link>https://www.mdpi.com/2073-431X/15/4/235</link>
	<description>Effective software engineering education today requires tools that adapt to individual learner proficiency and progress, while ensuring positive student engagement. Gamified platforms represent an effective approach to learning and maintaining motivation, but their efficacy depends on a robust underlying architecture. This systematic literature review analyzes state-of-the-art artificial intelligence (AI)-based adaptive architectures designed to support gamified learning tools, highlighting their architectural models (such as intelligent tutoring systems, multi-agent systems, and immersive virtual reality/augmented reality environments), adaptation mechanisms (including Generative AI and chatbots), and personalization strategies. A significant focus is placed on Process Mining and Learning Analytics as methodological approaches to organize learning paths and guide dynamic adaptation based on student behavior. The results of the selected studies demonstrate advantages such as increased engagement, longer-term participation, and personalized learning pace. However, challenges remain, such as common assessment criteria, integrating different technologies, and system scalability. The findings offer concrete insights for designing the next generation of effective gamified learning tools, based on data and software engineering processes.</description>
	<pubDate>2026-04-09</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 235: Adaptive Architectures for Gamified Learning in Software Engineering: A Systematic Review</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/235">doi: 10.3390/computers15040235</a></p>
	<p>Authors:
		Aurora Annamaria Quartulli
		Giovanni Mignogna
		Vera Zizzo
		Marina Mongiello
		</p>
	<p>Effective software engineering education today requires tools that adapt to individual learner proficiency and progress, while ensuring positive student engagement. Gamified platforms represent an effective approach to learning and maintaining motivation, but their efficacy depends on a robust underlying architecture. This systematic literature review analyzes state-of-the-art artificial intelligence (AI)-based adaptive architectures designed to support gamified learning tools, highlighting their architectural models (such as intelligent tutoring systems, multi-agent systems, and immersive virtual reality/augmented reality environments), adaptation mechanisms (including Generative AI and chatbots), and personalization strategies. A significant focus is placed on Process Mining and Learning Analytics as methodological approaches to organize learning paths and guide dynamic adaptation based on student behavior. The results of the selected studies demonstrate advantages such as increased engagement, longer-term participation, and personalized learning pace. However, challenges remain, such as common assessment criteria, integrating different technologies, and system scalability. The findings offer concrete insights for designing the next generation of effective gamified learning tools, based on data and software engineering processes.</p>
	]]></content:encoded>

	<dc:title>Adaptive Architectures for Gamified Learning in Software Engineering: A Systematic Review</dc:title>
			<dc:creator>Aurora Annamaria Quartulli</dc:creator>
			<dc:creator>Giovanni Mignogna</dc:creator>
			<dc:creator>Vera Zizzo</dc:creator>
			<dc:creator>Marina Mongiello</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040235</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-09</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-09</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>235</prism:startingPage>
		<prism:doi>10.3390/computers15040235</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/235</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/234">

	<title>Computers, Vol. 15, Pages 234: Distilling Vision Foundation Models into LiDAR Networks via Manifold-Aware Topological Alignment</title>
	<link>https://www.mdpi.com/2073-431X/15/4/234</link>
	<description>LiDAR point cloud semantic segmentation is essential for autonomous driving, yet LiDAR-only methods remain constrained by sparsity and limited texture cues. We propose Cross-Modal Collaborative Manifold Distillation (CMCMD), which transfers open-world semantic priors from the DINOv3 Vision Foundation Model to a LiDAR student network. The framework combines an Adaptive Relation Convolution (ARConv) backbone with geometry-conditioned aggregation, a Unified Bidirectional Mapping Module (UBMM) for explicit 2D–3D interaction, and Manifold-Aware Topological Distillation (MATD), which aligns inter-sample affinity structures in a shared latent manifold rather than enforcing pointwise feature matching. By preserving relational topology instead of absolute feature coordinates, CMCMD mitigates negative transfer across heterogeneous modalities. Experiments on SemanticKITTI and nuScenes yield mIoU values of 72.9% and 81.2%, respectively, surpassing the compared distillation baselines and approaching the performance of multimodal fusion methods at lower inference cost. Additional evaluation on real-world campus scenes further supports the cross-domain robustness of the proposed framework.</description>
	<pubDate>2026-04-09</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 234: Distilling Vision Foundation Models into LiDAR Networks via Manifold-Aware Topological Alignment</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/234">doi: 10.3390/computers15040234</a></p>
	<p>Authors:
		Yuchuan Yang
		Xiaosu Xu
		</p>
	<p>LiDAR point cloud semantic segmentation is essential for autonomous driving, yet LiDAR-only methods remain constrained by sparsity and limited texture cues. We propose Cross-Modal Collaborative Manifold Distillation (CMCMD), which transfers open-world semantic priors from the DINOv3 Vision Foundation Model to a LiDAR student network. The framework combines an Adaptive Relation Convolution (ARConv) backbone with geometry-conditioned aggregation, a Unified Bidirectional Mapping Module (UBMM) for explicit 2D–3D interaction, and Manifold-Aware Topological Distillation (MATD), which aligns inter-sample affinity structures in a shared latent manifold rather than enforcing pointwise feature matching. By preserving relational topology instead of absolute feature coordinates, CMCMD mitigates negative transfer across heterogeneous modalities. Experiments on SemanticKITTI and nuScenes yield mIoU values of 72.9% and 81.2%, respectively, surpassing the compared distillation baselines and approaching the performance of multimodal fusion methods at lower inference cost. Additional evaluation on real-world campus scenes further supports the cross-domain robustness of the proposed framework.</p>
	]]></content:encoded>

	<dc:title>Distilling Vision Foundation Models into LiDAR Networks via Manifold-Aware Topological Alignment</dc:title>
			<dc:creator>Yuchuan Yang</dc:creator>
			<dc:creator>Xiaosu Xu</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040234</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-09</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-09</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>234</prism:startingPage>
		<prism:doi>10.3390/computers15040234</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/234</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/233">

	<title>Computers, Vol. 15, Pages 233: Adversarial Robustness in Quantum Machine Learning: A Scoping Review</title>
	<link>https://www.mdpi.com/2073-431X/15/4/233</link>
	<description>Quantum machine learning (QML) is emerging as a promising paradigm at the intersection of quantum computing and artificial intelligence, yet its security under adversarial conditions remains insufficiently understood. This scoping review aims to systematically map empirical research on adversarial robustness in QML and to identify dominant threat models, defense strategies, evaluation approaches, practical constraints, and future research directions. Following PRISMA-ScR guidelines, four major databases were searched, resulting in 53 eligible empirical studies published between 2020 and 2026. The findings show that most research concentrates on input-level evasion attacks, particularly adversarial examples, and primarily evaluates robustness in classification-oriented models such as variational quantum circuits and quantum neural networks. Defense strategies are largely adapted from classical adversarial training and noise-based mitigation, with limited deployment on real quantum hardware. Robustness assessment is predominantly empirical, relying on accuracy degradation and attack success rate, while formal certification methods remain less common. The literature also highlights substantial constraints related to hardware limitations, NISQ noise, computational cost, and dataset scale. Overall, the evidence indicates that adversarial robustness research in QML is expanding but remains methodologically concentrated, underscoring the need for standardized benchmarking, scalable defenses, and hardware-validated robustness evaluation frameworks.</description>
	<pubDate>2026-04-09</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 233: Adversarial Robustness in Quantum Machine Learning: A Scoping Review</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/233">doi: 10.3390/computers15040233</a></p>
	<p>Authors:
		Yanche Ari Kustiawan
		Khairil Imran Ghauth
		</p>
	<p>Quantum machine learning (QML) is emerging as a promising paradigm at the intersection of quantum computing and artificial intelligence, yet its security under adversarial conditions remains insufficiently understood. This scoping review aims to systematically map empirical research on adversarial robustness in QML and to identify dominant threat models, defense strategies, evaluation approaches, practical constraints, and future research directions. Following PRISMA-ScR guidelines, four major databases were searched, resulting in 53 eligible empirical studies published between 2020 and 2026. The findings show that most research concentrates on input-level evasion attacks, particularly adversarial examples, and primarily evaluates robustness in classification-oriented models such as variational quantum circuits and quantum neural networks. Defense strategies are largely adapted from classical adversarial training and noise-based mitigation, with limited deployment on real quantum hardware. Robustness assessment is predominantly empirical, relying on accuracy degradation and attack success rate, while formal certification methods remain less common. The literature also highlights substantial constraints related to hardware limitations, NISQ noise, computational cost, and dataset scale. Overall, the evidence indicates that adversarial robustness research in QML is expanding but remains methodologically concentrated, underscoring the need for standardized benchmarking, scalable defenses, and hardware-validated robustness evaluation frameworks.</p>
	]]></content:encoded>

	<dc:title>Adversarial Robustness in Quantum Machine Learning: A Scoping Review</dc:title>
			<dc:creator>Yanche Ari Kustiawan</dc:creator>
			<dc:creator>Khairil Imran Ghauth</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040233</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-09</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-09</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>233</prism:startingPage>
		<prism:doi>10.3390/computers15040233</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/233</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/232">

	<title>Computers, Vol. 15, Pages 232: Autoencoders in Natural Language Processing: A Comprehensive Review</title>
	<link>https://www.mdpi.com/2073-431X/15/4/232</link>
	<description>Autoencoder-based models have become a fundamental component of unsupervised and self-supervised learning in natural language processing (NLP), enabling models to learn compact latent representations through input reconstruction. From early denoising autoencoders to probabilistic variational autoencoders (VAEs) and transformer-based masked autoencoding, reconstruction-driven objectives have played a significant role in shaping modern approaches to text representation and generation. This review provides a comprehensive analysis of the evolution of autoencoder architectures and training objectives in NLP, and synthesizes applications of VAEs across language modeling, controllable text generation, machine translation, sentiment modeling, and multilingual representation learning. Although previous surveys have examined deep generative models or representation learning in NLP, there remains a lack of a unified review that systematically connects classical autoencoder variants, variational formulations, and modern transformer-based masked autoencoders within a single conceptual framework. To address this gap, this work consolidates architectural developments, training objectives, and major application domains under a reconstruction-based learning perspective, offering a structured comparison of modeling choices, datasets, and evaluation practices. Our analysis highlights the strengths and limitations of existing approaches, discusses the ongoing influence of autoencoder-style learning in NLP, and outlines future research directions focused on improving training stability, designing more structured latent spaces, and enhancing multilingual representation learning.</description>
	<pubDate>2026-04-08</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 232: Autoencoders in Natural Language Processing: A Comprehensive Review</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/232">doi: 10.3390/computers15040232</a></p>
	<p>Authors:
		Moussa Redah
		Wasfi G. Al-Khatib
		</p>
	<p>Autoencoder-based models have become a fundamental component of unsupervised and self-supervised learning in natural language processing (NLP), enabling models to learn compact latent representations through input reconstruction. From early denoising autoencoders to probabilistic variational autoencoders (VAEs) and transformer-based masked autoencoding, reconstruction-driven objectives have played a significant role in shaping modern approaches to text representation and generation. This review provides a comprehensive analysis of the evolution of autoencoder architectures and training objectives in NLP, and synthesizes applications of VAEs across language modeling, controllable text generation, machine translation, sentiment modeling, and multilingual representation learning. Although previous surveys have examined deep generative models or representation learning in NLP, there remains a lack of a unified review that systematically connects classical autoencoder variants, variational formulations, and modern transformer-based masked autoencoders within a single conceptual framework. To address this gap, this work consolidates architectural developments, training objectives, and major application domains under a reconstruction-based learning perspective, offering a structured comparison of modeling choices, datasets, and evaluation practices. Our analysis highlights the strengths and limitations of existing approaches, discusses the ongoing influence of autoencoder-style learning in NLP, and outlines future research directions focused on improving training stability, designing more structured latent spaces, and enhancing multilingual representation learning.</p>
	]]></content:encoded>

	<dc:title>Autoencoders in Natural Language Processing: A Comprehensive Review</dc:title>
			<dc:creator>Moussa Redah</dc:creator>
			<dc:creator>Wasfi G. Al-Khatib</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040232</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-08</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-08</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>232</prism:startingPage>
		<prism:doi>10.3390/computers15040232</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/232</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/231">

	<title>Computers, Vol. 15, Pages 231: Artificial Intelligence for High-Availability Systems: A Comprehensive Review</title>
	<link>https://www.mdpi.com/2073-431X/15/4/231</link>
	<description>High-availability (HA) systems—essential in many contemporary contexts—are designed to guarantee the availability of processes and data for more than 99% of their operational time. These systems are typically implemented as Cloud/Edge infrastructures that are properly maintained by human operators and intelligent agents in order to guarantee the required level of availability. Moreover, we are witnessing the widespread adoption of AI-based automation across many industries. AI-based software agents are increasingly being adopted to introduce more automation in highly available systems, particularly for monitoring and fault detection, fault prediction, recovery, and optimization processes. In this review paper, we discuss the state of the art of AI-based solutions for HA systems. In particular, we focus on the use of AI for the core operational mechanisms of monitoring, failure detection, and recovery. Our discussion begins by reviewing a few key background concepts of HA architectures, then we review recent work on AI-based solutions for monitoring, fault detection and recovery in HA systems.</description>
	<pubDate>2026-04-08</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 231: Artificial Intelligence for High-Availability Systems: A Comprehensive Review</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/231">doi: 10.3390/computers15040231</a></p>
	<p>Authors:
		Lidia Fotia
		Rosario Gaeta
		Fabrizio Messina
		Domenico Rosaci
		Giuseppe M. L. Sarné
		</p>
	<p>High-availability (HA) systems—essential in many contemporary contexts—are designed to guarantee the availability of processes and data for more than 99% of their operational time. These systems are typically implemented as Cloud/Edge infrastructures that are properly maintained by human operators and intelligent agents in order to guarantee the required level of availability. Moreover, we are witnessing the widespread adoption of AI-based automation across many industries. AI-based software agents are increasingly being adopted to introduce more automation in highly available systems, particularly for monitoring and fault detection, fault prediction, recovery, and optimization processes. In this review paper, we discuss the state of the art of AI-based solutions for HA systems. In particular, we focus on the use of AI for the core operational mechanisms of monitoring, failure detection, and recovery. Our discussion begins by reviewing a few key background concepts of HA architectures, then we review recent work on AI-based solutions for monitoring, fault detection and recovery in HA systems.</p>
	]]></content:encoded>

	<dc:title>Artificial Intelligence for High-Availability Systems: A Comprehensive Review</dc:title>
			<dc:creator>Lidia Fotia</dc:creator>
			<dc:creator>Rosario Gaeta</dc:creator>
			<dc:creator>Fabrizio Messina</dc:creator>
			<dc:creator>Domenico Rosaci</dc:creator>
			<dc:creator>Giuseppe M. L. Sarné</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040231</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-08</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-08</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>231</prism:startingPage>
		<prism:doi>10.3390/computers15040231</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/231</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/230">

	<title>Computers, Vol. 15, Pages 230: An Enhanced YOLOv8n-Based Approach for Pig Behavior Recognition</title>
	<link>https://www.mdpi.com/2073-431X/15/4/230</link>
	<description>Pig behavior statistics can reflect their health status. Conventional approaches depend on manual observation to derive behavioral information from video recordings, a process that demands substantial time and human effort. To overcome these limitations in indoor intensive farming environments, this study introduces an effective approach for recognizing pig behaviors, employing an enhanced YOLOv8n architecture. The approach utilizes advanced object detection algorithms to automatically identify pig behaviors, including stand, lie, eat, fight, and tail-bite, from overhead video footage of the enclosure. First, images of daily pig behaviors are collected using cameras to build a pig behavior dataset. To boost detection accuracy, the SE attention mechanism is embedded within the feature extraction backbone of the YOLOv8n network to enhance its representational capacity, strengthening the model’s capacity to grasp overarching contextual information and improve the expressiveness of extracted features. The GIoU loss function is employed during training to reduce computational cost and accelerate model convergence. Moreover, integrating Ghost convolution into the backbone significantly reduces both computational complexity and the total number of parameters. The experimental findings reveal that the optimized YOLOv8n model contains just 1.71 million parameters, marking a 42.93% reduction relative to the baseline model. Its floating-point operations total 5.0 billion, indicating a 38.27% decrease, while the mean average precision (mAP@50) reaches 96.8%, surpassing the original by 2.6 percentage points. Compared with other widely used YOLO-based object detection frameworks, the proposed approach achieves notably higher accuracy while requiring significantly lower computational resources and model complexity.</description>
	<pubDate>2026-04-08</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 230: An Enhanced YOLOv8n-Based Approach for Pig Behavior Recognition</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/230">doi: 10.3390/computers15040230</a></p>
	<p>Authors:
		Jianjun Guo
		Yudian Xu
		Lijun Lin
		Beibei Zhang
		Piao Zhou
		Shangwen Luo
		Yuhan Zhuo
		Jingyu Ji
		Zhijie Luo
		Guangming Cheng
		</p>
	<p>Pig behavior statistics can reflect their health status. Conventional approaches depend on manual observation to derive behavioral information from video recordings, a process that demands substantial time and human effort. To overcome these limitations in indoor intensive farming environments, this study introduces an effective approach for recognizing pig behaviors, employing an enhanced YOLOv8n architecture. The approach utilizes advanced object detection algorithms to automatically identify pig behaviors, including stand, lie, eat, fight, and tail-bite, from overhead video footage of the enclosure. First, images of daily pig behaviors are collected using cameras to build a pig behavior dataset. To boost detection accuracy, the SE attention mechanism is embedded within the feature extraction backbone of the YOLOv8n network to enhance its representational capacity, strengthening the model’s capacity to grasp overarching contextual information and improve the expressiveness of extracted features. The GIoU loss function is employed during training to reduce computational cost and accelerate model convergence. Moreover, integrating Ghost convolution into the backbone significantly reduces both computational complexity and the total number of parameters. The experimental findings reveal that the optimized YOLOv8n model contains just 1.71 million parameters, marking a 42.93% reduction relative to the baseline model. Its floating-point operations total 5.0 billion, indicating a 38.27% decrease, while the mean average precision (mAP@50) reaches 96.8%, surpassing the original by 2.6 percentage points. Compared with other widely used YOLO-based object detection frameworks, the proposed approach achieves notably higher accuracy while requiring significantly lower computational resources and model complexity.</p>
	]]></content:encoded>

	<dc:title>An Enhanced YOLOv8n-Based Approach for Pig Behavior Recognition</dc:title>
			<dc:creator>Jianjun Guo</dc:creator>
			<dc:creator>Yudian Xu</dc:creator>
			<dc:creator>Lijun Lin</dc:creator>
			<dc:creator>Beibei Zhang</dc:creator>
			<dc:creator>Piao Zhou</dc:creator>
			<dc:creator>Shangwen Luo</dc:creator>
			<dc:creator>Yuhan Zhuo</dc:creator>
			<dc:creator>Jingyu Ji</dc:creator>
			<dc:creator>Zhijie Luo</dc:creator>
			<dc:creator>Guangming Cheng</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040230</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-08</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-08</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>230</prism:startingPage>
		<prism:doi>10.3390/computers15040230</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/230</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/229">

	<title>Computers, Vol. 15, Pages 229: From Virtual Worlds to Real-World Equity: A Scoping Review of the Metaverse as Computer-Assisted Learning for STEM Competencies</title>
	<link>https://www.mdpi.com/2073-431X/15/4/229</link>
	<description>This scoping review critically synthesizes 34 studies (2015&amp;amp;ndash;2026) examining the metaverse&amp;amp;rsquo;s role in fostering six core STEM competencies, moving beyond descriptive reporting to interrogate whether these technologies constitute genuine pedagogical transformation, whose learners are served or excluded, and how isolated interventions connect into lifelong learning pathways. Following PRISMA-ScR guidelines, our analysis reveals that while technology literacy and collaboration appear in 91.2% of our selected studies, mathematical application is addressed in fewer than half (44.1%), raising unanswered questions about whether this pattern reflects an equitable distribution of mathematical learning opportunities across diverse learner populations&amp;amp;mdash;a question the current evidence base cannot answer but one that warrants urgent investigation. The evidence demonstrates substantial immediate learning gains through embodied presence and risk-free experimentation, yet a deeper reading suggests this often represents technological optimization of traditional goals rather than epistemological transformation. More troublingly, the concentration of inclusivity evidence on select populations&amp;amp;mdash;while rendering students with physical disabilities, Indigenous learners, and refugee students entirely invisible&amp;amp;mdash;reveals an equity paradox where immersive technologies may inadvertently amplify existing disparities. The absence of any longitudinal data linking short-term engagement to sustained STEM participation leaves the field&amp;amp;rsquo;s claim to transformative impact unsubstantiated. This review argues for moving beyond fragmented interventions toward designing coherent, equitable learning pathways that fulfill the metaverse&amp;amp;rsquo;s potential for all learners.</description>
	<pubDate>2026-04-07</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 229: From Virtual Worlds to Real-World Equity: A Scoping Review of the Metaverse as Computer-Assisted Learning for STEM Competencies</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/229">doi: 10.3390/computers15040229</a></p>
	<p>Authors:
		Franklin Parrales-Bravo
		Roberto Tolozano-Benites
		Janio Jadán-Guerrero
		Leonel Vasquez-Cevallos
		Víctor Gómez-Rodríguez
		</p>
	<p>This scoping review critically synthesizes 34 studies (2015&amp;amp;ndash;2026) examining the metaverse&amp;amp;rsquo;s role in fostering six core STEM competencies, moving beyond descriptive reporting to interrogate whether these technologies constitute genuine pedagogical transformation, whose learners are served or excluded, and how isolated interventions connect into lifelong learning pathways. Following PRISMA-ScR guidelines, our analysis reveals that while technology literacy and collaboration appear in 91.2% of our selected studies, mathematical application is addressed in fewer than half (44.1%), raising unanswered questions about whether this pattern reflects an equitable distribution of mathematical learning opportunities across diverse learner populations&amp;amp;mdash;a question the current evidence base cannot answer but one that warrants urgent investigation. The evidence demonstrates substantial immediate learning gains through embodied presence and risk-free experimentation, yet a deeper reading suggests this often represents technological optimization of traditional goals rather than epistemological transformation. More troublingly, the concentration of inclusivity evidence on select populations&amp;amp;mdash;while rendering students with physical disabilities, Indigenous learners, and refugee students entirely invisible&amp;amp;mdash;reveals an equity paradox where immersive technologies may inadvertently amplify existing disparities. The absence of any longitudinal data linking short-term engagement to sustained STEM participation leaves the field&amp;amp;rsquo;s claim to transformative impact unsubstantiated. This review argues for moving beyond fragmented interventions toward designing coherent, equitable learning pathways that fulfill the metaverse&amp;amp;rsquo;s potential for all learners.</p>
	]]></content:encoded>

	<dc:title>From Virtual Worlds to Real-World Equity: A Scoping Review of the Metaverse as Computer-Assisted Learning for STEM Competencies</dc:title>
			<dc:creator>Franklin Parrales-Bravo</dc:creator>
			<dc:creator>Roberto Tolozano-Benites</dc:creator>
			<dc:creator>Janio Jadán-Guerrero</dc:creator>
			<dc:creator>Leonel Vasquez-Cevallos</dc:creator>
			<dc:creator>Víctor Gómez-Rodríguez</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040229</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-07</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-07</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Systematic Review</prism:section>
	<prism:startingPage>229</prism:startingPage>
		<prism:doi>10.3390/computers15040229</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/229</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/228">

	<title>Computers, Vol. 15, Pages 228: Modified Shamir Threshold Scheme for Secure Storage of Biometric Data</title>
	<link>https://www.mdpi.com/2073-431X/15/4/228</link>
	<description>The security of biometric data is a critical challenge in modern information security due to their uniqueness and non-revocability. Compromise of biometric characteristics leads to irreversible consequences; therefore, storing or transmitting them in plaintext is unacceptable. This paper addresses the confidentiality and integrity of fingerprint data using cryptographic protection methods. Considering the specific nature of biometrics, fingerprint features are used only to generate a cryptographic secret rather than being stored directly. To protect the derived secret, a modified threshold secret-sharing scheme based on non-positional polynomial notation and the Chinese Remainder Theorem is proposed. The method generates a cryptographic secret from fingerprint minutiae described by spatial coordinates and ridge orientation. Concatenating minutiae coordinates and converting them into binary form produces a unique value deterministically linked to a specific user. Compared to the classical Shamir scheme, the modified scheme reduces the computational complexity of secret reconstruction from O(n&amp;nbsp;log2n) to O(k log k), decreases data storage requirements by 30&amp;ndash;40% through compact polynomial remainders, and increases successful secret reconstruction by 12&amp;ndash;15% in the presence of noise in biometric samples. The results show that the proposed algorithm can be effectively applied in biometric authentication systems to protect personal data in distributed environments. Security analysis confirms resistance to major attack classes and demonstrates practical applicability in real-world systems.</description>
	<pubDate>2026-04-07</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 228: Modified Shamir Threshold Scheme for Secure Storage of Biometric Data</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/228">doi: 10.3390/computers15040228</a></p>
	<p>Authors:
		Saule Nyssanbayeva
		Nursulu Kapalova
		Saltanat Beisenova
		</p>
	<p>The security of biometric data is a critical challenge in modern information security due to their uniqueness and non-revocability. Compromise of biometric characteristics leads to irreversible consequences; therefore, storing or transmitting them in plaintext is unacceptable. This paper addresses the confidentiality and integrity of fingerprint data using cryptographic protection methods. Considering the specific nature of biometrics, fingerprint features are used only to generate a cryptographic secret rather than being stored directly. To protect the derived secret, a modified threshold secret-sharing scheme based on non-positional polynomial notation and the Chinese Remainder Theorem is proposed. The method generates a cryptographic secret from fingerprint minutiae described by spatial coordinates and ridge orientation. Concatenating minutiae coordinates and converting them into binary form produces a unique value deterministically linked to a specific user. Compared to the classical Shamir scheme, the modified scheme reduces the computational complexity of secret reconstruction from O(n&nbsp;log2n) to O(k log k), decreases data storage requirements by 30&ndash;40% through compact polynomial remainders, and increases successful secret reconstruction by 12&ndash;15% in the presence of noise in biometric samples. The results show that the proposed algorithm can be effectively applied in biometric authentication systems to protect personal data in distributed environments. Security analysis confirms resistance to major attack classes and demonstrates practical applicability in real-world systems.</p>
	]]></content:encoded>

	<dc:title>Modified Shamir Threshold Scheme for Secure Storage of Biometric Data</dc:title>
			<dc:creator>Saule Nyssanbayeva</dc:creator>
			<dc:creator>Nursulu Kapalova</dc:creator>
			<dc:creator>Saltanat Beisenova</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040228</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-07</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-07</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>228</prism:startingPage>
		<prism:doi>10.3390/computers15040228</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/228</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/227">

	<title>Computers, Vol. 15, Pages 227: PlanProjU: A BPMN-to-HDDL HTN Planning Approach for University Project Execution</title>
	<link>https://www.mdpi.com/2073-431X/15/4/227</link>
	<description>This study aims to automate the generation of execution plans for university projects by transforming BPMN-based process models into hierarchical planning representations that can be executed by HTN planners. Effective implementation of university extension projects requires explicit management of objectives, dependencies, and operational constraints, yet this process is often carried out manually and without formal planning support. To address this problem, the paper proposes PlanProjU, a web-based platform that captures project knowledge through BPMN and translates it into HDDL domain and problem files for execution with SHOP2 and PyHOP. The system was evaluated through real university project cases and a comparative analysis of alternative generated plans. The results show that BPMN-based project knowledge can be operationalized into executable hierarchical planning structures and that different planners may produce distinct plan alternatives depending on project characteristics. The originality of the study lies in the design of a traceable BPMN-to-HDDL workflow for university project planning, implemented in an integrated platform that connects business process modeling with HTN automated planning in a domain that has received limited attention in prior research. In this sense, the proposal serves both as an innovative research contribution and as a practical alternative for structuring implementation decisions in institutional settings.</description>
	<pubDate>2026-04-07</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 227: PlanProjU: A BPMN-to-HDDL HTN Planning Approach for University Project Execution</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/227">doi: 10.3390/computers15040227</a></p>
	<p>Authors:
		Jhon Wilder Sanchez-Obando
		Néstor Dario Duque-Méndez
		Luis Fernando Castillo-Ossa
		</p>
	<p>This study aims to automate the generation of execution plans for university projects by transforming BPMN-based process models into hierarchical planning representations that can be executed by HTN planners. Effective implementation of university extension projects requires explicit management of objectives, dependencies, and operational constraints, yet this process is often carried out manually and without formal planning support. To address this problem, the paper proposes PlanProjU, a web-based platform that captures project knowledge through BPMN and translates it into HDDL domain and problem files for execution with SHOP2 and PyHOP. The system was evaluated through real university project cases and a comparative analysis of alternative generated plans. The results show that BPMN-based project knowledge can be operationalized into executable hierarchical planning structures and that different planners may produce distinct plan alternatives depending on project characteristics. The originality of the study lies in the design of a traceable BPMN-to-HDDL workflow for university project planning, implemented in an integrated platform that connects business process modeling with HTN automated planning in a domain that has received limited attention in prior research. In this sense, the proposal serves both as an innovative research contribution and as a practical alternative for structuring implementation decisions in institutional settings.</p>
	]]></content:encoded>

	<dc:title>PlanProjU: A BPMN-to-HDDL HTN Planning Approach for University Project Execution</dc:title>
			<dc:creator>Jhon Wilder Sanchez-Obando</dc:creator>
			<dc:creator>Néstor Dario Duque-Méndez</dc:creator>
			<dc:creator>Luis Fernando Castillo-Ossa</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040227</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-07</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-07</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>227</prism:startingPage>
		<prism:doi>10.3390/computers15040227</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/227</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/226">

	<title>Computers, Vol. 15, Pages 226: A Survey on Large Language Models in Software Security: Opportunities and Threats</title>
	<link>https://www.mdpi.com/2073-431X/15/4/226</link>
	<description>The rise of large language models (LLMs), such as GPT-4, Codex, Code Llama, Claude 3, CodeGemma and DeepSeek, etc., is changing the way software development is approached. These models provide strong support for tasks like writing codes, analyzing bugs, and automation. At the same time, their use in software development creates both opportunities and new risks. This survey reviews how LLMs are being used to improve security practices in software development, including vulnerability detection, secure code generation, threat analysis, and patch development. It also discusses how attackers may exploit LLMs for malicious purposes, such as writing malware, carrying out phishing campaigns, or bypassing defenses. We draw on case studies that show LLMs can help uncover zero-day vulnerabilities and speed up secure coding but also highlight cases where they have been misused to generate harmful code, sometimes unintentionally. The paper examines technical challenges like bias in training data, the difficulty of interpreting model outputs, and the risks of adversarial attacks. It also considers ethical and regulatory issues related to accountability, compliance, and responsible use. By bringing together findings from recent research and industry practice, the survey outlines future directions for building safer models, developing stronger defensive frameworks, and shaping policies that balance innovation with security. Overall, the paper argues for a careful approach where LLMs are used to strengthen software security while addressing the risks they introduce through collaboration, oversight, and ongoing improvements.</description>
	<pubDate>2026-04-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 226: A Survey on Large Language Models in Software Security: Opportunities and Threats</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/226">doi: 10.3390/computers15040226</a></p>
	<p>Authors:
		Md Bajlur Rashid
		Mohammad Shafayet Jamil Hossain
		Mohammad Ishtiaque Khan
		Sharaban Tahora
		Aiasha Siddika
		Mahmudul Islam Prakash
		Sharmin Yeasmin
		Hossain Shahriar
		</p>
	<p>The rise of large language models (LLMs), such as GPT-4, Codex, Code Llama, Claude 3, CodeGemma and DeepSeek, etc., is changing the way software development is approached. These models provide strong support for tasks like writing codes, analyzing bugs, and automation. At the same time, their use in software development creates both opportunities and new risks. This survey reviews how LLMs are being used to improve security practices in software development, including vulnerability detection, secure code generation, threat analysis, and patch development. It also discusses how attackers may exploit LLMs for malicious purposes, such as writing malware, carrying out phishing campaigns, or bypassing defenses. We draw on case studies that show LLMs can help uncover zero-day vulnerabilities and speed up secure coding but also highlight cases where they have been misused to generate harmful code, sometimes unintentionally. The paper examines technical challenges like bias in training data, the difficulty of interpreting model outputs, and the risks of adversarial attacks. It also considers ethical and regulatory issues related to accountability, compliance, and responsible use. By bringing together findings from recent research and industry practice, the survey outlines future directions for building safer models, developing stronger defensive frameworks, and shaping policies that balance innovation with security. Overall, the paper argues for a careful approach where LLMs are used to strengthen software security while addressing the risks they introduce through collaboration, oversight, and ongoing improvements.</p>
	]]></content:encoded>

	<dc:title>A Survey on Large Language Models in Software Security: Opportunities and Threats</dc:title>
			<dc:creator>Md Bajlur Rashid</dc:creator>
			<dc:creator>Mohammad Shafayet Jamil Hossain</dc:creator>
			<dc:creator>Mohammad Ishtiaque Khan</dc:creator>
			<dc:creator>Sharaban Tahora</dc:creator>
			<dc:creator>Aiasha Siddika</dc:creator>
			<dc:creator>Mahmudul Islam Prakash</dc:creator>
			<dc:creator>Sharmin Yeasmin</dc:creator>
			<dc:creator>Hossain Shahriar</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040226</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-03</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-03</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>226</prism:startingPage>
		<prism:doi>10.3390/computers15040226</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/226</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/225">

	<title>Computers, Vol. 15, Pages 225: Proximity-Aware VM Placement in Multi-Layer Fog Computing for Efficient Resource Management: Performance Evaluation Under a Gaming Application Scenario</title>
	<link>https://www.mdpi.com/2073-431X/15/4/225</link>
	<description>The rapid proliferation of mobile devices, particularly smartphones and tablets, has transformed digital entertainment, with mobile gaming emerging as one of the fastest-growing digital segments. Such applications are inherently latency-sensitive and require effective resource management and seamless mobility support. To overcome these issues, this paper suggests a four-layered infrastructure that combines edge, fog, and cloud computing with Software-Defined Networking (SDN) and is assisted by a lightweight proximity-aware heuristic placement strategy and mobility management. The suggested structure follows a microservices contained breakdown of the gaming functionality and uses clustering algorithms to permit coordinated access to resources by edge and fog nodes. A dynamic lightweight proximity-aware virtual machine placement algorithm is presented to deploy application modules nearer to the users depending on the availability and mobility of the resources. The proposed work is simulated using IFogSim2. The proposed model reduces the latency by up to 73 percent and the rate of task completion by 25 percent relative to baseline configurations in the case of dynamic mobility of users. These results indicate that the suggested strategy can be effective in improving the latency-sensitive mobile gaming applications performance in the edge-fog networks.</description>
	<pubDate>2026-04-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 225: Proximity-Aware VM Placement in Multi-Layer Fog Computing for Efficient Resource Management: Performance Evaluation Under a Gaming Application Scenario</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/225">doi: 10.3390/computers15040225</a></p>
	<p>Authors:
		Sreebha Bhaskaran
		Supriya Muthuraman
		</p>
	<p>The rapid proliferation of mobile devices, particularly smartphones and tablets, has transformed digital entertainment, with mobile gaming emerging as one of the fastest-growing digital segments. Such applications are inherently latency-sensitive and require effective resource management and seamless mobility support. To overcome these issues, this paper suggests a four-layered infrastructure that combines edge, fog, and cloud computing with Software-Defined Networking (SDN) and is assisted by a lightweight proximity-aware heuristic placement strategy and mobility management. The suggested structure follows a microservices contained breakdown of the gaming functionality and uses clustering algorithms to permit coordinated access to resources by edge and fog nodes. A dynamic lightweight proximity-aware virtual machine placement algorithm is presented to deploy application modules nearer to the users depending on the availability and mobility of the resources. The proposed work is simulated using IFogSim2. The proposed model reduces the latency by up to 73 percent and the rate of task completion by 25 percent relative to baseline configurations in the case of dynamic mobility of users. These results indicate that the suggested strategy can be effective in improving the latency-sensitive mobile gaming applications performance in the edge-fog networks.</p>
	]]></content:encoded>

	<dc:title>Proximity-Aware VM Placement in Multi-Layer Fog Computing for Efficient Resource Management: Performance Evaluation Under a Gaming Application Scenario</dc:title>
			<dc:creator>Sreebha Bhaskaran</dc:creator>
			<dc:creator>Supriya Muthuraman</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040225</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-03</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-03</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>225</prism:startingPage>
		<prism:doi>10.3390/computers15040225</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/225</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/224">

	<title>Computers, Vol. 15, Pages 224: Enhancing Polynomial Multiplication in Post-Quantum Cryptography for IoT Applications: A Hybrid Serial&amp;ndash;Parallel Systolic Architecture</title>
	<link>https://www.mdpi.com/2073-431X/15/4/224</link>
	<description>The rapid growth of the Internet of Things (IoT) is fundamentally altering industrial and economic landscapes by embedding smart, connected devices into everyday operations. Despite these benefits, significant concerns regarding data protection and user privacy continue to obstruct the widespread use of these technologies, particularly with the looming threat of quantum computing. Implementing post-quantum cryptographic (PQC) solutions is vital for addressing these risks, yet the limited resources found in IoT edge devices present major deployment challenges. Lattice-based cryptography has become a leading solution to these problems, largely because it depends on efficient polynomial multiplication. Enhancing the execution of this mathematical operation is crucial for improving the overall performance of PQC protocols. In this work, we introduce a hybrid serial&amp;amp;ndash;parallel systolic architecture specifically engineered for polynomial multiplication within the Binary Ring Learning With Errors (BRLWE) scheme. Designed for the security processors used in IoT hardware, this architecture significantly increases processing speeds while minimizing the use of hardware resources and reducing energy consumption. Such improvements are critical for establishing a secure IoT infrastructure that is resilient against quantum-era attacks and capable of supporting industrial expansion. Moreover, this research aligns with global Sustainable Development Goals (SDGs) 8 and 9 by building trust in innovative systems and fostering a more secure, sustainable, and productive digital economy.</description>
	<pubDate>2026-04-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 224: Enhancing Polynomial Multiplication in Post-Quantum Cryptography for IoT Applications: A Hybrid Serial&amp;ndash;Parallel Systolic Architecture</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/224">doi: 10.3390/computers15040224</a></p>
	<p>Authors:
		Atef Ibrahim
		Fayez Gebali
		</p>
	<p>The rapid growth of the Internet of Things (IoT) is fundamentally altering industrial and economic landscapes by embedding smart, connected devices into everyday operations. Despite these benefits, significant concerns regarding data protection and user privacy continue to obstruct the widespread use of these technologies, particularly with the looming threat of quantum computing. Implementing post-quantum cryptographic (PQC) solutions is vital for addressing these risks, yet the limited resources found in IoT edge devices present major deployment challenges. Lattice-based cryptography has become a leading solution to these problems, largely because it depends on efficient polynomial multiplication. Enhancing the execution of this mathematical operation is crucial for improving the overall performance of PQC protocols. In this work, we introduce a hybrid serial&amp;amp;ndash;parallel systolic architecture specifically engineered for polynomial multiplication within the Binary Ring Learning With Errors (BRLWE) scheme. Designed for the security processors used in IoT hardware, this architecture significantly increases processing speeds while minimizing the use of hardware resources and reducing energy consumption. Such improvements are critical for establishing a secure IoT infrastructure that is resilient against quantum-era attacks and capable of supporting industrial expansion. Moreover, this research aligns with global Sustainable Development Goals (SDGs) 8 and 9 by building trust in innovative systems and fostering a more secure, sustainable, and productive digital economy.</p>
	]]></content:encoded>

	<dc:title>Enhancing Polynomial Multiplication in Post-Quantum Cryptography for IoT Applications: A Hybrid Serial&amp;ndash;Parallel Systolic Architecture</dc:title>
			<dc:creator>Atef Ibrahim</dc:creator>
			<dc:creator>Fayez Gebali</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040224</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-03</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-03</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>224</prism:startingPage>
		<prism:doi>10.3390/computers15040224</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/224</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/223">

	<title>Computers, Vol. 15, Pages 223: AGP-GEMM: Adaptive Grouping and Partitioning Framework for Accelerating Small and Irregular Matrices on CPUs</title>
	<link>https://www.mdpi.com/2073-431X/15/4/223</link>
	<description>General Matrix Multiplication (GEMM) is a fundamental computational kernel in scientific computing, serving as the foundation for numerous complex tasks. However, in practical applications, the performance of GEMM is often constrained by irregular matrix dimensions and the diversity of hardware architectures. In particular, when processing small and irregular matrices, GEMM typically exhibits reduced computational efficiency. To address these challenges, this paper proposes a GEMM acceleration method based on an adaptive core grouping strategy. The method consists of two key components: a core grouping mechanism that alleviates workload imbalance among multi-core CPUs, and an adaptive block partitioning algorithm that dynamically selects optimal tiling schemes according to the matrix dimensions, achieving both load balance and cache-friendly data access. Experimental results on the Kunpeng CPU platform demonstrate that the proposed method achieves significant performance improvements compared to the Kunpeng KML math library, reaching a peak acceleration of up to 2.1&amp;amp;times; and an average speedup of 1.64&amp;amp;times;. These results validate the effectiveness and efficiency of the proposed approach in handling small and irregular matrix computation scenarios.</description>
	<pubDate>2026-04-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 223: AGP-GEMM: Adaptive Grouping and Partitioning Framework for Accelerating Small and Irregular Matrices on CPUs</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/223">doi: 10.3390/computers15040223</a></p>
	<p>Authors:
		Hongzhe Zhou
		Lu Lu
		Haibiao Yang
		Yu Zhang
		</p>
	<p>General Matrix Multiplication (GEMM) is a fundamental computational kernel in scientific computing, serving as the foundation for numerous complex tasks. However, in practical applications, the performance of GEMM is often constrained by irregular matrix dimensions and the diversity of hardware architectures. In particular, when processing small and irregular matrices, GEMM typically exhibits reduced computational efficiency. To address these challenges, this paper proposes a GEMM acceleration method based on an adaptive core grouping strategy. The method consists of two key components: a core grouping mechanism that alleviates workload imbalance among multi-core CPUs, and an adaptive block partitioning algorithm that dynamically selects optimal tiling schemes according to the matrix dimensions, achieving both load balance and cache-friendly data access. Experimental results on the Kunpeng CPU platform demonstrate that the proposed method achieves significant performance improvements compared to the Kunpeng KML math library, reaching a peak acceleration of up to 2.1&times; and an average speedup of 1.64&times;. These results validate the effectiveness and efficiency of the proposed approach in handling small and irregular matrix computation scenarios.</p>
	]]></content:encoded>

	<dc:title>AGP-GEMM: Adaptive Grouping and Partitioning Framework for Accelerating Small and Irregular Matrices on CPUs</dc:title>
			<dc:creator>Hongzhe Zhou</dc:creator>
			<dc:creator>Lu Lu</dc:creator>
			<dc:creator>Haibiao Yang</dc:creator>
			<dc:creator>Yu Zhang</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040223</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-03</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-03</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>223</prism:startingPage>
		<prism:doi>10.3390/computers15040223</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/223</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/222">

	<title>Computers, Vol. 15, Pages 222: An Empirical Comparison of Cascade and Direct End-to-End Speech Translation for Low-Resource Language Pair</title>
	<link>https://www.mdpi.com/2073-431X/15/4/222</link>
	<description>Speech-to-text translation (S2TT) for low-resource languages remains challenging due to the scarcity of parallel speech translation data and the susceptibility of modular pipelines to error propagation. This paper presents a controlled empirical comparison of cascade and end-to-end approaches for Kazakh&amp;ndash;Russian speech translation using the ST-kk-ru dataset (&amp;asymp;332 h, 140 k triplets). The cascade framework is strengthened with recent pre-trained models for automatic speech recognition and neural machine translation, achieving 21.3 BLEU on the test set. Three representative end-to-end architectures are evaluated under identical data conditions. The strongest direct model, combining a Wav2Vec 2.0 encoder with an mBART decoder augmented by a length adaptor and adapter modules, reaches 17.97 BLEU, compared with 15.35 BLEU for FAIRSEQ S2T and 16.3 BLEU for ESPnet-ST. Automatic evaluation is complemented by expert manual assessment and targeted linguistic analysis. Results indicate that, under current low-resource conditions, cascade systems provide higher translation accuracy and better morpho-syntactic fidelity, while end-to-end models remain competitive and offer advantages in architectural simplicity and potentially reduced inference latency (due to single-pass processing), although empirical measurements were not conducted in this study. This study establishes a reproducible benchmark for Kazakh&amp;ndash;Russian speech translation and highlights practical trade-offs between modeling paradigms in low-resource, morphologically rich settings.</description>
	<pubDate>2026-04-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 222: An Empirical Comparison of Cascade and Direct End-to-End Speech Translation for Low-Resource Language Pair</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/222">doi: 10.3390/computers15040222</a></p>
	<p>Authors:
		Zhanibek Kozhirbayev
		</p>
	<p>Speech-to-text translation (S2TT) for low-resource languages remains challenging due to the scarcity of parallel speech translation data and the susceptibility of modular pipelines to error propagation. This paper presents a controlled empirical comparison of cascade and end-to-end approaches for Kazakh&ndash;Russian speech translation using the ST-kk-ru dataset (&asymp;332 h, 140 k triplets). The cascade framework is strengthened with recent pre-trained models for automatic speech recognition and neural machine translation, achieving 21.3 BLEU on the test set. Three representative end-to-end architectures are evaluated under identical data conditions. The strongest direct model, combining a Wav2Vec 2.0 encoder with an mBART decoder augmented by a length adaptor and adapter modules, reaches 17.97 BLEU, compared with 15.35 BLEU for FAIRSEQ S2T and 16.3 BLEU for ESPnet-ST. Automatic evaluation is complemented by expert manual assessment and targeted linguistic analysis. Results indicate that, under current low-resource conditions, cascade systems provide higher translation accuracy and better morpho-syntactic fidelity, while end-to-end models remain competitive and offer advantages in architectural simplicity and potentially reduced inference latency (due to single-pass processing), although empirical measurements were not conducted in this study. This study establishes a reproducible benchmark for Kazakh&ndash;Russian speech translation and highlights practical trade-offs between modeling paradigms in low-resource, morphologically rich settings.</p>
	]]></content:encoded>

	<dc:title>An Empirical Comparison of Cascade and Direct End-to-End Speech Translation for Low-Resource Language Pair</dc:title>
			<dc:creator>Zhanibek Kozhirbayev</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040222</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-02</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-02</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>222</prism:startingPage>
		<prism:doi>10.3390/computers15040222</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/222</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/221">

	<title>Computers, Vol. 15, Pages 221: Table-Aware Row-Level RAG for Classical Chinese Understanding</title>
	<link>https://www.mdpi.com/2073-431X/15/4/221</link>
	<description>The classical Chinese language is characterized by a high density of meaning, wide use of polysemy, and strong dependence on history and culture, which pose challenges to existing large language models (LLMs). Retrieval-augmented generation (RAG) technology has become a prevailing option that could address these issues without retraining the model, but most of the existing RAG systems regard structured tables as unstructured text, encoding a whole table into one vector. Such a schema usually hides the row-level semantic information and raises the reasoning cost for LLMs. In this study, we propose a new table-aware row-wise retrieval system in which each row of a table is treated as an individual semantic unit, explicitly (instead of implicitly) reasoning at generation time. We organize the table into row-level vector representations, which makes retrieval more deterministic and semantically interpretable, in particular, for pedagogical or philological datasets. Based on LangChain and integrated with Qwen LLMs, our system can be evaluated experimentally for classical Chinese learning tasks, where we find that compared with the traditional RAG systems, this system improves on retrieval performance, semantic consistency, and explainability, with no model training or extra computation time required.</description>
	<pubDate>2026-04-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 221: Table-Aware Row-Level RAG for Classical Chinese Understanding</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/221">doi: 10.3390/computers15040221</a></p>
	<p>Authors:
		Zhihao Liu
		Waiyie Leong
		</p>
	<p>The classical Chinese language is characterized by a high density of meaning, wide use of polysemy, and strong dependence on history and culture, which pose challenges to existing large language models (LLMs). Retrieval-augmented generation (RAG) technology has become a prevailing option that could address these issues without retraining the model, but most of the existing RAG systems regard structured tables as unstructured text, encoding a whole table into one vector. Such a schema usually hides the row-level semantic information and raises the reasoning cost for LLMs. In this study, we propose a new table-aware row-wise retrieval system in which each row of a table is treated as an individual semantic unit, explicitly (instead of implicitly) reasoning at generation time. We organize the table into row-level vector representations, which makes retrieval more deterministic and semantically interpretable, in particular, for pedagogical or philological datasets. Based on LangChain and integrated with Qwen LLMs, our system can be evaluated experimentally for classical Chinese learning tasks, where we find that compared with the traditional RAG systems, this system improves on retrieval performance, semantic consistency, and explainability, with no model training or extra computation time required.</p>
	]]></content:encoded>

	<dc:title>Table-Aware Row-Level RAG for Classical Chinese Understanding</dc:title>
			<dc:creator>Zhihao Liu</dc:creator>
			<dc:creator>Waiyie Leong</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040221</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-02</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-02</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>221</prism:startingPage>
		<prism:doi>10.3390/computers15040221</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/221</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/220">

	<title>Computers, Vol. 15, Pages 220: Toward a Unified Framework for Secure Coding: A Comprehensive Synthesis of Best Practices</title>
	<link>https://www.mdpi.com/2073-431X/15/4/220</link>
	<description>The challenge of software vulnerabilities persists globally, despite the widespread availability of advanced security tools and comprehensive developer guidelines. This issue is not the result of professional negligence, but rather the complex and non-intuitive nature of secure coding. This research takes on the massive data silos in the security industry by providing a comprehensive review of best practices drawn from 35 reputable academic and corporate sources. Authentication, cryptography, input validation, and deployment hardening are some of the key development domains into which these technologies are organized. We conduct a comprehensive analysis of each practice, elucidating the specific security issue it addresses, prevalent implementation patterns, and potential hazards, in addition to serving as a checklist. Simple precautions, like not using passwords that are hardcoded, and more involved methods, such correctly encoding output and configuring access controls effectively, are all part of the range of practices. We assert that despite the prevalent usage of tools like as static analyzers, numerous vulnerabilities persist due to developers&amp;rsquo; insufficient training in integrating security considerations into their coding practices. This work aspires to serve as a comprehensive, organized resource that supplies developers with the necessary context and guidance to make informed, security-oriented decisions along the software development lifecycle. The aim is to develop a more extensive resource than those presently accessible, which can also assist educators or security teams during code instruction or evaluation.</description>
	<pubDate>2026-04-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 220: Toward a Unified Framework for Secure Coding: A Comprehensive Synthesis of Best Practices</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/220">doi: 10.3390/computers15040220</a></p>
	<p>Authors:
		Alyah Alromaizan
		Ghala Alzahrani
		Aliza Khan
		Lulwah Alhumaid
		Md Kamrul Siam
		Muhammad Umair Khan
		Md Jobair Hossain Faruk
		Hossain Shahriar
		</p>
	<p>The challenge of software vulnerabilities persists globally, despite the widespread availability of advanced security tools and comprehensive developer guidelines. This issue is not the result of professional negligence, but rather the complex and non-intuitive nature of secure coding. This research takes on the massive data silos in the security industry by providing a comprehensive review of best practices drawn from 35 reputable academic and corporate sources. Authentication, cryptography, input validation, and deployment hardening are some of the key development domains into which these technologies are organized. We conduct a comprehensive analysis of each practice, elucidating the specific security issue it addresses, prevalent implementation patterns, and potential hazards, in addition to serving as a checklist. Simple precautions, like not using passwords that are hardcoded, and more involved methods, such correctly encoding output and configuring access controls effectively, are all part of the range of practices. We assert that despite the prevalent usage of tools like as static analyzers, numerous vulnerabilities persist due to developers&rsquo; insufficient training in integrating security considerations into their coding practices. This work aspires to serve as a comprehensive, organized resource that supplies developers with the necessary context and guidance to make informed, security-oriented decisions along the software development lifecycle. The aim is to develop a more extensive resource than those presently accessible, which can also assist educators or security teams during code instruction or evaluation.</p>
	]]></content:encoded>

	<dc:title>Toward a Unified Framework for Secure Coding: A Comprehensive Synthesis of Best Practices</dc:title>
			<dc:creator>Alyah Alromaizan</dc:creator>
			<dc:creator>Ghala Alzahrani</dc:creator>
			<dc:creator>Aliza Khan</dc:creator>
			<dc:creator>Lulwah Alhumaid</dc:creator>
			<dc:creator>Md Kamrul Siam</dc:creator>
			<dc:creator>Muhammad Umair Khan</dc:creator>
			<dc:creator>Md Jobair Hossain Faruk</dc:creator>
			<dc:creator>Hossain Shahriar</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040220</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-02</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-02</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>220</prism:startingPage>
		<prism:doi>10.3390/computers15040220</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/220</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/219">

	<title>Computers, Vol. 15, Pages 219: Energy-Efficient Dual-Core RISC-V Architecture for Edge AI Acceleration with Dynamic MAC Unit Reuse</title>
	<link>https://www.mdpi.com/2073-431X/15/4/219</link>
	<description>This paper presents a dual-core RISC-V architecture designed for energy-efficient AI acceleration at the edge, featuring dynamic MAC unit sharing, frequency scaling (DFS), and FIFO-based resource arbitration. The system comprises two RISC-V cores that compete for shared computational resources&amp;mdash;a single Multiply&amp;ndash;Accumulate (MAC) unit and a shared external memory subsystem&amp;mdash;governed by a channel-based arbitration mechanism with CPU-priority semantics, while each core maintains private instruction and data caches. The architecture implements a tightly coupled Neural Processing Unit (NPU) with CONV, GEMM, and POOL operations that execute opportunistically in the background when the MAC unit is available. Dynamic frequency scaling (DFS) with three levels (100/200/400 MHz) is applied to the shared MAC unit, allowing the dynamic acceleration of CNN workloads. The arbitration mechanism uses SystemC sc_fifo channels with CPU-priority polling, ensuring that CPU execution is minimally impacted by background AI processing while the NPU makes progress during idle MAC slots. The NPU supports 3 &amp;times; 3 convolutions, matrix multiplication (GEMM) with 10 &amp;times; 10 tiles, and pooling operations. The implementation is cycle-accurate in SystemC, targeting FPGA deployment. Experimental evaluation demonstrates that the dual-core architecture achieves 1.87&amp;times; speedup with 93.5% efficiency for parallel workloads, while DFS enables 70% power reduction at low frequency. The system successfully executes simultaneous CPU and AI workloads, with CPU-priority arbitration ensuring no CPU starvation under contention. The proposed design offers a practical solution for embedded AI applications requiring both general-purpose computation and neural network acceleration, validated through comprehensive SystemC simulation on modern FPGA platforms.</description>
	<pubDate>2026-04-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 219: Energy-Efficient Dual-Core RISC-V Architecture for Edge AI Acceleration with Dynamic MAC Unit Reuse</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/219">doi: 10.3390/computers15040219</a></p>
	<p>Authors:
		Cristian Andy Tanase
		</p>
	<p>This paper presents a dual-core RISC-V architecture designed for energy-efficient AI acceleration at the edge, featuring dynamic MAC unit sharing, frequency scaling (DFS), and FIFO-based resource arbitration. The system comprises two RISC-V cores that compete for shared computational resources&mdash;a single Multiply&ndash;Accumulate (MAC) unit and a shared external memory subsystem&mdash;governed by a channel-based arbitration mechanism with CPU-priority semantics, while each core maintains private instruction and data caches. The architecture implements a tightly coupled Neural Processing Unit (NPU) with CONV, GEMM, and POOL operations that execute opportunistically in the background when the MAC unit is available. Dynamic frequency scaling (DFS) with three levels (100/200/400 MHz) is applied to the shared MAC unit, allowing the dynamic acceleration of CNN workloads. The arbitration mechanism uses SystemC sc_fifo channels with CPU-priority polling, ensuring that CPU execution is minimally impacted by background AI processing while the NPU makes progress during idle MAC slots. The NPU supports 3 &times; 3 convolutions, matrix multiplication (GEMM) with 10 &times; 10 tiles, and pooling operations. The implementation is cycle-accurate in SystemC, targeting FPGA deployment. Experimental evaluation demonstrates that the dual-core architecture achieves 1.87&times; speedup with 93.5% efficiency for parallel workloads, while DFS enables 70% power reduction at low frequency. The system successfully executes simultaneous CPU and AI workloads, with CPU-priority arbitration ensuring no CPU starvation under contention. The proposed design offers a practical solution for embedded AI applications requiring both general-purpose computation and neural network acceleration, validated through comprehensive SystemC simulation on modern FPGA platforms.</p>
	]]></content:encoded>

	<dc:title>Energy-Efficient Dual-Core RISC-V Architecture for Edge AI Acceleration with Dynamic MAC Unit Reuse</dc:title>
			<dc:creator>Cristian Andy Tanase</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040219</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-01</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-01</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>219</prism:startingPage>
		<prism:doi>10.3390/computers15040219</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/219</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/217">

	<title>Computers, Vol. 15, Pages 217: Stylometry Analyzis of Human and Machine Text for Academic Integrity</title>
	<link>https://www.mdpi.com/2073-431X/15/4/217</link>
	<description>This work addresses critical challenges to academic integrity, including plagiarism, fabrication, and verification of authorship of educational content, by proposing a Natural Language Processing (NLP)-based framework for authenticating students&amp;rsquo; content through author attribution and style change detection. Despite some initial efforts, several aspects of the topic are yet to be explored. In contrast to existing solutions, the paper provides a comprehensive analyzis of the topic by targeting four relevant tasks, including (i) classification of human and machine text, (ii) differentiating in single and multi-authored documents, (iii) author change detection within multi-authored documents, and (iv) author recognition in collaboratively produced documents. The solutions proposed for the tasks are evaluated on two datasets generated with Gemini using two different prompts, including a normal and a strict set of instructions. During experiments, some performance reduction is observed for the proposed solutions on the dataset generated by the strict prompt, demonstrating the complexities involved in detecting machine-generated text with cleverly crafted prompts. The generated datasets, code, and other relevant materials are made publicly available on GitHub, which are expected to provide a baseline for future research in the domain.</description>
	<pubDate>2026-04-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 217: Stylometry Analyzis of Human and Machine Text for Academic Integrity</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/217">doi: 10.3390/computers15040217</a></p>
	<p>Authors:
		Hezam Albaqami
		Muhammad Asif Ayub
		Nasir Ahmad
		Yaseen Ahmad
		Mohammad M. Alqahtani
		Abdullah M. Algamdi
		Almoaid A. Owaidah
		Kashif Ahmad
		</p>
	<p>This work addresses critical challenges to academic integrity, including plagiarism, fabrication, and verification of authorship of educational content, by proposing a Natural Language Processing (NLP)-based framework for authenticating students&rsquo; content through author attribution and style change detection. Despite some initial efforts, several aspects of the topic are yet to be explored. In contrast to existing solutions, the paper provides a comprehensive analyzis of the topic by targeting four relevant tasks, including (i) classification of human and machine text, (ii) differentiating in single and multi-authored documents, (iii) author change detection within multi-authored documents, and (iv) author recognition in collaboratively produced documents. The solutions proposed for the tasks are evaluated on two datasets generated with Gemini using two different prompts, including a normal and a strict set of instructions. During experiments, some performance reduction is observed for the proposed solutions on the dataset generated by the strict prompt, demonstrating the complexities involved in detecting machine-generated text with cleverly crafted prompts. The generated datasets, code, and other relevant materials are made publicly available on GitHub, which are expected to provide a baseline for future research in the domain.</p>
	]]></content:encoded>

	<dc:title>Stylometry Analyzis of Human and Machine Text for Academic Integrity</dc:title>
			<dc:creator>Hezam Albaqami</dc:creator>
			<dc:creator>Muhammad Asif Ayub</dc:creator>
			<dc:creator>Nasir Ahmad</dc:creator>
			<dc:creator>Yaseen Ahmad</dc:creator>
			<dc:creator>Mohammad M. Alqahtani</dc:creator>
			<dc:creator>Abdullah M. Algamdi</dc:creator>
			<dc:creator>Almoaid A. Owaidah</dc:creator>
			<dc:creator>Kashif Ahmad</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040217</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-01</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-01</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>217</prism:startingPage>
		<prism:doi>10.3390/computers15040217</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/217</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/218">

	<title>Computers, Vol. 15, Pages 218: Towards a Reference Architecture for Machine Learning Operations</title>
	<link>https://www.mdpi.com/2073-431X/15/4/218</link>
	<description>Industrial organisations increasingly rely on machine learning (ML) to improve quality, maintenance, and planning in Industry 4.0/5.0 ecosystems. However, turning experimental models into reliable services on the production floor remains complex due to the heterogeneity of operational technologies (OTs) and information technologies (ITs), including implementation constraints, latency in edge-fog-cloud scenarios, governance requirements, and continuous performance degradation caused by data drift. Although Machine Learning Operations (MLOps) provides lifecycle practices for deployment, monitoring, and retraining, the evidence is fragmented across tool-centric descriptions, case-specific pipelines, and conceptual architectures, offering limited guidance on which industrial constraints should inform architectural decisions and how to evaluate solutions. This work addresses that gap through a PRISMA-guided systematic review of 49 studies on industrial MLOps (with the search and screening primarily targeting Industry 4.0/IIoT operationalisation contexts, as reflected in the search strategy and corpus) and an evidence-based synthesis of principles, challenges, lifecycle practices, and enabling technologies. From this synthesis, industrial requirements are derived that encompass OT/IT integration, edge-fog-cloud orchestration, security and traceability, and observability-based lifecycle control. On this basis, a reference architecture is proposed that maps these requirements to functional layers, data and control flows, and verifiable responsibilities. To support reproducibility and practical inspectability, the article also presents an open-source architectural instantiation aligned with the proposed decomposition. 
Finally, the evaluation is illustrated through a predictive maintenance use case (tool breakage) in a single CNC machining cell, where the objective is to demonstrate end-to-end feasibility under realistic operational constraints rather than cross-scenario superiority or broad industrial generalisability.</description>
	<pubDate>2026-04-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 218: Towards a Reference Architecture for Machine Learning Operations</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/218">doi: 10.3390/computers15040218</a></p>
	<p>Authors:
		Miguel Ángel Mateo-Casalí
		Andrés Boza
		Francisco Fraile
		</p>
	<p>Industrial organisations increasingly rely on machine learning (ML) to improve quality, maintenance, and planning in Industry 4.0/5.0 ecosystems. However, turning experimental models into reliable services on the production floor remains complex due to the heterogeneity of operational technologies (OTs) and information technologies (ITs), including implementation constraints, latency in edge-fog-cloud scenarios, governance requirements, and continuous performance degradation caused by data drift. Although Machine Learning Operations (MLOps) provides lifecycle practices for deployment, monitoring, and retraining, the evidence is fragmented across tool-centric descriptions, case-specific pipelines, and conceptual architectures, offering limited guidance on which industrial constraints should inform architectural decisions and how to evaluate solutions. This work addresses that gap through a PRISMA-guided systematic review of 49 studies on industrial MLOps (with the search and screening primarily targeting Industry 4.0/IIoT operationalisation contexts, as reflected in the search strategy and corpus) and an evidence-based synthesis of principles, challenges, lifecycle practices, and enabling technologies. From this synthesis, industrial requirements are derived that encompass OT/IT integration, edge-fog-cloud orchestration, security and traceability, and observability-based lifecycle control. On this basis, a reference architecture is proposed that maps these requirements to functional layers, data and control flows, and verifiable responsibilities. To support reproducibility and practical inspectability, the article also presents an open-source architectural instantiation aligned with the proposed decomposition. 
Finally, the evaluation is illustrated through a predictive maintenance use case (tool breakage) in a single CNC machining cell, where the objective is to demonstrate end-to-end feasibility under realistic operational constraints rather than cross-scenario superiority or broad industrial generalisability.</p>
	]]></content:encoded>

	<dc:title>Towards a Reference Architecture for Machine Learning Operations</dc:title>
			<dc:creator>Miguel Ángel Mateo-Casalí</dc:creator>
			<dc:creator>Andrés Boza</dc:creator>
			<dc:creator>Francisco Fraile</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040218</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-01</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-01</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>218</prism:startingPage>
		<prism:doi>10.3390/computers15040218</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/218</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/216">

	<title>Computers, Vol. 15, Pages 216: Adaptive Sequence-Based Heuristic for Two-Dimensional Guillotine Cutting and Packing Problems</title>
	<link>https://www.mdpi.com/2073-431X/15/4/216</link>
	<description>This paper proposes adaptive sequence-based heuristics for solving rectangular two-dimensional guillotine Cutting and Packing Problems (CPPs). These problems are essential in various industrial sectors, aiming to maximise resource utilisation by selecting profitable item subsets or minimise waste by using the fewest possible identical large objects. The core methodology is grounded in the principle that if a specific item sequence generates a high-quality solution, incremental adjustments to that sequence can yield even better outcomes. By iteratively refining item ordering through the BubbleSearch method, the heuristics balance search intensification with the diversification of the solution space. Extensive computational experiments were conducted on benchmark datasets, including SET1, ATP, and CLASS, across multiple problem variants such as the Single Stock-Size Cutting Stock Problem (SSSCSP) and the Single Large Object Placement Problem (SLOPP). The results confirm that these heuristics and their extension with path relinking consistently deliver optimal or near-optimal solutions. These heuristics achieve high performance in computational times that are significantly shorter than existing state-of-the-art methods, demonstrating their robustness, flexibility, and suitability for software transferability and real-world industrial adoption.</description>
	<pubDate>2026-04-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 216: Adaptive Sequence-Based Heuristic for Two-Dimensional Guillotine Cutting and Packing Problems</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/216">doi: 10.3390/computers15040216</a></p>
	<p>Authors:
		Óscar Oliveira
		Dorabela Gamboa
		</p>
	<p>This paper proposes adaptive sequence-based heuristics for solving rectangular two-dimensional guillotine Cutting and Packing Problems (CPPs). These problems are essential in various industrial sectors, aiming to maximise resource utilisation by selecting profitable item subsets or minimise waste by using the fewest possible identical large objects. The core methodology is grounded in the principle that if a specific item sequence generates a high-quality solution, incremental adjustments to that sequence can yield even better outcomes. By iteratively refining item ordering through the BubbleSearch method, the heuristics balance search intensification with the diversification of the solution space. Extensive computational experiments were conducted on benchmark datasets, including SET1, ATP, and CLASS, across multiple problem variants such as the Single Stock-Size Cutting Stock Problem (SSSCSP) and the Single Large Object Placement Problem (SLOPP). The results confirm that these heuristics and their extension with path relinking consistently deliver optimal or near-optimal solutions. These heuristics achieve high performance in computational times that are significantly shorter than existing state-of-the-art methods, demonstrating their robustness, flexibility, and suitability for software transferability and real-world industrial adoption.</p>
	]]></content:encoded>

	<dc:title>Adaptive Sequence-Based Heuristic for Two-Dimensional Guillotine Cutting and Packing Problems</dc:title>
			<dc:creator>Óscar Oliveira</dc:creator>
			<dc:creator>Dorabela Gamboa</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040216</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-01</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-01</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>216</prism:startingPage>
		<prism:doi>10.3390/computers15040216</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/216</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/215">

	<title>Computers, Vol. 15, Pages 215: Drivers and Barriers to the Use of Generative Artificial Intelligence in the Spanish Active Population: Insights from Artificial Neural Network Modeling and Shapley Additive Explanations</title>
	<link>https://www.mdpi.com/2073-431X/15/4/215</link>
	<description>This study analyzes the determinants of generative artificial intelligence (GAI) use intensity among the Spanish working population, as well as the possible existence of gender gaps in its adoption. To this end, a conceptual model is proposed that incorporates perceived economic and productive usefulness (PEU), perceived social usefulness (PSU), three dimensions of the Technology Readiness Index&amp;mdash;technological optimism (OPTI), innovativeness (INNOV), and insecurity (INSEC)&amp;mdash;and three sociodemographic variables: entrepreneurial status, gender, and generational cohort. The model is implemented using artificial neural networks (ANNs) endowed with explanatory capability through Shapley Additive Explanations (SHAP). The application of SHAP enables the assessment of both the global and local importance of the explanatory variables, as well as the potential existence of gender biases in their contribution to GAI use. The results indicate that the most relevant variables are PEU, generational cohort, and INNOV. Although gender does not rank among the most important variables in terms of global importance, women exhibit lower levels of GAI use, and gender-related differences are also observed in the contribution of several explanatory variables. In particular, substantive effect sizes are observed for PSU, OPTI, INSEC, entrepreneurial status, and membership in Generation Y. By contrast, differences associated with especially relevant variables such as PEU and INNOV, as well as membership in Generation Z, do not exhibit meaningful effect sizes.</description>
	<pubDate>2026-04-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 215: Drivers and Barriers to the Use of Generative Artificial Intelligence in the Spanish Active Population: Insights from Artificial Neural Network Modeling and Shapley Additive Explanations</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/215">doi: 10.3390/computers15040215</a></p>
	<p>Authors:
		Teresa Torres-Coronas
		Jorge de Andrés-Sánchez
		Orlando Lima Rua
		Álvaro Carrasco-Aguilar
		</p>
	<p>This study analyzes the determinants of generative artificial intelligence (GAI) use intensity among the Spanish working population, as well as the possible existence of gender gaps in its adoption. To this end, a conceptual model is proposed that incorporates perceived economic and productive usefulness (PEU), perceived social usefulness (PSU), three dimensions of the Technology Readiness Index&mdash;technological optimism (OPTI), innovativeness (INNOV), and insecurity (INSEC)&mdash;and three sociodemographic variables: entrepreneurial status, gender, and generational cohort. The model is implemented using artificial neural networks (ANNs) endowed with explanatory capability through Shapley Additive Explanations (SHAP). The application of SHAP enables the assessment of both the global and local importance of the explanatory variables, as well as the potential existence of gender biases in their contribution to GAI use. The results indicate that the most relevant variables are PEU, generational cohort, and INNOV. Although gender does not rank among the most important variables in terms of global importance, women exhibit lower levels of GAI use, and gender-related differences are also observed in the contribution of several explanatory variables. In particular, substantive effect sizes are observed for PSU, OPTI, INSEC, entrepreneurial status, and membership in Generation Y. By contrast, differences associated with especially relevant variables such as PEU and INNOV, as well as membership in Generation Z, do not exhibit meaningful effect sizes.</p>
	]]></content:encoded>

	<dc:title>Drivers and Barriers to the Use of Generative Artificial Intelligence in the Spanish Active Population: Insights from Artificial Neural Network Modeling and Shapley Additive Explanations</dc:title>
			<dc:creator>Teresa Torres-Coronas</dc:creator>
			<dc:creator>Jorge de Andrés-Sánchez</dc:creator>
			<dc:creator>Orlando Lima Rua</dc:creator>
			<dc:creator>Álvaro Carrasco-Aguilar</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040215</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-01</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-01</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>215</prism:startingPage>
		<prism:doi>10.3390/computers15040215</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/215</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/214">

	<title>Computers, Vol. 15, Pages 214: A Dynamic Clustering Framework for Intelligent Task Orchestration in Mobile Edge Computing</title>
	<link>https://www.mdpi.com/2073-431X/15/4/214</link>
	<description>Mobile edge computing (MEC) enables resource-constrained mobile devices to execute delay-sensitive and compute-intensive applications by offloading tasks to nearby edge servers. However, task orchestration in MEC is challenged by the highly dynamic system conditions, unreliable networks, and distributed edge environments. Moreover, as the number of mobile users, tasks, and distributed computing resources (edge/cloud servers) increases, the task orchestration process becomes more complex due to the expanded decision space and the need to efficiently allocate heterogeneous resources under latency and capacity constraints. As the decision space grows, exhaustive-search-based orchestration becomes computationally infeasible. Clustering approaches often rely on proximity-only grouping, while learning-based solutions require extensive training and parameter tuning. To address these challenges, this paper proposes a Multi-Criteria Hierarchical Clustering-based Task Orchestrator (MCHC-TO), a novel framework that integrates multi-criteria decision making with divisive hierarchical clustering for preference-aware and adaptive workload orchestration. Edge servers are first evaluated using multiple decision criteria, and the resulting preference rankings are exploited to form hierarchical preference-based clusters. Incoming tasks are then assigned to the most suitable cluster based on task requirements, enabling efficient resource utilization and dynamic decision-making. Extensive simulations conducted using an edge computing simulator demonstrate that the proposed MCHC-TO framework consistently outperforms benchmark approaches, achieving reductions in average service delay and task failure rate of up to 48% and 92%, respectively. These results highlight the effectiveness of combining multi-criteria evaluation with hierarchical clustering for robust and dynamic task orchestration in MEC environments.</description>
	<pubDate>2026-04-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 214: A Dynamic Clustering Framework for Intelligent Task Orchestration in Mobile Edge Computing</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/214">doi: 10.3390/computers15040214</a></p>
	<p>Authors:
		Mona Alghamdi
		Atm S. Alam
		Asma Cherif
		</p>
	<p>Mobile edge computing (MEC) enables resource-constrained mobile devices to execute delay-sensitive and compute-intensive applications by offloading tasks to nearby edge servers. However, task orchestration in MEC is challenged by the highly dynamic system conditions, unreliable networks, and distributed edge environments. Moreover, as the number of mobile users, tasks, and distributed computing resources (edge/cloud servers) increases, the task orchestration process becomes more complex due to the expanded decision space and the need to efficiently allocate heterogeneous resources under latency and capacity constraints. As the decision space grows, exhaustive-search-based orchestration becomes computationally infeasible. Clustering approaches often rely on proximity-only grouping, while learning-based solutions require extensive training and parameter tuning. To address these challenges, this paper proposes a Multi-Criteria Hierarchical Clustering-based Task Orchestrator (MCHC-TO), a novel framework that integrates multi-criteria decision making with divisive hierarchical clustering for preference-aware and adaptive workload orchestration. Edge servers are first evaluated using multiple decision criteria, and the resulting preference rankings are exploited to form hierarchical preference-based clusters. Incoming tasks are then assigned to the most suitable cluster based on task requirements, enabling efficient resource utilization and dynamic decision-making. Extensive simulations conducted using an edge computing simulator demonstrate that the proposed MCHC-TO framework consistently outperforms benchmark approaches, achieving reductions in average service delay and task failure rate of up to 48% and 92%, respectively. These results highlight the effectiveness of combining multi-criteria evaluation with hierarchical clustering for robust and dynamic task orchestration in MEC environments.</p>
	]]></content:encoded>

	<dc:title>A Dynamic Clustering Framework for Intelligent Task Orchestration in Mobile Edge Computing</dc:title>
			<dc:creator>Mona Alghamdi</dc:creator>
			<dc:creator>Atm S. Alam</dc:creator>
			<dc:creator>Asma Cherif</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040214</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-01</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-01</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>214</prism:startingPage>
		<prism:doi>10.3390/computers15040214</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/214</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/213">

	<title>Computers, Vol. 15, Pages 213: A Reproducible Computational Pipeline for Cross-Database Scientometric Network Construction: Architecture, Algorithms, and Structural Validation</title>
	<link>https://www.mdpi.com/2073-431X/15/4/213</link>
	<description>The rapid expansion of scientific publications indexed in multiple bibliographic databases has created new computational challenges for large-scale scientometric analysis. Differences in metadata schemas, identifier structures, and export formats across indexing systems such as Web of Science and Scopus introduce inconsistencies that may distort network-based bibliometric analyses. These issues affect duplicate detection, node identification, and network topology construction. This study proposes a reproducible computational pipeline for cross-database scientometric network construction. The framework formalizes the preprocessing workflow into explicit computational modules, including metadata harmonization, deterministic duplicate detection, sparse graph construction, normalization, and structural diagnostics. The proposed architecture separates preprocessing stages into reproducible algorithmic components, enabling transparent evaluation of methodological assumptions. Empirical evaluation using an interdisciplinary dataset of 317 publications (1990&amp;ndash;2023) demonstrates that deterministic preprocessing significantly improves network stability and preserves clustering structure. Structural diagnostics based on modularity, Herfindahl&amp;ndash;Hirschman Index, Shannon entropy, and Gini coefficient provide multi-dimensional evaluation of network topology. Scalability experiments confirm near-linear computational growth under sparse graph construction. The principal contribution of this work lies in the formalization of a transparent and extensible computational architecture for reproducible scientometric analysis. The proposed pipeline supports reliable cross-database integration and enables scalable knowledge-mapping applications in interdisciplinary research domains.</description>
	<pubDate>2026-03-31</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 213: A Reproducible Computational Pipeline for Cross-Database Scientometric Network Construction: Architecture, Algorithms, and Structural Validation</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/213">doi: 10.3390/computers15040213</a></p>
	<p>Authors:
		Denny Moreno-Castro
		Omar Orlando Franco-Arias
		Cícero Pimenteira
		Nicolás Márquez
		Cristian Vidal-Silva
		</p>
	<p>The rapid expansion of scientific publications indexed in multiple bibliographic databases has created new computational challenges for large-scale scientometric analysis. Differences in metadata schemas, identifier structures, and export formats across indexing systems such as Web of Science and Scopus introduce inconsistencies that may distort network-based bibliometric analyses. These issues affect duplicate detection, node identification, and network topology construction. This study proposes a reproducible computational pipeline for cross-database scientometric network construction. The framework formalizes the preprocessing workflow into explicit computational modules, including metadata harmonization, deterministic duplicate detection, sparse graph construction, normalization, and structural diagnostics. The proposed architecture separates preprocessing stages into reproducible algorithmic components, enabling transparent evaluation of methodological assumptions. Empirical evaluation using an interdisciplinary dataset of 317 publications (1990&ndash;2023) demonstrates that deterministic preprocessing significantly improves network stability and preserves clustering structure. Structural diagnostics based on modularity, Herfindahl&ndash;Hirschman Index, Shannon entropy, and Gini coefficient provide multi-dimensional evaluation of network topology. Scalability experiments confirm near-linear computational growth under sparse graph construction. The principal contribution of this work lies in the formalization of a transparent and extensible computational architecture for reproducible scientometric analysis. The proposed pipeline supports reliable cross-database integration and enables scalable knowledge-mapping applications in interdisciplinary research domains.</p>
	]]></content:encoded>

	<dc:title>A Reproducible Computational Pipeline for Cross-Database Scientometric Network Construction: Architecture, Algorithms, and Structural Validation</dc:title>
			<dc:creator>Denny Moreno-Castro</dc:creator>
			<dc:creator>Omar Orlando Franco-Arias</dc:creator>
			<dc:creator>Cícero Pimenteira</dc:creator>
			<dc:creator>Nicolás Márquez</dc:creator>
			<dc:creator>Cristian Vidal-Silva</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040213</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-31</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-31</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>213</prism:startingPage>
		<prism:doi>10.3390/computers15040213</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/213</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/212">

	<title>Computers, Vol. 15, Pages 212: A Federated FHIR-Based Interoperability Framework for Multi-Site Heart Failure Monitoring: The RETENTION Project</title>
	<link>https://www.mdpi.com/2073-431X/15/4/212</link>
	<description>Heart failure management increasingly relies on heterogeneous clinical and real-world data generated through remote monitoring technologies. However, transforming these multimodal data streams into actionable insights requires robust interoperability infrastructures. This study presents the RETENTION interoperability framework, a federated HL7 Fast Healthcare Interoperability Resources (FHIR)-based architecture designed to support multi-site heart failure monitoring across five independent clinical environments. A semantic reference model comprising 444 clinical and contextual variables was developed and aligned with FHIR R4 resources and internationally recognised terminology systems. The platform adopts a selective profiling strategy, extending only the Patient resource while standardising the remaining variables through example-driven Implementation Guide documentation. Identifiable data are retained locally within Clinical Site Backends, whereas anonymised datasets are periodically aggregated into a Global Insights Cloud to enable centralised analytics and controlled third-party interactions. The framework was deployed across six hospitals (with two Spanish hospitals sharing the same deployment), supporting 390 patients and over 130,000 patient-days of monitoring, with more than 3.6 million remote device data points harmonised without schema conflicts. The results demonstrate that large-scale semantic harmonisation and privacy-preserving aggregation can be achieved using a lightweight profiling approach, providing a scalable and reproducible interoperability model for multi-centre digital health research infrastructures.</description>
	<pubDate>2026-03-31</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 212: A Federated FHIR-Based Interoperability Framework for Multi-Site Heart Failure Monitoring: The RETENTION Project</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/212">doi: 10.3390/computers15040212</a></p>
	<p>Authors:
		Nikolaos Vasileiou
		Olympia Giannakopoulou
		Ourania Manta
		Konstantinos Bromis
		Theodoros P. Vagenas
		Ioannis Kouris
		Maria Roumpi
		Lefteris Koumakis
		Yorgos Goletsis
		Maria Haritou
		George K. Matsopoulos
		Dimitris Fotiadis
		Dimitris D. Koutsouris
		</p>
	<p>Heart failure management increasingly relies on heterogeneous clinical and real-world data generated through remote monitoring technologies. However, transforming these multimodal data streams into actionable insights requires robust interoperability infrastructures. This study presents the RETENTION interoperability framework, a federated HL7 Fast Healthcare Interoperability Resources (FHIR)-based architecture designed to support multi-site heart failure monitoring across five independent clinical environments. A semantic reference model comprising 444 clinical and contextual variables was developed and aligned with FHIR R4 resources and internationally recognised terminology systems. The platform adopts a selective profiling strategy, extending only the Patient resource while standardising the remaining variables through example-driven Implementation Guide documentation. Identifiable data are retained locally within Clinical Site Backends, whereas anonymised datasets are periodically aggregated into a Global Insights Cloud to enable centralised analytics and controlled third-party interactions. The framework was deployed across six hospitals (with two Spanish hospitals sharing the same deployment), supporting 390 patients and over 130,000 patient-days of monitoring, with more than 3.6 million remote device data points harmonised without schema conflicts. The results demonstrate that large-scale semantic harmonisation and privacy-preserving aggregation can be achieved using a lightweight profiling approach, providing a scalable and reproducible interoperability model for multi-centre digital health research infrastructures.</p>
	]]></content:encoded>

	<dc:title>A Federated FHIR-Based Interoperability Framework for Multi-Site Heart Failure Monitoring: The RETENTION Project</dc:title>
			<dc:creator>Nikolaos Vasileiou</dc:creator>
			<dc:creator>Olympia Giannakopoulou</dc:creator>
			<dc:creator>Ourania Manta</dc:creator>
			<dc:creator>Konstantinos Bromis</dc:creator>
			<dc:creator>Theodoros P. Vagenas</dc:creator>
			<dc:creator>Ioannis Kouris</dc:creator>
			<dc:creator>Maria Roumpi</dc:creator>
			<dc:creator>Lefteris Koumakis</dc:creator>
			<dc:creator>Yorgos Goletsis</dc:creator>
			<dc:creator>Maria Haritou</dc:creator>
			<dc:creator>George K. Matsopoulos</dc:creator>
			<dc:creator>Dimitris Fotiadis</dc:creator>
			<dc:creator>Dimitris D. Koutsouris</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040212</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-31</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-31</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>212</prism:startingPage>
		<prism:doi>10.3390/computers15040212</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/212</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/211">

	<title>Computers, Vol. 15, Pages 211: Novel Hybrid Nature-Inspired Metaheuristic Algorithm for Global and Engineering Design Optimization</title>
	<link>https://www.mdpi.com/2073-431X/15/4/211</link>
	<description>Metaheuristic algorithms have become indispensable for solving high-dimensional, non-convex, and constrained optimization problems arising in science and engineering. However, no single method can simultaneously provide strong global exploration, accurate local exploitation, and robust performance across diverse problem classes. This paper proposes JADEFLO, a new hybrid nature-inspired metaheuristic that couples Adaptive Differential Evolution with Optional External Archive (JADE) and Frilled Lizard Optimization (FLO) in a two-stage search framework. In the first stage, JADE drives global exploration using p-best mutation, an external archive, and adaptive control of the mutation factor and crossover rate to maintain population diversity. In the second stage, FLO performs intensive local refinement by mimicking the hunting and tree-climbing behaviors of frilled lizards through dedicated exploration and exploitation moves. The resulting algorithm has linear time complexity with respect to the population size, dimensionality, and number of iterations. JADEFLO is evaluated on the IEEE CEC 2022 single-objective benchmark suite (F1&amp;ndash;F12) and three constrained engineering design problems (Pressure Vessel, tension/compression spring, and speed reducer), using 30 independent runs and comparisons against more than thirty state-of-the-art metaheuristics, including GA, PSO, DE variants, GWO, WOA, MFO, and FLO. The results show that JADEFLO attains the best overall rank on the CEC functions, delivers faster convergence and higher accuracy on most test cases, and matches or improves the best-known designs with markedly reduced variance. These findings indicate that JADEFLO is a promising general-purpose optimizer and a flexible foundation for future extensions to multi-objective and large-scale optimization.</description>
	<pubDate>2026-03-27</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 211: Novel Hybrid Nature-Inspired Metaheuristic Algorithm for Global and Engineering Design Optimization</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/211">doi: 10.3390/computers15040211</a></p>
	<p>Authors:
		Hasan Kanaker
		Osama Al Sayaydeh
		Essam Alhroob
		Nader Abdel Karim
		Sami Smadi
		Nurul Halimatul Asmak Ismail
		</p>
	<p>Metaheuristic algorithms have become indispensable for solving high-dimensional, non-convex, and constrained optimization problems arising in science and engineering. However, no single method can simultaneously provide strong global exploration, accurate local exploitation, and robust performance across diverse problem classes. This paper proposes JADEFLO, a new hybrid nature-inspired metaheuristic that couples Adaptive Differential Evolution with Optional External Archive (JADE) and Frilled Lizard Optimization (FLO) in a two-stage search framework. In the first stage, JADE drives global exploration using p-best mutation, an external archive, and adaptive control of the mutation factor and crossover rate to maintain population diversity. In the second stage, FLO performs intensive local refinement by mimicking the hunting and tree-climbing behaviors of frilled lizards through dedicated exploration and exploitation moves. The resulting algorithm has linear time complexity with respect to the population size, dimensionality, and number of iterations. JADEFLO is evaluated on the IEEE CEC 2022 single-objective benchmark suite (F1&ndash;F12) and three constrained engineering design problems (Pressure Vessel, tension/compression spring, and speed reducer), using 30 independent runs and comparisons against more than thirty state-of-the-art metaheuristics, including GA, PSO, DE variants, GWO, WOA, MFO, and FLO. The results show that JADEFLO attains the best overall rank on the CEC functions, delivers faster convergence and higher accuracy on most test cases, and matches or improves the best-known designs with markedly reduced variance. These findings indicate that JADEFLO is a promising general-purpose optimizer and a flexible foundation for future extensions to multi-objective and large-scale optimization.</p>
	]]></content:encoded>

	<dc:title>Novel Hybrid Nature-Inspired Metaheuristic Algorithm for Global and Engineering Design Optimization</dc:title>
			<dc:creator>Hasan Kanaker</dc:creator>
			<dc:creator>Osama Al Sayaydeh</dc:creator>
			<dc:creator>Essam Alhroob</dc:creator>
			<dc:creator>Nader Abdel Karim</dc:creator>
			<dc:creator>Sami Smadi</dc:creator>
			<dc:creator>Nurul Halimatul Asmak Ismail</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040211</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-27</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-27</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>211</prism:startingPage>
		<prism:doi>10.3390/computers15040211</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/211</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/210">

	<title>Computers, Vol. 15, Pages 210: SQDPoS: A Secure and Practical Semi-Quantum Blockchain System for the Post-Quantum Era</title>
	<link>https://www.mdpi.com/2073-431X/15/4/210</link>
	<description>The rapid development of quantum computing poses severe threats to traditional blockchain security mechanisms, while existing full-quantum blockchains face challenges regarding high hardware costs and limited scalability. To address these issues, this paper proposes a secure and practical semi-quantum blockchain system. Specifically, a Semi-Quantum Delegated Proof of Stake consensus mechanism is constructed by integrating an adapted semi-quantum voting protocol with the Borda count method and a malicious behavior penalty model. Furthermore, a lightweight transaction verification framework is designed based on semi-quantum key distribution, enabling classical users with limited quantum capabilities to participate securely. Theoretical analysis demonstrates that the system achieves unconditional security against quantum attacks while maintaining high throughput. These results indicate that the proposed asymmetric resource design significantly lowers hardware barriers compared to full-quantum schemes, effectively balancing security, practicality, and cost-effectiveness for post-quantum blockchain networks.</description>
	<pubDate>2026-03-27</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 210: SQDPoS: A Secure and Practical Semi-Quantum Blockchain System for the Post-Quantum Era</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/210">doi: 10.3390/computers15040210</a></p>
	<p>Authors:
		Ang Liu
		Qi An
		Sijiang Xie
		Yalong Yan
		</p>
	<p>The rapid development of quantum computing poses severe threats to traditional blockchain security mechanisms, while existing full-quantum blockchains face challenges regarding high hardware costs and limited scalability. To address these issues, this paper proposes a secure and practical semi-quantum blockchain system. Specifically, a Semi-Quantum Delegated Proof of Stake consensus mechanism is constructed by integrating an adapted semi-quantum voting protocol with the Borda count method and a malicious behavior penalty model. Furthermore, a lightweight transaction verification framework is designed based on semi-quantum key distribution, enabling classical users with limited quantum capabilities to participate securely. Theoretical analysis demonstrates that the system achieves unconditional security against quantum attacks while maintaining high throughput. These results indicate that the proposed asymmetric resource design significantly lowers hardware barriers compared to full-quantum schemes, effectively balancing security, practicality, and cost-effectiveness for post-quantum blockchain networks.</p>
	]]></content:encoded>

	<dc:title>SQDPoS: A Secure and Practical Semi-Quantum Blockchain System for the Post-Quantum Era</dc:title>
			<dc:creator>Ang Liu</dc:creator>
			<dc:creator>Qi An</dc:creator>
			<dc:creator>Sijiang Xie</dc:creator>
			<dc:creator>Yalong Yan</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040210</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-27</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-27</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>210</prism:startingPage>
		<prism:doi>10.3390/computers15040210</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/210</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/209">

	<title>Computers, Vol. 15, Pages 209: Interpretable Photoplethysmography Feature Engineering for Multi-Class Blood Pressure Staging</title>
	<link>https://www.mdpi.com/2073-431X/15/4/209</link>
	<description>Hypertension is a leading global health risk and requires accurate and continuous monitoring for effective management. Although photoplethysmography (PPG) is a promising non-invasive modality for cuffless blood pressure (BP) assessment, many existing approaches (especially raw-signal deep learning) are vulnerable to data leakage, overfitting on small datasets, limited interpretability, and poor performance on minority BP stages. To address these limitations, we propose a robust and physiologically grounded framework for multi-class BP stage classification based on interpretable PPG features. Our approach centers on a comprehensive multi-domain feature engineering pipeline that extracts 124 PPG features, including demographic, morphological, functional decomposition, spectral, nonlinear dynamics, and clinical composite indices. We apply rigorous preprocessing and feature selection prior to model training. We validate the framework on two datasets: PPG-BP dataset (657 segments, 4 classes) for benchmarking and PulseDB (283,773 segments, 3 classes) to assess scalability. We evaluate the proposed framework using a segment-level train/test split, appropriate for assessing intra-subject BP tracking after initial personalization. For the PulseDB dataset, this follows the protocol established by the dataset creators, while for the PPG-BP dataset, it enables direct comparison with prior work given practical dataset constraints. On PPG-BP, LightGBM trained on the selected features achieved macro-F1 = 0.78 and accuracy = 0.74, outperforming comparable deep-learning models. On the PulseDB, a custom Residual MLP achieved accuracy = 0.81 and macro-F1 = 0.79, supporting generalization at scale. These results show that the proposed feature-based approach can outperform complex end-to-end deep-learning models on small datasets while providing improved interpretability. 
This work establishes a reliable and transparent pathway toward clinically viable continuous BP staging, moving beyond black-box models toward physiologically grounded decision support. Ablation analysis reveals that engineered features provide most of the predictive power (F1 = 0.911), while raw PPG features alone achieve modest performance (F1 = 0.384). For the minority hypertension stage 2 (HT-2) class, a bootstrap 95% confidence interval of [0.762, 1.000] is reported, reflecting uncertainty due to limited sample size.</description>
	<pubDate>2026-03-27</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 209: Interpretable Photoplethysmography Feature Engineering for Multi-Class Blood Pressure Staging</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/209">doi: 10.3390/computers15040209</a></p>
	<p>Authors:
		Souhair Msokar
		Roman Davydov
		Vadim Davydov
		</p>
	<p>Hypertension is a leading global health risk and requires accurate and continuous monitoring for effective management. Although photoplethysmography (PPG) is a promising non-invasive modality for cuffless blood pressure (BP) assessment, many existing approaches (especially raw-signal deep learning) are vulnerable to data leakage, overfitting on small datasets, limited interpretability, and poor performance on minority BP stages. To address these limitations, we propose a robust and physiologically grounded framework for multi-class BP stage classification based on interpretable PPG features. Our approach centers on a comprehensive multi-domain feature engineering pipeline that extracts 124 PPG features, including demographic, morphological, functional decomposition, spectral, nonlinear dynamics, and clinical composite indices. We apply rigorous preprocessing and feature selection prior to model training. We validate the framework on two datasets: PPG-BP dataset (657 segments, 4 classes) for benchmarking and PulseDB (283,773 segments, 3 classes) to assess scalability. We evaluate the proposed framework using a segment-level train/test split, appropriate for assessing intra-subject BP tracking after initial personalization. For the PulseDB dataset, this follows the protocol established by the dataset creators, while for the PPG-BP dataset, it enables direct comparison with prior work given practical dataset constraints. On PPG-BP, LightGBM trained on the selected features achieved macro-F1 = 0.78 and accuracy = 0.74, outperforming comparable deep-learning models. On the PulseDB, a custom Residual MLP achieved accuracy = 0.81 and macro-F1 = 0.79, supporting generalization at scale. These results show that the proposed feature-based approach can outperform complex end-to-end deep-learning models on small datasets while providing improved interpretability. 
This work establishes a reliable and transparent pathway toward clinically viable continuous BP staging, moving beyond black-box models toward physiologically grounded decision support. Ablation analysis reveals that engineered features provide most of the predictive power (F1 = 0.911), while raw PPG features alone achieve modest performance (F1 = 0.384). For the minority hypertension stage 2 (HT-2) class, a bootstrap 95% confidence interval of [0.762, 1.000] is reported, reflecting uncertainty due to limited sample size.</p>
	]]></content:encoded>

	<dc:title>Interpretable Photoplethysmography Feature Engineering for Multi-Class Blood Pressure Staging</dc:title>
			<dc:creator>Souhair Msokar</dc:creator>
			<dc:creator>Roman Davydov</dc:creator>
			<dc:creator>Vadim Davydov</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040209</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-27</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-27</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>209</prism:startingPage>
		<prism:doi>10.3390/computers15040209</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/209</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/208">

	<title>Computers, Vol. 15, Pages 208: Context-Aware Decision Fusion for Multimodal Access Control Under Contradictory Biometric Evidence</title>
	<link>https://www.mdpi.com/2073-431X/15/4/208</link>
	<description>Access control systems rely increasingly on multimodal biometric and behavioral signals to enhance security and robustness against sophisticated attacks. However, when heterogeneous modalities provide conflicting evidence, such as valid biometric credentials accompanied by abnormal behavioral or acoustic patterns, traditional fusion strategies based on static thresholds or majority voting often fail, leading to false alarms or insecure authorization decisions. This paper addresses this critical limitation by proposing a contextual decision-making fusion framework designed to resolve conflicting multimodal evidence at the decision-making level. The proposed approach models access control as a decision-making problem in a context of uncertainty, where independent agents generate modality-specific evidence from authentication channels based on face, voice, and fingerprints. A centralized fusion mechanism integrates heterogeneous results using adaptive reliability weighting and contextual reasoning to resolve conflicts before operational decisions are made. Rather than treating each modality independently, the framework explicitly considers inconsistencies, uncertainties, and situational context when aggregating evidence. The framework is evaluated using public benchmarks, including VGGFace2, VoxCeleb2, and FVC2004, combined with controlled multimodal scenarios that induce conflicting evidence. Experimental results obtained under controlled contradiction scenarios show that the proposed fusion strategy reduces false alarms and improves decision consistency by approximately 18%. These results are interpreted within the scope of controlled multimodal simulations.</description>
	<pubDate>2026-03-27</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 208: Context-Aware Decision Fusion for Multimodal Access Control Under Contradictory Biometric Evidence</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/208">doi: 10.3390/computers15040208</a></p>
	<p>Authors:
		Yasser Hmimou
		Azedine Khiat
		Hassna Bensag
		Zineb Hidila
		Mohamed Tabaa
		</p>
	<p>Access control systems rely increasingly on multimodal biometric and behavioral signals to enhance security and robustness against sophisticated attacks. However, when heterogeneous modalities provide conflicting evidence, such as valid biometric credentials accompanied by abnormal behavioral or acoustic patterns, traditional fusion strategies based on static thresholds or majority voting often fail, leading to false alarms or insecure authorization decisions. This paper addresses this critical limitation by proposing a contextual decision-making fusion framework designed to resolve conflicting multimodal evidence at the decision-making level. The proposed approach models access control as a decision-making problem in a context of uncertainty, where independent agents generate modality-specific evidence from authentication channels based on face, voice, and fingerprints. A centralized fusion mechanism integrates heterogeneous results using adaptive reliability weighting and contextual reasoning to resolve conflicts before operational decisions are made. Rather than treating each modality independently, the framework explicitly considers inconsistencies, uncertainties, and situational context when aggregating evidence. The framework is evaluated using public benchmarks, including VGGFace2, VoxCeleb2, and FVC2004, combined with controlled multimodal scenarios that induce conflicting evidence. Experimental results obtained under controlled contradiction scenarios show that the proposed fusion strategy reduces false alarms and improves decision consistency by approximately 18%. These results are interpreted within the scope of controlled multimodal simulations.</p>
	]]></content:encoded>

	<dc:title>Context-Aware Decision Fusion for Multimodal Access Control Under Contradictory Biometric Evidence</dc:title>
			<dc:creator>Yasser Hmimou</dc:creator>
			<dc:creator>Azedine Khiat</dc:creator>
			<dc:creator>Hassna Bensag</dc:creator>
			<dc:creator>Zineb Hidila</dc:creator>
			<dc:creator>Mohamed Tabaa</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040208</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-27</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-27</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>208</prism:startingPage>
		<prism:doi>10.3390/computers15040208</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/208</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/207">

	<title>Computers, Vol. 15, Pages 207: D-SOMA: A Dynamic Self-Organizing Map-Assisted Multi-Objective Evolutionary Algorithm with Adaptive Subregion Characterization</title>
	<link>https://www.mdpi.com/2073-431X/15/4/207</link>
	<description>Multi-objective evolutionary optimization faces significant challenges due to guidance mismatch under complex Pareto-front geometries. This paper proposes a dynamic self-organizing map-assisted evolutionary algorithm (D-SOMA), a manifold-aware framework that harmonizes knowledge-informed priors with unsupervised objective-space characterization. Specifically, a knowledge-informed guided resampling strategy is formulated to bridge stochastic initialization and targeted exploitation. By distilling spatial distribution priors from the decision-variable boundaries of early-stage elite solutions, it establishes a high-quality starting population biased towards promising regions. To capture the intrinsic geometry of the evolving population, a self-organizing map (SOM)-based adaptive subregion characterization strategy leverages the topological preservation of self-organizing maps to extract latent modeling parameters. This strategy adaptively determines subregion centers and influence radii, enabling a data-driven partitioning that respects the underlying manifold structure. Furthermore, a density-driven phase-responsive scale adjustment strategy is introduced. By synthesizing spatial density feedback and temporal evolutionary trajectories, it dynamically modulates the characterization granularity K, thereby maintaining a rigorous balance between geometric modeling fidelity and computational overhead. Extensive experiments on 50 benchmark problems from the DTLZ, WFG, MaF and RWMOP suites demonstrate that D-SOMA is statistically superior to seven state-of-the-art algorithms, exhibiting robust convergence and superior diversity across diverse problem landscapes.</description>
	<pubDate>2026-03-26</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 207: D-SOMA: A Dynamic Self-Organizing Map-Assisted Multi-Objective Evolutionary Algorithm with Adaptive Subregion Characterization</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/207">doi: 10.3390/computers15040207</a></p>
	<p>Authors:
		Xinru Zhang
		Tianyu Liu
		</p>
	<p>Multi-objective evolutionary optimization faces significant challenges due to guidance mismatch under complex Pareto-front geometries. This paper proposes a dynamic self-organizing map-assisted evolutionary algorithm (D-SOMA), a manifold-aware framework that harmonizes knowledge-informed priors with unsupervised objective-space characterization. Specifically, a knowledge-informed guided resampling strategy is formulated to bridge stochastic initialization and targeted exploitation. By distilling spatial distribution priors from the decision-variable boundaries of early-stage elite solutions, it establishes a high-quality starting population biased towards promising regions. To capture the intrinsic geometry of the evolving population, a self-organizing map (SOM)-based adaptive subregion characterization strategy leverages the topological preservation of self-organizing maps to extract latent modeling parameters. This strategy adaptively determines subregion centers and influence radii, enabling a data-driven partitioning that respects the underlying manifold structure. Furthermore, a density-driven phase-responsive scale adjustment strategy is introduced. By synthesizing spatial density feedback and temporal evolutionary trajectories, it dynamically modulates the characterization granularity K, thereby maintaining a rigorous balance between geometric modeling fidelity and computational overhead. Extensive experiments on 50 benchmark problems from the DTLZ, WFG, MaF and RWMOP suites demonstrate that D-SOMA is statistically superior to seven state-of-the-art algorithms, exhibiting robust convergence and superior diversity across diverse problem landscapes.</p>
	]]></content:encoded>

	<dc:title>D-SOMA: A Dynamic Self-Organizing Map-Assisted Multi-Objective Evolutionary Algorithm with Adaptive Subregion Characterization</dc:title>
			<dc:creator>Xinru Zhang</dc:creator>
			<dc:creator>Tianyu Liu</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040207</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-26</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-26</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>207</prism:startingPage>
		<prism:doi>10.3390/computers15040207</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/207</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/206">

	<title>Computers, Vol. 15, Pages 206: Zero-Knowledge Federated Learning for Privacy-Preserving 5G Authentication</title>
	<link>https://www.mdpi.com/2073-431X/15/4/206</link>
	<description>Fifth-generation (5G) networks are facing critical security challenges in device authentication for massive Internet of Things deployments while preserving privacy. Traditional federated learning approaches depend on the computationally expensive homomorphic encryption to protect model gradients, resulting in substantial latency and communication overhead, leading to impractical energy consumption for resource-constrained 5G devices. This paper proposes Zero-Knowledge Federated Learning (ZK-FL), eliminating homomorphic encryption by enabling devices to prove model correctness without revealing gradients. Our approach integrates zero-knowledge proofs with FL updates, where each device generates a proof Proofi=ZK(Gradienti,Hashi), demonstrating computational integrity. The experimental results from 10,000 authentication attempts demonstrate ZK-FL achieves 78.4 ms average authentication latency versus 342.5 ms for homomorphic encryption-based FL (77% reduction), proof sizes of 0.128 kB versus 512 kB (99.97% reduction), and energy consumption of 284.5 mJ versus 6525 mJ (95% reduction), while maintaining 99.3% authentication success rate with formal privacy guarantees. These results demonstrate ZK-FL enables practical privacy-preserving authentication for massive-scale 5G deployment.</description>
	<pubDate>2026-03-26</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 206: Zero-Knowledge Federated Learning for Privacy-Preserving 5G Authentication</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/206">doi: 10.3390/computers15040206</a></p>
	<p>Authors:
		Ahmed Lateef Salih Al-Karawi
		Rafet Akdeniz
		</p>
	<p>Fifth-generation (5G) networks are facing critical security challenges in device authentication for massive Internet of Things deployments while preserving privacy. Traditional federated learning approaches depend on the computationally expensive homomorphic encryption to protect model gradients, resulting in substantial latency and communication overhead, leading to impractical energy consumption for resource-constrained 5G devices. This paper proposes Zero-Knowledge Federated Learning (ZK-FL), eliminating homomorphic encryption by enabling devices to prove model correctness without revealing gradients. Our approach integrates zero-knowledge proofs with FL updates, where each device generates a proof Proofi=ZK(Gradienti,Hashi), demonstrating computational integrity. The experimental results from 10,000 authentication attempts demonstrate ZK-FL achieves 78.4 ms average authentication latency versus 342.5 ms for homomorphic encryption-based FL (77% reduction), proof sizes of 0.128 kB versus 512 kB (99.97% reduction), and energy consumption of 284.5 mJ versus 6525 mJ (95% reduction), while maintaining 99.3% authentication success rate with formal privacy guarantees. These results demonstrate ZK-FL enables practical privacy-preserving authentication for massive-scale 5G deployment.</p>
	]]></content:encoded>

	<dc:title>Zero-Knowledge Federated Learning for Privacy-Preserving 5G Authentication</dc:title>
			<dc:creator>Ahmed Lateef Salih Al-Karawi</dc:creator>
			<dc:creator>Rafet Akdeniz</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040206</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-26</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-26</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>206</prism:startingPage>
		<prism:doi>10.3390/computers15040206</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/206</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/205">

	<title>Computers, Vol. 15, Pages 205: Course-Bound and Beyond-Course Interaction in Higher Education: Exploring the Latent Structure of a Perception Scale</title>
	<link>https://www.mdpi.com/2073-431X/15/4/205</link>
	<description>This study explores the latent structure of students&amp;rsquo; perceptions about (a) teacher strategies to promote interaction (FOM) and (b) perceived interaction with actors and content (ACT) in undergraduate courses. Using survey responses from 158 students, we conducted exploratory factor analysis (MINRES, varimax) after assessing factorability (KMO, Bartlett) and factor retention (parallel analysis). Analyses were conducted in Python (Google Colab) using pandas, NumPy, SciPy, factor_analyzer, and Pingouin. The analysis yielded an interpretable two-factor solution in which FOM items load primarily on one factor, while a subset of ACT items (Act5&amp;ndash;Act8) loads more strongly on a second factor, although several ACT items and some FOM items also show non-trivial cross-loadings. We interpreted factor loadings &amp;ge; 0.30 and report results based on the corrected item set. Findings are exploratory and suggest that perceived promotion of interaction aligns more closely with within-course interaction targets than with extra-course ones.</description>
	<pubDate>2026-03-26</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 205: Course-Bound and Beyond-Course Interaction in Higher Education: Exploring the Latent Structure of a Perception Scale</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/205">doi: 10.3390/computers15040205</a></p>
	<p>Authors:
		Andrés F. Mena-Guacas
		Eilien Tovio-Martínez
		Claudia Liliana Muñoz
		Eloy López-Meneses
		</p>
	<p>This study explores the latent structure of students&rsquo; perceptions about (a) teacher strategies to promote interaction (FOM) and (b) perceived interaction with actors and content (ACT) in undergraduate courses. Using survey responses from 158 students, we conducted exploratory factor analysis (MINRES, varimax) after assessing factorability (KMO, Bartlett) and factor retention (parallel analysis). Analyses were conducted in Python (Google Colab) using pandas, NumPy, SciPy, factor_analyzer, and Pingouin. The analysis yielded an interpretable two-factor solution in which FOM items load primarily on one factor, while a subset of ACT items (Act5&ndash;Act8) loads more strongly on a second factor, although several ACT items and some FOM items also show non-trivial cross-loadings. We interpreted factor loadings &ge; 0.30 and report results based on the corrected item set. Findings are exploratory and suggest that perceived promotion of interaction aligns more closely with within-course interaction targets than with extra-course ones.</p>
	]]></content:encoded>

	<dc:title>Course-Bound and Beyond-Course Interaction in Higher Education: Exploring the Latent Structure of a Perception Scale</dc:title>
			<dc:creator>Andrés F. Mena-Guacas</dc:creator>
			<dc:creator>Eilien Tovio-Martínez</dc:creator>
			<dc:creator>Claudia Liliana Muñoz</dc:creator>
			<dc:creator>Eloy López-Meneses</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040205</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-26</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-26</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>205</prism:startingPage>
		<prism:doi>10.3390/computers15040205</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/205</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/204">

	<title>Computers, Vol. 15, Pages 204: AI-Powered Natural Language Processing Framework for Reverse-Engineering Examination Questions from Marking Schemes</title>
	<link>https://www.mdpi.com/2073-431X/15/4/204</link>
	<description>The generation of examination questions from examiner-provided marking schemes remains a critical yet underexplored challenge in automated assessment. This study proposes an AI-powered natural language processing (NLP) framework that reverse-engineers exam questions using transformer-based generative modeling, semantic reconstruction, and pedagogical constraints. Marking schemes are encoded with MPNet embeddings and decoded into candidate questions by a T5-small model, with a reconstruction module ensuring semantic fidelity and Bloom-level embeddings enforcing cognitive alignment. Evaluation on a dataset of 7021 marking schemes from Sol Plaatje University demonstrated strong performance, with BLEU = 0.71, ROUGE-L = 0.68, METEOR = 0.65, reconstruction fidelity = 0.84, and Bloom-level accuracy = 0.79. Comparative baselines, including an unconstrained T5 (BLEU = 0.62, RF = 0.68, Bloom = 0.56) and rule-based methods (BLEU = 0.48, RF = 0.51, Bloom = 0.43), confirmed the effectiveness of the proposed approach. The results indicate that the framework generates questions that are semantically accurate, structurally coherent, and pedagogically valid, offering a scalable solution for adaptive assessment, digital archiving, and automated exam construction.</description>
	<pubDate>2026-03-26</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 204: AI-Powered Natural Language Processing Framework for Reverse-Engineering Examination Questions from Marking Schemes</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/204">doi: 10.3390/computers15040204</a></p>
	<p>Authors:
		Julius Olaniyan
		Silas Formunyuy Verkijika
		Ibidun Christiana Obagbuwa
		</p>
	<p>The generation of examination questions from examiner-provided marking schemes remains a critical yet underexplored challenge in automated assessment. This study proposes an AI-powered natural language processing (NLP) framework that reverse-engineers exam questions using transformer-based generative modeling, semantic reconstruction, and pedagogical constraints. Marking schemes are encoded with MPNet embeddings and decoded into candidate questions by a T5-small model, with a reconstruction module ensuring semantic fidelity and Bloom-level embeddings enforcing cognitive alignment. Evaluation on a dataset of 7021 marking schemes from Sol Plaatje University demonstrated strong performance, with BLEU = 0.71, ROUGE-L = 0.68, METEOR = 0.65, reconstruction fidelity = 0.84, and Bloom-level accuracy = 0.79. Comparative baselines, including an unconstrained T5 (BLEU = 0.62, RF = 0.68, Bloom = 0.56) and rule-based methods (BLEU = 0.48, RF = 0.51, Bloom = 0.43), confirmed the effectiveness of the proposed approach. The results indicate that the framework generates questions that are semantically accurate, structurally coherent, and pedagogically valid, offering a scalable solution for adaptive assessment, digital archiving, and automated exam construction.</p>
	]]></content:encoded>

	<dc:title>AI-Powered Natural Language Processing Framework for Reverse-Engineering Examination Questions from Marking Schemes</dc:title>
			<dc:creator>Julius Olaniyan</dc:creator>
			<dc:creator>Silas Formunyuy Verkijika</dc:creator>
			<dc:creator>Ibidun Christiana Obagbuwa</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040204</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-26</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-26</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>204</prism:startingPage>
		<prism:doi>10.3390/computers15040204</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/204</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/203">

	<title>Computers, Vol. 15, Pages 203: GraphRAG-Vet: A Knowledge Graph-Augmented Large Language Model for Precision Bovine Disease Diagnosis</title>
	<link>https://www.mdpi.com/2073-431X/15/4/203</link>
	<description>When LLMs are applied in the veterinary field, they often produce serious hallucinations and logical restrictions, especially in the accurate diagnosis of bovine disease, where accuracy is crucial. To meet this challenge, this paper proposes GraphRAG-Vet, a Knowledge Graph Retrieval-Augmented Generation framework specifically designed for the dairy industry. First, we constructed a domain knowledge map comprising 2500 elements and 3000 relationships, covering high-frequency diseases in cows such as mastitis and ketosis. Second, the semantic-to-password parsing module is designed to retrieve disease symptom subgraphs from the Neo4j database accurately. Finally, the hard constraint injection mechanism is introduced to force LLMs to generate diagnoses strictly in accordance with the retrieved graph context, thereby implementing the &amp;ldquo;refuse to answer&amp;rdquo; function for foreign queries. The experimental results showed that GraphRAG-Vet achieved 100% accuracy in diagnosing core infectious diseases and had an almost-zero hallucination rate compared with baseline LLMs. This study provides a reliable, low-resource solution for automated veterinary consultation.</description>
	<pubDate>2026-03-25</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 203: GraphRAG-Vet: A Knowledge Graph-Augmented Large Language Model for Precision Bovine Disease Diagnosis</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/203">doi: 10.3390/computers15040203</a></p>
	<p>Authors:
		Licheng Qu
		Xuan Zhao
		Cunjin Zhang
		Guanghui Li
		</p>
	<p>When LLMs are applied in the veterinary field, they often produce serious hallucinations and logical restrictions, especially in the accurate diagnosis of bovine disease, where accuracy is crucial. To meet this challenge, this paper proposes GraphRAG-Vet, a Knowledge Graph Retrieval-Augmented Generation framework specifically designed for the dairy industry. First, we constructed a domain knowledge map comprising 2500 elements and 3000 relationships, covering high-frequency diseases in cows such as mastitis and ketosis. Second, the semantic-to-password parsing module is designed to retrieve disease symptom subgraphs from the Neo4j database accurately. Finally, the hard constraint injection mechanism is introduced to force LLMs to generate diagnoses strictly in accordance with the retrieved graph context, thereby implementing the &ldquo;refuse to answer&rdquo; function for foreign queries. The experimental results showed that GraphRAG-Vet achieved 100% accuracy in diagnosing core infectious diseases and had an almost-zero hallucination rate compared with baseline LLMs. This study provides a reliable, low-resource solution for automated veterinary consultation.</p>
	]]></content:encoded>

	<dc:title>GraphRAG-Vet: A Knowledge Graph-Augmented Large Language Model for Precision Bovine Disease Diagnosis</dc:title>
			<dc:creator>Licheng Qu</dc:creator>
			<dc:creator>Xuan Zhao</dc:creator>
			<dc:creator>Cunjin Zhang</dc:creator>
			<dc:creator>Guanghui Li</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040203</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-25</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-25</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>203</prism:startingPage>
		<prism:doi>10.3390/computers15040203</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/203</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/202">

	<title>Computers, Vol. 15, Pages 202: FedTheftDetect: Optimizing Anomaly Detection in Smart Grid Metering Systems Using Federated Learning</title>
	<link>https://www.mdpi.com/2073-431X/15/4/202</link>
	<description>The detection of anomaly energy consumption patterns in smart grid metering systems remains a critical issue. This is due to data imbalance, privacy constraints, and the dynamic nature of consumption patterns. To address these concerns, we present a privacy-preserving and scalable anomaly detection framework named as FedTheftDetect framework. The proposed framework integrates deep learning algorithms into a federated learning (FL) architecture through the incorporation of advanced ensemble classifiers to detect behavioral anomalies in daily consumption patterns. A real-world smart meter dataset with significant class imbalance is used to assess the suggested framework. The dataset had significant preprocessing to identify consumption-related anomalies in behavior. Experimental results demonstrate that the suggested framework outperforms the competitive centralized and distributed models. It achieves significant improvements in Accuracy, Precision, Recall, and F1-score, all of which are close to 0.95, which indicates a great predictive capability and reliability.</description>
	<pubDate>2026-03-25</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 202: FedTheftDetect: Optimizing Anomaly Detection in Smart Grid Metering Systems Using Federated Learning</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/202">doi: 10.3390/computers15040202</a></p>
	<p>Authors:
		Samar M. Nour
		Ahmed Rady
		Mohammed S. Hussien
		Sameh A. Salem
		Samar A. Said
		</p>
	<p>The detection of anomaly energy consumption patterns in smart grid metering systems remains a critical issue. This is due to data imbalance, privacy constraints, and the dynamic nature of consumption patterns. To address these concerns, we present a privacy-preserving and scalable anomaly detection framework named as FedTheftDetect framework. The proposed framework integrates deep learning algorithms into a federated learning (FL) architecture through the incorporation of advanced ensemble classifiers to detect behavioral anomalies in daily consumption patterns. A real-world smart meter dataset with significant class imbalance is used to assess the suggested framework. The dataset had significant preprocessing to identify consumption-related anomalies in behavior. Experimental results demonstrate that the suggested framework outperforms the competitive centralized and distributed models. It achieves significant improvements in Accuracy, Precision, Recall, and F1-score, all of which are close to 0.95, which indicates a great predictive capability and reliability.</p>
	]]></content:encoded>

	<dc:title>FedTheftDetect: Optimizing Anomaly Detection in Smart Grid Metering Systems Using Federated Learning</dc:title>
			<dc:creator>Samar M. Nour</dc:creator>
			<dc:creator>Ahmed Rady</dc:creator>
			<dc:creator>Mohammed S. Hussien</dc:creator>
			<dc:creator>Sameh A. Salem</dc:creator>
			<dc:creator>Samar A. Said</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040202</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-25</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-25</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>202</prism:startingPage>
		<prism:doi>10.3390/computers15040202</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/202</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/201">

	<title>Computers, Vol. 15, Pages 201: A Unified Multi-Dimensional Cost Analysis of Speculative Parallel Conflict Detection and Diagnosis</title>
	<link>https://www.mdpi.com/2073-431X/15/4/201</link>
	<description>Speculative parallelization has been proposed to accelerate computationally intensive reasoning tasks in constraint-based systems, particularly minimal conflict detection and preferred diagnosis computation. Parallel variants of QUICKXPLAIN enable concurrent conflict detection, while parallel FASTDIAG supports speculative diagnosis computation. Existing evaluations of these approaches primarily emphasize runtime reduction and speedup metrics. However, runtime alone does not fully characterize computational efficiency in multi-core environments, where synchronization overhead and speculative execution costs may significantly influence performance. This paper introduces a unified multi-dimensional cost model for analyzing speculative parallel conflict detection and diagnosis algorithms. Rather than proposing new algorithms, we reinterpret previously reported experimental results under a formal cost perspective integrating runtime, speedup, efficiency, parallel overhead, and conflict-normalized cost metrics. Our analysis reveals that speculative parallelization provides substantial benefits in high-cardinality conflict scenarios and complex diagnosis tasks, but scalability is limited by coordination overhead and diminishing efficiency as the number of parallel workers increases. We further identify parallel breakdown points beyond which additional workers degrade performance. The proposed framework offers a systematic basis for cost-aware evaluation of parallel reasoning strategies and provides practical insights into when parallelization is beneficial for conflict detection and diagnosis tasks in large-scale constraint systems.</description>
	<pubDate>2026-03-25</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 201: A Unified Multi-Dimensional Cost Analysis of Speculative Parallel Conflict Detection and Diagnosis</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/201">doi: 10.3390/computers15040201</a></p>
	<p>Authors:
		Mariuxi Vinueza-Morales
		Miguel Tupac-Yupanqui
		Nicolás Márquez
		Cristian Vidal-Silva
		</p>
	<p>Speculative parallelization has been proposed to accelerate computationally intensive reasoning tasks in constraint-based systems, particularly minimal conflict detection and preferred diagnosis computation. Parallel variants of QUICKXPLAIN enable concurrent conflict detection, while parallel FASTDIAG supports speculative diagnosis computation. Existing evaluations of these approaches primarily emphasize runtime reduction and speedup metrics. However, runtime alone does not fully characterize computational efficiency in multi-core environments, where synchronization overhead and speculative execution costs may significantly influence performance. This paper introduces a unified multi-dimensional cost model for analyzing speculative parallel conflict detection and diagnosis algorithms. Rather than proposing new algorithms, we reinterpret previously reported experimental results under a formal cost perspective integrating runtime, speedup, efficiency, parallel overhead, and conflict-normalized cost metrics. Our analysis reveals that speculative parallelization provides substantial benefits in high-cardinality conflict scenarios and complex diagnosis tasks, but scalability is limited by coordination overhead and diminishing efficiency as the number of parallel workers increases. We further identify parallel breakdown points beyond which additional workers degrade performance. The proposed framework offers a systematic basis for cost-aware evaluation of parallel reasoning strategies and provides practical insights into when parallelization is beneficial for conflict detection and diagnosis tasks in large-scale constraint systems.</p>
	]]></content:encoded>

	<dc:title>A Unified Multi-Dimensional Cost Analysis of Speculative Parallel Conflict Detection and Diagnosis</dc:title>
			<dc:creator>Mariuxi Vinueza-Morales</dc:creator>
			<dc:creator>Miguel Tupac-Yupanqui</dc:creator>
			<dc:creator>Nicolás Márquez</dc:creator>
			<dc:creator>Cristian Vidal-Silva</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040201</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-25</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-25</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>201</prism:startingPage>
		<prism:doi>10.3390/computers15040201</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/201</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/3/200">

	<title>Computers, Vol. 15, Pages 200: Performance Comparison of Python-Based Complex Event Processing Engines for IoT Intrusion Detection: Faust Versus Streamz</title>
	<link>https://www.mdpi.com/2073-431X/15/3/200</link>
	<description>The proliferation of Internet of Things (IoT) devices has intensified the need for efficient real-time anomaly and intrusion detection, making the selection of an appropriate Complex Event Processing (CEP) engine a critical architectural decision for security-aware data pipelines. Python-based CEP frameworks offer compelling advantages through the seamless integration with data science and machine learning ecosystems; however, rigorous comparative evaluations of such frameworks under realistic IoT security workloads remain absent from the literature. This study presents the first systematic comparative evaluation of Faust and Streamz&amp;mdash;two Python-native CEP engines representing fundamentally different architectural philosophies&amp;mdash;specifically in the context of IoT network intrusion detection. Faust was selected for its actor-based stateful processing model with native Kafka integration and distributed table support, while Streamz was selected for its reactive, lightweight pipeline design targeting high-throughput stateless processing, making them representative of the two dominant paradigms in Python stream processing. Although both engines target different application niches, their performance characteristics under realistic CEP workloads have never been rigorously compared, leaving practitioners without empirical guidance. The primary evaluation employs an IoT network intrusion dataset comprising 583,485 events from 83 heterogeneous devices. To assess whether the observed performance characteristics are specific to this single dataset or generalize across different workload profiles, a secondary IoT-adjacent benchmark is included: the PaySim financial transaction dataset (6.4 million records), selected because its event schema, fraud-pattern temporal structure, and volume differ substantially from the intrusion dataset, providing a stress test for cross-workload robustness rather than a claim of domain equivalence. 
We acknowledge the reviewer&amp;rsquo;s valid point that a second IoT-specific intrusion dataset (such as TON_IoT or Bot-IoT) would constitute a more directly comparable validation; this is identified as a priority for future work. The load levels used in scalability experiments (up to 5000 events per second) intentionally exceed the dataset&amp;rsquo;s natural rate to stress-test each engine&amp;rsquo;s architectural ceiling and identify saturation thresholds relevant to large-scale or multi-sensor IoT deployments. We conducted controlled experiments with comprehensive statistical analysis. Our results demonstrate that Streamz achieves superior throughput at 4450 events per second with 89% efficiency and minimal resource consumption (40 MB memory, 12 ms median latency), while Faust provides robust intrusion pattern detection with 93&amp;ndash;98% accuracy and stable, predictable resource utilization (1.4% CPU standard deviation). A multi-framework comparison including Apache Kafka Streams and offline scikit-learn baselines confirms that Faust achieves detection quality competitive with JVM-based alternatives (Faust: 96.2%; Kafka Streams: 96.8%; absolute difference of 0.6 percentage points, not statistically significant at p=0.318) while retaining the Python ecosystem advantages. Statistical analysis confirms significant performance differences across all metrics (p&amp;lt;0.001, Cohen&amp;rsquo;s d&amp;gt;0.8). Critical scalability thresholds are identified: Streamz maintains efficiency above 95% up to 3500 events per second, while Faust degrades beyond 2500 events per second. 
These findings provide IoT security engineers and system architects with actionable, empirically grounded guidance for CEP engine selection, establish reproducible benchmarking methodology applicable to future Python-based stream processing evaluations, and advance theoretical understanding of the accuracy&amp;ndash;throughput trade-off in stateful versus stateless Python CEP architectures.</description>
	<pubDate>2026-03-23</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 200: Performance Comparison of Python-Based Complex Event Processing Engines for IoT Intrusion Detection: Faust Versus Streamz</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/3/200">doi: 10.3390/computers15030200</a></p>
	<p>Authors:
		Maryam Abbasi
		Filipe Cardoso
		Paulo Váz
		José Silva
		Filipe Sá
		Pedro Martins
		</p>
	<p>The proliferation of Internet of Things (IoT) devices has intensified the need for efficient real-time anomaly and intrusion detection, making the selection of an appropriate Complex Event Processing (CEP) engine a critical architectural decision for security-aware data pipelines. Python-based CEP frameworks offer compelling advantages through the seamless integration with data science and machine learning ecosystems; however, rigorous comparative evaluations of such frameworks under realistic IoT security workloads remain absent from the literature. This study presents the first systematic comparative evaluation of Faust and Streamz&mdash;two Python-native CEP engines representing fundamentally different architectural philosophies&mdash;specifically in the context of IoT network intrusion detection. Faust was selected for its actor-based stateful processing model with native Kafka integration and distributed table support, while Streamz was selected for its reactive, lightweight pipeline design targeting high-throughput stateless processing, making them representative of the two dominant paradigms in Python stream processing. Although both engines target different application niches, their performance characteristics under realistic CEP workloads have never been rigorously compared, leaving practitioners without empirical guidance. The primary evaluation employs an IoT network intrusion dataset comprising 583,485 events from 83 heterogeneous devices. To assess whether the observed performance characteristics are specific to this single dataset or generalize across different workload profiles, a secondary IoT-adjacent benchmark is included: the PaySim financial transaction dataset (6.4 million records), selected because its event schema, fraud-pattern temporal structure, and volume differ substantially from the intrusion dataset, providing a stress test for cross-workload robustness rather than a claim of domain equivalence. 
We acknowledge the reviewer&rsquo;s valid point that a second IoT-specific intrusion dataset (such as TON_IoT or Bot-IoT) would constitute a more directly comparable validation; this is identified as a priority for future work. The load levels used in scalability experiments (up to 5000 events per second) intentionally exceed the dataset&rsquo;s natural rate to stress-test each engine&rsquo;s architectural ceiling and identify saturation thresholds relevant to large-scale or multi-sensor IoT deployments. We conducted controlled experiments with comprehensive statistical analysis. Our results demonstrate that Streamz achieves superior throughput at 4450 events per second with 89% efficiency and minimal resource consumption (40 MB memory, 12 ms median latency), while Faust provides robust intrusion pattern detection with 93&ndash;98% accuracy and stable, predictable resource utilization (1.4% CPU standard deviation). A multi-framework comparison including Apache Kafka Streams and offline scikit-learn baselines confirms that Faust achieves detection quality competitive with JVM-based alternatives (Faust: 96.2%; Kafka Streams: 96.8%; absolute difference of 0.6 percentage points, not statistically significant at p=0.318) while retaining the Python ecosystem advantages. Statistical analysis confirms significant performance differences across all metrics (p&lt;0.001, Cohen&rsquo;s d&gt;0.8). Critical scalability thresholds are identified: Streamz maintains efficiency above 95% up to 3500 events per second, while Faust degrades beyond 2500 events per second. 
These findings provide IoT security engineers and system architects with actionable, empirically grounded guidance for CEP engine selection, establish reproducible benchmarking methodology applicable to future Python-based stream processing evaluations, and advance theoretical understanding of the accuracy&ndash;throughput trade-off in stateful versus stateless Python CEP architectures.</p>
	]]></content:encoded>

	<dc:title>Performance Comparison of Python-Based Complex Event Processing Engines for IoT Intrusion Detection: Faust Versus Streamz</dc:title>
			<dc:creator>Maryam Abbasi</dc:creator>
			<dc:creator>Filipe Cardoso</dc:creator>
			<dc:creator>Paulo Váz</dc:creator>
			<dc:creator>José Silva</dc:creator>
			<dc:creator>Filipe Sá</dc:creator>
			<dc:creator>Pedro Martins</dc:creator>
		<dc:identifier>doi: 10.3390/computers15030200</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-23</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-23</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>200</prism:startingPage>
		<prism:doi>10.3390/computers15030200</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/3/200</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/3/199">

	<title>Computers, Vol. 15, Pages 199: RepuTrade: A Reputation-Based Deposit Consensus Mechanism for P2P Energy Trading in Smart Environments</title>
	<link>https://www.mdpi.com/2073-431X/15/3/199</link>
	<description>Current peer-to-peer (P2P) energy trading systems face important challenges in decentralised trading environments, particularly in managing participant trustworthiness, preventing dishonest behaviour, and mitigating transaction defaults. These limitations reduce transaction reliability and weaken trust among participants in community-scale energy trading markets. Although P2P energy trading enables communities to exchange locally generated renewable energy in smart environments, existing platforms often lack effective mechanisms to regulate participant behaviour and support reliable transactions. This paper proposes RepuTrade, a blockchain-based P2P energy trading platform tailored for community-scale microgrids. The proposed framework integrates a reputation-based consensus mechanism and a dynamic collateral management scheme that is directly linked to participant reputations such that trading reliability can be strengthened through behavioural incentives. In addition, a reputation-driven matching algorithm preferentially pairs highly reputable participants to improve market stability and trust. Simulation-based evaluation, involving 200 users across 8 trading rounds, shows that the RepuTrade framework consistently achieves higher trade success rates (92&amp;ndash;99% compared to 83&amp;ndash;95% in the baseline) and reduces defaults by more than 40% (27&amp;ndash;44 vs. 55&amp;ndash;72 per run). The results further reveal a strong negative correlation between user reputation and default probability, indicating that higher reputation is associated with a lower likelihood of dishonest behaviour. Overall, under the simulated settings considered in this study, the proposed framework improves transaction reliability and execution efficiency by reducing failed trades and lowering consensus validation latency. 
These findings contribute to the design of trust-aware decentralised energy trading mechanisms and provide simulation-based insights for developing more reliable and transparent community-scale renewable energy markets.</description>
	<pubDate>2026-03-23</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 199: RepuTrade: A Reputation-Based Deposit Consensus Mechanism for P2P Energy Trading in Smart Environments</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/3/199">doi: 10.3390/computers15030199</a></p>
	<p>Authors:
		Xingyu Yang
		Ben Chen
		Hui Cui
		</p>
	<p>Current peer-to-peer (P2P) energy trading systems face important challenges in decentralised trading environments, particularly in managing participant trustworthiness, preventing dishonest behaviour, and mitigating transaction defaults. These limitations reduce transaction reliability and weaken trust among participants in community-scale energy trading markets. Although P2P energy trading enables communities to exchange locally generated renewable energy in smart environments, existing platforms often lack effective mechanisms to regulate participant behaviour and support reliable transactions. This paper proposes RepuTrade, a blockchain-based P2P energy trading platform tailored for community-scale microgrids. The proposed framework integrates a reputation-based consensus mechanism and a dynamic collateral management scheme that is directly linked to participant reputations such that trading reliability can be strengthened through behavioural incentives. In addition, a reputation-driven matching algorithm preferentially pairs highly reputable participants to improve market stability and trust. Simulation-based evaluation, involving 200 users across 8 trading rounds, shows that the RepuTrade framework consistently achieves higher trade success rates (92&ndash;99% compared to 83&ndash;95% in the baseline) and reduces defaults by more than 40% (27&ndash;44 vs. 55&ndash;72 per run). The results further reveal a strong negative correlation between user reputation and default probability, indicating that higher reputation is associated with a lower likelihood of dishonest behaviour. Overall, under the simulated settings considered in this study, the proposed framework improves transaction reliability and execution efficiency by reducing failed trades and lowering consensus validation latency. 
These findings contribute to the design of trust-aware decentralised energy trading mechanisms and provide simulation-based insights for developing more reliable and transparent community-scale renewable energy markets.</p>
	]]></content:encoded>

	<dc:title>RepuTrade: A Reputation-Based Deposit Consensus Mechanism for P2P Energy Trading in Smart Environments</dc:title>
			<dc:creator>Xingyu Yang</dc:creator>
			<dc:creator>Ben Chen</dc:creator>
			<dc:creator>Hui Cui</dc:creator>
		<dc:identifier>doi: 10.3390/computers15030199</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-23</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-23</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>199</prism:startingPage>
		<prism:doi>10.3390/computers15030199</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/3/199</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/3/198">

	<title>Computers, Vol. 15, Pages 198: A Blockchain-Enabled Smart Contract Architecture for Enhancing Transparency, Traceability, and Trust in Global Supply Chain Management</title>
	<link>https://www.mdpi.com/2073-431X/15/3/198</link>
	<description>There is diminished transparency, fragmented information exchange, and lack of trust among geographically dispersed stakeholders, which increasingly challenge global supply chains. The classic centralized systems of supply chain management are not always capable of being able to offer real-time traceability and data integrity which is dependable and effective in contract enforcement. The proposed study is a blockchain-based smart contract design that is focused on ensuring increased transparency, traceability and trust in global supply chain management. The suggested framework will combine automated smart contracts, cryptographic provenance tracking, permissioned blockchain consensus, and a decentralized trust score evaluation mechanism to overcome some of the major operation and governance challenges. A simulated assessment with a multi-tier global supply chain setting of 15 blockchain nodes and 12,000 transactions was performed through experimentation. The findings show that the proposed system attained an average transaction delay of 210 ms, which is very low compared to centralized systems (520 ms), with throughput being raised to 120 transactions per minute. End-to-end traceability performance also improved significantly, with a reduction in trace-back time to 8 s compared with 95 s; this represents a 100% tampering detection rate. The consensus mechanism ensured that the ledger integrity failed only at a rate of less than 1.1%, even when more than 30% of nodes were faulty. Risk-wise, the trust evaluation algorithm dynamically enhanced reliable supplier scores up to 12%, which facilitated the selection of reliable partners. On the whole, the results prove that smart contracts based on blockchains can drastically enhance the efficiency of operations, data integrity, and confidence in global supply chains, with the platform capable of providing a resilient and scalable backbone for the future supply chain management model.</description>
	<pubDate>2026-03-22</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 198: A Blockchain-Enabled Smart Contract Architecture for Enhancing Transparency, Traceability, and Trust in Global Supply Chain Management</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/3/198">doi: 10.3390/computers15030198</a></p>
	<p>Authors:
		Naim Ayadi
		Syed Arshad Hussain
		Arif Deen
		Asadullah Ullah
		Dil Nawaz Hakro
		Muhammad Babar
		Mushtaque Ali Jariko
		Alya Al Farsi
		Akhtar Hussain
		</p>
	<p>There is diminished transparency, fragmented information exchange, and lack of trust among geographically dispersed stakeholders, which increasingly challenge global supply chains. The classic centralized systems of supply chain management are not always capable of being able to offer real-time traceability and data integrity which is dependable and effective in contract enforcement. The proposed study is a blockchain-based smart contract design that is focused on ensuring increased transparency, traceability and trust in global supply chain management. The suggested framework will combine automated smart contracts, cryptographic provenance tracking, permissioned blockchain consensus, and a decentralized trust score evaluation mechanism to overcome some of the major operation and governance challenges. A simulated assessment with a multi-tier global supply chain setting of 15 blockchain nodes and 12,000 transactions was performed through experimentation. The findings show that the proposed system attained an average transaction delay of 210 ms, which is very low compared to centralized systems (520 ms), with throughput being raised to 120 transactions per minute. End-to-end traceability performance also improved significantly, with a reduction in trace-back time to 8 s compared with 95 s; this represents a 100% tampering detection rate. The consensus mechanism ensured that the ledger integrity failed only at a rate of less than 1.1%, even when more than 30% of nodes were faulty. Risk-wise, the trust evaluation algorithm dynamically enhanced reliable supplier scores up to 12%, which facilitated the selection of reliable partners. On the whole, the results prove that smart contracts based on blockchains can drastically enhance the efficiency of operations, data integrity, and confidence in global supply chains, with the platform capable of providing a resilient and scalable backbone for the future supply chain management model.</p>
	]]></content:encoded>

	<dc:title>A Blockchain-Enabled Smart Contract Architecture for Enhancing Transparency, Traceability, and Trust in Global Supply Chain Management</dc:title>
			<dc:creator>Naim Ayadi</dc:creator>
			<dc:creator>Syed Arshad Hussain</dc:creator>
			<dc:creator>Arif Deen</dc:creator>
			<dc:creator>Asadullah Ullah</dc:creator>
			<dc:creator>Dil Nawaz Hakro</dc:creator>
			<dc:creator>Muhammad Babar</dc:creator>
			<dc:creator>Mushtaque Ali Jariko</dc:creator>
			<dc:creator>Alya Al Farsi</dc:creator>
			<dc:creator>Akhtar Hussain</dc:creator>
		<dc:identifier>doi: 10.3390/computers15030198</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-22</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-22</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>198</prism:startingPage>
		<prism:doi>10.3390/computers15030198</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/3/198</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/3/197">

	<title>Computers, Vol. 15, Pages 197: Factually Consistent Prompting with LLMs for Cross-Lingual Dialogue Summarization</title>
	<link>https://www.mdpi.com/2073-431X/15/3/197</link>
	<description>Recent breakthroughs in large language models have made it feasible to effectively summarize cross-lingual dialogue information, proving essential for the global communication context. However, existing methodologies encounter difficulties in maintaining factual consistency across multiple dialogue exchanges and lack clear explanations of the summarization process. This paper presents a novel factually consistent prompting technology with large language models to address these challenges in cross-lingual dialogue summarization. First, we propose a factual replacement mechanism to enhance information analysis by incorporating noise information into summarization candidates. We adopt a self-guidance framework to enforce factual consistency, enhancing information flow tracking in cross-lingual hybrid dialogue scenarios with the assistance of GPT-based models. Furthermore, we introduce a view-aware chain-of-thought-driven architecture to improve the interpretability and transparency of the cross-lingual dialogue summarization process. Comprehensive experimental evaluations on cross-lingual summarization tasks, spanning English, French, Spanish, Russian, Chinese, and Arabic, and hybrid cross-lingual tasks substantiate that the proposed model achieves superior performance relative to state-of-the-art baselines.</description>
	<pubDate>2026-03-21</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 197: Factually Consistent Prompting with LLMs for Cross-Lingual Dialogue Summarization</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/3/197">doi: 10.3390/computers15030197</a></p>
	<p>Authors:
		Zhongtian Bao
		Wenjian Ding
		Yao Zhang
		Jun Wang
		Zhe Sun
		Andrzej Cichocki
		Zhenglu Yang
		</p>
	<p>Recent breakthroughs in large language models have made it feasible to effectively summarize cross-lingual dialogue information, proving essential for the global communication context. However, existing methodologies encounter difficulties in maintaining factual consistency across multiple dialogue exchanges and lack clear explanations of the summarization process. This paper presents a novel factually consistent prompting technology with large language models to address these challenges in cross-lingual dialogue summarization. First, we propose a factual replacement mechanism to enhance information analysis by incorporating noise information into summarization candidates. We adopt a self-guidance framework to enforce factual consistency, enhancing information flow tracking in cross-lingual hybrid dialogue scenarios with the assistance of GPT-based models. Furthermore, we introduce a view-aware chain-of-thought-driven architecture to improve the interpretability and transparency of the cross-lingual dialogue summarization process. Comprehensive experimental evaluations on cross-lingual summarization tasks, spanning English, French, Spanish, Russian, Chinese, and Arabic, and hybrid cross-lingual tasks substantiate that the proposed model achieves superior performance relative to state-of-the-art baselines.</p>
	]]></content:encoded>

	<dc:title>Factually Consistent Prompting with LLMs for Cross-Lingual Dialogue Summarization</dc:title>
			<dc:creator>Zhongtian Bao</dc:creator>
			<dc:creator>Wenjian Ding</dc:creator>
			<dc:creator>Yao Zhang</dc:creator>
			<dc:creator>Jun Wang</dc:creator>
			<dc:creator>Zhe Sun</dc:creator>
			<dc:creator>Andrzej Cichocki</dc:creator>
			<dc:creator>Zhenglu Yang</dc:creator>
		<dc:identifier>doi: 10.3390/computers15030197</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-21</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-21</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>197</prism:startingPage>
		<prism:doi>10.3390/computers15030197</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/3/197</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/3/196">

	<title>Computers, Vol. 15, Pages 196: Black-White Bakery Algorithm Made RW-Safe</title>
	<link>https://www.mdpi.com/2073-431X/15/3/196</link>
	<description>Lamport&amp;rsquo;s Bakery algorithm is a well-known, simple, and elegant solution to the mutual exclusion problem for N &amp;ge; 2 concurrent/parallel processes. However, the algorithm generates an unbounded number of tickets, even when only 2 processes are arbitrated. Various proposals in the literature were introduced to bound the number of tickets. Anyway, almost all these proposals prove to be correct when operated with atomic registers (AR) only. They become incorrect when working with non-atomic registers (NAR), as may occur in embedded hardware platforms with multi-port memory and relaxed memory-bus control, such as microcontrollers, FPGA-based systems, or specialized network devices. A notable solution with bounded tickets is Taubenfeld&amp;rsquo;s Black-White Bakery (BWB) algorithm. BWB relies on tickets which are couples &amp;lt;number,mycolor&amp;gt; where mycolor can be Black or White and number ranges in [0, N]. BWB, too, was confirmed, through informal reasoning, it is correct with AR only. The original contribution of this paper is a reformulation of BWB, which is formally modelled and exhaustively verified by timed automata in the Uppaal toolbox. In the reformulation, a ticket&amp;rsquo;s couple is coded as a single integer, and decoded and processed according to the BWB logic. The reformulated BWB remains fully correct with AR regardless of the number N of processes, but it is also correct with NAR for N = 2 processes. As a further original contribution, the paper demonstrates that the BWB version for 2 processes can be embedded in a general, state-of-the-art solution, based on a binary tournament tree (TT), to become AR/NAR correct, that is, RW-safe, for any number of processes. However, due to model complexity, the correctness of the TT versions of BWB, that is, based on atomic and non-atomic registers, is mainly studied by stochastic simulation of the formal model reduced to actors in Java.</description>
	<pubDate>2026-03-20</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 196: Black-White Bakery Algorithm Made RW-Safe</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/3/196">doi: 10.3390/computers15030196</a></p>
	<p>Authors:
		Libero Nigro
		Franco Cicirelli
		</p>
	<p>Lamport&rsquo;s Bakery algorithm is a well-known, simple, and elegant solution to the mutual exclusion problem for N &ge; 2 concurrent/parallel processes. However, the algorithm generates an unbounded number of tickets, even when only 2 processes are arbitrated. Various proposals in the literature were introduced to bound the number of tickets. Anyway, almost all these proposals prove to be correct when operated with atomic registers (AR) only. They become incorrect when working with non-atomic registers (NAR), as may occur in embedded hardware platforms with multi-port memory and relaxed memory-bus control, such as microcontrollers, FPGA-based systems, or specialized network devices. A notable solution with bounded tickets is Taubenfeld&rsquo;s Black-White Bakery (BWB) algorithm. BWB relies on tickets which are couples &lt;number,mycolor&gt; where mycolor can be Black or White and number ranges in [0, N]. BWB, too, was confirmed, through informal reasoning, it is correct with AR only. The original contribution of this paper is a reformulation of BWB, which is formally modelled and exhaustively verified by timed automata in the Uppaal toolbox. In the reformulation, a ticket&rsquo;s couple is coded as a single integer, and decoded and processed according to the BWB logic. The reformulated BWB remains fully correct with AR regardless of the number N of processes, but it is also correct with NAR for N = 2 processes. As a further original contribution, the paper demonstrates that the BWB version for 2 processes can be embedded in a general, state-of-the-art solution, based on a binary tournament tree (TT), to become AR/NAR correct, that is, RW-safe, for any number of processes. However, due to model complexity, the correctness of the TT versions of BWB, that is, based on atomic and non-atomic registers, is mainly studied by stochastic simulation of the formal model reduced to actors in Java.</p>
	]]></content:encoded>

	<dc:title>Black-White Bakery Algorithm Made RW-Safe</dc:title>
			<dc:creator>Libero Nigro</dc:creator>
			<dc:creator>Franco Cicirelli</dc:creator>
		<dc:identifier>doi: 10.3390/computers15030196</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-20</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-20</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>196</prism:startingPage>
		<prism:doi>10.3390/computers15030196</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/3/196</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/3/195">

	<title>Computers, Vol. 15, Pages 195: An Ontology-Driven Framework for Personalised Context-Aware Running Event Recommendations</title>
	<link>https://www.mdpi.com/2073-431X/15/3/195</link>
	<description>Sport tourism has experienced significant growth within the tourism industry, driven by the increasing demand of special interest tourists to watch or participate in sports events with local sightseeing. However, the massive volume of available information related to sport events may cause challenges to existing recommendation systems, which struggle to provide tailored suggestions for these niche tourists. Therefore, this paper proposes a novel, context-aware recommender framework that utilises the ontology-driven approach with unsupervised machine learning techniques to deliver personalised event matches for running tourists. Using an ontology-driven approach, the framework establishes a knowledge base of user profiles and running events. Furthermore, K-modes clustering was also applied to categorise participants based on their event participation characteristics, while the Apriori algorithm was used to uncover hidden relationships influencing event selection. To ensure the statistical integrity of the discovered association rule, permutation testing was implemented to mitigate bias inherent in small sample sizes. By integrating refined association rules with Jena rules, the resulting prototype offers adaptive, personalised, and contextually relevant running event recommendations that evolve with shifting user preferences and trends. The effectiveness of the prototype is confirmed through rigorous validation and evaluation across various sport tourism scenarios.</description>
	<pubDate>2026-03-19</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 195: An Ontology-Driven Framework for Personalised Context-Aware Running Event Recommendations</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/3/195">doi: 10.3390/computers15030195</a></p>
	<p>Authors:
		Adisak Intana
		Kuljaree Tantayakul
		Wasupon Tanthavanich
		Wachiravit Chumchuay
		</p>
	<p>Sport tourism has experienced significant growth within the tourism industry, driven by the increasing demand of special interest tourists to watch or participate in sports events with local sightseeing. However, the massive volume of available information related to sport events may cause challenges to existing recommendation systems, which struggle to provide tailored suggestions for these niche tourists. Therefore, this paper proposes a novel, context-aware recommender framework that utilises the ontology-driven approach with unsupervised machine learning techniques to deliver personalised event matches for running tourists. Using an ontology-driven approach, the framework establishes a knowledge base of user profiles and running events. Furthermore, K-modes clustering was also applied to categorise participants based on their event participation characteristics, while the Apriori algorithm was used to uncover hidden relationships influencing event selection. To ensure the statistical integrity of the discovered association rule, permutation testing was implemented to mitigate bias inherent in small sample sizes. By integrating refined association rules with Jena rules, the resulting prototype offers adaptive, personalised, and contextually relevant running event recommendations that evolve with shifting user preferences and trends. The effectiveness of the prototype is confirmed through rigorous validation and evaluation across various sport tourism scenarios.</p>
	]]></content:encoded>

	<dc:title>An Ontology-Driven Framework for Personalised Context-Aware Running Event Recommendations</dc:title>
			<dc:creator>Adisak Intana</dc:creator>
			<dc:creator>Kuljaree Tantayakul</dc:creator>
			<dc:creator>Wasupon Tanthavanich</dc:creator>
			<dc:creator>Wachiravit Chumchuay</dc:creator>
		<dc:identifier>doi: 10.3390/computers15030195</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-19</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-19</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>195</prism:startingPage>
		<prism:doi>10.3390/computers15030195</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/3/195</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/3/194">

	<title>Computers, Vol. 15, Pages 194: Impact of Neural Network Initialisation Seed and Architecture on Accuracy, Generalisation and Generative Consistency in Data-Driven Internal Combustion Engine Modelling</title>
	<link>https://www.mdpi.com/2073-431X/15/3/194</link>
	<description>Artificial neural networks (ANNs) are widely used to approximate nonlinear mappings, yet their ability to capture thermodynamic behaviour in dynamic physical systems remains insufficiently characterised. This study investigates how representational capacity influences surrogate modelling accuracy for a crank-angle-resolved internal combustion engine (ICE) simulation with a maximum dynamic state dimension of six. Two feedforward ANN configurations are evaluated: a low-capacity 5&amp;ndash;5 architecture containing 84 trainable parameters and a high-capacity 25&amp;ndash;25&amp;ndash;25 architecture containing 1554 parameters (18.5&amp;times; larger). Both networks approximate the nonlinear mapping from five embedded operating parameters to four peak thermodynamic outputs (maximum pressure, pressure phasing, maximum temperature, and temperature phasing). Evaluation across 53,178 operating points demonstrates that the high-capacity configuration reduces root mean squared error by factors of 30&amp;ndash;50&amp;times; relative to the low-capacity network, decreasing peak temperature error from 17.68 K to 0.36 K and peak pressure error from 0.116 MPa to 0.0025 MPa. Although both models achieve coefficients of determination exceeding 0.99, the low-capacity network exhibits heavy-tailed residual distributions and regime-dependent error amplification, whereas the high-capacity model reduces both central dispersion and extreme-case error. These results demonstrate that high correlation alone does not guarantee engineering reliability in nonlinear thermodynamic systems. Distribution-level analysis, including percentile and extreme-case characterisation, is required to evaluate engineering robustness. The findings provide a quantitative framework linking ANN capacity, nonlinear dynamic system representation, and predictive robustness.</description>
	<pubDate>2026-03-17</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 194: Impact of Neural Network Initialisation Seed and Architecture on Accuracy, Generalisation and Generative Consistency in Data-Driven Internal Combustion Engine Modelling</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/3/194">doi: 10.3390/computers15030194</a></p>
	<p>Authors:
		Arturas Gulevskis
		Redha Benhadj-Djilali
		Konstantin Volkov
		</p>
	<p>Artificial neural networks (ANNs) are widely used to approximate nonlinear mappings, yet their ability to capture thermodynamic behaviour in dynamic physical systems remains insufficiently characterised. This study investigates how representational capacity influences surrogate modelling accuracy for a crank-angle-resolved internal combustion engine (ICE) simulation with a maximum dynamic state dimension of six. Two feedforward ANN configurations are evaluated: a low-capacity 5&ndash;5 architecture containing 84 trainable parameters and a high-capacity 25&ndash;25&ndash;25 architecture containing 1554 parameters (18.5&times; larger). Both networks approximate the nonlinear mapping from five embedded operating parameters to four peak thermodynamic outputs (maximum pressure, pressure phasing, maximum temperature, and temperature phasing). Evaluation across 53,178 operating points demonstrates that the high-capacity configuration reduces root mean squared error by factors of 30&ndash;50&times; relative to the low-capacity network, decreasing peak temperature error from 17.68 K to 0.36 K and peak pressure error from 0.116 MPa to 0.0025 MPa. Although both models achieve coefficients of determination exceeding 0.99, the low-capacity network exhibits heavy-tailed residual distributions and regime-dependent error amplification, whereas the high-capacity model reduces both central dispersion and extreme-case error. These results demonstrate that high correlation alone does not guarantee engineering reliability in nonlinear thermodynamic systems. Distribution-level analysis, including percentile and extreme-case characterisation, is required to evaluate engineering robustness. The findings provide a quantitative framework linking ANN capacity, nonlinear dynamic system representation, and predictive robustness.</p>
	]]></content:encoded>

	<dc:title>Impact of Neural Network Initialisation Seed and Architecture on Accuracy, Generalisation and Generative Consistency in Data-Driven Internal Combustion Engine Modelling</dc:title>
			<dc:creator>Arturas Gulevskis</dc:creator>
			<dc:creator>Redha Benhadj-Djilali</dc:creator>
			<dc:creator>Konstantin Volkov</dc:creator>
		<dc:identifier>doi: 10.3390/computers15030194</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-17</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-17</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>194</prism:startingPage>
		<prism:doi>10.3390/computers15030194</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/3/194</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/3/193">

	<title>Computers, Vol. 15, Pages 193: Predicting Cybersickness in Virtual Reality from Head&amp;ndash;Torso Kinematics Using a Hybrid Convolutional&amp;ndash;Recurrent Network Model</title>
	<link>https://www.mdpi.com/2073-431X/15/3/193</link>
	<description>Motion sickness (MS) is a prevalent condition that can significantly degrade user comfort and immersion, particularly in virtual reality (VR) environments. Accurate prediction models are essential for early detection and mitigation of MS symptoms, thereby improving the overall VR experience. Most existing approaches rely on bio-physiological data acquired through body-mounted sensors, which may restrict user mobility and diminish immersion. This study proposes a less intrusive alternative, leveraging head and torso kinematic data for MS prediction. We introduce a hybrid Convolutional&amp;ndash;Recurrent Neural Network (C-RNN) designed to capture both spatial and temporal features for enhanced classification accuracy. Using a dataset of 40 participants, the proposed C-RNN outperformed traditional machine learning models&amp;mdash;including Support Vector Machines (SVMs), k-Nearest Neighbors (KNN), Decision Trees (DT), and a baseline Recurrent Neural Network (RNN)&amp;mdash;across multiple evaluation metrics. The C-RNN achieved 85.63% accuracy, surpassing SVM (60%), KNN (73.75%), DT (74.38%), and RNN (81.88%), with corresponding gains in precision, recall, F1-score, and ROC AUC. These results demonstrate that head&amp;ndash;torso motion patterns provide sufficient predictive signal for accurate MS detection, offering a non-intrusive, efficient alternative to physiological sensing that supports improved comfort and sustained immersion in VR.</description>
	<pubDate>2026-03-17</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 193: Predicting Cybersickness in Virtual Reality from Head&amp;ndash;Torso Kinematics Using a Hybrid Convolutional&amp;ndash;Recurrent Network Model</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/3/193">doi: 10.3390/computers15030193</a></p>
	<p>Authors:
		Ala Hag
		Houshyar Asadi
		Mohammad Reza Chalak Qazani
		Thuong Hoang
		Ambarish Kulkarni
		Stefan Greuter
		Saeid Nahavandi
		</p>
	<p>Motion sickness (MS) is a prevalent condition that can significantly degrade user comfort and immersion, particularly in virtual reality (VR) environments. Accurate prediction models are essential for early detection and mitigation of MS symptoms, thereby improving the overall VR experience. Most existing approaches rely on bio-physiological data acquired through body-mounted sensors, which may restrict user mobility and diminish immersion. This study proposes a less intrusive alternative, leveraging head and torso kinematic data for MS prediction. We introduce a hybrid Convolutional&ndash;Recurrent Neural Network (C-RNN) designed to capture both spatial and temporal features for enhanced classification accuracy. Using a dataset of 40 participants, the proposed C-RNN outperformed traditional machine learning models&mdash;including Support Vector Machines (SVMs), k-Nearest Neighbors (KNN), Decision Trees (DT), and a baseline Recurrent Neural Network (RNN)&mdash;across multiple evaluation metrics. The C-RNN achieved 85.63% accuracy, surpassing SVM (60%), KNN (73.75%), DT (74.38%), and RNN (81.88%), with corresponding gains in precision, recall, F1-score, and ROC AUC. These results demonstrate that head&ndash;torso motion patterns provide sufficient predictive signal for accurate MS detection, offering a non-intrusive, efficient alternative to physiological sensing that supports improved comfort and sustained immersion in VR.</p>
	]]></content:encoded>

	<dc:title>Predicting Cybersickness in Virtual Reality from Head&amp;ndash;Torso Kinematics Using a Hybrid Convolutional&amp;ndash;Recurrent Network Model</dc:title>
			<dc:creator>Ala Hag</dc:creator>
			<dc:creator>Houshyar Asadi</dc:creator>
			<dc:creator>Mohammad Reza Chalak Qazani</dc:creator>
			<dc:creator>Thuong Hoang</dc:creator>
			<dc:creator>Ambarish Kulkarni</dc:creator>
			<dc:creator>Stefan Greuter</dc:creator>
			<dc:creator>Saeid Nahavandi</dc:creator>
		<dc:identifier>doi: 10.3390/computers15030193</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-17</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-17</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>193</prism:startingPage>
		<prism:doi>10.3390/computers15030193</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/3/193</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/3/192">

	<title>Computers, Vol. 15, Pages 192: A Modular Non-Immersive VR Serious Game Framework for Telerehabilitation: Design and Proof-of-Concept Feasibility Study</title>
	<link>https://www.mdpi.com/2073-431X/15/3/192</link>
	<description>There is a growing need for accessible and engaging rehabilitation tools for individuals with neurodevelopmental disorders such as Cerebral palsy (CP), Down syndrome (DS), and Autism spectrum disorder (ASD). Serious games offer a promising approach, yet few are tailor-made to meet the therapeutic demands of these populations. A tailor-made, non-immersive virtual reality (VR) serious games framework featuring a basketball task was developed, with therapist-controlled modules for customization and monitoring. Twenty-eight participants (CP: 14; DS: 7; ASD: 7) completed the game across eight sessions, grouped into three practice phases: an initial session, an early adaptation phase, and a consolidated practice phase. Performance metrics included accuracy, reaction time, and number of victories. All groups improved performance across phases, with accuracy increasing significantly in central (p = 0.005) and total positions (p = 0.007). The number of victories also increased from the initial to the early adaptation phase (p = 0.019) and from the initial to the consolidated practice phase (p = 0.008). Participants with ASD showed significantly higher accuracy than the DS group, while CP and DS participants showed a temporary increase in reaction time during the early adaptation phase, followed by a reduction in the consolidated phase, suggesting task adaptation. These findings support the feasibility and short-term effectiveness of a modular, tailor-made serious games platform for telerehabilitation.</description>
	<pubDate>2026-03-16</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 192: A Modular Non-Immersive VR Serious Game Framework for Telerehabilitation: Design and Proof-of-Concept Feasibility Study</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/3/192">doi: 10.3390/computers15030192</a></p>
	<p>Authors:
		Rodrigo G. Pontes
		Eduardo D. Dias
		Juliana P. Weingartner
		Natalia K. Monteiro
		Elisa J. Valenzuela
		Renata M. Rosa
		Victoria Y. H. Silva
		Íbis A. P. Moraes
		Talita D. Silva-Magalhães
		Carlos B. M. Monteiro
		Luciano V. Araújo
		</p>
	<p>There is a growing need for accessible and engaging rehabilitation tools for individuals with neurodevelopmental disorders such as Cerebral palsy (CP), Down syndrome (DS), and Autism spectrum disorder (ASD). Serious games offer a promising approach, yet few are tailor-made to meet the therapeutic demands of these populations. A tailor-made, non-immersive virtual reality (VR) serious games framework featuring a basketball task was developed, with therapist-controlled modules for customization and monitoring. Twenty-eight participants (CP: 14; DS: 7; ASD: 7) completed the game across eight sessions, grouped into three practice phases: an initial session, an early adaptation phase, and a consolidated practice phase. Performance metrics included accuracy, reaction time, and number of victories. All groups improved performance across phases, with accuracy increasing significantly in central (p = 0.005) and total positions (p = 0.007). The number of victories also increased from the initial to the early adaptation phase (p = 0.019) and from the initial to the consolidated practice phase (p = 0.008). Participants with ASD showed significantly higher accuracy than the DS group, while CP and DS participants showed a temporary increase in reaction time during the early adaptation phase, followed by a reduction in the consolidated phase, suggesting task adaptation. These findings support the feasibility and short-term effectiveness of a modular, tailor-made serious games platform for telerehabilitation.</p>
	]]></content:encoded>

	<dc:title>A Modular Non-Immersive VR Serious Game Framework for Telerehabilitation: Design and Proof-of-Concept Feasibility Study</dc:title>
			<dc:creator>Rodrigo G. Pontes</dc:creator>
			<dc:creator>Eduardo D. Dias</dc:creator>
			<dc:creator>Juliana P. Weingartner</dc:creator>
			<dc:creator>Natalia K. Monteiro</dc:creator>
			<dc:creator>Elisa J. Valenzuela</dc:creator>
			<dc:creator>Renata M. Rosa</dc:creator>
			<dc:creator>Victoria Y. H. Silva</dc:creator>
			<dc:creator>Íbis A. P. Moraes</dc:creator>
			<dc:creator>Talita D. Silva-Magalhães</dc:creator>
			<dc:creator>Carlos B. M. Monteiro</dc:creator>
			<dc:creator>Luciano V. Araújo</dc:creator>
		<dc:identifier>doi: 10.3390/computers15030192</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-16</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-16</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>192</prism:startingPage>
		<prism:doi>10.3390/computers15030192</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/3/192</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/3/191">

	<title>Computers, Vol. 15, Pages 191: Efficient Deep Learning Models Integrated with a Smart Web Application for Classifying Heart Diseases Based on ECG Signals</title>
	<link>https://www.mdpi.com/2073-431X/15/3/191</link>
	<description>Recent advancements in the accuracy of deep learning (DL) hold significant promise for improving the classification of heart patients. Nevertheless, continued refinement is essential to achieve even greater levels of precision in DL techniques. This paper proposes three efficient DL models: Swin Transformer (Swin-T), Visual Geometry Group (VGG)-19, and Vision Transformer (ViT), which are implemented to classify different types of heart patients. The three DL models are learned on a balanced dataset comprising 600 electrocardiogram (ECG) samples. This dataset contains three classes: Arrhythmia Patient, Myocardic Patient, and Normal Patient. The DL models are applied using a PyTorch framework v2.10.0, with fine-tuning for the models&amp;rsquo; hyperparameters to maximize the classification accuracy, and data augmentation techniques are implemented for the ECG samples. Additionally, a smart web application is designed for classifying heart patients into three different diagnostic categories. The performance of the three models is assessed by several metrics such as area under precision-recall (AUPR) curves and normalized confusion matrices (NCMs). The proposed three models achieve high testing accuracy for the classification of heart patients. Regarding testing loss (TL) rates for the Swin-T, VGG-19, and ViT achieve rates of 0.0707, 0.4138, and 0.0015, respectively. Also, the ViT achieves an F1-score, true positive rate (TPR), and AUPR curves of 100%.</description>
	<pubDate>2026-03-16</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 191: Efficient Deep Learning Models Integrated with a Smart Web Application for Classifying Heart Diseases Based on ECG Signals</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/3/191">doi: 10.3390/computers15030191</a></p>
	<p>Authors:
		Saeed Mohsen
		Ahmed F. Ibrahim
		Osama F. Hassan
		Norah Alnaim
		Noorah Albehaijan
		M. Abdel-Aziz
		</p>
	<p>Recent advancements in the accuracy of deep learning (DL) hold significant promise for improving the classification of heart patients. Nevertheless, continued refinement is essential to achieve even greater levels of precision in DL techniques. This paper proposes three efficient DL models: Swin Transformer (Swin-T), Visual Geometry Group (VGG)-19, and Vision Transformer (ViT), which are implemented to classify different types of heart patients. The three DL models are learned on a balanced dataset comprising 600 electrocardiogram (ECG) samples. This dataset contains three classes: Arrhythmia Patient, Myocardic Patient, and Normal Patient. The DL models are applied using a PyTorch framework v2.10.0, with fine-tuning for the models&rsquo; hyperparameters to maximize the classification accuracy, and data augmentation techniques are implemented for the ECG samples. Additionally, a smart web application is designed for classifying heart patients into three different diagnostic categories. The performance of the three models is assessed by several metrics such as area under precision-recall (AUPR) curves and normalized confusion matrices (NCMs). The proposed three models achieve high testing accuracy for the classification of heart patients. Regarding testing loss (TL) rates for the Swin-T, VGG-19, and ViT achieve rates of 0.0707, 0.4138, and 0.0015, respectively. Also, the ViT achieves an F1-score, true positive rate (TPR), and AUPR curves of 100%.</p>
	]]></content:encoded>

	<dc:title>Efficient Deep Learning Models Integrated with a Smart Web Application for Classifying Heart Diseases Based on ECG Signals</dc:title>
			<dc:creator>Saeed Mohsen</dc:creator>
			<dc:creator>Ahmed F. Ibrahim</dc:creator>
			<dc:creator>Osama F. Hassan</dc:creator>
			<dc:creator>Norah Alnaim</dc:creator>
			<dc:creator>Noorah Albehaijan</dc:creator>
			<dc:creator>M. Abdel-Aziz</dc:creator>
		<dc:identifier>doi: 10.3390/computers15030191</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-16</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-16</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>191</prism:startingPage>
		<prism:doi>10.3390/computers15030191</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/3/191</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/3/190">

	<title>Computers, Vol. 15, Pages 190: Progressive Optimization of Target Distribution and Effective Refinement of Hard Samples for Source-Free Domain Adaptation</title>
	<link>https://www.mdpi.com/2073-431X/15/3/190</link>
	<description>In recent years, the research on source-free domain adaptation has received increasing attention and has achieved considerable progress. It can overcome the dependence on source domain data and obtain a target domain model with robust performance on the target domain only by using the source domain model and unlabeled target domain data. However, existing studies typically handle the target domain distribution in a relatively coarse manner and are consistently susceptible to model noise interference. Therefore, we propose a progressive optimization strategy for the target domain distribution, including two parts: inter-category and intra-category. Regarding inter-category, we decide whether to separate category pairs based on the degree of discrepancy between them. Regarding intra-category, we consider whether to aggregate sample pairs based on whether their pairwise similarity—among samples assigned to the same predicted category—is sufficiently high. And as the training progresses, all data will be optimized. Additionally, for some difficult-to-distinguish categories, we propose a screening strategy that fuses source domain and target domain knowledge. We also further optimized the samples belonging to these categories. Our results on three image datasets demonstrate the effectiveness of our method.</description>
	<pubDate>2026-03-15</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 190: Progressive Optimization of Target Distribution and Effective Refinement of Hard Samples for Source-Free Domain Adaptation</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/3/190">doi: 10.3390/computers15030190</a></p>
	<p>Authors:
		Shumin Liang
		Xiaorong Hou
		Yajian Zeng
		Xinrui Wang
		</p>
	<p>In recent years, the research on source-free domain adaptation has received increasing attention and has achieved considerable progress. It can overcome the dependence on source domain data and obtain a target domain model with robust performance on the target domain only by using the source domain model and unlabeled target domain data. However, existing studies typically handle the target domain distribution in a relatively coarse manner and are consistently susceptible to model noise interference. Therefore, we propose a progressive optimization strategy for the target domain distribution, including two parts: inter-category and intra-category. Regarding inter-category, we decide whether to separate category pairs based on the degree of discrepancy between them. Regarding intra-category, we consider whether to aggregate sample pairs based on whether their pairwise similarity—among samples assigned to the same predicted category—is sufficiently high. And as the training progresses, all data will be optimized. Additionally, for some difficult-to-distinguish categories, we propose a screening strategy that fuses source domain and target domain knowledge. We also further optimized the samples belonging to these categories. Our results on three image datasets demonstrate the effectiveness of our method.</p>
	]]></content:encoded>

	<dc:title>Progressive Optimization of Target Distribution and Effective Refinement of Hard Samples for Source-Free Domain Adaptation</dc:title>
			<dc:creator>Shumin Liang</dc:creator>
			<dc:creator>Xiaorong Hou</dc:creator>
			<dc:creator>Yajian Zeng</dc:creator>
			<dc:creator>Xinrui Wang</dc:creator>
		<dc:identifier>doi: 10.3390/computers15030190</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-15</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-15</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>190</prism:startingPage>
		<prism:doi>10.3390/computers15030190</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/3/190</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/3/189">

	<title>Computers, Vol. 15, Pages 189: Post-Quantum Authentication in the Internet of Medical Things: A System-Level Review and Future Directions</title>
	<link>https://www.mdpi.com/2073-431X/15/3/189</link>
	<description>The Internet of Medical Things (IoMT) has become a core component of modern healthcare infrastructures, enabling continuous patient monitoring, remote diagnostics, and data-driven clinical decision-making. Despite these advances, authentication in IoMT environments remains a critical security challenge, intensified by strict resource constraints of medical devices and the emerging threat posed by quantum computing to classical cryptographic techniques. This systematic review investigates authentication mechanisms in IoMT from both post-quantum and system-level perspectives. A structured literature review was conducted using a PRISMA-informed methodology across major scientific databases, including IEEE Xplore, ACM Digital Library, SpringerLink, ScienceDirect, and MDPI. From an initial set of 95 records, 63 studies were selected for qualitative synthesis following screening and eligibility assessment. To organise existing research, this study introduces a multi-dimensional classification framework that categorises authentication solutions according to cryptographic paradigm (classical, hybrid, and post-quantum), deployment architecture, system objectives, and clinical operational constraints. The comparative synthesis demonstrates important trade-offs between security strength, latency, computational overhead, and energy consumption that are frequently underexplored in the existing literature. Furthermore, the analysis identifies key research gaps related to scalability in heterogeneous medical environments, trust establishment across administrative and clinical domains, usability under strict timing constraints, and resilience against quantum-capable adversaries. Based on these findings, future research directions are outlined toward adaptive, lightweight, and context-aware post-quantum authentication frameworks designed for real-world IoMT deployments. 
Limitations of this review include restriction to English-language publications and selected databases. This study received no external funding, and the review protocol was not formally registered.</description>
	<pubDate>2026-03-15</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 189: Post-Quantum Authentication in the Internet of Medical Things: A System-Level Review and Future Directions</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/3/189">doi: 10.3390/computers15030189</a></p>
	<p>Authors:
		Fatima G. Abdullah
		Tayseer S. Atia
		</p>
	<p>The Internet of Medical Things (IoMT) has become a core component of modern healthcare infrastructures, enabling continuous patient monitoring, remote diagnostics, and data-driven clinical decision-making. Despite these advances, authentication in IoMT environments remains a critical security challenge, intensified by strict resource constraints of medical devices and the emerging threat posed by quantum computing to classical cryptographic techniques. This systematic review investigates authentication mechanisms in IoMT from both post-quantum and system-level perspectives. A structured literature review was conducted using a PRISMA-informed methodology across major scientific databases, including IEEE Xplore, ACM Digital Library, SpringerLink, ScienceDirect, and MDPI. From an initial set of 95 records, 63 studies were selected for qualitative synthesis following screening and eligibility assessment. To organise existing research, this study introduces a multi-dimensional classification framework that categorises authentication solutions according to cryptographic paradigm (classical, hybrid, and post-quantum), deployment architecture, system objectives, and clinical operational constraints. The comparative synthesis demonstrates important trade-offs between security strength, latency, computational overhead, and energy consumption that are frequently underexplored in the existing literature. Furthermore, the analysis identifies key research gaps related to scalability in heterogeneous medical environments, trust establishment across administrative and clinical domains, usability under strict timing constraints, and resilience against quantum-capable adversaries. Based on these findings, future research directions are outlined toward adaptive, lightweight, and context-aware post-quantum authentication frameworks designed for real-world IoMT deployments. Limitations of this review include restriction to English-language publications and selected databases. 
This study received no external funding, and the review protocol was not formally registered.</p>
	]]></content:encoded>

	<dc:title>Post-Quantum Authentication in the Internet of Medical Things: A System-Level Review and Future Directions</dc:title>
			<dc:creator>Fatima G. Abdullah</dc:creator>
			<dc:creator>Tayseer S. Atia</dc:creator>
		<dc:identifier>doi: 10.3390/computers15030189</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-15</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-15</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>189</prism:startingPage>
		<prism:doi>10.3390/computers15030189</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/3/189</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/3/188">

	<title>Computers, Vol. 15, Pages 188: Speech-to-Sign Gesture Translation for Kazakh: Dataset and Sign Gesture Translation System</title>
	<link>https://www.mdpi.com/2073-431X/15/3/188</link>
	<description>This paper presents the first prototype of a speech-to-sign language translation system for Kazakh Sign Language (KRSL). The proposed pipeline integrates the NVIDIA FastConformer model for automatic speech recognition (ASR) in the Kazakh language and addresses the challenges of sign language translation in a low-resource setting. Unlike American or British Sign Languages, KRSL lacks publicly available datasets and established translation systems. The pipeline follows a multi-stage process: speech input is converted into text via ASR, segmented into phrases, matched with corresponding gestures, and visualized as sign language. System performance is evaluated using word error rate (WER) for ASR and accuracy metrics for speech-to-sign translation. This study also introduces the first KRSL dataset, consisting of 1200 manually recreated signs, including 95% static images and 5% dynamic gesture videos. To improve robustness under resource-constrained conditions, a Weighted Hybrid Similarity Score (WHSS)-based gesture matching method is proposed. Experimental results show that the FastConformer model achieves an average WER of 10.55%, with 7.8% for isolated words and 13.3% for full sentences. At the phrase level, the system achieves 92.1% accuracy for unigrams, 84.6% for bigrams, and 78.3% for trigrams. The complete pipeline reaches 85% accuracy for individual words and 70% for sentences, with an average latency of 310 ms. These results demonstrate the feasibility and effectiveness of the proposed system for supporting people with hearing and speech impairments in Kazakhstan.</description>
	<pubDate>2026-03-15</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 188: Speech-to-Sign Gesture Translation for Kazakh: Dataset and Sign Gesture Translation System</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/3/188">doi: 10.3390/computers15030188</a></p>
	<p>Authors:
		Akdaulet Mnuarbek
		Akbayan Bekarystankyzy
		Mussa Turdalyuly
		Dina Oralbekova
		Alibek Dyussemkhanov
		</p>
	<p>This paper presents the first prototype of a speech-to-sign language translation system for Kazakh Sign Language (KRSL). The proposed pipeline integrates the NVIDIA FastConformer model for automatic speech recognition (ASR) in the Kazakh language and addresses the challenges of sign language translation in a low-resource setting. Unlike American or British Sign Languages, KRSL lacks publicly available datasets and established translation systems. The pipeline follows a multi-stage process: speech input is converted into text via ASR, segmented into phrases, matched with corresponding gestures, and visualized as sign language. System performance is evaluated using word error rate (WER) for ASR and accuracy metrics for speech-to-sign translation. This study also introduces the first KRSL dataset, consisting of 1200 manually recreated signs, including 95% static images and 5% dynamic gesture videos. To improve robustness under resource-constrained conditions, a Weighted Hybrid Similarity Score (WHSS)-based gesture matching method is proposed. Experimental results show that the FastConformer model achieves an average WER of 10.55%, with 7.8% for isolated words and 13.3% for full sentences. At the phrase level, the system achieves 92.1% accuracy for unigrams, 84.6% for bigrams, and 78.3% for trigrams. The complete pipeline reaches 85% accuracy for individual words and 70% for sentences, with an average latency of 310 ms. These results demonstrate the feasibility and effectiveness of the proposed system for supporting people with hearing and speech impairments in Kazakhstan.</p>
	]]></content:encoded>

	<dc:title>Speech-to-Sign Gesture Translation for Kazakh: Dataset and Sign Gesture Translation System</dc:title>
			<dc:creator>Akdaulet Mnuarbek</dc:creator>
			<dc:creator>Akbayan Bekarystankyzy</dc:creator>
			<dc:creator>Mussa Turdalyuly</dc:creator>
			<dc:creator>Dina Oralbekova</dc:creator>
			<dc:creator>Alibek Dyussemkhanov</dc:creator>
		<dc:identifier>doi: 10.3390/computers15030188</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-15</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-15</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>188</prism:startingPage>
		<prism:doi>10.3390/computers15030188</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/3/188</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/3/187">

	<title>Computers, Vol. 15, Pages 187: A Formalized Zoned Role-Based Framework for the Analysis, Design, Implementation, Maintenance and Access Control of Integrated Enterprise Systems</title>
	<link>https://www.mdpi.com/2073-431X/15/3/187</link>
	<description>Modern enterprise information systems must simultaneously support complex organizational structures, ensure robust security, and remain scalable and maintainable over time. Traditional Role-Based Access Control (RBAC) models, while effective for permission management, operate primarily as post-design security layers and do not provide a unified methodology for structuring system architecture. This paper introduces the Zoned Role-Based (ZRB) model, a mathematically formalized and comprehensive framework that integrates organizational modeling, system design, implementation, access control, and long-term maintenance. ZRB models an organization as a hierarchy of zones, each containing its own roles, applications, operations, and users, forming a recursive Zone Tree that directly mirrors real organizational semantics. Through formally defined role hierarchies, zone-scoped permission sets, and inter-zone inheritance mappings, ZRB provides a context-aware permission calculus that unifies authentication and authorization across all zones. The paper presents the theoretical foundations of ZRB, a multi-phase engineering methodology for constructing integrated enterprise systems, and a complete implementation architecture with permission inference, navigation design, administrative subsystems, and deployment models. Primary validation and evaluations across several developed systems demonstrate significant improvements in permission accuracy, administrative efficiency, scalability, and maintainability. ZRB thus offers a rigorously defined and practically validated framework for building secure, scalable, and organizationally aligned enterprise information systems.</description>
	<pubDate>2026-03-13</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 187: A Formalized Zoned Role-Based Framework for the Analysis, Design, Implementation, Maintenance and Access Control of Integrated Enterprise Systems</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/3/187">doi: 10.3390/computers15030187</a></p>
	<p>Authors:
		Harris Wang
		</p>
	<p>Modern enterprise information systems must simultaneously support complex organizational structures, ensure robust security, and remain scalable and maintainable over time. Traditional Role-Based Access Control (RBAC) models, while effective for permission management, operate primarily as post-design security layers and do not provide a unified methodology for structuring system architecture. This paper introduces the Zoned Role-Based (ZRB) model, a mathematically formalized and comprehensive framework that integrates organizational modeling, system design, implementation, access control, and long-term maintenance. ZRB models an organization as a hierarchy of zones, each containing its own roles, applications, operations, and users, forming a recursive Zone Tree that directly mirrors real organizational semantics. Through formally defined role hierarchies, zone-scoped permission sets, and inter-zone inheritance mappings, ZRB provides a context-aware permission calculus that unifies authentication and authorization across all zones. The paper presents the theoretical foundations of ZRB, a multi-phase engineering methodology for constructing integrated enterprise systems, and a complete implementation architecture with permission inference, navigation design, administrative subsystems, and deployment models. Primary validation and evaluations across several developed systems demonstrate significant improvements in permission accuracy, administrative efficiency, scalability, and maintainability. ZRB thus offers a rigorously defined and practically validated framework for building secure, scalable, and organizationally aligned enterprise information systems.</p>
	]]></content:encoded>

	<dc:title>A Formalized Zoned Role-Based Framework for the Analysis, Design, Implementation, Maintenance and Access Control of Integrated Enterprise Systems</dc:title>
			<dc:creator>Harris Wang</dc:creator>
		<dc:identifier>doi: 10.3390/computers15030187</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-13</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-13</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>187</prism:startingPage>
		<prism:doi>10.3390/computers15030187</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/3/187</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/3/186">

	<title>Computers, Vol. 15, Pages 186: Disentangling Interaction and Intention for Long-Tail Pedestrian Trajectory Prediction</title>
	<link>https://www.mdpi.com/2073-431X/15/3/186</link>
	<description>Pedestrian trajectory prediction remains a challenging task, particularly in long-tail scenarios where goal distributions are sparse and inter-agent behaviors are uncertain. In this work, we propose to disentangle the trajectory prediction task into two complementary components: interaction modeling and intention modeling. For interaction modeling, we introduce an adaptive meta-strategy that proactively extracts latent and rare-yet-critical interaction patterns often overlooked by conventional trajectory-only approaches. For intention modeling, we propose Continuous Waypoint Slot-Driven Prototypical Contrastive Learning (PCL). It adapts prototype learning to the multi-modal reality where conventional PCL fails to model diverse and continuous goal distributions. Capitalizing on the complementary strengths of both components, we orchestrate a unified frequency-based fusion module that seamlessly integrates interaction and intention modeling, yielding enhanced overall prediction accuracy. In particular, our method is model-agnostic and can be seamlessly incorporated into a wide range of existing prediction frameworks. Extensive experiments on several datasets demonstrate that our approach not only achieves consistent performance gains in standard settings, but also significantly alleviates degradation on hard or long-tail trajectory samples.</description>
	<pubDate>2026-03-12</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 186: Disentangling Interaction and Intention for Long-Tail Pedestrian Trajectory Prediction</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/3/186">doi: 10.3390/computers15030186</a></p>
	<p>Authors:
		Chengkai Yang
		Jincheng Liu
		Xingping Dong
		</p>
	<p>Pedestrian trajectory prediction remains a challenging task, particularly in long-tail scenarios where goal distributions are sparse and inter-agent behaviors are uncertain. In this work, we propose to disentangle the trajectory prediction task into two complementary components: interaction modeling and intention modeling. For interaction modeling, we introduce an adaptive meta-strategy that proactively extracts latent and rare-yet-critical interaction patterns often overlooked by conventional trajectory-only approaches. For intention modeling, we propose Continuous Waypoint Slot-Driven Prototypical Contrastive Learning (PCL). It adapts prototype learning to the multi-modal reality where conventional PCL fails to model diverse and continuous goal distributions. Capitalizing on the complementary strengths of both components, we orchestrate a unified frequency-based fusion module that seamlessly integrates interaction and intention modeling, yielding enhanced overall prediction accuracy. In particular, our method is model-agnostic and can be seamlessly incorporated into a wide range of existing prediction frameworks. Extensive experiments on several datasets demonstrate that our approach not only achieves consistent performance gains in standard settings, but also significantly alleviates degradation on hard or long-tail trajectory samples.</p>
	]]></content:encoded>

	<dc:title>Disentangling Interaction and Intention for Long-Tail Pedestrian Trajectory Prediction</dc:title>
			<dc:creator>Chengkai Yang</dc:creator>
			<dc:creator>Jincheng Liu</dc:creator>
			<dc:creator>Xingping Dong</dc:creator>
		<dc:identifier>doi: 10.3390/computers15030186</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-12</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-12</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>186</prism:startingPage>
		<prism:doi>10.3390/computers15030186</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/3/186</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/3/185">

	<title>Computers, Vol. 15, Pages 185: Employee Attrition Prediction: An Explanatory and Statistically Robust Ensemble Learning Model</title>
	<link>https://www.mdpi.com/2073-431X/15/3/185</link>
	<description>Organizational productivity and workforce management are highly affected by employee attrition. Thus, an employee attrition prediction system may allow human resource management to enhance the workplace by minimizing attrition. This study proposes a new and interpretable ensemble learning framework for employee attrition prediction. The model integrates SHapley Additive exPlanations (SHAP)-based feature selection, Optuna hyperparameter optimization, and dual explainability using SHAP and Local Interpretable Model-agnostic Explanations (LIME). Random oversampling (ROS) is used to address class imbalance. The proposed framework allows for both global and local interpretability, enabling actionable insights into retention drivers. It was assessed using two benchmark datasets: the Kaggle HR Analytics dataset (14,999 records) and the IBM HR dataset (1470 records). The results revealed that the most impactful factors on employee attrition are promotion history, tenure, job satisfaction, workload, average monthly hours, overtime, and financial incentives. Furthermore, the proposed model achieved exceptional performance on both datasets. On the Kaggle dataset, it reached an accuracy of 98.72%, an F1-score of 97.29%, and an ROC–AUC of 0.994, while on the IBM dataset, it produced an accuracy of 97.72%, an F1-score of 97.74%, and an ROC–AUC of 0.995. Moreover, the proposed approach shows high computational efficiency, demonstrating that it is suitable for real-world deployment. These findings indicate that integrating explainable AI techniques, resampling tools, and automated hyperparameter tuning can achieve robust, accurate, and actionable employee attrition predictions, supporting HR managers’ decision-making.</description>
	<pubDate>2026-03-12</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 185: Employee Attrition Prediction: An Explanatory and Statistically Robust Ensemble Learning Model</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/3/185">doi: 10.3390/computers15030185</a></p>
	<p>Authors:
		Ghalia Nassreddine
		Jamil Hammoud
		Obada Al-Khatib
		Mohamad Al Majzoub
		</p>
	<p>Organizational productivity and workforce management are highly affected by employee attrition. Thus, an employee attrition prediction system may allow human resource management to enhance the workplace by minimizing attrition. This study proposes a new and interpretable ensemble learning framework for employee attrition prediction. The model integrates SHapley Additive exPlanations (SHAP)-based feature selection, Optuna hyperparameter optimization, and dual explainability using SHAP and Local Interpretable Model-agnostic Explanations (LIME). Random oversampling (ROS) is used to address class imbalance. The proposed framework allows for both global and local interpretability, enabling actionable insights into retention drivers. It was assessed using two benchmark datasets: the Kaggle HR Analytics dataset (14,999 records) and the IBM HR dataset (1470 records). The results revealed that the most impactful factors on employee attrition are promotion history, tenure, job satisfaction, workload, average monthly hours, overtime, and financial incentives. Furthermore, the proposed model achieved exceptional performance on both datasets. On the Kaggle dataset, it reached an accuracy of 98.72%, an F1-score of 97.29%, and an ROC–AUC of 0.994, while on the IBM dataset, it produced an accuracy of 97.72%, an F1-score of 97.74%, and an ROC–AUC of 0.995. Moreover, the proposed approach shows high computational efficiency, demonstrating that it is suitable for real-world deployment. These findings indicate that integrating explainable AI techniques, resampling tools, and automated hyperparameter tuning can achieve robust, accurate, and actionable employee attrition predictions, supporting HR managers’ decision-making.</p>
	]]></content:encoded>

	<dc:title>Employee Attrition Prediction: An Explanatory and Statistically Robust Ensemble Learning Model</dc:title>
			<dc:creator>Ghalia Nassreddine</dc:creator>
			<dc:creator>Jamil Hammoud</dc:creator>
			<dc:creator>Obada Al-Khatib</dc:creator>
			<dc:creator>Mohamad Al Majzoub</dc:creator>
		<dc:identifier>doi: 10.3390/computers15030185</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-12</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-12</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>185</prism:startingPage>
		<prism:doi>10.3390/computers15030185</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/3/185</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/3/184">

	<title>Computers, Vol. 15, Pages 184: BiteAI: Attention-Guided Distillation and Weight-Only Quantization for Compact Insect-Bite Classification</title>
	<link>https://www.mdpi.com/2073-431X/15/3/184</link>
	<description>Insect bites are a common cause of skin irritation and can contribute to disease transmission through vector-borne pathogens. Early identification of the likely biting organism can assist preliminary guidance (e.g., monitoring for warning signs, considering exposure history) and may reduce complications through timely follow-up. This paper studies a compact attention-guided learning framework for multiclass insect-bite image classification under strict storage constraints. A teacher network (BiteAI-T) based on MobileNetV3-Small is trained with spatial attention pooling to emphasize lesion-relevant regions while maintaining an efficient backbone. A lightweight depthwise-separable student (BiteAI-S) is trained using multi-level knowledge distillation that combines softened-logit matching with intermediate supervision through attention-map alignment and pooled-feature matching. Model storage is further reduced through weight-only quantization-aware training using an LSQ-inspired learnable scaling factor; BatchNorm running statistics are frozen during quantization fine-tuning to improve stability. Experiments on an eight-class dataset (ants, bed bugs, chiggers, fleas, mosquitos, no bites, spiders, ticks) show that BiteAI-T reaches 93.75% test accuracy. For deployment, we export (i) a TorchScript Lite teacher artifact (BiteAI-TLite, 2.35 MB) and (ii) a weight-only int8 student artifact (BiteAI-Sint8, 0.992 MB). Comparative results are also reported for an SVD-compressed + fine-tuned FP16 variant (92.66% test accuracy, 2.84 MB), illustrating accuracy–size trade-offs across compression strategies.</description>
	<pubDate>2026-03-11</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 184: BiteAI: Attention-Guided Distillation and Weight-Only Quantization for Compact Insect-Bite Classification</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/3/184">doi: 10.3390/computers15030184</a></p>
	<p>Authors:
		Mohamed Echchidmi
		Anas Bouayad
		</p>
	<p>Insect bites are a common cause of skin irritation and can contribute to disease transmission through vector-borne pathogens. Early identification of the likely biting organism can assist preliminary guidance (e.g., monitoring for warning signs, considering exposure history) and may reduce complications through timely follow-up. This paper studies a compact attention-guided learning framework for multiclass insect-bite image classification under strict storage constraints. A teacher network (BiteAI-T) based on MobileNetV3-Small is trained with spatial attention pooling to emphasize lesion-relevant regions while maintaining an efficient backbone. A lightweight depthwise-separable student (BiteAI-S) is trained using multi-level knowledge distillation that combines softened-logit matching with intermediate supervision through attention-map alignment and pooled-feature matching. Model storage is further reduced through weight-only quantization-aware training using an LSQ-inspired learnable scaling factor; BatchNorm running statistics are frozen during quantization fine-tuning to improve stability. Experiments on an eight-class dataset (ants, bed bugs, chiggers, fleas, mosquitos, no bites, spiders, ticks) show that BiteAI-T reaches 93.75% test accuracy. For deployment, we export (i) a TorchScript Lite teacher artifact (BiteAI-TLite, 2.35 MB) and (ii) a weight-only int8 student artifact (BiteAI-Sint8, 0.992 MB). Comparative results are also reported for an SVD-compressed + fine-tuned FP16 variant (92.66% test accuracy, 2.84 MB), illustrating accuracy–size trade-offs across compression strategies.</p>
	]]></content:encoded>

	<dc:title>BiteAI: Attention-Guided Distillation and Weight-Only Quantization for Compact Insect-Bite Classification</dc:title>
			<dc:creator>Mohamed Echchidmi</dc:creator>
			<dc:creator>Anas Bouayad</dc:creator>
		<dc:identifier>doi:10.3390/computers15030184</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-11</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-11</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>184</prism:startingPage>
		<prism:doi>10.3390/computers15030184</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/3/184</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/3/183">

	<title>Computers, Vol. 15, Pages 183: Hybrid Spatio-Temporal Deep Learning Models for Multi-Task Forecasting in Renewable Energy Systems</title>
	<link>https://www.mdpi.com/2073-431X/15/3/183</link>
	<description>Short-term forecasting of solar and wind power generation is critical for smart grid management but challenging due to non-stationarity and extreme generation events. This study addresses a multi-task learning problem: regression-based forecasting of power output and binary detection of extreme events defined by a quantile-based threshold (q = 0.90). A hybrid spatio-temporal model, DP-STH++, is proposed, implementing parallel causal fusion of LSTM, GRU, a causal Conv1D stack, and a lightweight causal transformer. The architecture employs regression and classification heads, while an uncertainty-weighted mechanism stabilizes multitask optimization in the regression tasks; extreme event detection performance is evaluated using AUC. Training and evaluation follow a leakage-safe protocol with chronological data processing, calendar feature integration, time-aware splitting, and training-only estimation of scaling parameters and extreme thresholds. Experimental results obtained with a one-hour forecasting horizon and a 24 h context window demonstrate that DP-STH++ achieves the best regression performance on the hold-out set (RMSE = 257.18, MAE = 174.86&amp;amp;ndash;287.90, MASE = 0.2438, R2 = 0.9440) and the highest extreme event detection accuracy (AUC = 0.9896), ranking 1st among all compared architectures. In time-series cross-validation, the model retains the leading position with a mean MASE = 0.3883 and AUC = 0.9709. The advantages are particularly pronounced for wind power forecasting, where DP-STH++ simultaneously minimizes regression errors and maximizes AUC = 0.9880&amp;amp;ndash;0.9908.</description>
	<pubDate>2026-03-11</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 183: Hybrid Spatio-Temporal Deep Learning Models for Multi-Task Forecasting in Renewable Energy Systems</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/3/183">doi: 10.3390/computers15030183</a></p>
	<p>Authors:
		Gulnaz Tolegenova
		Alma Zakirova
		Maksat Kalimoldayev
		Zhanar Akhayeva
		</p>
	<p>Short-term forecasting of solar and wind power generation is critical for smart grid management but challenging due to non-stationarity and extreme generation events. This study addresses a multi-task learning problem: regression-based forecasting of power output and binary detection of extreme events defined by a quantile-based threshold (q = 0.90). A hybrid spatio-temporal model, DP-STH++, is proposed, implementing parallel causal fusion of LSTM, GRU, a causal Conv1D stack, and a lightweight causal transformer. The architecture employs regression and classification heads, while an uncertainty-weighted mechanism stabilizes multitask optimization in the regression tasks; extreme event detection performance is evaluated using AUC. Training and evaluation follow a leakage-safe protocol with chronological data processing, calendar feature integration, time-aware splitting, and training-only estimation of scaling parameters and extreme thresholds. Experimental results obtained with a one-hour forecasting horizon and a 24 h context window demonstrate that DP-STH++ achieves the best regression performance on the hold-out set (RMSE = 257.18, MAE = 174.86&amp;amp;ndash;287.90, MASE = 0.2438, R2 = 0.9440) and the highest extreme event detection accuracy (AUC = 0.9896), ranking 1st among all compared architectures. In time-series cross-validation, the model retains the leading position with a mean MASE = 0.3883 and AUC = 0.9709. The advantages are particularly pronounced for wind power forecasting, where DP-STH++ simultaneously minimizes regression errors and maximizes AUC = 0.9880&amp;amp;ndash;0.9908.</p>
	]]></content:encoded>

	<dc:title>Hybrid Spatio-Temporal Deep Learning Models for Multi-Task Forecasting in Renewable Energy Systems</dc:title>
			<dc:creator>Gulnaz Tolegenova</dc:creator>
			<dc:creator>Alma Zakirova</dc:creator>
			<dc:creator>Maksat Kalimoldayev</dc:creator>
			<dc:creator>Zhanar Akhayeva</dc:creator>
		<dc:identifier>doi:10.3390/computers15030183</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-11</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-11</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>183</prism:startingPage>
		<prism:doi>10.3390/computers15030183</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/3/183</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/3/182">

	<title>Computers, Vol. 15, Pages 182: DTBAffinity: A Multi-Modal Feature Engineering and Gradient-Boosting Framework for Drug&amp;ndash;Target Binding Affinity on Davis and KIBA Benchmarks</title>
	<link>https://www.mdpi.com/2073-431X/15/3/182</link>
	<description>An accurate prediction of how strongly a drug binds to its target (where the drug will have the desired effect) is very important for drug discovery. It helps select the most promising compounds and saves money by doing fewer experiments. We present DTBAffinity, a multi-modal regression framework that integrates chemically meaningful ligand descriptors with diverse protein sequence features in a unified gradient-boosting model. The representation of ligands includes physicochemical and topological descriptors (RDKit and Mordred), structural keys (MACCS and FP4), circular fingerprints (ECFP/Morgan), and SMILES-derived features from iFeatureOmega. For proteins, thousands of sequence-derived descriptors (composition, autocorrelations, physicochemical profiles, and evolutionary indices) from iFeatureOmega are used, together with contextual embeddings from large protein language models (ESM-1b, ESM-2). The feature matrices are cleaned up, variance filtered, z-score scaled, and univariate selected before being concatenated and modeled with regularized XGBoost ensembles. We evaluate DTBAffinity on two kinase-centric datasets that are commonly used: Davis (30,056 interactions: pKd values) and KIBA (118,254 interactions: integrated affinity scores). Various metrics are used to measure the performance, such as MSE, R2, Pearson/Spearman correlations, Concordance Index (CI), rm2, and AUPR. On Davis, DTBAffinity yields MSE = 0.1885, CI = 0.9102, and AUPR = 0.8112, and on KIBA, it gives MSE = 0.1540, CI = 0.8686, and AUPR = 0.8361; thus, it is better than the state-of-the-art baselines such as KronRLS, SimBoost, DeepDTA, and GraphDTA. The findings here imply that the combination of interpretable descriptors and contextual embeddings in a robust boosting framework is a great way to realize accurate, interpretable, and generalizable DTBA prediction.</description>
	<pubDate>2026-03-10</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 182: DTBAffinity: A Multi-Modal Feature Engineering and Gradient-Boosting Framework for Drug&amp;ndash;Target Binding Affinity on Davis and KIBA Benchmarks</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/3/182">doi: 10.3390/computers15030182</a></p>
	<p>Authors:
		Meshari Alazmi
		</p>
	<p>An accurate prediction of how strongly a drug binds to its target (where the drug will have the desired effect) is very important for drug discovery. It helps select the most promising compounds and saves money by doing fewer experiments. We present DTBAffinity, a multi-modal regression framework that integrates chemically meaningful ligand descriptors with diverse protein sequence features in a unified gradient-boosting model. The representation of ligands includes physicochemical and topological descriptors (RDKit and Mordred), structural keys (MACCS and FP4), circular fingerprints (ECFP/Morgan), and SMILES-derived features from iFeatureOmega. For proteins, thousands of sequence-derived descriptors (composition, autocorrelations, physicochemical profiles, and evolutionary indices) from iFeatureOmega are used, together with contextual embeddings from large protein language models (ESM-1b, ESM-2). The feature matrices are cleaned up, variance filtered, z-score scaled, and univariate selected before being concatenated and modeled with regularized XGBoost ensembles. We evaluate DTBAffinity on two kinase-centric datasets that are commonly used: Davis (30,056 interactions: pKd values) and KIBA (118,254 interactions: integrated affinity scores). Various metrics are used to measure the performance, such as MSE, R2, Pearson/Spearman correlations, Concordance Index (CI), rm2, and AUPR. On Davis, DTBAffinity yields MSE = 0.1885, CI = 0.9102, and AUPR = 0.8112, and on KIBA, it gives MSE = 0.1540, CI = 0.8686, and AUPR = 0.8361; thus, it is better than the state-of-the-art baselines such as KronRLS, SimBoost, DeepDTA, and GraphDTA. The findings here imply that the combination of interpretable descriptors and contextual embeddings in a robust boosting framework is a great way to realize accurate, interpretable, and generalizable DTBA prediction.</p>
	]]></content:encoded>

	<dc:title>DTBAffinity: A Multi-Modal Feature Engineering and Gradient-Boosting Framework for Drug&amp;ndash;Target Binding Affinity on Davis and KIBA Benchmarks</dc:title>
			<dc:creator>Meshari Alazmi</dc:creator>
		<dc:identifier>doi:10.3390/computers15030182</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-10</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-10</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>182</prism:startingPage>
		<prism:doi>10.3390/computers15030182</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/3/182</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/3/181">

	<title>Computers, Vol. 15, Pages 181: CacheAware: Data Locality-Aware Scheduling for Distributed Memory Systems</title>
	<link>https://www.mdpi.com/2073-431X/15/3/181</link>
	<description>The widening performance gap between processor speed and memory access latency has made data locality a critical bottleneck in high-performance computing. In Non-Uniform Memory Access (NUMA) and distributed memory systems, remote accesses incur penalties far greater than local operations, degrading the efficiency of scientific and data-intensive workloads. This paper introduces CacheAware, a compiler&amp;amp;ndash;runtime framework for data locality-aware scheduling. CacheAware leverages compiler analysis to annotate tasks with memory access footprints and combines this static information with runtime monitoring of cache miss patterns to guide scheduling and dynamic task migration. Unlike existing NUMA balancing or runtime tasking systems, CacheAware integrates both proactive and reactive strategies to minimize cache thrashing and remote memory fetches. Experimental evaluation on scientific benchmarks demonstrates reductions of up to 30% in cache misses and over 20% improvements in execution time compared to Linux AutoNUMA, NUMA-aware schedulers, and task-based runtimes. These results confirm that CacheAware provides a practical and scalable approach for enhancing data locality and accelerating workloads on modern distributed memory systems.</description>
	<pubDate>2026-03-10</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 181: CacheAware: Data Locality-Aware Scheduling for Distributed Memory Systems</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/3/181">doi: 10.3390/computers15030181</a></p>
	<p>Authors:
		Haifa A. Alanazi
		Abdulaziz G. Alanazi
		Nasser S. Albalawi
		</p>
	<p>The widening performance gap between processor speed and memory access latency has made data locality a critical bottleneck in high-performance computing. In Non-Uniform Memory Access (NUMA) and distributed memory systems, remote accesses incur penalties far greater than local operations, degrading the efficiency of scientific and data-intensive workloads. This paper introduces CacheAware, a compiler&amp;amp;ndash;runtime framework for data locality-aware scheduling. CacheAware leverages compiler analysis to annotate tasks with memory access footprints and combines this static information with runtime monitoring of cache miss patterns to guide scheduling and dynamic task migration. Unlike existing NUMA balancing or runtime tasking systems, CacheAware integrates both proactive and reactive strategies to minimize cache thrashing and remote memory fetches. Experimental evaluation on scientific benchmarks demonstrates reductions of up to 30% in cache misses and over 20% improvements in execution time compared to Linux AutoNUMA, NUMA-aware schedulers, and task-based runtimes. These results confirm that CacheAware provides a practical and scalable approach for enhancing data locality and accelerating workloads on modern distributed memory systems.</p>
	]]></content:encoded>

	<dc:title>CacheAware: Data Locality-Aware Scheduling for Distributed Memory Systems</dc:title>
			<dc:creator>Haifa A. Alanazi</dc:creator>
			<dc:creator>Abdulaziz G. Alanazi</dc:creator>
			<dc:creator>Nasser S. Albalawi</dc:creator>
		<dc:identifier>doi:10.3390/computers15030181</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-10</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-10</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>181</prism:startingPage>
		<prism:doi>10.3390/computers15030181</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/3/181</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/3/180">

	<title>Computers, Vol. 15, Pages 180: A Hybrid MIL Architecture for Multi-Class Classification of Bacterial Microscopic Images</title>
	<link>https://www.mdpi.com/2073-431X/15/3/180</link>
	<description>This paper addresses the problem of multi-class classification of bacterial microscopic images using a rigorous experimental protocol designed to prevent information leakage and improve performance. The dataset consists of 2034 images representing 33 taxa, organized by class. Data integrity checks confirmed the absence of corrupted or unreadable files. To formalize image characteristics and ensure quality control, indirect geometric and textural features were calculated, including minimum frame size, brightness statistics (mean and standard deviation), Shannon entropy, Laplace variance, and Sobel gradient energy. Quality checks revealed a small proportion of images with extreme brightness (2.5074%), while no samples with critically low sharpness according to the selected criteria were detected. Statistical analysis of interclass differences using the Kruskal&amp;amp;ndash;Wallis test with multiple comparison correction demonstrated the high discriminatory power of texture features, specifically gradient energy (&amp;amp;epsilon;2 = 0.819987) and Laplace variance (&amp;amp;epsilon;2 = 0.709904). Feature correlations were consistent with their physical interpretation, revealing a strong positive relationship between sharpness and gradient energy. Principal component analysis confirmed a strong structural pattern, with the first two components explaining 75.5766% of the total variance. For a unified comparison, classical machine learning, transfer learning, and modern deep architectures were evaluated within a single protocol.</description>
	<pubDate>2026-03-10</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 180: A Hybrid MIL Architecture for Multi-Class Classification of Bacterial Microscopic Images</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/3/180">doi: 10.3390/computers15030180</a></p>
	<p>Authors:
		Aisulu Ismailova
		Gulbanu Yessenbayeva
		Kuanysh Kadirkulov
		Raushan Moldasheva
		Elmira Eldarova
		Gulnaz Zhilkishbayeva
		Shynar Kodanova
		Shynar Yelezhanova
		Valentina Makhatova
		Alexander Nedzved
		</p>
	<p>This paper addresses the problem of multi-class classification of bacterial microscopic images using a rigorous experimental protocol designed to prevent information leakage and improve performance. The dataset consists of 2034 images representing 33 taxa, organized by class. Data integrity checks confirmed the absence of corrupted or unreadable files. To formalize image characteristics and ensure quality control, indirect geometric and textural features were calculated, including minimum frame size, brightness statistics (mean and standard deviation), Shannon entropy, Laplace variance, and Sobel gradient energy. Quality checks revealed a small proportion of images with extreme brightness (2.5074%), while no samples with critically low sharpness according to the selected criteria were detected. Statistical analysis of interclass differences using the Kruskal&amp;amp;ndash;Wallis test with multiple comparison correction demonstrated the high discriminatory power of texture features, specifically gradient energy (&amp;amp;epsilon;2 = 0.819987) and Laplace variance (&amp;amp;epsilon;2 = 0.709904). Feature correlations were consistent with their physical interpretation, revealing a strong positive relationship between sharpness and gradient energy. Principal component analysis confirmed a strong structural pattern, with the first two components explaining 75.5766% of the total variance. For a unified comparison, classical machine learning, transfer learning, and modern deep architectures were evaluated within a single protocol.</p>
	]]></content:encoded>

	<dc:title>A Hybrid MIL Architecture for Multi-Class Classification of Bacterial Microscopic Images</dc:title>
			<dc:creator>Aisulu Ismailova</dc:creator>
			<dc:creator>Gulbanu Yessenbayeva</dc:creator>
			<dc:creator>Kuanysh Kadirkulov</dc:creator>
			<dc:creator>Raushan Moldasheva</dc:creator>
			<dc:creator>Elmira Eldarova</dc:creator>
			<dc:creator>Gulnaz Zhilkishbayeva</dc:creator>
			<dc:creator>Shynar Kodanova</dc:creator>
			<dc:creator>Shynar Yelezhanova</dc:creator>
			<dc:creator>Valentina Makhatova</dc:creator>
			<dc:creator>Alexander Nedzved</dc:creator>
		<dc:identifier>doi:10.3390/computers15030180</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-10</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-10</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>180</prism:startingPage>
		<prism:doi>10.3390/computers15030180</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/3/180</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/3/179">

	<title>Computers, Vol. 15, Pages 179: A Computational Algorithm for Optimal Resource Allocation in Nonlinear Multi-Module Systems with Bilateral Constraints</title>
	<link>https://www.mdpi.com/2073-431X/15/3/179</link>
	<description>This study addresses the problem of optimal resource allocation in nonlinear multi-module dynamic systems arising in complex computational and techno-economic processes, where numerical stability and strict enforcement of structural constraints are critical. The objective is to develop a computationally efficient optimal control algorithm capable of handling bilateral control constraints and external balance conditions without resorting to large-scale nonlinear programming or boundary-value shooting. The proposed method is based on a modified Lagrangian formulation, in which bilateral Karush&amp;amp;ndash;Kuhn&amp;amp;ndash;Tucker (KKT) conditions are analytically embedded into the optimality system. The resulting computational scheme consists of a coupled system of matrix and vector differential equations solved through a non-iterative backward&amp;amp;ndash;forward integration procedure. Numerical experiments conducted on a nonlinear model with Cobb&amp;amp;ndash;Douglas-type operators demonstrate the stable convergence of the trajectories toward a stationary regime, strict satisfaction of bilateral constraints, and consistent enforcement of balance relations throughout the planning horizon. Empirical scalability analysis indicates approximately cubic computational complexity with respect to the state dimension, while sensitivity tests confirm the numerical robustness across different integration tolerances and ODE solvers. These results demonstrate that the proposed structure-preserving framework provides a computationally stable and practically implementable approach to constrained optimal control in nonlinear multi-module systems.</description>
	<pubDate>2026-03-09</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 179: A Computational Algorithm for Optimal Resource Allocation in Nonlinear Multi-Module Systems with Bilateral Constraints</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/3/179">doi: 10.3390/computers15030179</a></p>
	<p>Authors:
		Kamshat Tussupova
		Gulbanu Mirzakhmedova
		Diana Rakhimova
		Zhansaya Duisenbekkyzy
		</p>
	<p>This study addresses the problem of optimal resource allocation in nonlinear multi-module dynamic systems arising in complex computational and techno-economic processes, where numerical stability and strict enforcement of structural constraints are critical. The objective is to develop a computationally efficient optimal control algorithm capable of handling bilateral control constraints and external balance conditions without resorting to large-scale nonlinear programming or boundary-value shooting. The proposed method is based on a modified Lagrangian formulation, in which bilateral Karush&amp;amp;ndash;Kuhn&amp;amp;ndash;Tucker (KKT) conditions are analytically embedded into the optimality system. The resulting computational scheme consists of a coupled system of matrix and vector differential equations solved through a non-iterative backward&amp;amp;ndash;forward integration procedure. Numerical experiments conducted on a nonlinear model with Cobb&amp;amp;ndash;Douglas-type operators demonstrate the stable convergence of the trajectories toward a stationary regime, strict satisfaction of bilateral constraints, and consistent enforcement of balance relations throughout the planning horizon. Empirical scalability analysis indicates approximately cubic computational complexity with respect to the state dimension, while sensitivity tests confirm the numerical robustness across different integration tolerances and ODE solvers. These results demonstrate that the proposed structure-preserving framework provides a computationally stable and practically implementable approach to constrained optimal control in nonlinear multi-module systems.</p>
	]]></content:encoded>

	<dc:title>A Computational Algorithm for Optimal Resource Allocation in Nonlinear Multi-Module Systems with Bilateral Constraints</dc:title>
			<dc:creator>Kamshat Tussupova</dc:creator>
			<dc:creator>Gulbanu Mirzakhmedova</dc:creator>
			<dc:creator>Diana Rakhimova</dc:creator>
			<dc:creator>Zhansaya Duisenbekkyzy</dc:creator>
		<dc:identifier>doi:10.3390/computers15030179</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-09</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-09</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>179</prism:startingPage>
		<prism:doi>10.3390/computers15030179</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/3/179</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/3/178">

	<title>Computers, Vol. 15, Pages 178: Grounded Knowledge Graph Extraction via LLMs: An Anchor-Constrained Framework with Provenance Tracking</title>
	<link>https://www.mdpi.com/2073-431X/15/3/178</link>
	<description>Knowledge graphs represent real-world facts as structured triplets and underpin a wide range of applications, including question answering, recommendation, and retrieval-augmented generation. Automatically extracting such triplets from unstructured text is essential for scalable knowledge base construction. Traditional extraction methods require task-specific training data and struggle to generalize across domains. Large language models (LLMs) offer an alternative through in-context learning, enabling flexible extraction without fine-tuning. However, LLMs frequently hallucinate&amp;amp;mdash;generating plausible triplets unsupported by the source text. The root cause is the lack of provenance: existing methods produce triplets without explicit links to their textual origins, making faithfulness unverifiable. This paper presents Anchor-Extraction-Verification-Supplement (AEVS), a framework that grounds every triplet element to the source text. AEVS operates in three stages: (1) anchor discovery identifies entities, relation phrases, and attribute values with precise positions, forming a constrained extraction vocabulary; (2) grounded extraction generates triplets linked to discovered anchors; and (3) restoration-based verification validates triplets through hierarchical matching, with a coverage-aware supplement ensuring comprehensive extraction. Experiments on WebNLG, REBEL, and Wiki-NRE demonstrate consistent improvements over both trained models and LLM-based baselines. Ablation studies confirm that anchor-based constraints are the primary mechanism for hallucination reduction. Dedicated analyses of anchor discovery quality, computational cost (2.83&amp;amp;ndash;4.28 LLM calls per sample), and hallucination rates (0.23&amp;amp;ndash;20.23% across model&amp;amp;ndash;dataset configurations) provide insights into the framework&amp;amp;rsquo;s practical applicability and limitations.</description>
	<pubDate>2026-03-09</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 178: Grounded Knowledge Graph Extraction via LLMs: An Anchor-Constrained Framework with Provenance Tracking</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/3/178">doi: 10.3390/computers15030178</a></p>
	<p>Authors:
		Yuzhao Yang
		Genlang Chen
		Binhua He
		Yan Zhao
		</p>
	<p>Knowledge graphs represent real-world facts as structured triplets and underpin a wide range of applications, including question answering, recommendation, and retrieval-augmented generation. Automatically extracting such triplets from unstructured text is essential for scalable knowledge base construction. Traditional extraction methods require task-specific training data and struggle to generalize across domains. Large language models (LLMs) offer an alternative through in-context learning, enabling flexible extraction without fine-tuning. However, LLMs frequently hallucinate&amp;amp;mdash;generating plausible triplets unsupported by the source text. The root cause is the lack of provenance: existing methods produce triplets without explicit links to their textual origins, making faithfulness unverifiable. This paper presents Anchor-Extraction-Verification-Supplement (AEVS), a framework that grounds every triplet element to the source text. AEVS operates in three stages: (1) anchor discovery identifies entities, relation phrases, and attribute values with precise positions, forming a constrained extraction vocabulary; (2) grounded extraction generates triplets linked to discovered anchors; and (3) restoration-based verification validates triplets through hierarchical matching, with a coverage-aware supplement ensuring comprehensive extraction. Experiments on WebNLG, REBEL, and Wiki-NRE demonstrate consistent improvements over both trained models and LLM-based baselines. Ablation studies confirm that anchor-based constraints are the primary mechanism for hallucination reduction. Dedicated analyses of anchor discovery quality, computational cost (2.83&amp;amp;ndash;4.28 LLM calls per sample), and hallucination rates (0.23&amp;amp;ndash;20.23% across model&amp;amp;ndash;dataset configurations) provide insights into the framework&amp;amp;rsquo;s practical applicability and limitations.</p>
	]]></content:encoded>

	<dc:title>Grounded Knowledge Graph Extraction via LLMs: An Anchor-Constrained Framework with Provenance Tracking</dc:title>
			<dc:creator>Yuzhao Yang</dc:creator>
			<dc:creator>Genlang Chen</dc:creator>
			<dc:creator>Binhua He</dc:creator>
			<dc:creator>Yan Zhao</dc:creator>
		<dc:identifier>doi:10.3390/computers15030178</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-09</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-09</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>178</prism:startingPage>
		<prism:doi>10.3390/computers15030178</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/3/178</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/3/177">

	<title>Computers, Vol. 15, Pages 177: ExamQ-Gen: Instructor-in-the-Loop Generation of Self-Contained Exam Questions from Course Materials and Decision-Support Grading</title>
	<link>https://www.mdpi.com/2073-431X/15/3/177</link>
	<description>Reliable evaluation of large language models (LLMs) for educational use requires benchmarks that reflect exam constraints, instructor grading practices, and the operational consequences of thresholded decisions. This paper introduces ExamQ-Gen, an instructor-in-the-loop benchmark that couples two tasks: (i) an LLM answering university-style exam questions and (ii) decision-support grading aligned with an instructor reference. Automatic grading is used for triage and feedback; in practice, ExamQ-Gen supports instructor-led exam authoring and provides grading recommendations, while the instructor issues the final grade and pass/fail decision. ExamQ-Gen is constructed from the course content by using an LLM to generate exam-style questions directly from the lecture materials, producing a course-derived question set suitable for controlled experimentation. The benchmark then instantiates contrasting exam conditions, including instructor-authored (HUMAN) versus pipeline-generated (PIPELINE) artifacts, to evaluate robustness under distribution shifts that can occur when exam questions and answers are produced through different generation workflows. Using two LLM &amp;amp;ldquo;students&amp;amp;rdquo; (Llama3-8B-Instruct and Mistral-7B-Instruct) and an LLM-based grader, we compare automatic grading against an instructor reference on a 1&amp;amp;ndash;10 score scale and at the decision level induced by the operational pass policy (pass if score &amp;amp;ge; 9). Accordingly, our conclusions are conditioned on the two evaluated student models. Score-level agreement is strong under HUMAN conditions but degrades substantially under PIPELINE conditions, indicating condition-dependent stability. At the pass threshold, decision errors are highly asymmetric, with false fails dominating false passes, meaning that conservative grading may appear safe while producing credit denial. 
A severity-focused analysis isolates a high-stakes failure mode&amp;amp;mdash;denial of instructor-perfect answers&amp;amp;mdash;and shows that, in the most affected PIPELINE condition, the perfect-pass miss rate reaches 0.926 (50/54), consistent with systematic conservatism rather than borderline noise. Overall, the results highlight that aggregate score agreement and accuracy are insufficient for instructor-controlled exam deployment and motivate reporting practices that combine disaggregated score agreement, threshold-based error asymmetry with uncertainty, and severity-aware diagnostics under exam-relevant condition shifts.</description>
	<pubDate>2026-03-09</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 177: ExamQ-Gen: Instructor-in-the-Loop Generation of Self-Contained Exam Questions from Course Materials and Decision-Support Grading</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/3/177">doi: 10.3390/computers15030177</a></p>
	<p>Authors:
		Catalin Anghel
		Emilia Pecheanu
		Andreea Alexandra Anghel
		Marian Viorel Craciun
		Adina Cocu
		</p>
	<p>Reliable evaluation of large language models (LLMs) for educational use requires benchmarks that reflect exam constraints, instructor grading practices, and the operational consequences of thresholded decisions. This paper introduces ExamQ-Gen, an instructor-in-the-loop benchmark that couples two tasks: (i) an LLM answering university-style exam questions and (ii) decision-support grading aligned with an instructor reference. Automatic grading is used for triage and feedback; in practice, ExamQ-Gen supports instructor-led exam authoring and provides grading recommendations, while the instructor issues the final grade and pass/fail decision. ExamQ-Gen is constructed from the course content by using an LLM to generate exam-style questions directly from the lecture materials, producing a course-derived question set suitable for controlled experimentation. The benchmark then instantiates contrasting exam conditions, including instructor-authored (HUMAN) versus pipeline-generated (PIPELINE) artifacts, to evaluate robustness under distribution shifts that can occur when exam questions and answers are produced through different generation workflows. Using two LLM &amp;amp;ldquo;students&amp;amp;rdquo; (Llama3-8B-Instruct and Mistral-7B-Instruct) and an LLM-based grader, we compare automatic grading against an instructor reference on a 1&amp;amp;ndash;10 score scale and at the decision level induced by the operational pass policy (pass if score &amp;amp;ge; 9). Accordingly, our conclusions are conditioned on the two evaluated student models. Score-level agreement is strong under HUMAN conditions but degrades substantially under PIPELINE conditions, indicating condition-dependent stability. At the pass threshold, decision errors are highly asymmetric, with false fails dominating false passes, meaning that conservative grading may appear safe while producing credit denial. 
A severity-focused analysis isolates a high-stakes failure mode&amp;amp;mdash;denial of instructor-perfect answers&amp;amp;mdash;and shows that, in the most affected PIPELINE condition, the perfect-pass miss rate reaches 0.926 (50/54), consistent with systematic conservatism rather than borderline noise. Overall, the results highlight that aggregate score agreement and accuracy are insufficient for instructor-controlled exam deployment and motivate reporting practices that combine disaggregated score agreement, threshold-based error asymmetry with uncertainty, and severity-aware diagnostics under exam-relevant condition shifts.</p>
	]]></content:encoded>

	<dc:title>ExamQ-Gen: Instructor-in-the-Loop Generation of Self-Contained Exam Questions from Course Materials and Decision-Support Grading</dc:title>
			<dc:creator>Catalin Anghel</dc:creator>
			<dc:creator>Emilia Pecheanu</dc:creator>
			<dc:creator>Andreea Alexandra Anghel</dc:creator>
			<dc:creator>Marian Viorel Craciun</dc:creator>
			<dc:creator>Adina Cocu</dc:creator>
		<dc:identifier>doi: 10.3390/computers15030177</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-09</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-09</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>177</prism:startingPage>
		<prism:doi>10.3390/computers15030177</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/3/177</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/3/176">

	<title>Computers, Vol. 15, Pages 176: TrustGTN: A Social Network Trust Evaluation Method Based on Heterogeneous Graph Neural Network</title>
	<link>https://www.mdpi.com/2073-431X/15/3/176</link>
	<description>The rapid growth of social networks and online platforms has heightened the importance of trust evaluation in various applications, including e-commerce, social networking, online collaboration, and mobile crowdsourcing. Traditional trust evaluation methods often rely on handcrafted features and simple models, which fail to fully capture the implicit patterns within the complex, heterogeneous structures of social networks. To address this issue, we propose TrustGTN, a novel method based on Heterogeneous Graph Neural Networks (HGNNs). It incorporates a soft selection mechanism that dynamically adjusts the training matrix weights. This enables it to capture the evolving structural and semantic patterns of the graph. The model can automatically learn important trust chains without the need to manually set their lengths. Experimental results show that TrustGTN outperforms existing trust evaluation methods on public datasets, demonstrating its advantages in handling heterogeneous graph data.</description>
	<pubDate>2026-03-09</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 176: TrustGTN: A Social Network Trust Evaluation Method Based on Heterogeneous Graph Neural Network</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/3/176">doi: 10.3390/computers15030176</a></p>
	<p>Authors:
		Xiao Liu
		Zai Yang
		Jining Chen
		Gaoxiang Li
		</p>
	<p>The rapid growth of social networks and online platforms has heightened the importance of trust evaluation in various applications, including e-commerce, social networking, online collaboration, and mobile crowdsourcing. Traditional trust evaluation methods often rely on handcrafted features and simple models, which fail to fully capture the implicit patterns within the complex, heterogeneous structures of social networks. To address this issue, we propose TrustGTN, a novel method based on Heterogeneous Graph Neural Networks (HGNNs). It incorporates a soft selection mechanism that dynamically adjusts the training matrix weights. This enables it to capture the evolving structural and semantic patterns of the graph. The model can automatically learn important trust chains without the need to manually set their lengths. Experimental results show that TrustGTN outperforms existing trust evaluation methods on public datasets, demonstrating its advantages in handling heterogeneous graph data.</p>
	]]></content:encoded>

	<dc:title>TrustGTN: A Social Network Trust Evaluation Method Based on Heterogeneous Graph Neural Network</dc:title>
			<dc:creator>Xiao Liu</dc:creator>
			<dc:creator>Zai Yang</dc:creator>
			<dc:creator>Jining Chen</dc:creator>
			<dc:creator>Gaoxiang Li</dc:creator>
		<dc:identifier>doi: 10.3390/computers15030176</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-09</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-09</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>176</prism:startingPage>
		<prism:doi>10.3390/computers15030176</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/3/176</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/3/175">

	<title>Computers, Vol. 15, Pages 175: Reducing Teachers&amp;rsquo; Stress Through a Virtual Reality Game: A Feasibility Study of the XRSkills Game</title>
	<link>https://www.mdpi.com/2073-431X/15/3/175</link>
	<description>Teaching is widely recognized as a highly stressful profession, and recent educational changes have further increased the pressure on teachers to manage demanding classroom situations while adapting to new technologies. To address this challenge, the present study examines the feasibility and user acceptance of XRSkills, a virtual reality serious game designed to strengthen teachers&amp;amp;rsquo; coping and problem-solving strategies through realistic school-based scenarios. A feasibility evaluation was conducted with teachers from all school grades and students from multiple European countries, combining a standardized usability measure with open-ended feedback on the game experience. Overall results indicate that XRSkills achieved a good level of usability and was generally perceived as engaging and relevant, particularly for in-service teachers. Participants appreciated the game format and learning approach, while also reporting areas for improvement such as clearer guidance, richer content, and smoother technical performance. These findings support the potential of virtual reality serious games as a practical and scalable training pathway to help teachers rehearse responses to stressors in a safe environment, while also fostering confidence in using immersive technologies for professional development.</description>
	<pubDate>2026-03-08</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 175: Reducing Teachers&amp;rsquo; Stress Through a Virtual Reality Game: A Feasibility Study of the XRSkills Game</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/3/175">doi: 10.3390/computers15030175</a></p>
	<p>Authors:
		Ambra Gentile
		Marianna Alesi
		Sussi Mikaelsson
		Carlos Vaz de Carvalho
		</p>
	<p>Teaching is widely recognized as a highly stressful profession, and recent educational changes have further increased the pressure on teachers to manage demanding classroom situations while adapting to new technologies. To address this challenge, the present study examines the feasibility and user acceptance of XRSkills, a virtual reality serious game designed to strengthen teachers&amp;amp;rsquo; coping and problem-solving strategies through realistic school-based scenarios. A feasibility evaluation was conducted with teachers from all school grades and students from multiple European countries, combining a standardized usability measure with open-ended feedback on the game experience. Overall results indicate that XRSkills achieved a good level of usability and was generally perceived as engaging and relevant, particularly for in-service teachers. Participants appreciated the game format and learning approach, while also reporting areas for improvement such as clearer guidance, richer content, and smoother technical performance. These findings support the potential of virtual reality serious games as a practical and scalable training pathway to help teachers rehearse responses to stressors in a safe environment, while also fostering confidence in using immersive technologies for professional development.</p>
	]]></content:encoded>

	<dc:title>Reducing Teachers&amp;rsquo; Stress Through a Virtual Reality Game: A Feasibility Study of the XRSkills Game</dc:title>
			<dc:creator>Ambra Gentile</dc:creator>
			<dc:creator>Marianna Alesi</dc:creator>
			<dc:creator>Sussi Mikaelsson</dc:creator>
			<dc:creator>Carlos Vaz de Carvalho</dc:creator>
		<dc:identifier>doi: 10.3390/computers15030175</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-08</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-08</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>175</prism:startingPage>
		<prism:doi>10.3390/computers15030175</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/3/175</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/3/174">

	<title>Computers, Vol. 15, Pages 174: A Lightweight Cascade-Based Framework for Real-Time Zero-Day Attack Detection</title>
	<link>https://www.mdpi.com/2073-431X/15/3/174</link>
	<description>Zero-day intrusion detection is still a difficult task because of the difference between high laboratory precision and real-time deployability under strict operational constraints. This paper proposes a lightweight two-stage cascade architecture that is specifically designed for CPU-only environments and strict zero-day evaluation. The proposed architecture only uses statistical and flow-level metadata attributes, which are independent of payload analysis, to ensure compatibility with encrypted traffic. The first stage of the proposed architecture is precision oriented to detect potentially malicious traffic with a low decision threshold, and the second stage is precision oriented to enhance classification and remove false positives. To avoid optimistic bias, a strict attack-type separation protocol is employed, where testing attack types are strictly prohibited from training. The proposed method is tested on three benchmark datasets: CSIC 2012 (HTTP level), UNSW-NB15 (intra-domain), and CSE-CIC-IDS2018 (cross-domain). The experimental results show the excellent intra-domain zero-day detection capability (up to 94.81% accuracy with 0.50% FPR), controllable performance degradation in the cross-domain setting (80.53% accuracy with near-zero FPR), and extremely low FP rates on all datasets. The system provides microsecond-level inference latency (0.002&amp;amp;ndash;0.006 ms), a throughput of up to 470,000 requests per second, and memory usage below 6.2 MB without GPU support. These results confirm the significance of architectural optimization and thorough evaluation in building efficient zero-day detection systems.</description>
	<pubDate>2026-03-08</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 174: A Lightweight Cascade-Based Framework for Real-Time Zero-Day Attack Detection</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/3/174">doi: 10.3390/computers15030174</a></p>
	<p>Authors:
		Alpamis Kutlimuratov
		Furkat Rakhmatov
		Jamshid Khamzaev
		Islambek Saymanov
		Piratdin Allayarov
		Gamzatdin Bekbaev
		Shavkat Otamurodov
		Fazliddin Makhmudov
		</p>
	<p>Zero-day intrusion detection is still a difficult task because of the difference between high laboratory precision and real-time deployability under strict operational constraints. This paper proposes a lightweight two-stage cascade architecture that is specifically designed for CPU-only environments and strict zero-day evaluation. The proposed architecture only uses statistical and flow-level metadata attributes, which are independent of payload analysis, to ensure compatibility with encrypted traffic. The first stage of the proposed architecture is precision oriented to detect potentially malicious traffic with a low decision threshold, and the second stage is precision oriented to enhance classification and remove false positives. To avoid optimistic bias, a strict attack-type separation protocol is employed, where testing attack types are strictly prohibited from training. The proposed method is tested on three benchmark datasets: CSIC 2012 (HTTP level), UNSW-NB15 (intra-domain), and CSE-CIC-IDS2018 (cross-domain). The experimental results show the excellent intra-domain zero-day detection capability (up to 94.81% accuracy with 0.50% FPR), controllable performance degradation in the cross-domain setting (80.53% accuracy with near-zero FPR), and extremely low FP rates on all datasets. The system provides microsecond-level inference latency (0.002&amp;amp;ndash;0.006 ms), a throughput of up to 470,000 requests per second, and memory usage below 6.2 MB without GPU support. These results confirm the significance of architectural optimization and thorough evaluation in building efficient zero-day detection systems.</p>
	]]></content:encoded>

	<dc:title>A Lightweight Cascade-Based Framework for Real-Time Zero-Day Attack Detection</dc:title>
			<dc:creator>Alpamis Kutlimuratov</dc:creator>
			<dc:creator>Furkat Rakhmatov</dc:creator>
			<dc:creator>Jamshid Khamzaev</dc:creator>
			<dc:creator>Islambek Saymanov</dc:creator>
			<dc:creator>Piratdin Allayarov</dc:creator>
			<dc:creator>Gamzatdin Bekbaev</dc:creator>
			<dc:creator>Shavkat Otamurodov</dc:creator>
			<dc:creator>Fazliddin Makhmudov</dc:creator>
		<dc:identifier>doi: 10.3390/computers15030174</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-08</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-08</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>174</prism:startingPage>
		<prism:doi>10.3390/computers15030174</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/3/174</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/3/172">

	<title>Computers, Vol. 15, Pages 172: Adaptive Particle Filter-Neural Network Fusion for Cooperative Localization of Multi-UAV Systems in GNSS-Denied Indoor Environments</title>
	<link>https://www.mdpi.com/2073-431X/15/3/172</link>
	<description>Accurate autonomous navigation of unmanned aerial vehicles (UAVs) in complex indoor environments where satellite signals are denied remains a critical challenge. Conventional state estimation methods, such as particle filters, often suffer from particle degeneracy and high computational costs, limiting their robustness and real-time applicability. Here, we introduce an adaptive particle filter-neural network (PF-NN) fusion framework that achieves high-fidelity cooperative localization for multi-UAV systems. Our approach integrates a lightweight neural network that optimizes particle weight allocation by learning from motion consistency, thereby mitigating sample impoverishment. This is coupled with an adaptive resampling strategy that dynamically adjusts the particle population based on the effective sample size, balancing computational load with estimation accuracy. By fusing ultra-wideband (UWB) inter-vehicle ranging with visual landmark observations, the system leverages both global and local constraints to achieve robust state estimation. In simulations involving six UAVs in a complex indoor setting, our algorithm demonstrated superior performance, achieving an average root-mean-square error (RMSE) of 0.437 m. This work provides a robust and efficient solution for multi-UAV cooperative localization, paving the way for reliable autonomous operations in GNSS-denied scenarios such as search-and-rescue and industrial inspection.</description>
	<pubDate>2026-03-06</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 172: Adaptive Particle Filter-Neural Network Fusion for Cooperative Localization of Multi-UAV Systems in GNSS-Denied Indoor Environments</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/3/172">doi: 10.3390/computers15030172</a></p>
	<p>Authors:
		Zhongyi Wang
		Hao Wang
		Shuzhi Liu
		</p>
	<p>Accurate autonomous navigation of unmanned aerial vehicles (UAVs) in complex indoor environments where satellite signals are denied remains a critical challenge. Conventional state estimation methods, such as particle filters, often suffer from particle degeneracy and high computational costs, limiting their robustness and real-time applicability. Here, we introduce an adaptive particle filter-neural network (PF-NN) fusion framework that achieves high-fidelity cooperative localization for multi-UAV systems. Our approach integrates a lightweight neural network that optimizes particle weight allocation by learning from motion consistency, thereby mitigating sample impoverishment. This is coupled with an adaptive resampling strategy that dynamically adjusts the particle population based on the effective sample size, balancing computational load with estimation accuracy. By fusing ultra-wideband (UWB) inter-vehicle ranging with visual landmark observations, the system leverages both global and local constraints to achieve robust state estimation. In simulations involving six UAVs in a complex indoor setting, our algorithm demonstrated superior performance, achieving an average root-mean-square error (RMSE) of 0.437 m. This work provides a robust and efficient solution for multi-UAV cooperative localization, paving the way for reliable autonomous operations in GNSS-denied scenarios such as search-and-rescue and industrial inspection.</p>
	]]></content:encoded>

	<dc:title>Adaptive Particle Filter-Neural Network Fusion for Cooperative Localization of Multi-UAV Systems in GNSS-Denied Indoor Environments</dc:title>
			<dc:creator>Zhongyi Wang</dc:creator>
			<dc:creator>Hao Wang</dc:creator>
			<dc:creator>Shuzhi Liu</dc:creator>
		<dc:identifier>doi: 10.3390/computers15030172</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-06</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-06</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>172</prism:startingPage>
		<prism:doi>10.3390/computers15030172</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/3/172</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/3/173">

	<title>Computers, Vol. 15, Pages 173: Adoption of AI in Higher Education: Engineering Faculty Perceptions of Preparation for Industry 4.0</title>
	<link>https://www.mdpi.com/2073-431X/15/3/173</link>
	<description>Artificial intelligence (AI) has established itself as a key technology in the context of Industry 4.0, with direct implications for university education, especially in engineering degrees. This study analyses the degree of adoption and the main educational uses of AI-based tools in higher education, as well as teachers&amp;amp;rsquo; perceptions of their contribution to preparing students for the professional challenges associated with Industry 4.0. A qualitative descriptive-interpretative design was used, involving semi-structured interviews with 32 engineering teachers at the University of Seville. The results show an incipient and uneven adoption, focused mainly on instrumental uses to support planning and material development, with still limited integration in assessment and learning personalisation. Despite this, teachers perceive AI as a resource with the potential to promote the development of digital skills and improve employability, although they emphasise the need for specific teacher training and institutional support for deeper and more coherent pedagogical integration.</description>
	<pubDate>2026-03-06</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 173: Adoption of AI in Higher Education: Engineering Faculty Perceptions of Preparation for Industry 4.0</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/3/173">doi: 10.3390/computers15030173</a></p>
	<p>Authors:
		José Fernández Cerero
		José María Fernández Batanero
		Daniel Fernández Cerero
		Marta Montenegro Rueda
		</p>
	<p>Artificial intelligence (AI) has established itself as a key technology in the context of Industry 4.0, with direct implications for university education, especially in engineering degrees. This study analyses the degree of adoption and the main educational uses of AI-based tools in higher education, as well as teachers&amp;amp;rsquo; perceptions of their contribution to preparing students for the professional challenges associated with Industry 4.0. A qualitative descriptive-interpretative design was used, involving semi-structured interviews with 32 engineering teachers at the University of Seville. The results show an incipient and uneven adoption, focused mainly on instrumental uses to support planning and material development, with still limited integration in assessment and learning personalisation. Despite this, teachers perceive AI as a resource with the potential to promote the development of digital skills and improve employability, although they emphasise the need for specific teacher training and institutional support for deeper and more coherent pedagogical integration.</p>
	]]></content:encoded>

	<dc:title>Adoption of AI in Higher Education: Engineering Faculty Perceptions of Preparation for Industry 4.0</dc:title>
			<dc:creator>José Fernández Cerero</dc:creator>
			<dc:creator>José María Fernández Batanero</dc:creator>
			<dc:creator>Daniel Fernández Cerero</dc:creator>
			<dc:creator>Marta Montenegro Rueda</dc:creator>
		<dc:identifier>doi: 10.3390/computers15030173</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-06</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-06</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>173</prism:startingPage>
		<prism:doi>10.3390/computers15030173</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/3/173</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/3/171">

	<title>Computers, Vol. 15, Pages 171: Weed Detection in Challenging Field Conditions: A Semi-Supervised Framework for Overcoming Shadow Bias and Data Scarcity</title>
	<link>https://www.mdpi.com/2073-431X/15/3/171</link>
	<description>The automated management of invasive weeds is critical for sustainable agriculture, yet the performance of deep learning models in real-world fields is often compromised by two factors: challenging environmental conditions and the high cost of data annotation. This study tackles both issues through a diagnostic-driven, semi-supervised framework. Using a unique dataset of approximately 975 labelled and 10,000 unlabelled images of Guinea Grass in sugarcane, we first establish strong supervised baselines for classification (ResNet) and detection (YOLO, RF-DETR), achieving F1 scores up to 0.90 and mAP50 scores exceeding 0.82. Crucially, this foundational analysis, aided by interpretability tools, uncovered a pervasive &amp;amp;ldquo;shadow bias,&amp;amp;rdquo; where models learned to misidentify shadows as vegetation. This diagnostic insight motivated our primary contribution: a semi-supervised pipeline that leverages unlabelled data to enhance model robustness. By training models on a more diverse set of visual information through pseudo-labelling, this framework not only helps mitigate the shadow bias but also provides a tangible boost in recall, a critical metric for minimising weed escapes in automated spraying systems. To validate our methodology, we demonstrate its effectiveness in a low-data regime on a public crop&amp;amp;ndash;weed benchmark. Our work provides a clear and field-tested framework for developing, diagnosing, and improving robust computer vision systems for the complex realities of precision agriculture.</description>
	<pubDate>2026-03-06</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 171: Weed Detection in Challenging Field Conditions: A Semi-Supervised Framework for Overcoming Shadow Bias and Data Scarcity</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/3/171">doi: 10.3390/computers15030171</a></p>
	<p>Authors:
		Alzayat Saleh
		Shunsuke Hatano
		Mostafa Rahimi Azghadi
		</p>
	<p>The automated management of invasive weeds is critical for sustainable agriculture, yet the performance of deep learning models in real-world fields is often compromised by two factors: challenging environmental conditions and the high cost of data annotation. This study tackles both issues through a diagnostic-driven, semi-supervised framework. Using a unique dataset of approximately 975 labelled and 10,000 unlabelled images of Guinea Grass in sugarcane, we first establish strong supervised baselines for classification (ResNet) and detection (YOLO, RF-DETR), achieving F1 scores up to 0.90 and mAP50 scores exceeding 0.82. Crucially, this foundational analysis, aided by interpretability tools, uncovered a pervasive &amp;amp;ldquo;shadow bias,&amp;amp;rdquo; where models learned to misidentify shadows as vegetation. This diagnostic insight motivated our primary contribution: a semi-supervised pipeline that leverages unlabelled data to enhance model robustness. By training models on a more diverse set of visual information through pseudo-labelling, this framework not only helps mitigate the shadow bias but also provides a tangible boost in recall, a critical metric for minimising weed escapes in automated spraying systems. To validate our methodology, we demonstrate its effectiveness in a low-data regime on a public crop&amp;amp;ndash;weed benchmark. Our work provides a clear and field-tested framework for developing, diagnosing, and improving robust computer vision systems for the complex realities of precision agriculture.</p>
	]]></content:encoded>

	<dc:title>Weed Detection in Challenging Field Conditions: A Semi-Supervised Framework for Overcoming Shadow Bias and Data Scarcity</dc:title>
			<dc:creator>Alzayat Saleh</dc:creator>
			<dc:creator>Shunsuke Hatano</dc:creator>
			<dc:creator>Mostafa Rahimi Azghadi</dc:creator>
		<dc:identifier>doi: 10.3390/computers15030171</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-06</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-06</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>171</prism:startingPage>
		<prism:doi>10.3390/computers15030171</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/3/171</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/3/170">

	<title>Computers, Vol. 15, Pages 170: The Seed Optimization Method for Fuzz Testing Based on Neural Network-Guided Genetic Algorithm</title>
	<link>https://www.mdpi.com/2073-431X/15/3/170</link>
	<description>To address the issues of low initial seed efficiency and a large number of ineffective mutations, this paper proposes an innovative fuzz testing seed optimization method combining neural networks and genetic algorithms. Traditional fuzz testing seed generation typically relies on random selection and the number of covered paths. In contrast, our method significantly improves seed generation efficiency and coverage by incorporating neural network models and genetic algorithms. First, the AFL tool is used to generate seed coverage path data, which is then used to train the neural network model. This model is employed to construct a fitness function to assess the potential of each seed. Subsequently, new seeds are generated through genetic algorithm crossover and mutation operations, with fitness evaluations based on the predictions of the neural network. Ultimately, the genetic algorithm optimizes the seeds through multiple generations, progressively improving coverage and vulnerability discovery capabilities. The experimental results demonstrate that the proposed method achieves significant improvements in fuzz testing performance, with path coverage increased by 28% compared to AFL and 23% compared to AFL++, and vulnerability discovery enhanced by over 200%.</description>
	<pubDate>2026-03-06</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 170: The Seed Optimization Method for Fuzz Testing Based on Neural Network-Guided Genetic Algorithm</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/3/170">doi: 10.3390/computers15030170</a></p>
	<p>Authors:
		Yongbo Jiang
		Zhitao Li
		Baofeng Duan
		Tao Feng
		</p>
	<p>To address the issues of low initial seed efficiency and a large number of ineffective mutations, this paper proposes an innovative fuzz testing seed optimization method combining neural networks and genetic algorithms. Traditional fuzz testing seed generation typically relies on random selection and the number of covered paths. In contrast, our method significantly improves seed generation efficiency and coverage by incorporating neural network models and genetic algorithms. First, the AFL tool is used to generate seed coverage path data, which is then used to train the neural network model. This model is employed to construct a fitness function to assess the potential of each seed. Subsequently, new seeds are generated through genetic algorithm crossover and mutation operations, with fitness evaluations based on the predictions of the neural network. Ultimately, the genetic algorithm optimizes the seeds through multiple generations, progressively improving coverage and vulnerability discovery capabilities. The experimental results demonstrate that the proposed method achieves significant improvements in fuzz testing performance, with path coverage increased by 28% compared to AFL and 23% compared to AFL++, and vulnerability discovery enhanced by over 200%.</p>
	]]></content:encoded>

	<dc:title>The Seed Optimization Method for Fuzz Testing Based on Neural Network-Guided Genetic Algorithm</dc:title>
			<dc:creator>Yongbo Jiang</dc:creator>
			<dc:creator>Zhitao Li</dc:creator>
			<dc:creator>Baofeng Duan</dc:creator>
			<dc:creator>Tao Feng</dc:creator>
		<dc:identifier>doi: 10.3390/computers15030170</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-06</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-06</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>170</prism:startingPage>
		<prism:doi>10.3390/computers15030170</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/3/170</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/3/169">

	<title>Computers, Vol. 15, Pages 169: Intrusion Detection in Fog Computing: A Systematic Review of Security Advances and Challenges</title>
	<link>https://www.mdpi.com/2073-431X/15/3/169</link>
	<description>Fog computing extends cloud services to the network edge to support low-latency IoT applications. However, since fog environments are distributed and resource-constrained, intrusion detection systems must be adapted to defend against cyberattacks while keeping computation and communication overhead minimal. This systematic review presents research on intrusion detection systems (IDSs) for fog computing and synthesizes advances and research gaps. The study was guided by the &amp;ldquo;Preferred-Reporting-Items for-Systematic-Reviews-and-Meta-Analyses&amp;rdquo; (PRISMA) framework. Scopus and Web of Science were searched in the title field using TITLE/TI = (&amp;ldquo;intrusion detection&amp;rdquo; AND &amp;ldquo;fog computing&amp;rdquo;) for 2021&amp;ndash;2025. The inclusion criteria were (i) 2021&amp;ndash;2025 publications, (ii) journal or conference papers, (iii) English language, and (iv) open access availability; duplicates were removed programmatically using a DOI-first key with a title, year, and author alternative. The search identified 8560 records, of which 4905 were unique and included for qualitative grouping and bibliometric synthesis. Metadata (year, venue, authors, affiliations, keywords, and citations) were extracted and analyzed in Python to compute trends and collaboration. Intrusion detection systems in fog networks were categorized into traditional/signature-based, machine learning, deep learning, and hybrid/ensemble. Hybrid and DL approaches reported accuracy ranging from 95 to 99% on benchmark datasets (such as NSL-KDD, UNSW-NB15, CIC-IDS2017, KDD99, BoT-IoT). Notable bottlenecks included computational load relative to real-time latency on resource-constrained nodes, elevated false-positive rates for anomaly detection under concept drift, limited generalization to unseen attacks, privacy risks from centralizing data, and limited real-world validation. 
Bibliometric analyses highlighted the field&amp;rsquo;s concentration in fast-turnaround, open-access journals such as IEEE Access and Sensors, as well as a small number of highly collaborative author clusters, alongside dominant terms such as &amp;ldquo;learning,&amp;rdquo; &amp;ldquo;federated,&amp;rdquo; &amp;ldquo;ensemble,&amp;rdquo; &amp;ldquo;lightweight,&amp;rdquo; and &amp;ldquo;explainability.&amp;rdquo; Emerging directions include federated and distributed training to preserve privacy, as well as online/continual learning adaptation. Future work should consist of real-world evaluation of fog networks, ultra-lightweight yet adaptive hybrid IDS, self-learning, and secure cooperative frameworks. These insights help researchers select appropriate IDS models for fog networks.</description>
	<pubDate>2026-03-05</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 169: Intrusion Detection in Fog Computing: A Systematic Review of Security Advances and Challenges</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/3/169">doi: 10.3390/computers15030169</a></p>
	<p>Authors:
		Nyashadzashe Tamuka
		Topside Ehleketani Mathonsi
		Thomas Otieno Olwal
		Solly Maswikaneng
		Tonderai Muchenje
		Tshimangadzo Mavin Tshilongamulenzhe
		</p>
	<p>Fog computing extends cloud services to the network edge to support low-latency IoT applications. However, since fog environments are distributed and resource-constrained, intrusion detection systems must be adapted to defend against cyberattacks while keeping computation and communication overhead minimal. This systematic review presents research on intrusion detection systems (IDSs) for fog computing and synthesizes advances and research gaps. The study was guided by the &ldquo;Preferred-Reporting-Items for-Systematic-Reviews-and-Meta-Analyses&rdquo; (PRISMA) framework. Scopus and Web of Science were searched in the title field using TITLE/TI = (&ldquo;intrusion detection&rdquo; AND &ldquo;fog computing&rdquo;) for 2021&ndash;2025. The inclusion criteria were (i) 2021&ndash;2025 publications, (ii) journal or conference papers, (iii) English language, and (iv) open access availability; duplicates were removed programmatically using a DOI-first key with a title, year, and author alternative. The search identified 8560 records, of which 4905 were unique and included for qualitative grouping and bibliometric synthesis. Metadata (year, venue, authors, affiliations, keywords, and citations) were extracted and analyzed in Python to compute trends and collaboration. Intrusion detection systems in fog networks were categorized into traditional/signature-based, machine learning, deep learning, and hybrid/ensemble. Hybrid and DL approaches reported accuracy ranging from 95 to 99% on benchmark datasets (such as NSL-KDD, UNSW-NB15, CIC-IDS2017, KDD99, BoT-IoT). Notable bottlenecks included computational load relative to real-time latency on resource-constrained nodes, elevated false-positive rates for anomaly detection under concept drift, limited generalization to unseen attacks, privacy risks from centralizing data, and limited real-world validation. 
Bibliometric analyses highlighted the field&rsquo;s concentration in fast-turnaround, open-access journals such as IEEE Access and Sensors, as well as a small number of highly collaborative author clusters, alongside dominant terms such as &ldquo;learning,&rdquo; &ldquo;federated,&rdquo; &ldquo;ensemble,&rdquo; &ldquo;lightweight,&rdquo; and &ldquo;explainability.&rdquo; Emerging directions include federated and distributed training to preserve privacy, as well as online/continual learning adaptation. Future work should consist of real-world evaluation of fog networks, ultra-lightweight yet adaptive hybrid IDS, self-learning, and secure cooperative frameworks. These insights help researchers select appropriate IDS models for fog networks.</p>
	]]></content:encoded>

	<dc:title>Intrusion Detection in Fog Computing: A Systematic Review of Security Advances and Challenges</dc:title>
			<dc:creator>Nyashadzashe Tamuka</dc:creator>
			<dc:creator>Topside Ehleketani Mathonsi</dc:creator>
			<dc:creator>Thomas Otieno Olwal</dc:creator>
			<dc:creator>Solly Maswikaneng</dc:creator>
			<dc:creator>Tonderai Muchenje</dc:creator>
			<dc:creator>Tshimangadzo Mavin Tshilongamulenzhe</dc:creator>
		<dc:identifier>doi: 10.3390/computers15030169</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-05</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-05</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Systematic Review</prism:section>
	<prism:startingPage>169</prism:startingPage>
		<prism:doi>10.3390/computers15030169</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/3/169</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/3/168">

	<title>Computers, Vol. 15, Pages 168: VDTAR-Net: A Cooperative Dual-Path Convolutional Neural Network&amp;ndash;Transformer Network for Robust Highlight Reflection Segmentation</title>
	<link>https://www.mdpi.com/2073-431X/15/3/168</link>
	<description>In medical endoscopic imaging, specular reflection (SR) frequently leads to local overexposure, obscuring essential tissue information and complicating computer-aided diagnosis (CAD). Traditional convolutional neural networks (CNNs) face difficulties in modeling global illumination phenomena due to their biased local receptive fields and the inherent &amp;ldquo;object assumption.&amp;rdquo; Conversely, pure transformer models often lose high-frequency boundary details and incur substantial computational costs. To tackle these challenges, this paper introduces VDTAR-Net, a specialized framework adapted to address the unique optical characteristics of specular reflections. Building upon hybrid architectures, our contribution focuses on two core mechanisms: (1) a Cross-architecture Fusion Module (CFM) that enables deep, bidirectional information flow, allowing the Transformer&amp;rsquo;s global illumination modeling to continuously correct the CNN&amp;rsquo;s local texture biases; and (2) a Reflective-Aware Module (RAM), which explicitly integrates the physical prior of high-intensity saturation into the attention mechanism. This task-specific design significantly enhances sensitivity to boundary details in overexposed regions. We also created the first large-scale, expert-labeled cervical white light segmentation dataset, Cervix-WL-900. High-quality ground truth labels were generated through rigorous double-blind annotation and arbitration by senior experts. Experimental results show that VDTAR-Net achieves a Dice score of 92.56% and a mean Intersection over Union (mIoU) score of 87.31% on Cervix-WL-900, demonstrating superior performance compared to methods like U-Net, DeepLabv3+, SegFormer, and PSPNet. Ablation studies further confirm the substantial contributions of dual-path collaboration, CFM deep fusion, and RAM task-specific priors. 
VDTAR-Net provides a robust baseline for precise highlight segmentation, laying a foundation for subsequent image quality assessment, restoration, and feature decoupling in diagnostic models.</description>
	<pubDate>2026-03-04</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 168: VDTAR-Net: A Cooperative Dual-Path Convolutional Neural Network&ndash;Transformer Network for Robust Highlight Reflection Segmentation</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/3/168">doi: 10.3390/computers15030168</a></p>
	<p>Authors:
		Qianlong Zhang
		Yue Zeng
		</p>
	<p>In medical endoscopic imaging, specular reflection (SR) frequently leads to local overexposure, obscuring essential tissue information and complicating computer-aided diagnosis (CAD). Traditional convolutional neural networks (CNNs) face difficulties in modeling global illumination phenomena due to their biased local receptive fields and the inherent &ldquo;object assumption.&rdquo; Conversely, pure transformer models often lose high-frequency boundary details and incur substantial computational costs. To tackle these challenges, this paper introduces VDTAR-Net, a specialized framework adapted to address the unique optical characteristics of specular reflections. Building upon hybrid architectures, our contribution focuses on two core mechanisms: (1) a Cross-architecture Fusion Module (CFM) that enables deep, bidirectional information flow, allowing the Transformer&rsquo;s global illumination modeling to continuously correct the CNN&rsquo;s local texture biases; and (2) a Reflective-Aware Module (RAM), which explicitly integrates the physical prior of high-intensity saturation into the attention mechanism. This task-specific design significantly enhances sensitivity to boundary details in overexposed regions. We also created the first large-scale, expert-labeled cervical white light segmentation dataset, Cervix-WL-900. High-quality ground truth labels were generated through rigorous double-blind annotation and arbitration by senior experts. Experimental results show that VDTAR-Net achieves a Dice score of 92.56% and a mean Intersection over Union (mIoU) score of 87.31% on Cervix-WL-900, demonstrating superior performance compared to methods like U-Net, DeepLabv3+, SegFormer, and PSPNet. Ablation studies further confirm the substantial contributions of dual-path collaboration, CFM deep fusion, and RAM task-specific priors. 
VDTAR-Net provides a robust baseline for precise highlight segmentation, laying a foundation for subsequent image quality assessment, restoration, and feature decoupling in diagnostic models.</p>
	]]></content:encoded>

	<dc:title>VDTAR-Net: A Cooperative Dual-Path Convolutional Neural Network&amp;ndash;Transformer Network for Robust Highlight Reflection Segmentation</dc:title>
			<dc:creator>Qianlong Zhang</dc:creator>
			<dc:creator>Yue Zeng</dc:creator>
		<dc:identifier>doi: 10.3390/computers15030168</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-04</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-04</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>168</prism:startingPage>
		<prism:doi>10.3390/computers15030168</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/3/168</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/3/167">

	<title>Computers, Vol. 15, Pages 167: The Influence of the Form of Digital Etalons on the Effectiveness of Associative Security</title>
	<link>https://www.mdpi.com/2073-431X/15/3/167</link>
	<description>Opportunities to improve the effectiveness of associative protection in scene analysis can be found in changing the configurations of digital etalons (reference patterns) and in the transition from a decimal to a hexadecimal system when encoding object names and their coordinates. The relevance of the research undertaken is determined by the need for a significant increase in the number of keys used and the advisability of further improvement of the security strength. Based on a preliminary analysis, a rule for selecting digital reference configurations has been formulated from the condition of uniform distribution of bit inclusions in the pseudorandom sequence (GAMMA) container when using the decimal and hexadecimal systems for encoding purposes. Algorithms for forming a complete and limited test list of permutations for experimental research purposes have been developed. Results of the computational experiment confirmed validity of the formulated rule. For the accepted configurations, estimates of the expected number of preserved bits of the etalon were obtained.</description>
	<pubDate>2026-03-04</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 167: The Influence of the Form of Digital Etalons on the Effectiveness of Associative Security</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/3/167">doi: 10.3390/computers15030167</a></p>
	<p>Authors:
		Vadim Raikhlin
		Ruslan Gibadullin
		Alexey Boyko
		</p>
	<p>Opportunities to improve the effectiveness of associative protection in scene analysis can be found in changing the configurations of digital etalons (reference patterns) and in the transition from a decimal to a hexadecimal system when encoding object names and their coordinates. The relevance of the research undertaken is determined by the need for a significant increase in the number of keys used and the advisability of further improvement of the security strength. Based on a preliminary analysis, a rule for selecting digital reference configurations has been formulated from the condition of uniform distribution of bit inclusions in the pseudorandom sequence (GAMMA) container when using the decimal and hexadecimal systems for encoding purposes. Algorithms for forming a complete and limited test list of permutations for experimental research purposes have been developed. Results of the computational experiment confirmed validity of the formulated rule. For the accepted configurations, estimates of the expected number of preserved bits of the etalon were obtained.</p>
	]]></content:encoded>

	<dc:title>The Influence of the Form of Digital Etalons on the Effectiveness of Associative Security</dc:title>
			<dc:creator>Vadim Raikhlin</dc:creator>
			<dc:creator>Ruslan Gibadullin</dc:creator>
			<dc:creator>Alexey Boyko</dc:creator>
		<dc:identifier>doi: 10.3390/computers15030167</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-04</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-04</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>167</prism:startingPage>
		<prism:doi>10.3390/computers15030167</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/3/167</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/3/166">

	<title>Computers, Vol. 15, Pages 166: Non-Destructive Determination of Hass Avocado Harvest Maturity in Colombia Based on Low-Cost Bioimpedance Spectroscopy and Machine Learning</title>
	<link>https://www.mdpi.com/2073-431X/15/3/166</link>
	<description>The export of Hass avocado (Persea americana Mill.) from Colombia requires accurate determination of harvest maturity, currently assessed through destructive dry matter (DM) measurements that are wasteful and limited in throughput. The objective of the article is to propose a low-cost, non-destructive approach to determine the maturity of the Hass avocado crop based on machine learning techniques. The approach consists of a low-cost, non-invasive bioimpedance spectroscopy system operating in the 1&amp;ndash;10 kHz range, featuring a custom Analog Front End (AFE) and a tetrapolar surface probe to mitigate skin contact resistance, which collects data for predictive models of avocado maturity. To evaluate the quality of the approach, a longitudinal field study (n = 100) was conducted in a commercial orchard in Cundinamarca, Colombia, tracking complex impedance features&amp;mdash;Magnitude, Phase Angle, Resistance, and Reactance&amp;mdash;of tagged fruits over 8 weeks across four measurement timepoints. The predictive performance of a classical chemometric model (PLS-DA), non-linear classifiers (SVM, Random Forest), and a temporal Deep Learning (LSTM) architecture was compared using a Stratified Group K-Fold Cross-Validation scheme to prevent data leakage across fruits from the same tree. The 4-electrode configuration successfully isolated mesocarp impedance, identifying the 5&amp;ndash;7.2 kHz band as the most sensitive to physiological maturation. In turn, the LSTM model achieved a mean accuracy of 92.0% and an AUC of 0.94, outperforming the other models by 4.0% in mean accuracy. The results demonstrate that modeling the temporal trajectory of impedance, rather than single-point measurements, improves harvest maturity classification in Hass avocados, providing a scalable, low-cost alternative to destructive testing.</description>
	<pubDate>2026-03-04</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 166: Non-Destructive Determination of Hass Avocado Harvest Maturity in Colombia Based on Low-Cost Bioimpedance Spectroscopy and Machine Learning</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/3/166">doi: 10.3390/computers15030166</a></p>
	<p>Authors:
		Froylan Jimenez Sanchez
		Jose Aguilar
		Marta Tabares-Betancur
		</p>
	<p>The export of Hass avocado (Persea americana Mill.) from Colombia requires accurate determination of harvest maturity, currently assessed through destructive dry matter (DM) measurements that are wasteful and limited in throughput. The objective of the article is to propose a low-cost, non-destructive approach to determine the maturity of the Hass avocado crop based on machine learning techniques. The approach consists of a low-cost, non-invasive bioimpedance spectroscopy system operating in the 1&ndash;10 kHz range, featuring a custom Analog Front End (AFE) and a tetrapolar surface probe to mitigate skin contact resistance, which collects data for predictive models of avocado maturity. To evaluate the quality of the approach, a longitudinal field study (n = 100) was conducted in a commercial orchard in Cundinamarca, Colombia, tracking complex impedance features&mdash;Magnitude, Phase Angle, Resistance, and Reactance&mdash;of tagged fruits over 8 weeks across four measurement timepoints. The predictive performance of a classical chemometric model (PLS-DA), non-linear classifiers (SVM, Random Forest), and a temporal Deep Learning (LSTM) architecture was compared using a Stratified Group K-Fold Cross-Validation scheme to prevent data leakage across fruits from the same tree. The 4-electrode configuration successfully isolated mesocarp impedance, identifying the 5&ndash;7.2 kHz band as the most sensitive to physiological maturation. In turn, the LSTM model achieved a mean accuracy of 92.0% and an AUC of 0.94, outperforming the other models by 4.0% in mean accuracy. The results demonstrate that modeling the temporal trajectory of impedance, rather than single-point measurements, improves harvest maturity classification in Hass avocados, providing a scalable, low-cost alternative to destructive testing.</p>
	]]></content:encoded>

	<dc:title>Non-Destructive Determination of Hass Avocado Harvest Maturity in Colombia Based on Low-Cost Bioimpedance Spectroscopy and Machine Learning</dc:title>
			<dc:creator>Froylan Jimenez Sanchez</dc:creator>
			<dc:creator>Jose Aguilar</dc:creator>
			<dc:creator>Marta Tabares-Betancur</dc:creator>
		<dc:identifier>doi: 10.3390/computers15030166</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-04</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-04</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>166</prism:startingPage>
		<prism:doi>10.3390/computers15030166</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/3/166</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/3/165">

	<title>Computers, Vol. 15, Pages 165: Development and Validation of the Computational Thinking Assessment Tool DACT</title>
	<link>https://www.mdpi.com/2073-431X/15/3/165</link>
	<description>Although computational thinking (CT) has attracted researchers&amp;rsquo; and educators&amp;rsquo; interest for the last 20 years, resulting in new educational approaches and reformation of curricula, more research work needs to be performed, especially in the field of CT assessment. Taking this need into consideration, this article describes the development of a new CT assessment tool. The DACT CT assessment tool is developed based on CT literature, taking into consideration six basic CT dimensions. Initially, 90 CT assessment tasks are created, which are examined and reformed through a pilot study. The main research consists of an extensive study (521 students), which has resulted in the construction of the DACT CT assessment tool through continuous monitoring of Cronbach&amp;rsquo;s &amp;alpha;, consisting of 36 final tasks. DACT is disengaged from programming, does not require a specific programming language as it uses its own micro-world, is cross-platform and can be administered online or in paper format mode, supported by an administering protocol. This article also discusses the validation process of the DACT and argues on several validation checks, such as face validity, criterion validity and concurrent validity. This work has the ambition to provide a new, useful CT assessment tool to the scientific community.</description>
	<pubDate>2026-03-04</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 165: Development and Validation of the Computational Thinking Assessment Tool DACT</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/3/165">doi: 10.3390/computers15030165</a></p>
	<p>Authors:
		Emmanouil Poulakis
		Panagiotis Politis
		Petros Roussos
		</p>
	<p>Although computational thinking (CT) has attracted researchers&rsquo; and educators&rsquo; interest for the last 20 years, resulting in new educational approaches and reformation of curricula, more research work needs to be performed, especially in the field of CT assessment. Taking this need into consideration, this article describes the development of a new CT assessment tool. The DACT CT assessment tool is developed based on CT literature, taking into consideration six basic CT dimensions. Initially, 90 CT assessment tasks are created, which are examined and reformed through a pilot study. The main research consists of an extensive study (521 students), which has resulted in the construction of the DACT CT assessment tool through continuous monitoring of Cronbach&rsquo;s &alpha;, consisting of 36 final tasks. DACT is disengaged from programming, does not require a specific programming language as it uses its own micro-world, is cross-platform and can be administered online or in paper format mode, supported by an administering protocol. This article also discusses the validation process of the DACT and argues on several validation checks, such as face validity, criterion validity and concurrent validity. This work has the ambition to provide a new, useful CT assessment tool to the scientific community.</p>
	]]></content:encoded>

	<dc:title>Development and Validation of the Computational Thinking Assessment Tool DACT</dc:title>
			<dc:creator>Emmanouil Poulakis</dc:creator>
			<dc:creator>Panagiotis Politis</dc:creator>
			<dc:creator>Petros Roussos</dc:creator>
		<dc:identifier>doi: 10.3390/computers15030165</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-04</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-04</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>165</prism:startingPage>
		<prism:doi>10.3390/computers15030165</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/3/165</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/3/164">

	<title>Computers, Vol. 15, Pages 164: Machine Learning and Deep Learning for Dropout Prediction in Higher Education: A Review</title>
	<link>https://www.mdpi.com/2073-431X/15/3/164</link>
	<description>Student dropout in Higher Education remains a persistent challenge with significant academic, social and economic consequences. Predictive analytics using traditional Machine Learning and Deep Learning have been increasingly explored to support early identification of students at risk. This article presents a structured literature review of studies published between 2018 and 2025 that apply these techniques to predict dropout in Higher Education. Unlike previous reviews, we pay particular attention to model interpretability, practical deployment and ethical considerations when analysing data types, preprocessing strategies and modelling approaches. Results show that transparent traditional models, including Decision Trees, Logistic Regression, and ensemble methods such as Random Forest and Gradient Boosting remain dominant because they perform strongly on structured data and are easier to explain. Deep Learning approaches, although less prevalent, show promise for sequential and behavioural data but face challenges in data availability, explainability, and implementation complexity. Despite frequently high reported performance, most studies rely on single-institution datasets, limiting generalisability, and only a minority address fairness, bias, or real-world integration. This analysis concludes that we must transition from accuracy-focused evaluations to transparent, accountable and actionable predictive systems that facilitate data-driven and inclusive decision-making in Higher Education.</description>
	<pubDate>2026-03-04</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 164: Machine Learning and Deep Learning for Dropout Prediction in Higher Education: A Review</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/3/164">doi: 10.3390/computers15030164</a></p>
	<p>Authors:
		Beatriz Duro
		Anabela Gomes
		Fernanda Brito Correia
		Ana Rosa Borges
		Jorge Bernardino
		</p>
	<p>Student dropout in Higher Education remains a persistent challenge with significant academic, social and economic consequences. Predictive analytics using traditional Machine Learning and Deep Learning have been increasingly explored to support early identification of students at risk. This article presents a structured literature review of studies published between 2018 and 2025 that apply these techniques to predict dropout in Higher Education. Unlike previous reviews, we pay particular attention to model interpretability, practical deployment and ethical considerations when analysing data types, preprocessing strategies and modelling approaches. Results show that transparent traditional models, including Decision Trees, Logistic Regression, and ensemble methods such as Random Forest and Gradient Boosting remain dominant because they perform strongly on structured data and are easier to explain. Deep Learning approaches, although less prevalent, show promise for sequential and behavioural data but face challenges in data availability, explainability, and implementation complexity. Despite frequently high reported performance, most studies rely on single-institution datasets, limiting generalisability, and only a minority address fairness, bias, or real-world integration. This analysis concludes that we must transition from accuracy-focused evaluations to transparent, accountable and actionable predictive systems that facilitate data-driven and inclusive decision-making in Higher Education.</p>
	]]></content:encoded>

	<dc:title>Machine Learning and Deep Learning for Dropout Prediction in Higher Education: A Review</dc:title>
			<dc:creator>Beatriz Duro</dc:creator>
			<dc:creator>Anabela Gomes</dc:creator>
			<dc:creator>Fernanda Brito Correia</dc:creator>
			<dc:creator>Ana Rosa Borges</dc:creator>
			<dc:creator>Jorge Bernardino</dc:creator>
		<dc:identifier>doi: 10.3390/computers15030164</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-04</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-04</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>164</prism:startingPage>
		<prism:doi>10.3390/computers15030164</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/3/164</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/3/163">

	<title>Computers, Vol. 15, Pages 163: Digitalization of Railway Traffic Dispatching Systems: From Legacy Infrastructure to a Software-Centric Platform</title>
	<link>https://www.mdpi.com/2073-431X/15/3/163</link>
	<description>Digitalization of railway traffic dispatching systems is a key step in the modernization of railway telecommunication infrastructure. This paper presents a case study of the migration from legacy analog technology to a software-centric dispatching platform that integrates digital signal processing, optical fiber transmission, and Internet Protocol (IP)-based network architectures, as implemented in the Serbian railway system. The modernization is performed through an iterative, incremental process: existing analog dispatcher equipment and established operating procedures are preserved, while digital dispatching centers, trackside communication nodes, and radio-dispatching services are introduced gradually. This staged evolution enables high-capacity, noise-resilient communication and seamless interconnection between the old and the new subsystems without disrupting railway operations. The adoption of software-based control and integrated digital signal processing provides modular scalability, real-time system supervision, automated diagnostics, and improved maintainability. One of critical services within the new architecture, the Centralized Call Record- and Message-Archiving System (CCRMAS), provides a centralized platform that captures, secures, and retrieves operational railway communication in real time for monitoring, post-incident analysis, and regulatory compliance. The resulting architecture, deployed within Serbian Railways, establishes a scalable and resilient foundation for future automation, interoperability, and integration within intelligent railway traffic-management environments. Thus, the paper extracts a generalizable hybrid migration architecture model and transferable design principles, supported by deployment artifacts and illustrated through migration scenarios, that can be applied to the modernization of other legacy-intensive railway networks.</description>
	<pubDate>2026-03-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 163: Digitalization of Railway Traffic Dispatching Systems: From Legacy Infrastructure to a Software-Centric Platform</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/3/163">doi: 10.3390/computers15030163</a></p>
	<p>Authors:
		Ivan Kokić
		Jovana Vuleta-Radoičić
		Iva Salom
		Goran Dimić
		Bratislav Planić
		Sandra Velimirović
		Slavica Boštjančič Rakas
		</p>
	<p>Digitalization of railway traffic dispatching systems is a key step in the modernization of railway telecommunication infrastructure. This paper presents a case study of the migration from legacy analog technology to a software-centric dispatching platform that integrates digital signal processing, optical fiber transmission, and Internet Protocol (IP)-based network architectures, as implemented in the Serbian railway system. The modernization is performed through an iterative, incremental process: existing analog dispatcher equipment and established operating procedures are preserved, while digital dispatching centers, trackside communication nodes, and radio-dispatching services are introduced gradually. This staged evolution enables high-capacity, noise-resilient communication and seamless interconnection between the old and the new subsystems without disrupting railway operations. The adoption of software-based control and integrated digital signal processing provides modular scalability, real-time system supervision, automated diagnostics, and improved maintainability. One of critical services within the new architecture, the Centralized Call Record- and Message-Archiving System (CCRMAS), provides a centralized platform that captures, secures, and retrieves operational railway communication in real time for monitoring, post-incident analysis, and regulatory compliance. The resulting architecture, deployed within Serbian Railways, establishes a scalable and resilient foundation for future automation, interoperability, and integration within intelligent railway traffic-management environments. Thus, the paper extracts a generalizable hybrid migration architecture model and transferable design principles, supported by deployment artifacts and illustrated through migration scenarios, that can be applied to the modernization of other legacy-intensive railway networks.</p>
	]]></content:encoded>

	<dc:title>Digitalization of Railway Traffic Dispatching Systems: From Legacy Infrastructure to a Software-Centric Platform</dc:title>
			<dc:creator>Ivan Kokić</dc:creator>
			<dc:creator>Jovana Vuleta-Radoičić</dc:creator>
			<dc:creator>Iva Salom</dc:creator>
			<dc:creator>Goran Dimić</dc:creator>
			<dc:creator>Bratislav Planić</dc:creator>
			<dc:creator>Sandra Velimirović</dc:creator>
			<dc:creator>Slavica Boštjančič Rakas</dc:creator>
		<dc:identifier>doi: 10.3390/computers15030163</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-03</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-03</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>163</prism:startingPage>
		<prism:doi>10.3390/computers15030163</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/3/163</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/3/162">

	<title>Computers, Vol. 15, Pages 162: PIRE: Interoperable Platform for Electronic Records</title>
	<link>https://www.mdpi.com/2073-431X/15/3/162</link>
	<description>The interoperability of electronic health records in Colombia faces a critical gap between the regulatory mandates established by the Colombian regulatory framework and the actual technical capacity of healthcare institutions to implement them. This article presents PIRE (Electronic Records Interoperability Platform), an open-source architecture that demonstrates the viability of end-to-end FHIR systems in the Colombian context. The main objective was to develop a platform capable of integrating health data from biomedical devices into an FHIR server, preserving clinical semantics through LOINC terminologies. The methodology followed an iterative development approach, implementing a HAPI FHIR server on AWS, a normalization application in Flask, and clinical visualization modules aligned with the FHIR Core CO Implementation Guide. The Bioharness-3 device was used to capture metrics on heart rate, respiratory rate, activity, and posture. The platform achieved a data normalization latency of 104–438 ms per record and 100% semantic validation against the FHIR Core CO profiles, validating compliance with Colombian IHCE specifications. It is concluded that PIRE constitutes a reproducible reference model for healthcare institutions that wish to implement interoperability as a cost-effective solution.</description>
	<pubDate>2026-03-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 162: PIRE: Interoperable Platform for Electronic Records</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/3/162">doi: 10.3390/computers15030162</a></p>
	<p>Authors:
		Leonardo Juan Ramirez Lopez
		Norman Eduardo Jaimes Salazar
		Juan Esteban Barbosa Posada
		</p>
	<p>The interoperability of electronic health records in Colombia faces a critical gap between the regulatory mandates established by the Colombian regulatory framework and the actual technical capacity of healthcare institutions to implement them. This article presents PIRE (Electronic Records Interoperability Platform), an open-source architecture that demonstrates the viability of end-to-end FHIR systems in the Colombian context. The main objective was to develop a platform capable of integrating health data from biomedical devices into an FHIR server, preserving clinical semantics through LOINC terminologies. The methodology followed an iterative development approach, implementing a HAPI FHIR server on AWS, a normalization application in Flask, and clinical visualization modules aligned with the FHIR Core CO Implementation Guide. The Bioharness-3 device was used to capture metrics on heart rate, respiratory rate, activity, and posture. The platform achieved a data normalization latency of 104–438 ms per record and 100% semantic validation against the FHIR Core CO profiles, validating compliance with Colombian IHCE specifications. It is concluded that PIRE constitutes a reproducible reference model for healthcare institutions that wish to implement interoperability as a cost-effective solution.</p>
	]]></content:encoded>

	<dc:title>PIRE: Interoperable Platform for Electronic Records</dc:title>
			<dc:creator>Leonardo Juan Ramirez Lopez</dc:creator>
			<dc:creator>Norman Eduardo Jaimes Salazar</dc:creator>
			<dc:creator>Juan Esteban Barbosa Posada</dc:creator>
		<dc:identifier>doi: 10.3390/computers15030162</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-03</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-03</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>162</prism:startingPage>
		<prism:doi>10.3390/computers15030162</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/3/162</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/3/161">

	<title>Computers, Vol. 15, Pages 161: From Patient Emotion Recognition to Provider Understanding: A Multimodal Data Mining Framework for Emotion-Aware Clinical Counseling Systems</title>
	<link>https://www.mdpi.com/2073-431X/15/3/161</link>
	<description>Computational analysis of therapeutic communication presents challenges in multi-label classification, severe class imbalance, and heterogeneous multimodal data integration. We introduce a bidirectional analytical framework addressing patient emotion recognition and provider behavior analysis. For patient-side analysis, we employ ClinicalBERT on human-annotated CounselChat (1482 interactions, 25 categories, imbalance 60:1), achieving a macro-F1 of 0.74 through class weighting and threshold optimization, representing a six-fold improvement over naive baselines and 6–13 point improvement over modern imbalance methods. For provider-side analysis, we process 330 YouTube therapy sessions through automated pipelines (speaker diarization, automatic speech recognition, temporal segmentation), yielding 14,086 annotated segments. Our architecture combines DeBERTa-v3-base with WavLM-base-plus through cross-modal attention mechanisms adapted from multimodal Transformer frameworks. On controlled human-annotated HOPE data (178 sessions, 12,500 utterances), the model achieves a macro-F1 of 0.91 with Cohen’s kappa of 0.87, comparable to inter-rater reliability reported in psychotherapy process research. On YouTube data, a macro-F1 of 0.71 demonstrates feasibility while highlighting annotation quality impacts. Cross-dataset transfer and systematic attention analyses validate domain-specific effectiveness and interpretability.</description>
	<pubDate>2026-03-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 161: From Patient Emotion Recognition to Provider Understanding: A Multimodal Data Mining Framework for Emotion-Aware Clinical Counseling Systems</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/3/161">doi: 10.3390/computers15030161</a></p>
	<p>Authors:
		Saahithi Mallarapu
		Xinyan Liu
		Pegah Zargarian
		Seyyedeh Fatemeh Mottaghian
		Ramyashree Suresha
		Vasudha Jain
		Akram Bayat
		</p>
	<p>Computational analysis of therapeutic communication presents challenges in multi-label classification, severe class imbalance, and heterogeneous multimodal data integration. We introduce a bidirectional analytical framework addressing patient emotion recognition and provider behavior analysis. For patient-side analysis, we employ ClinicalBERT on human-annotated CounselChat (1482 interactions, 25 categories, imbalance 60:1), achieving a macro-F1 of 0.74 through class weighting and threshold optimization, representing a six-fold improvement over naive baselines and 6–13 point improvement over modern imbalance methods. For provider-side analysis, we process 330 YouTube therapy sessions through automated pipelines (speaker diarization, automatic speech recognition, temporal segmentation), yielding 14,086 annotated segments. Our architecture combines DeBERTa-v3-base with WavLM-base-plus through cross-modal attention mechanisms adapted from multimodal Transformer frameworks. On controlled human-annotated HOPE data (178 sessions, 12,500 utterances), the model achieves a macro-F1 of 0.91 with Cohen’s kappa of 0.87, comparable to inter-rater reliability reported in psychotherapy process research. On YouTube data, a macro-F1 of 0.71 demonstrates feasibility while highlighting annotation quality impacts. Cross-dataset transfer and systematic attention analyses validate domain-specific effectiveness and interpretability.</p>
	]]></content:encoded>

	<dc:title>From Patient Emotion Recognition to Provider Understanding: A Multimodal Data Mining Framework for Emotion-Aware Clinical Counseling Systems</dc:title>
			<dc:creator>Saahithi Mallarapu</dc:creator>
			<dc:creator>Xinyan Liu</dc:creator>
			<dc:creator>Pegah Zargarian</dc:creator>
			<dc:creator>Seyyedeh Fatemeh Mottaghian</dc:creator>
			<dc:creator>Ramyashree Suresha</dc:creator>
			<dc:creator>Vasudha Jain</dc:creator>
			<dc:creator>Akram Bayat</dc:creator>
		<dc:identifier>doi: 10.3390/computers15030161</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-03</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-03</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>161</prism:startingPage>
		<prism:doi>10.3390/computers15030161</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/3/161</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/3/160">

	<title>Computers, Vol. 15, Pages 160: Analysis of the Consensual Pupillary Reflex Using Blue LED Step Light and Automated Image Segmentation</title>
	<link>https://www.mdpi.com/2073-431X/15/3/160</link>
	<description>This study evaluates the dynamics of the human pupillary reflex in response to a stepped blue light stimulus (465 nm) in young adults residing at high altitude (3400 m above sea level). High-resolution video sequences of three participants were analyzed using four classical image segmentation techniques: K-Means, Otsu, fixed binary threshold, and multi-channel RGB threshold. Rather than proposing new algorithms, this work evaluates the technical feasibility and stability of computationally lightweight segmentation approaches under controlled lighting conditions and with low-cost hardware constraints. Among the methods evaluated, fixed binary thresholding showed stable temporal behavior and minimal computational complexity within the experimental setup. The results show a consistent contraction–plateau–recovery pattern across all participants, with representative contraction, stabilization, and recovery times of 1.89 s, 0.41 s, and 2.33 s, respectively. Although limited by the small sample size, these findings support the feasibility of implementing simplified segmentation strategies for pupillometry in resource-limited settings.</description>
	<pubDate>2026-03-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 160: Analysis of the Consensual Pupillary Reflex Using Blue LED Step Light and Automated Image Segmentation</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/3/160">doi: 10.3390/computers15030160</a></p>
	<p>Authors:
		Edyson R. Torres-Centeno
		Erwin J. Sacoto-Cabrera
		Roger Jesus Coaquira-Castillo
		L. Walter Utrilla Mego
		Miguel A. Castillo-Guevara
		Yesenia Concha-Ramos
		Edison Moreno-Cardenas
		</p>
	<p>This study evaluates the dynamics of the human pupillary reflex in response to a stepped blue light stimulus (465 nm) in young adults residing at high altitude (3400 m above sea level). High-resolution video sequences of three participants were analyzed using four classical image segmentation techniques: K-Means, Otsu, fixed binary threshold, and multi-channel RGB threshold. Rather than proposing new algorithms, this work evaluates the technical feasibility and stability of computationally lightweight segmentation approaches under controlled lighting conditions and with low-cost hardware constraints. Among the methods evaluated, fixed binary thresholding showed stable temporal behavior and minimal computational complexity within the experimental setup. The results show a consistent contraction–plateau–recovery pattern across all participants, with representative contraction, stabilization, and recovery times of 1.89 s, 0.41 s, and 2.33 s, respectively. Although limited by the small sample size, these findings support the feasibility of implementing simplified segmentation strategies for pupillometry in resource-limited settings.</p>
	]]></content:encoded>

	<dc:title>Analysis of the Consensual Pupillary Reflex Using Blue LED Step Light and Automated Image Segmentation</dc:title>
			<dc:creator>Edyson R. Torres-Centeno</dc:creator>
			<dc:creator>Erwin J. Sacoto-Cabrera</dc:creator>
			<dc:creator>Roger Jesus Coaquira-Castillo</dc:creator>
			<dc:creator>L. Walter Utrilla Mego</dc:creator>
			<dc:creator>Miguel A. Castillo-Guevara</dc:creator>
			<dc:creator>Yesenia Concha-Ramos</dc:creator>
			<dc:creator>Edison Moreno-Cardenas</dc:creator>
		<dc:identifier>doi: 10.3390/computers15030160</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-03</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-03</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>160</prism:startingPage>
		<prism:doi>10.3390/computers15030160</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/3/160</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/3/159">

	<title>Computers, Vol. 15, Pages 159: A Resource-Efficient Approach to Fine-Tuning a BERT-Base Model for Sentiment Analysis</title>
	<link>https://www.mdpi.com/2073-431X/15/3/159</link>
	<description>Fine-tuning a BERT-Base model for specific tasks, such as sentiment analysis, has become resource-intensive and often requires high computational power and memory. This paper introduces SCALE, a novel resource-efficient fine-tuning method that targets the most critical transformer layers, which reduces computational costs without sacrificing performance. By dynamically profiling transformer layers via activation magnitudes and attention entropy, SCALE selects and adapts only the most influential layers with lightweight adapter modules. The proposed method outperforms traditional fine-tuning techniques, achieving a 2.3% improvement in accuracy on the IMDB dataset and reducing training time by 56.3% compared to full-model fine-tuning. Experiments across various sentiment analysis benchmarks demonstrate SCALE’s effectiveness in optimizing fine-tuning for the BERT-base model in resource-constrained environments, achieving up to 99% of the performance of full-model fine-tuning while using only 40% of the parameters. The empirical validation in this study is restricted to binary and multi-class sentiment classification. The evaluation specifically reflects effectiveness in sentiment analysis text classification tasks.</description>
	<pubDate>2026-03-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 159: A Resource-Efficient Approach to Fine-Tuning a BERT-Base Model for Sentiment Analysis</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/3/159">doi: 10.3390/computers15030159</a></p>
	<p>Authors:
		Abdullah M. Basahel
		Shreyanth H. Giriyappa
		Furqan Alam
		Tahani Saleh Mohammed Alnazzawi
		Saqib Qamar
		Adnan Ahmed Abi Sen
		</p>
	<p>Fine-tuning a BERT-Base model for specific tasks, such as sentiment analysis, has become resource-intensive and often requires high computational power and memory. This paper introduces SCALE, a novel resource-efficient fine-tuning method that targets the most critical transformer layers, which reduces computational costs without sacrificing performance. By dynamically profiling transformer layers via activation magnitudes and attention entropy, SCALE selects and adapts only the most influential layers with lightweight adapter modules. The proposed method outperforms traditional fine-tuning techniques, achieving a 2.3% improvement in accuracy on the IMDB dataset and reducing training time by 56.3% compared to full-model fine-tuning. Experiments across various sentiment analysis benchmarks demonstrate SCALE’s effectiveness in optimizing fine-tuning for the BERT-base model in resource-constrained environments, achieving up to 99% of the performance of full-model fine-tuning while using only 40% of the parameters. The empirical validation in this study is restricted to binary and multi-class sentiment classification. The evaluation specifically reflects effectiveness in sentiment analysis text classification tasks.</p>
	]]></content:encoded>

	<dc:title>A Resource-Efficient Approach to Fine-Tuning a BERT-Base Model for Sentiment Analysis</dc:title>
			<dc:creator>Abdullah M. Basahel</dc:creator>
			<dc:creator>Shreyanth H. Giriyappa</dc:creator>
			<dc:creator>Furqan Alam</dc:creator>
			<dc:creator>Tahani Saleh Mohammed Alnazzawi</dc:creator>
			<dc:creator>Saqib Qamar</dc:creator>
			<dc:creator>Adnan Ahmed Abi Sen</dc:creator>
		<dc:identifier>doi: 10.3390/computers15030159</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-03</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-03</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>159</prism:startingPage>
		<prism:doi>10.3390/computers15030159</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/3/159</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/3/158">

	<title>Computers, Vol. 15, Pages 158: Staying Young at the Edge: A Software Aging Perspective for Foundation Models as a Service</title>
	<link>https://www.mdpi.com/2073-431X/15/3/158</link>
	<description>Nowadays, the emergence of Foundation Models as a Service enables mobile users to access powerful capabilities such as inference and fine-tuning on demand and without incurring local computational overhead. This paper introduces a software-aware offloading framework for FMaaS that allows edge nodes to forecast software aging and prevent service degradation. Each node employs a lightweight Echo State Network to predict its software age, with tasks dynamically assigned based on communication cost, inference delay, and forecast reliability. Simulation results including ablation studies confirm the effectiveness of software age forecasting in reducing task failures and improving session continuity.</description>
	<pubDate>2026-03-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 158: Staying Young at the Edge: A Software Aging Perspective for Foundation Models as a Service</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/3/158">doi: 10.3390/computers15030158</a></p>
	<p>Authors:
		Benedetta Picano
		Romano Fantacci
		</p>
	<p>Nowadays, the emergence of Foundation Models as a Service enables mobile users to access powerful capabilities such as inference and fine-tuning on demand and without incurring local computational overhead. This paper introduces a software-aware offloading framework for FMaaS that allows edge nodes to forecast software aging and prevent service degradation. Each node employs a lightweight Echo State Network to predict its software age, with tasks dynamically assigned based on communication cost, inference delay, and forecast reliability. Simulation results including ablation studies confirm the effectiveness of software age forecasting in reducing task failures and improving session continuity.</p>
	]]></content:encoded>

	<dc:title>Staying Young at the Edge: A Software Aging Perspective for Foundation Models as a Service</dc:title>
			<dc:creator>Benedetta Picano</dc:creator>
			<dc:creator>Romano Fantacci</dc:creator>
		<dc:identifier>doi: 10.3390/computers15030158</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-03</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-03</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>158</prism:startingPage>
		<prism:doi>10.3390/computers15030158</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/3/158</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/3/157">

	<title>Computers, Vol. 15, Pages 157: Adaptive K-Fold Siamese Neural Network Classifier for Automatic Seatbelt Monitoring</title>
	<link>https://www.mdpi.com/2073-431X/15/3/157</link>
	<description>A seatbelt is an essential aspect of safety in road traffic accidents. Although most traffic regulations enforce drivers and passengers to wear and fasten the seatbelt manually, AI-based techniques have been introduced for monitoring to improve safety standards. In this study, a new approach is proposed to address the monitoring problem of seatbelts. Deep learning (DL) classification based on adaptive Siamese Neural Network (SNN) has been developed utilizing the K-fold method for feature verification. The proposed adaptive K-Fold-based SNN approach utilizes a binary seatbelt dataset, with positive and negative classes, to verify the status of the seatbelt. The network involves sharing a convolutional feature extractor, followed by a distinct-based similarity function. To enhance model reliability, 5-fold cross validation is applied (k = 5), splitting the dataset into 5 subsets, where the model is trained on four sets and validated on the fifth one. The model was trained using binary cross entropy loss, Adam optimization, and performance metrics such as accuracy, precision, recall, and F1 score. The seatbelt dataset is basically designed for object detection models. In this work, we used a dataset in the verification model and achieved high-performance metrics. The model is implemented using a Python-based Jupyter Notebook 7.5.1. It achieved a high performance in seatbelt verification with an average Accuracy = 0.9989, average Precision = 0.9988, average Recall = 0.9990, and average F1 Score = 0.9989. The proposed adaptive K-Fold SNN model can ensure reliability and reduce the risk of over fitting.</description>
	<pubDate>2026-03-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 157: Adaptive K-Fold Siamese Neural Network Classifier for Automatic Seatbelt Monitoring</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/3/157">doi: 10.3390/computers15030157</a></p>
	<p>Authors:
		Ahmed M. Hasan
		Farah F. Alkhalid
		Safanah M. Rafaat
		Amjad J. Humaidi
		</p>
	<p>A seatbelt is an essential aspect of safety in road traffic accidents. Although most traffic regulations enforce drivers and passengers to wear and fasten the seatbelt manually, AI-based techniques have been introduced for monitoring to improve safety standards. In this study, a new approach is proposed to address the monitoring problem of seatbelts. Deep learning (DL) classification based on adaptive Siamese Neural Network (SNN) has been developed utilizing the K-fold method for feature verification. The proposed adaptive K-Fold-based SNN approach utilizes a binary seatbelt dataset, with positive and negative classes, to verify the status of the seatbelt. The network involves sharing a convolutional feature extractor, followed by a distinct-based similarity function. To enhance model reliability, 5-fold cross validation is applied (k = 5), splitting the dataset into 5 subsets, where the model is trained on four sets and validated on the fifth one. The model was trained using binary cross entropy loss, Adam optimization, and performance metrics such as accuracy, precision, recall, and F1 score. The seatbelt dataset is basically designed for object detection models. In this work, we used a dataset in the verification model and achieved high-performance metrics. The model is implemented using a Python-based Jupyter Notebook 7.5.1. It achieved a high performance in seatbelt verification with an average Accuracy = 0.9989, average Precision = 0.9988, average Recall = 0.9990, and average F1 Score = 0.9989. The proposed adaptive K-Fold SNN model can ensure reliability and reduce the risk of over fitting.</p>
	]]></content:encoded>

	<dc:title>Adaptive K-Fold Siamese Neural Network Classifier for Automatic Seatbelt Monitoring</dc:title>
			<dc:creator>Ahmed M. Hasan</dc:creator>
			<dc:creator>Farah F. Alkhalid</dc:creator>
			<dc:creator>Safanah M. Rafaat</dc:creator>
			<dc:creator>Amjad J. Humaidi</dc:creator>
		<dc:identifier>doi: 10.3390/computers15030157</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-03</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-03</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>157</prism:startingPage>
		<prism:doi>10.3390/computers15030157</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/3/157</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/3/156">

	<title>Computers, Vol. 15, Pages 156: A Leakage-Aware Multimodal Machine Learning Framework for Nutrition Supply&amp;ndash;Demand Forecasting Using Temporal and Spatial Data Fusion</title>
	<link>https://www.mdpi.com/2073-431X/15/3/156</link>
	<description>Accurate forecasting of nutrition supply&amp;ndash;demand dynamics is essential for reducing resource wastage and improving equitable allocation. However, this task remains challenging due to heterogeneous data sources, cold-start regions, and the risk of information leakage in spatiotemporal modeling. This study presents a leakage-aware multimodal machine learning framework for nutrition supply&amp;ndash;demand forecasting. The framework integrates temporal, spatial, and contextual information within a unified architecture. It combines self-supervised temporal representation learning, causal time-lag modeling, and few-shot adaptation to improve generalization under limited or previously unseen data conditions. Heterogeneous inputs include epidemiological, environmental, demographic, sentiment, and biologically derived indicators. These signals are encoded using a PatchTST-inspired temporal backbone coupled with a feature-token transformer employing cross-modal attention. Spatial dependencies are explicitly modeled using graph neural networks. Hierarchical decoding enables multi-horizon forecasting with calibrated uncertainty estimates. Model evaluation is conducted under strict spatiotemporal hold-out protocols with explicit leakage detection. All synthetic signals are excluded from testing. Across geographically and temporally disjoint datasets, the proposed framework consistently outperforms strong unimodal and multimodal baselines. It achieves macro-F1 scores above 99.5% and stable early-warning lead times of approximately 9 days under distribution shift. Ablation studies indicate that causal time-lag enforcement and few-shot adaptation contribute most strongly to performance robustness. Closed-loop simulation experiments suggest potential reductions in nutrient wastage of approximately 38%, response latency of 19%, and operational costs of 16% when deployed as a decision-support tool. 
External validation on fully unseen regions confirms the generalizability of the framework under realistic forecasting constraints.</description>
	<pubDate>2026-03-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 156: A Leakage-Aware Multimodal Machine Learning Framework for Nutrition Supply&ndash;Demand Forecasting Using Temporal and Spatial Data Fusion</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/3/156">doi: 10.3390/computers15030156</a></p>
	<p>Authors:
		Abdullah
		Muhammad Ateeb Ather
		Jose Luis Oropeza Rodriguez
		Carlos Guzmán Sánchez-Mejorada
		Miguel Jesús Torres Ruiz
		Rolando Quintero Tellez
		</p>
	<p>Accurate forecasting of nutrition supply&ndash;demand dynamics is essential for reducing resource wastage and improving equitable allocation. However, this task remains challenging due to heterogeneous data sources, cold-start regions, and the risk of information leakage in spatiotemporal modeling. This study presents a leakage-aware multimodal machine learning framework for nutrition supply&ndash;demand forecasting. The framework integrates temporal, spatial, and contextual information within a unified architecture. It combines self-supervised temporal representation learning, causal time-lag modeling, and few-shot adaptation to improve generalization under limited or previously unseen data conditions. Heterogeneous inputs include epidemiological, environmental, demographic, sentiment, and biologically derived indicators. These signals are encoded using a PatchTST-inspired temporal backbone coupled with a feature-token transformer employing cross-modal attention. Spatial dependencies are explicitly modeled using graph neural networks. Hierarchical decoding enables multi-horizon forecasting with calibrated uncertainty estimates. Model evaluation is conducted under strict spatiotemporal hold-out protocols with explicit leakage detection. All synthetic signals are excluded from testing. Across geographically and temporally disjoint datasets, the proposed framework consistently outperforms strong unimodal and multimodal baselines. It achieves macro-F1 scores above 99.5% and stable early-warning lead times of approximately 9 days under distribution shift. Ablation studies indicate that causal time-lag enforcement and few-shot adaptation contribute most strongly to performance robustness. Closed-loop simulation experiments suggest potential reductions in nutrient wastage of approximately 38%, response latency of 19%, and operational costs of 16% when deployed as a decision-support tool. 
External validation on fully unseen regions confirms the generalizability of the framework under realistic forecasting constraints.</p>
	]]></content:encoded>

	<dc:title>A Leakage-Aware Multimodal Machine Learning Framework for Nutrition Supply&amp;ndash;Demand Forecasting Using Temporal and Spatial Data Fusion</dc:title>
			<dc:creator>Abdullah</dc:creator>
			<dc:creator>Muhammad Ateeb Ather</dc:creator>
			<dc:creator>Jose Luis Oropeza Rodriguez</dc:creator>
			<dc:creator>Carlos Guzmán Sánchez-Mejorada</dc:creator>
			<dc:creator>Miguel Jesús Torres Ruiz</dc:creator>
			<dc:creator>Rolando Quintero Tellez</dc:creator>
		<dc:identifier>doi: 10.3390/computers15030156</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-02</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-02</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>156</prism:startingPage>
		<prism:doi>10.3390/computers15030156</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/3/156</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/3/155">

	<title>Computers, Vol. 15, Pages 155: Federated Learning: A Survey of Core Challenges, Current Methods, and Opportunities</title>
	<link>https://www.mdpi.com/2073-431X/15/3/155</link>
	<description>Federated learning (FL) has emerged as a transformative distributed learning paradigm that enables collaborative model training without sharing raw data, thereby preserving privacy across large, diverse, and geographically dispersed clients. Despite its rapid adoption in mobile networks, Internet of Things (IoT) systems, healthcare, finance, and edge intelligence, FL continues to face several persistent and interdependent challenges that hinder its scalability, efficiency, and real-world deployment. In this survey, we present a systematic examination of six core challenges in federated learning: heterogeneity, computation overhead, communication bottlenecks, client selection, aggregation and optimization, and privacy preservation. We analyze how these challenges manifest across the full FL pipeline, from local training and client participation to global model aggregation and distribution, and examine their impact on model performance, convergence behavior, fairness, and system reliability. Furthermore, we synthesize representative state-of-the-art approaches proposed to address each challenge and discuss their underlying assumptions, trade-offs, and limitations in practical deployments. Finally, we identify open research problems and outline promising directions for developing more robust, scalable, and efficient federated learning systems. This survey aims to serve as a comprehensive reference for researchers and practitioners seeking a unified understanding of the fundamental challenges shaping modern federated learning.</description>
	<pubDate>2026-03-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 155: Federated Learning: A Survey of Core Challenges, Current Methods, and Opportunities</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/3/155">doi: 10.3390/computers15030155</a></p>
	<p>Authors:
		Madan Baduwal
		Priyanka Paudel
		Vini Chaudhary
		</p>
	<p>Federated learning (FL) has emerged as a transformative distributed learning paradigm that enables collaborative model training without sharing raw data, thereby preserving privacy across large, diverse, and geographically dispersed clients. Despite its rapid adoption in mobile networks, Internet of Things (IoT) systems, healthcare, finance, and edge intelligence, FL continues to face several persistent and interdependent challenges that hinder its scalability, efficiency, and real-world deployment. In this survey, we present a systematic examination of six core challenges in federated learning: heterogeneity, computation overhead, communication bottlenecks, client selection, aggregation and optimization, and privacy preservation. We analyze how these challenges manifest across the full FL pipeline, from local training and client participation to global model aggregation and distribution, and examine their impact on model performance, convergence behavior, fairness, and system reliability. Furthermore, we synthesize representative state-of-the-art approaches proposed to address each challenge and discuss their underlying assumptions, trade-offs, and limitations in practical deployments. Finally, we identify open research problems and outline promising directions for developing more robust, scalable, and efficient federated learning systems. This survey aims to serve as a comprehensive reference for researchers and practitioners seeking a unified understanding of the fundamental challenges shaping modern federated learning.</p>
	]]></content:encoded>

	<dc:title>Federated Learning: A Survey of Core Challenges, Current Methods, and Opportunities</dc:title>
			<dc:creator>Madan Baduwal</dc:creator>
			<dc:creator>Priyanka Paudel</dc:creator>
			<dc:creator>Vini Chaudhary</dc:creator>
		<dc:identifier>doi: 10.3390/computers15030155</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-02</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-02</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>155</prism:startingPage>
		<prism:doi>10.3390/computers15030155</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/3/155</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/3/154">

	<title>Computers, Vol. 15, Pages 154: Human&amp;ndash;AI Collaboration in Programming Education: Student Perspectives on LLM-Based Coding Assistants</title>
	<link>https://www.mdpi.com/2073-431X/15/3/154</link>
	<description>The integration of large language models (LLMs) such as GitHub Copilot, ChatGPT, and DeepSeek into programming education has introduced a new form of human&amp;ndash;AI collaboration. These tools provide real-time code suggestions, debugging assistance, and design support, yet their effects on learning, trust, productivity, and coding practices remain underexplored. We surveyed 248 students to examine relationships among these constructs, usage patterns by programming experience and academic level, the most frequently used assistants and programming languages, group differences in perceived learning and coding practices, and the extent to which learning, trust, and coding practices predict productivity. Students reported high adoption of ChatGPT and Python, generally positive perceptions of learning and productivity, and significant positive correlations among all constructs. Kruskal&amp;ndash;Wallis tests indicated no significant differences in perceived learning across Basic, Intermediate, and Expert programmers, nor in coding practices across academic years (Years 1&amp;ndash;4). Multiple regression showed that learning, trust, and coding practices jointly explained a substantial proportion of productivity variance (R2 = 0.628). These findings emphasize both opportunities and risks of AI integration and offer guidance for educators aiming to integrate AI tools while maintaining pedagogical rigor.</description>
	<pubDate>2026-03-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 154: Human&ndash;AI Collaboration in Programming Education: Student Perspectives on LLM-Based Coding Assistants</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/3/154">doi: 10.3390/computers15030154</a></p>
	<p>Authors:
		Hebah Alquran
		Shadi Banitaan
		</p>
	<p>The integration of large language models (LLMs) such as GitHub Copilot, ChatGPT, and DeepSeek into programming education has introduced a new form of human&ndash;AI collaboration. These tools provide real-time code suggestions, debugging assistance, and design support, yet their effects on learning, trust, productivity, and coding practices remain underexplored. We surveyed 248 students to examine relationships among these constructs, usage patterns by programming experience and academic level, the most frequently used assistants and programming languages, group differences in perceived learning and coding practices, and the extent to which learning, trust, and coding practices predict productivity. Students reported high adoption of ChatGPT and Python, generally positive perceptions of learning and productivity, and significant positive correlations among all constructs. Kruskal&ndash;Wallis tests indicated no significant differences in perceived learning across Basic, Intermediate, and Expert programmers, nor in coding practices across academic years (Years 1&ndash;4). Multiple regression showed that learning, trust, and coding practices jointly explained a substantial proportion of productivity variance (R2 = 0.628). These findings emphasize both opportunities and risks of AI integration and offer guidance for educators aiming to integrate AI tools while maintaining pedagogical rigor.</p>
	]]></content:encoded>

	<dc:title>Human&amp;ndash;AI Collaboration in Programming Education: Student Perspectives on LLM-Based Coding Assistants</dc:title>
			<dc:creator>Hebah Alquran</dc:creator>
			<dc:creator>Shadi Banitaan</dc:creator>
		<dc:identifier>doi: 10.3390/computers15030154</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-02</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-02</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>154</prism:startingPage>
		<prism:doi>10.3390/computers15030154</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/3/154</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/3/153">

	<title>Computers, Vol. 15, Pages 153: A DSS Methodology for Emergency Management: Preliminary Application to the Municipality of Amatrice (Italy)</title>
	<link>https://www.mdpi.com/2073-431X/15/3/153</link>
	<description>The increasing exposure of dispersed rural settlements to natural and infrastructural risks highlights the need for structured and reproducible territorial information layers capable of supporting future decision-making processes. To this end, a rigorous characterization of settlement nodes and their structural attributes is essential. This article represents a first exploratory application of the proposed methodology and constitutes an initial phase of its implementation. The objective is not to provide a definitive or exhaustive model, but rather to test the underlying theoretical framework through a preliminary experimentation aimed at verifying its internal coherence, replicability, and operational potential. In this initial stage, the methodology is applied to demonstrate concretely what types of information can be systematically collected and how an urban center can be characterized in terms of accessibility and its role within the broader territorial system. The methodology is applied to the municipality of Amatrice as a case study representative of highly fragmented inner-area settlements. This first implementation highlights the potential of the approach, allows for the identification of possible methodological criticalities, and lays the groundwork for more advanced and structured future developments. The contribution therefore constitutes a foundational analytical layer aimed at organizing territorial information in a structured form and providing a coherent basis for future analyses and territorial and emergency management strategies.</description>
	<pubDate>2026-03-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 153: A DSS Methodology for Emergency Management: Preliminary Application to the Municipality of Amatrice (Italy)</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/3/153">doi: 10.3390/computers15030153</a></p>
	<p>Authors:
		Cristina Montaldi
		Annamaria Felli
		Vanessa Tomei
		Francesco Zullo
		</p>
	<p>The increasing exposure of dispersed rural settlements to natural and infrastructural risks highlights the need for structured and reproducible territorial information layers capable of supporting future decision-making processes. To this end, a rigorous characterization of settlement nodes and their structural attributes is essential. This article represents a first exploratory application of the proposed methodology and constitutes an initial phase of its implementation. The objective is not to provide a definitive or exhaustive model, but rather to test the underlying theoretical framework through a preliminary experimentation aimed at verifying its internal coherence, replicability, and operational potential. In this initial stage, the methodology is applied to demonstrate concretely what types of information can be systematically collected and how an urban center can be characterized in terms of accessibility and its role within the broader territorial system. The methodology is applied to the municipality of Amatrice as a case study representative of highly fragmented inner-area settlements. This first implementation highlights the potential of the approach, allows for the identification of possible methodological criticalities, and lays the groundwork for more advanced and structured future developments. The contribution therefore constitutes a foundational analytical layer aimed at organizing territorial information in a structured form and providing a coherent basis for future analyses and territorial and emergency management strategies.</p>
	]]></content:encoded>

	<dc:title>A DSS Methodology for Emergency Management: Preliminary Application to the Municipality of Amatrice (Italy)</dc:title>
			<dc:creator>Cristina Montaldi</dc:creator>
			<dc:creator>Annamaria Felli</dc:creator>
			<dc:creator>Vanessa Tomei</dc:creator>
			<dc:creator>Francesco Zullo</dc:creator>
		<dc:identifier>doi: 10.3390/computers15030153</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-02</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-02</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>153</prism:startingPage>
		<prism:doi>10.3390/computers15030153</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/3/153</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/3/152">

	<title>Computers, Vol. 15, Pages 152: Modeling the Nutrition&amp;ndash;Academic Intention Gap: A Data-Driven Adaptive Gamified Architecture</title>
	<link>https://www.mdpi.com/2073-431X/15/3/152</link>
	<description>The integration of Internet of Things (IoT) and mobile computing in education offers new avenues to address complex health behaviors that affect cognitive performance. While traditional health education relies on passive information delivery, emerging research suggests that interactive systems can bridge the gap between intent and action. This study addresses the &amp;ldquo;double burden of malnutrition&amp;rdquo; in Ecuadorian schoolchildren (N = 120) as a Human-Computer Interaction (HCI) challenge. By utilizing a quantitative profiling approach rooted in the Social Dimensions of Health framework, we modeled the user requirements for a proposed intervention system. The findings identified a critical &amp;ldquo;Action Gap&amp;rdquo;: while 78.3% of users possess the motivation to improve habits for academic gain, 53.3% remain entrenched in high-sugar consumption patterns due to environmental latency. Statistical profiling reveals a significant dissonance (p&amp;lt;0.05) between cognitive intent and behavioral execution. Consequently, this paper presents the &amp;ldquo;Digital Bridge Architecture,&amp;rdquo; a computational framework that leverages these motivation metrics to design an Alternate Reality Game (ARG) logic. We conclude that conventional static applications may be limited in their capacity to support sustained behavioral change in this context. The proposed framework suggests that context-aware, gamified feedback mechanisms can offer a promising direction for aligning academic motivation with healthier behavioral outcomes.</description>
	<pubDate>2026-03-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 152: Modeling the Nutrition&ndash;Academic Intention Gap: A Data-Driven Adaptive Gamified Architecture</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/3/152">doi: 10.3390/computers15030152</a></p>
	<p>Authors:
		Nadia Pesantez-Jara
		Nicolás Márquez
		Cristian Vidal-Silva
		</p>
	<p>The integration of Internet of Things (IoT) and mobile computing in education offers new avenues to address complex health behaviors that affect cognitive performance. While traditional health education relies on passive information delivery, emerging research suggests that interactive systems can bridge the gap between intent and action. This study addresses the &ldquo;double burden of malnutrition&rdquo; in Ecuadorian schoolchildren (N = 120) as a Human-Computer Interaction (HCI) challenge. By utilizing a quantitative profiling approach rooted in the Social Dimensions of Health framework, we modeled the user requirements for a proposed intervention system. The findings identified a critical &ldquo;Action Gap&rdquo;: while 78.3% of users possess the motivation to improve habits for academic gain, 53.3% remain entrenched in high-sugar consumption patterns due to environmental latency. Statistical profiling reveals a significant dissonance (p&lt;0.05) between cognitive intent and behavioral execution. Consequently, this paper presents the &ldquo;Digital Bridge Architecture,&rdquo; a computational framework that leverages these motivation metrics to design an Alternate Reality Game (ARG) logic. We conclude that conventional static applications may be limited in their capacity to support sustained behavioral change in this context. The proposed framework suggests that context-aware, gamified feedback mechanisms can offer a promising direction for aligning academic motivation with healthier behavioral outcomes.</p>
	]]></content:encoded>

	<dc:title>Modeling the Nutrition&amp;ndash;Academic Intention Gap: A Data-Driven Adaptive Gamified Architecture</dc:title>
			<dc:creator>Nadia Pesantez-Jara</dc:creator>
			<dc:creator>Nicolás Márquez</dc:creator>
			<dc:creator>Cristian Vidal-Silva</dc:creator>
		<dc:identifier>doi: 10.3390/computers15030152</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-01</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-01</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>152</prism:startingPage>
		<prism:doi>10.3390/computers15030152</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/3/152</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/3/151">

	<title>Computers, Vol. 15, Pages 151: A Hybrid Ensemble Framework for Rare Event Detection in Large-Scale Tabular Data</title>
	<link>https://www.mdpi.com/2073-431X/15/3/151</link>
	<description>Rare event detection in large tabular data remains a computationally challenging problem due to class imbalance, heterogeneous feature distributions, and unstable thresholds. Traditional machine learning approaches based on individual models and fixed thresholds often exhibit limited robustness and reproducibility in such settings. This paper proposes a hybrid ensemble framework for rare event detection that integrates heterogeneous machine learning models through threshold-aware probabilistic aggregation. The framework combines gradient-boosted decision trees, regularized linear models, and neural networks, leveraging their complementary inductive biases. To ensure reproducibility and robust performance evaluation under severe class imbalance, a leaky-controlled evaluation protocol is employed, including rootwise summation, probability calibration, and validation-based threshold optimization. The proposed approach is evaluated on a large tabular dataset containing approximately 50,000 observations. Experimental results demonstrate improved rare event detection and robust generalization performance compared to individual baseline models. Explainability is achieved through Shapley Additive Explanations (SHAP)-based attribution analysis and clustering in the explanation space, enabling transparent analysis of ensemble decision-making behavior. The proposed framework represents a general-purpose computational solution for rare event detection and can be applied to a wide range of data-driven decision-making and anomaly detection problems.</description>
	<pubDate>2026-03-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 151: A Hybrid Ensemble Framework for Rare Event Detection in Large-Scale Tabular Data</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/3/151">doi: 10.3390/computers15030151</a></p>
	<p>Authors:
		Natalya Maxutova
		Akmaral Kassymova
		Kuanysh Kadirkulov
		Aisulu Ismailova
		Gulkiz Zhidekulova
		Zhanar Azhibekova
		Jamalbek Tussupov
		Quvvatali Rakhimov
		Zhanat Kenzhebayeva
		</p>
	<p>Rare event detection in large tabular data remains a computationally challenging problem due to class imbalance, heterogeneous feature distributions, and unstable thresholds. Traditional machine learning approaches based on individual models and fixed thresholds often exhibit limited robustness and reproducibility in such settings. This paper proposes a hybrid ensemble framework for rare event detection that integrates heterogeneous machine learning models through threshold-aware probabilistic aggregation. The framework combines gradient-boosted decision trees, regularized linear models, and neural networks, leveraging their complementary inductive biases. To ensure reproducibility and robust performance evaluation under severe class imbalance, a leaky-controlled evaluation protocol is employed, including rootwise summation, probability calibration, and validation-based threshold optimization. The proposed approach is evaluated on a large tabular dataset containing approximately 50,000 observations. Experimental results demonstrate improved rare event detection and robust generalization performance compared to individual baseline models. Explainability is achieved through Shapley Additive Explanations (SHAP)-based attribution analysis and clustering in the explanation space, enabling transparent analysis of ensemble decision-making behavior. The proposed framework represents a general-purpose computational solution for rare event detection and can be applied to a wide range of data-driven decision-making and anomaly detection problems.</p>
	]]></content:encoded>

	<dc:title>A Hybrid Ensemble Framework for Rare Event Detection in Large-Scale Tabular Data</dc:title>
			<dc:creator>Natalya Maxutova</dc:creator>
			<dc:creator>Akmaral Kassymova</dc:creator>
			<dc:creator>Kuanysh Kadirkulov</dc:creator>
			<dc:creator>Aisulu Ismailova</dc:creator>
			<dc:creator>Gulkiz Zhidekulova</dc:creator>
			<dc:creator>Zhanar Azhibekova</dc:creator>
			<dc:creator>Jamalbek Tussupov</dc:creator>
			<dc:creator>Quvvatali Rakhimov</dc:creator>
			<dc:creator>Zhanat Kenzhebayeva</dc:creator>
		<dc:identifier>doi: 10.3390/computers15030151</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-01</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-01</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>151</prism:startingPage>
		<prism:doi>10.3390/computers15030151</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/3/151</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/3/150">

	<title>Computers, Vol. 15, Pages 150: Algorithmic Challenges and Regulatory Frameworks of Artificial Intelligence in Mexico: A Prospective Analysis from the Perspective of Digital Governance Theory</title>
	<link>https://www.mdpi.com/2073-431X/15/3/150</link>
	<description>The rapid integration of artificial intelligence (AI) has heightened the need for evidence-based regulatory frameworks to effectively address its legal, ethical, and societal consequences. This research carefully analyzes the prevailing landscape of AI-related legislation in Mexico. The study conducts a comprehensive review of legislative initiatives related to AI regulation submitted to Mexican legislative bodies, encompassing those approved or pending in commissions. This process leads to the identification and categorization of outstanding initiatives across seven policy areas: Congress, Education, Health, Intellectual Property, Justice, AI Promotion, and AI Regulation. As a principal contribution, this work offers the first exhaustive mapping and thematic classification of legislative activity related to AI in Mexico. Furthermore, the analysis identifies systemic regulatory deficiencies, such as the lack of AI-specific legislation, the limited scope of existing data protection laws in relation to AI systems, and an absence of technical provisions concerning ethical design, algorithmic transparency, cybersecurity, and accountability frameworks. By showcasing these deficiencies, the study contributes a diagnostic framework for evaluating AI governance readiness in emerging economies. The findings emphasize the importance of establishing a comprehensive, technically sound, and internationally harmonized regulatory framework to reduce AI-related risks while promoting responsible innovation in Mexico.</description>
	<pubDate>2026-03-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 150: Algorithmic Challenges and Regulatory Frameworks of Artificial Intelligence in Mexico: A Prospective Analysis from the Perspective of Digital Governance Theory</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/3/150">doi: 10.3390/computers15030150</a></p>
	<p>Authors:
		Eduardo Arguijo
		Yenny Villuendas-Rey
		Arturo Cruz-Jiménez
		Jonatan Mireles-Hernández
		Oscar Camacho-Nieto
		Mario Aldape-Pérez
		</p>
	<p>The rapid integration of artificial intelligence (AI) has heightened the need for evidence-based regulatory frameworks to effectively address its legal, ethical, and societal consequences. This research carefully analyzes the prevailing landscape of AI-related legislation in Mexico. The study conducts a comprehensive review of legislative initiatives related to AI regulation submitted to Mexican legislative bodies, encompassing those approved or pending in commissions. This process leads to the identification and categorization of outstanding initiatives across seven policy areas: Congress, Education, Health, Intellectual Property, Justice, AI Promotion, and AI Regulation. As a principal contribution, this work offers the first exhaustive mapping and thematic classification of legislative activity related to AI in Mexico. Furthermore, the analysis identifies systemic regulatory deficiencies, such as the lack of AI-specific legislation, the limited scope of existing data protection laws in relation to AI systems, and an absence of technical provisions concerning ethical design, algorithmic transparency, cybersecurity, and accountability frameworks. By showcasing these deficiencies, the study contributes a diagnostic framework for evaluating AI governance readiness in emerging economies. The findings emphasize the importance of establishing a comprehensive, technically sound, and internationally harmonized regulatory framework to reduce AI-related risks while promoting responsible innovation in Mexico.</p>
	]]></content:encoded>

	<dc:title>Algorithmic Challenges and Regulatory Frameworks of Artificial Intelligence in Mexico: A Prospective Analysis from the Perspective of Digital Governance Theory</dc:title>
			<dc:creator>Eduardo Arguijo</dc:creator>
			<dc:creator>Yenny Villuendas-Rey</dc:creator>
			<dc:creator>Arturo Cruz-Jiménez</dc:creator>
			<dc:creator>Jonatan Mireles-Hernández</dc:creator>
			<dc:creator>Oscar Camacho-Nieto</dc:creator>
			<dc:creator>Mario Aldape-Pérez</dc:creator>
		<dc:identifier>doi: 10.3390/computers15030150</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-01</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-01</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>150</prism:startingPage>
		<prism:doi>10.3390/computers15030150</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/3/150</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/3/149">

	<title>Computers, Vol. 15, Pages 149: Democratizing Urban Well-Being: A Virtual Reality and Eye-Tracking Analysis of Biophilic Interventions Across Socioeconomic Contexts</title>
	<link>https://www.mdpi.com/2073-431X/15/3/149</link>
	<description>In this pilot study, we investigate the psychological and attentional impact of biophilic urban interventions using an immersive virtual reality (VR) framework integrated with real-time eye-tracking. Specifically, it examines whether bio-esthetic enhancements can mitigate perceptual inequalities across neighborhoods of varying socioeconomic status (SES). Sixteen participants viewed original and digitally enhanced fixed-viewpoint 360&amp;deg; videos of Low-, Medium-, and High-SES environments while a comprehensive suite of oculomotor dynamics and psychometric responses were recorded. Results confirmed a significant Condition &amp;times; SES interaction across both subjective preference (Liking) and esthetic evaluation (&amp;eta;2p = 0.41), suggesting a role for biophilic design as a &amp;ldquo;socio-perceptual equalizer&amp;rdquo;: while baseline ratings consistently favored High-SES areas, interventions in Low-SES contexts yielded the highest marginal gains, effectively bridging the gap with privileged environments. Eye-tracking metrics revealed that this convergence was associated with active visual engagement, with Enhanced Low-SES scenes eliciting the highest fixation counts and visual coverage. However, a critical dissociation emerged between immediate affective improvement and self-reported stress reduction. Elevated saccadic velocities in Enhanced Low-SES scenes are consistent with a state of &amp;ldquo;hard fascination&amp;rdquo; or novelty-induced arousal. This pattern implies that while biophilia elements boost positive affect, physiological restoration may be a dose-dependent process, requiring sufficient exposure duration to transition from curiosity-driven scanning to the &amp;ldquo;soft fascination&amp;rdquo; linked to stress recovery. These findings provide preliminary evidence for integrated XR analytics as a tool for evidence-based urban design and are discussed in the context of the equigenesis hypothesis.</description>
	<pubDate>2026-03-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 149: Democratizing Urban Well-Being: A Virtual Reality and Eye-Tracking Analysis of Biophilic Interventions Across Socioeconomic Contexts</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/3/149">doi: 10.3390/computers15030149</a></p>
	<p>Authors:
		Cleiton Ferreira
		Marina Guil-Jiménez
		Paula Latorre
		Aurora Molina-Muñoz
		Sergio Castaño-Castaño
		Francisco Nieto-Escamez
		</p>
	<p>In this pilot study, we investigate the psychological and attentional impact of biophilic urban interventions using an immersive virtual reality (VR) framework integrated with real-time eye-tracking. Specifically, it examines whether bio-esthetic enhancements can mitigate perceptual inequalities across neighborhoods of varying socioeconomic status (SES). Sixteen participants viewed original and digitally enhanced fixed-viewpoint 360&deg; videos of Low-, Medium-, and High-SES environments while a comprehensive suite of oculomotor dynamics and psychometric responses were recorded. Results confirmed a significant Condition &times; SES interaction across both subjective preference (Liking) and esthetic evaluation (&eta;2p = 0.41), suggesting a role for biophilic design as a &ldquo;socio-perceptual equalizer&rdquo;: while baseline ratings consistently favored High-SES areas, interventions in Low-SES contexts yielded the highest marginal gains, effectively bridging the gap with privileged environments. Eye-tracking metrics revealed that this convergence was associated with active visual engagement, with Enhanced Low-SES scenes eliciting the highest fixation counts and visual coverage. However, a critical dissociation emerged between immediate affective improvement and self-reported stress reduction. Elevated saccadic velocities in Enhanced Low-SES scenes are consistent with a state of &ldquo;hard fascination&rdquo; or novelty-induced arousal. This pattern implies that while biophilia elements boost positive affect, physiological restoration may be a dose-dependent process, requiring sufficient exposure duration to transition from curiosity-driven scanning to the &ldquo;soft fascination&rdquo; linked to stress recovery. These findings provide preliminary evidence for integrated XR analytics as a tool for evidence-based urban design and are discussed in the context of the equigenesis hypothesis.</p>
	]]></content:encoded>

	<dc:title>Democratizing Urban Well-Being: A Virtual Reality and Eye-Tracking Analysis of Biophilic Interventions Across Socioeconomic Contexts</dc:title>
			<dc:creator>Cleiton Ferreira</dc:creator>
			<dc:creator>Marina Guil-Jiménez</dc:creator>
			<dc:creator>Paula Latorre</dc:creator>
			<dc:creator>Aurora Molina-Muñoz</dc:creator>
			<dc:creator>Sergio Castaño-Castaño</dc:creator>
			<dc:creator>Francisco Nieto-Escamez</dc:creator>
		<dc:identifier>doi: 10.3390/computers15030149</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-01</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-01</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>149</prism:startingPage>
		<prism:doi>10.3390/computers15030149</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/3/149</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/3/148">

	<title>Computers, Vol. 15, Pages 148: A Multi-Objective Statistical Framework for Evaluating LLM-Based Code Modernization: Transformation Pattern Analysis and Effect Size Validation</title>
	<link>https://www.mdpi.com/2073-431X/15/3/148</link>
	<description>Automated legacy code modernization using Large Language Models lacks rigorous evaluation frameworks and multi-objective quality assessment methodologies. Existing research suffers from three critical deficiencies: single-metric evaluation paradigms creating pathological optimization incentives, statistical validation limited to p-values without effect size analysis, and absence of systematic transformation pattern taxonomies explaining what works and why. We present a novel multi-objective statistical framework that jointly assesses Cyclomatic Complexity (CC) and Maintainability Index (MI) while providing comprehensive effect size analysis addressing software engineering research gaps. Applied to 47 legacy Java samples from Apache Ant (version 1.10.x, commit rel/1.10.14), our framework achieves 97.9% metric-level improvement with very large practical effects (Cohen&amp;rsquo;s d=1.86, 95% CI [1.36, 2.35], p&amp;lt;0.0001) for maintainability&amp;mdash;substantially exceeding prior work and conventional significance thresholds. We note that this success rate reflects quality metric improvement; functional equivalence was verified through syntactic validation and manual inspection of a 20% random sample, while comprehensive automated test-based verification remains a limitation addressed in future work. We contribute: (1) first multi-objective quality assessment framework for code modernization with weighted composite scoring and sensitivity analysis, (2) rigorous statistical methodology with effect size analysis beyond p-values, (3) systematic transformation pattern taxonomy identifying four successful patterns and three failure modes with predictive value (inter-rater agreement &amp;kappa;=0.82), and (4) negative result showing iterative refinement provides no benefit (d=0.08, p=0.179), saving community resources. Our transformation taxonomy enables practitioners to predict success likelihood from code characteristics, while our statistical framework provides replicable methodology for evaluating LLM-based software engineering tools. The very large effect size indicates metric-level improvements are materially meaningful for real-world software maintenance, not merely statistically detectable.</description>
	<pubDate>2026-03-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 148: A Multi-Objective Statistical Framework for Evaluating LLM-Based Code Modernization: Transformation Pattern Analysis and Effect Size Validation</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/3/148">doi: 10.3390/computers15030148</a></p>
	<p>Authors:
		Bashair Althani
		</p>
	<p>Automated legacy code modernization using Large Language Models lacks rigorous evaluation frameworks and multi-objective quality assessment methodologies. Existing research suffers from three critical deficiencies: single-metric evaluation paradigms creating pathological optimization incentives, statistical validation limited to p-values without effect size analysis, and absence of systematic transformation pattern taxonomies explaining what works and why. We present a novel multi-objective statistical framework that jointly assesses Cyclomatic Complexity (CC) and Maintainability Index (MI) while providing comprehensive effect size analysis addressing software engineering research gaps. Applied to 47 legacy Java samples from Apache Ant (version 1.10.x, commit rel/1.10.14), our framework achieves 97.9% metric-level improvement with very large practical effects (Cohen&rsquo;s d=1.86, 95% CI [1.36, 2.35], p&lt;0.0001) for maintainability&mdash;substantially exceeding prior work and conventional significance thresholds. We note that this success rate reflects quality metric improvement; functional equivalence was verified through syntactic validation and manual inspection of a 20% random sample, while comprehensive automated test-based verification remains a limitation addressed in future work. We contribute: (1) first multi-objective quality assessment framework for code modernization with weighted composite scoring and sensitivity analysis, (2) rigorous statistical methodology with effect size analysis beyond p-values, (3) systematic transformation pattern taxonomy identifying four successful patterns and three failure modes with predictive value (inter-rater agreement &kappa;=0.82), and (4) negative result showing iterative refinement provides no benefit (d=0.08, p=0.179), saving community resources. Our transformation taxonomy enables practitioners to predict success likelihood from code characteristics, while our statistical framework provides replicable methodology for evaluating LLM-based software engineering tools. The very large effect size indicates metric-level improvements are materially meaningful for real-world software maintenance, not merely statistically detectable.</p>
	]]></content:encoded>

	<dc:title>A Multi-Objective Statistical Framework for Evaluating LLM-Based Code Modernization: Transformation Pattern Analysis and Effect Size Validation</dc:title>
			<dc:creator>Bashair Althani</dc:creator>
		<dc:identifier>doi: 10.3390/computers15030148</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-01</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-01</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>148</prism:startingPage>
		<prism:doi>10.3390/computers15030148</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/3/148</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/3/147">

	<title>Computers, Vol. 15, Pages 147: Hierarchical Fuzzy System Integrated with Deep Learning for Robust and Interpretable Classification of Breast Malignancies Using Radiomics Features from Ultrasound Imaging</title>
	<link>https://www.mdpi.com/2073-431X/15/3/147</link>
	<description>Breast cancer poses a global health risk and requires precision and accessibility in diagnostic measures. Ultrasound imaging is vital for breast lesion identification due to its safety, cost-effectiveness, and real-time capabilities. This paper presents a new fuzzy system architecture that utilizes ultrasound-based radiomics features to classify breast cancers. In order to ensure uniformity and consistency in shape-based characteristics limited to tumors, we calculate parameters such as elongation, compactness, spherical disproportion, and volumetrics following IBSI recommendations. We employ a hierarchical fuzzy system tree to handle high-dimensional data space and to identify the most discriminative characteristics. The selected features are incorporated into a modular fuzzy logic design that promotes transparency and maintains an auditable decision history according to clinical interpretability. Our framework enables the more accurate classification of breast cancer while addressing the beliefs and values prevalent in clinical applications. Tested on an independent set of data, the model achieved high accuracy of 99.60%, with low overfitting and strong generalization. To enhance its generalizability, we validated it on an internal dataset, attaining a sensitivity of 93.65%, a specificity of 99.24%, an AUC of 0.996, and an 18% reduction in unnecessary biopsies, as demonstrated through decision curve analysis, demonstrating substantial clinical utility across various settings. The findings confirm the system&amp;rsquo;s ability to identify intricate radiomic patterns linked to cancer. Due to its computing efficiency, it may be executed in real time during routine screening. The proposed radiomics-based fuzzy classification framework may offer a clinically beneficial approach for differentiating benign from malignant breast lesions. Explainability is enhanced with user-friendly artifacts for clinicians, including ranking IF-THEN rules and counterfactuals, all of which were validated in usability trials that demonstrated increased trust among radiologists compared to other technologies. Enhanced differentiation in the classification of various lesion types will decrease unnecessary biopsies. This approach integrates radiomics features with transparent and interpretable fuzzy logic to deliver enhanced predictors and a comprehensible framework for users, including physicians, to facilitate decision-making. This approach advances precision medicine standards through the early detection of lesions using more specific and systematic diagnostic instruments.</description>
	<pubDate>2026-03-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 147: Hierarchical Fuzzy System Integrated with Deep Learning for Robust and Interpretable Classification of Breast Malignancies Using Radiomics Features from Ultrasound Imaging</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/3/147">doi: 10.3390/computers15030147</a></p>
	<p>Authors:
		Mohamed Loey
		Heba M. Khalil
		</p>
	<p>Breast cancer poses a global health risk and requires precision and accessibility in diagnostic measures. Ultrasound imaging is vital for breast lesion identification due to its safety, cost-effectiveness, and real-time capabilities. This paper presents a new fuzzy system architecture that utilizes ultrasound-based radiomics features to classify breast cancers. In order to ensure uniformity and consistency in shape-based characteristics limited to tumors, we calculate parameters such as elongation, compactness, spherical disproportion, and volumetrics following IBSI recommendations. We employ a hierarchical fuzzy system tree to handle high-dimensional data space and to identify the most discriminative characteristics. The selected features are incorporated into a modular fuzzy logic design that promotes transparency and maintains an auditable decision history according to clinical interpretability. Our framework enables the more accurate classification of breast cancer while addressing the beliefs and values prevalent in clinical applications. Tested on an independent set of data, the model achieved high accuracy of 99.60%, with low overfitting and strong generalization. To enhance its generalizability, we validated it on an internal dataset, attaining a sensitivity of 93.65%, a specificity of 99.24%, an AUC of 0.996, and an 18% reduction in unnecessary biopsies, as demonstrated through decision curve analysis, demonstrating substantial clinical utility across various settings. The findings confirm the system&rsquo;s ability to identify intricate radiomic patterns linked to cancer. Due to its computing efficiency, it may be executed in real time during routine screening. The proposed radiomics-based fuzzy classification framework may offer a clinically beneficial approach for differentiating benign from malignant breast lesions. Explainability is enhanced with user-friendly artifacts for clinicians, including ranking IF-THEN rules and counterfactuals, all of which were validated in usability trials that demonstrated increased trust among radiologists compared to other technologies. Enhanced differentiation in the classification of various lesion types will decrease unnecessary biopsies. This approach integrates radiomics features with transparent and interpretable fuzzy logic to deliver enhanced predictors and a comprehensible framework for users, including physicians, to facilitate decision-making. This approach advances precision medicine standards through the early detection of lesions using more specific and systematic diagnostic instruments.</p>
	]]></content:encoded>

	<dc:title>Hierarchical Fuzzy System Integrated with Deep Learning for Robust and Interpretable Classification of Breast Malignancies Using Radiomics Features from Ultrasound Imaging</dc:title>
			<dc:creator>Mohamed Loey</dc:creator>
			<dc:creator>Heba M. Khalil</dc:creator>
		<dc:identifier>doi: 10.3390/computers15030147</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-01</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-01</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>147</prism:startingPage>
		<prism:doi>10.3390/computers15030147</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/3/147</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/3/146">

	<title>Computers, Vol. 15, Pages 146: On Signifiable Computability: Part III: A Note on Unnameable Functions on Natural Numbers</title>
	<link>https://www.mdpi.com/2073-431X/15/3/146</link>
	<description>A writing system W on an alphabet A is a tuple (R,M), where R is a finite set of text formation rules and M is a finite rule application mechanism that generates texts on A. A natural writing system forms natural language texts on an alphabet such as Sanskrit on Devanagari. An artificial writing system generates formal language texts on an alphabet such as Lisp on Unicode. Let N={0,1,2,&amp;hellip;} be the set of natural numbers. A function on natural numbers f:Nk&amp;#8614;N, 0&amp;lt;k&amp;isin;N, is nameable by a writing system on an alphabet if, and only if, the system can generate a text on the alphabet that names f and no other function. We show that there exist functions on natural numbers unnameable in principle in that they cannot be named by any writing system on any alphabet. Our results imply the following computability-theoretic hierarchy of functions on natural numbers: computable&amp;#8842;partiallycomputable&amp;#8842;nameable&amp;#8842;F, where F is the set of functions on N.</description>
	<pubDate>2026-03-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 146: On Signifiable Computability: Part III: A Note on Unnameable Functions on Natural Numbers</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/3/146">doi: 10.3390/computers15030146</a></p>
	<p>Authors:
		Vladimir A. Kulyukin
		</p>
	<p>A writing system W on an alphabet A is a tuple (R,M), where R is a finite set of text formation rules and M is a finite rule application mechanism that generates texts on A. A natural writing system forms natural language texts on an alphabet such as Sanskrit on Devanagari. An artificial writing system generates formal language texts on an alphabet such as Lisp on Unicode. Let N={0,1,2,&hellip;} be the set of natural numbers. A function on natural numbers f:Nk&#8614;N, 0&lt;k&isin;N, is nameable by a writing system on an alphabet if, and only if, the system can generate a text on the alphabet that names f and no other function. We show that there exist functions on natural numbers unnameable in principle in that they cannot be named by any writing system on any alphabet. Our results imply the following computability-theoretic hierarchy of functions on natural numbers: computable&#8842;partiallycomputable&#8842;nameable&#8842;F, where F is the set of functions on N.</p>
	]]></content:encoded>

	<dc:title>On Signifiable Computability: Part III: A Note on Unnameable Functions on Natural Numbers</dc:title>
			<dc:creator>Vladimir A. Kulyukin</dc:creator>
		<dc:identifier>doi: 10.3390/computers15030146</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-01</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-01</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Communication</prism:section>
	<prism:startingPage>146</prism:startingPage>
		<prism:doi>10.3390/computers15030146</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/3/146</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/3/144">

	<title>Computers, Vol. 15, Pages 144: Scalable IoT-Based Architecture for Continuous Monitoring of Patients at Home: Design and Technical Validation</title>
	<link>https://www.mdpi.com/2073-431X/15/3/144</link>
	<description>This article presents a scalable IoT-based architecture for continuous and passive monitoring of human behavior in home environments, designed as a technical foundation for future dementia risk assessment systems. The architecture addresses three fundamental challenges: achieving room-level spatial localization without privacy-invasive methods, balancing temporal resolution with bandwidth efficiency in continuous data streams, and enabling multi-institutional model development under GDPR constraints. The system integrates (1) wearable BLE sensors with infrared room-level localization; (2) edge computing gateways with local preprocessing and machine learning; (3) a three-channel data architecture that simultaneously achieves full 1 s temporal resolution for machine learning training, low-latency real-time visualization, and 41.2% network bandwidth reduction; and (4) a federated learning framework enabling collaborative model development without data sharing between institutions. Technical validation in two apartments (three participants, 7 days) demonstrated: 97.6% room-level localization accuracy using infrared beacons; less than 7 s end-to-end latency for 99.5% of critical events; and 98.5% deduplication accuracy in multi-gateway configurations. Federated learning simulation demonstrates algorithmic convergence (84.3% IID, 79.8% non-IID) and workflow feasibility, establishing a foundation for future production deployment. Cost analysis shows approximately &amp;euro;490 for initial implementation and approximately &amp;euro;55 monthly operation, representing substantially lower costs than existing research systems. The work establishes architectural and technical feasibility, as well as system-level economic viability, of continuous home monitoring for behavioral analysis within the evaluated residential scenarios. Clinical validation of diagnostic capabilities through longitudinal studies with validated cognitive assessments and patients with mild cognitive impairment remains to be studied in future work.</description>
	<pubDate>2026-03-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 144: Scalable IoT-Based Architecture for Continuous Monitoring of Patients at Home: Design and Technical Validation</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/3/144">doi: 10.3390/computers15030144</a></p>
	<p>Authors:
		Rosen Ivanov
		</p>
	<p>This article presents a scalable IoT-based architecture for continuous and passive monitoring of human behavior in home environments, designed as a technical foundation for future dementia risk assessment systems. The architecture addresses three fundamental challenges: achieving room-level spatial localization without privacy-invasive methods, balancing temporal resolution with bandwidth efficiency in continuous data streams, and enabling multi-institutional model development under GDPR constraints. The system integrates (1) wearable BLE sensors with infrared room-level localization; (2) edge computing gateways with local preprocessing and machine learning; (3) a three-channel data architecture that simultaneously achieves full 1 s temporal resolution for machine learning training, low-latency real-time visualization, and 41.2% network bandwidth reduction; and (4) a federated learning framework enabling collaborative model development without data sharing between institutions. Technical validation in two apartments (three participants, 7 days) demonstrated: 97.6% room-level localization accuracy using infrared beacons; less than 7 s end-to-end latency for 99.5% of critical events; and 98.5% deduplication accuracy in multi-gateway configurations. Federated learning simulation demonstrates algorithmic convergence (84.3% IID, 79.8% non-IID) and workflow feasibility, establishing a foundation for future production deployment. Cost analysis shows approximately &euro;490 for initial implementation and approximately &euro;55 monthly operation, representing substantially lower costs than existing research systems. The work establishes architectural and technical feasibility, as well as system-level economic viability, of continuous home monitoring for behavioral analysis within the evaluated residential scenarios. Clinical validation of diagnostic capabilities through longitudinal studies with validated cognitive assessments and patients with mild cognitive impairment remains to be studied in future work.</p>
	]]></content:encoded>

	<dc:title>Scalable IoT-Based Architecture for Continuous Monitoring of Patients at Home: Design and Technical Validation</dc:title>
			<dc:creator>Rosen Ivanov</dc:creator>
		<dc:identifier>doi: 10.3390/computers15030144</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-01</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-01</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>144</prism:startingPage>
		<prism:doi>10.3390/computers15030144</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/3/144</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/3/145">

	<title>Computers, Vol. 15, Pages 145: SESQ: Spatially Aware Encoding and Semantically Guided Querying for 3D Grounding</title>
	<link>https://www.mdpi.com/2073-431X/15/3/145</link>
	<description>3D visual grounding is a fundamental task for human&amp;ndash;machine interaction, aiming to localize specific objects in complex 3D point clouds based on natural language descriptions. Despite recent advancements, existing Transformer-based architectures often rely on absolute position embeddings and heuristic query initialization, which lack the capacity to capture fine-grained relative spatial dependencies and fail to effectively filter out scene clutter. In this paper, we propose SESQ, a novel framework that synergizes Spatially Aware Encoding and Semantically Guided Querying for 3D grounding. Our approach introduces two key innovations. First, we propose the Rotary Spatially Aware Encoder (RSAE), which incorporates Rotary Position Embeddings (RoPE) into the self-attention layers. By transforming 3D coordinates into a rotary representation, RSAE enables the model to inherently capture relative spatial distances and maintains geometric consistency throughout the encoding stage. Second, a Semantic Query Initialization (SQI) module is designed to initialize object queries by explicitly computing the cross-modal similarity between textual embeddings and visual point cloud features. By replacing traditional heuristic-based selection with semantic-aware alignment, SQI ensures that the decoding process originates from contextually relevant object candidates, significantly reducing the impact of task-irrelevant distractors. Extensive experiments on ScanRefer and ReferIt3D (Nr3D/Sr3D) benchmarks demonstrate the effectiveness of our framework. Compared to the baseline EDA, our method achieves a significant performance gain of 2.68% in overall Acc@0.5 on ScanRefer, a 4.9% improvement on the challenging Nr3D &amp;ldquo;Hard&amp;rdquo; subset, and a 1.1% increase in overall Acc@0.25 on Sr3D.</description>
	<pubDate>2026-03-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 145: SESQ: Spatially Aware Encoding and Semantically Guided Querying for 3D Grounding</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/3/145">doi: 10.3390/computers15030145</a></p>
	<p>Authors:
		Jinyuan Li
		Yundong Wu
		Tiancai Huang
		Mengyun Cao
		</p>
	<p>3D visual grounding is a fundamental task for human–machine interaction, aiming to localize specific objects in complex 3D point clouds based on natural language descriptions. Despite recent advancements, existing Transformer-based architectures often rely on absolute position embeddings and heuristic query initialization, which lack the capacity to capture fine-grained relative spatial dependencies and fail to effectively filter out scene clutter. In this paper, we propose SESQ, a novel framework that synergizes Spatially Aware Encoding and Semantically Guided Querying for 3D grounding. Our approach introduces two key innovations. First, we propose the Rotary Spatially Aware Encoder (RSAE), which incorporates Rotary Position Embeddings (RoPE) into the self-attention layers. By transforming 3D coordinates into a rotary representation, RSAE enables the model to inherently capture relative spatial distances and maintains geometric consistency throughout the encoding stage. Second, a Semantic Query Initialization (SQI) module is designed to initialize object queries by explicitly computing the cross-modal similarity between textual embeddings and visual point cloud features. By replacing traditional heuristic-based selection with semantic-aware alignment, SQI ensures that the decoding process originates from contextually relevant object candidates, significantly reducing the impact of task-irrelevant distractors. Extensive experiments on ScanRefer and ReferIt3D (Nr3D/Sr3D) benchmarks demonstrate the effectiveness of our framework. Compared to the baseline EDA, our method achieves a significant performance gain of 2.68% in overall Acc@0.5 on ScanRefer, a 4.9% improvement on the challenging Nr3D “Hard” subset, and a 1.1% increase in overall Acc@0.25 on Sr3D.</p>
	]]></content:encoded>

	<dc:title>SESQ: Spatially Aware Encoding and Semantically Guided Querying for 3D Grounding</dc:title>
			<dc:creator>Jinyuan Li</dc:creator>
			<dc:creator>Yundong Wu</dc:creator>
			<dc:creator>Tiancai Huang</dc:creator>
			<dc:creator>Mengyun Cao</dc:creator>
		<dc:identifier>doi: 10.3390/computers15030145</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-01</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-01</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>145</prism:startingPage>
		<prism:doi>10.3390/computers15030145</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/3/145</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/3/143">

	<title>Computers, Vol. 15, Pages 143: CONGA: CONscientization GAme for Colon Cancer Literacy in Last-Semester Software Engineering Students</title>
	<link>https://www.mdpi.com/2073-431X/15/3/143</link>
	<description>This study aimed to evaluate the effectiveness of the CONGA game, an interactive and gamified digital tool that uses AI-generated or manually created questions with feedback, to improve colon cancer literacy among tenth-semester Software Engineering students at the University of Guayaquil. Grounded in Paulo Freire’s critical pedagogy, CONGA operationalizes the concept of “conscientização” (critical consciousness awakening) by engaging learners in dialogical reflection on medical myths and encouraging critical evaluation of health information sources. This work addresses an age group—emerging adulthood—that is often overlooked in cancer prevention campaigns despite increasing cancer incidence in this population. The game incorporates an adaptive engine that personalizes difficulty and scoring based on player performance, enhancing engagement and learning personalization. A controlled experiment compared the game-based intervention with traditional lecture-based instruction, using pre- and post-test assessments to measure knowledge gains and misconception reduction. Results demonstrated that the CONGA group achieved a significantly higher post-test correct response rate of 82%, compared to 57% in the traditional instruction group, and showed a 70.4% reduction in incorrect responses versus 42.4% in the control group. These findings indicate that CONGA’s adaptive, feedback-driven design was more effective in enhancing short-term knowledge acquisition and immediate conceptual clarification following a single session. The study concludes that, based on immediate post-intervention assessments, gamified learning represents a scalable and engaging pedagogical strategy for colon cancer literacy, particularly in our local younger population. However, these results reflect short-term learning gains measured immediately after a single session, and further research is needed to evaluate long-term knowledge acquisition.</description>
	<pubDate>2026-02-27</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 143: CONGA: CONscientization GAme for Colon Cancer Literacy in Last-Semester Software Engineering Students</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/3/143">doi: 10.3390/computers15030143</a></p>
	<p>Authors:
		Franklin Parrales-Bravo
		Jonatan Guillen-Salabarria
		Janio Jadán-Guerrero
		Leonel Vasquez-Cevallos
		</p>
	<p>This study aimed to evaluate the effectiveness of the CONGA game, an interactive and gamified digital tool that uses AI-generated or manually created questions with feedback, to improve colon cancer literacy among tenth-semester Software Engineering students at the University of Guayaquil. Grounded in Paulo Freire’s critical pedagogy, CONGA operationalizes the concept of “conscientização” (critical consciousness awakening) by engaging learners in dialogical reflection on medical myths and encouraging critical evaluation of health information sources. This work addresses an age group—emerging adulthood—that is often overlooked in cancer prevention campaigns despite increasing cancer incidence in this population. The game incorporates an adaptive engine that personalizes difficulty and scoring based on player performance, enhancing engagement and learning personalization. A controlled experiment compared the game-based intervention with traditional lecture-based instruction, using pre- and post-test assessments to measure knowledge gains and misconception reduction. Results demonstrated that the CONGA group achieved a significantly higher post-test correct response rate of 82%, compared to 57% in the traditional instruction group, and showed a 70.4% reduction in incorrect responses versus 42.4% in the control group. These findings indicate that CONGA’s adaptive, feedback-driven design was more effective in enhancing short-term knowledge acquisition and immediate conceptual clarification following a single session. The study concludes that, based on immediate post-intervention assessments, gamified learning represents a scalable and engaging pedagogical strategy for colon cancer literacy, particularly in our local younger population. However, these results reflect short-term learning gains measured immediately after a single session, and further research is needed to evaluate long-term knowledge acquisition.</p>
	]]></content:encoded>

	<dc:title>CONGA: CONscientization GAme for Colon Cancer Literacy in Last-Semester Software Engineering Students</dc:title>
			<dc:creator>Franklin Parrales-Bravo</dc:creator>
			<dc:creator>Jonatan Guillen-Salabarria</dc:creator>
			<dc:creator>Janio Jadán-Guerrero</dc:creator>
			<dc:creator>Leonel Vasquez-Cevallos</dc:creator>
		<dc:identifier>doi: 10.3390/computers15030143</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-02-27</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-02-27</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>143</prism:startingPage>
		<prism:doi>10.3390/computers15030143</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/3/143</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/3/142">

	<title>Computers, Vol. 15, Pages 142: Multimodal Analysis of Hazard Perception Learning in Novice Drivers with Autism Using a Simulation-Based Training Environment</title>
	<link>https://www.mdpi.com/2073-431X/15/3/142</link>
	<description>Simulation-based driver training has shown promise for improving hazard perception in novice drivers; however, how learners with autism adapt behaviorally, visually, and physiologically during such training remains poorly understood. This study examined the effects of a game-based, hazard-focused driving simulation on hazard detection accuracy, gaze behavior, and heart rate in novice drivers with autism using a single-case, multi-phase design. Five participants completed repeated trials across baseline, treatment, and withdrawal phases while behavioral performance, eye movements, and physiological response were recorded. Across outcome domains, participants demonstrated highly individualized learning trajectories with substantial variability in both the direction and magnitude of change. Improvements in hazard detection accuracy were not consistently accompanied by changes in gaze organization or physiological response. While one participant exhibited a canonical pattern of coordinated improvement across behavioral, visual, and physiological measures, others showed dissociation between modalities, including reduced physiological arousal without performance gains or modest accuracy improvements despite sustained physiological engagement. Exploratory peri-hazard analyses further revealed participant-specific heart rate responses aligned to hazard detection, with no uniform temporal signature associated with learning. These findings suggest that hazard perception learning in drivers with autism does not follow a single pathway and cannot be inferred from any single performance or physiological metric. Instead, multimodal, within-participant analysis is critical for capturing meaningful individual adaptation during simulation-based training. The results have implications for the design and evaluation of driver training systems, supporting flexible, learner-specific assessment frameworks and adaptive approaches that accommodate diverse patterns of engagement and learning.</description>
	<pubDate>2026-02-27</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 142: Multimodal Analysis of Hazard Perception Learning in Novice Drivers with Autism Using a Simulation-Based Training Environment</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/3/142">doi: 10.3390/computers15030142</a></p>
	<p>Authors:
		Erik J. Sand
		Matthew T. Marino
		Charles E. Hughes
		</p>
	<p>Simulation-based driver training has shown promise for improving hazard perception in novice drivers; however, how learners with autism adapt behaviorally, visually, and physiologically during such training remains poorly understood. This study examined the effects of a game-based, hazard-focused driving simulation on hazard detection accuracy, gaze behavior, and heart rate in novice drivers with autism using a single-case, multi-phase design. Five participants completed repeated trials across baseline, treatment, and withdrawal phases while behavioral performance, eye movements, and physiological response were recorded. Across outcome domains, participants demonstrated highly individualized learning trajectories with substantial variability in both the direction and magnitude of change. Improvements in hazard detection accuracy were not consistently accompanied by changes in gaze organization or physiological response. While one participant exhibited a canonical pattern of coordinated improvement across behavioral, visual, and physiological measures, others showed dissociation between modalities, including reduced physiological arousal without performance gains or modest accuracy improvements despite sustained physiological engagement. Exploratory peri-hazard analyses further revealed participant-specific heart rate responses aligned to hazard detection, with no uniform temporal signature associated with learning. These findings suggest that hazard perception learning in drivers with autism does not follow a single pathway and cannot be inferred from any single performance or physiological metric. Instead, multimodal, within-participant analysis is critical for capturing meaningful individual adaptation during simulation-based training. The results have implications for the design and evaluation of driver training systems, supporting flexible, learner-specific assessment frameworks and adaptive approaches that accommodate diverse patterns of engagement and learning.</p>
	]]></content:encoded>

	<dc:title>Multimodal Analysis of Hazard Perception Learning in Novice Drivers with Autism Using a Simulation-Based Training Environment</dc:title>
			<dc:creator>Erik J. Sand</dc:creator>
			<dc:creator>Matthew T. Marino</dc:creator>
			<dc:creator>Charles E. Hughes</dc:creator>
		<dc:identifier>doi: 10.3390/computers15030142</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-02-27</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-02-27</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>142</prism:startingPage>
		<prism:doi>10.3390/computers15030142</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/3/142</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
    
<cc:License rdf:about="https://creativecommons.org/licenses/by/4.0/">
	<cc:permits rdf:resource="https://creativecommons.org/ns#Reproduction" />
	<cc:permits rdf:resource="https://creativecommons.org/ns#Distribution" />
	<cc:permits rdf:resource="https://creativecommons.org/ns#DerivativeWorks" />
</cc:License>

</rdf:RDF>
