<?xml version="1.0" encoding="UTF-8"?>
<rdf:RDF xmlns="http://purl.org/rss/1.0/"
 xmlns:dc="http://purl.org/dc/elements/1.1/"
 xmlns:dcterms="http://purl.org/dc/terms/"
 xmlns:cc="http://web.resource.org/cc/"
 xmlns:prism="http://prismstandard.org/namespaces/basic/2.0/"
 xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
 xmlns:admin="http://webns.net/mvcb/"
 xmlns:content="http://purl.org/rss/1.0/modules/content/">
    <channel rdf:about="https://www.mdpi.com/rss/journal/technologies">
		<title>Technologies</title>
		<description>Latest open access articles published in Technologies at https://www.mdpi.com/journal/technologies</description>
		<link>https://www.mdpi.com/journal/technologies</link>
		<admin:generatorAgent rdf:resource="https://www.mdpi.com/journal/technologies"/>
		<admin:errorReportsTo rdf:resource="mailto:support@mdpi.com"/>
		<dc:publisher>MDPI</dc:publisher>
		<dc:language>en</dc:language>
		<dc:rights>Creative Commons Attribution (CC-BY)</dc:rights>
						<prism:copyright>MDPI</prism:copyright>
		<prism:rightsAgent>support@mdpi.com</prism:rightsAgent>
		<image rdf:resource="https://pub.mdpi-res.com/img/design/mdpi-pub-logo.png?13cf3b5bd783e021&amp;1778040678"/>
				<items>
			<rdf:Seq>
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/5/281" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/5/280" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/5/279" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/5/278" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/5/277" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/5/276" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/5/275" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/5/274" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/5/273" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/5/272" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/5/271" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/5/270" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/5/268" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/5/269" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/5/267" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/5/266" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/5/265" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/5/263" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/5/264" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/5/262" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/5/261" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/5/260" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/5/257" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/5/259" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/5/258" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/5/256" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/5/255" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/5/254" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/5/253" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/5/252" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/5/250" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/5/251" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/5/249" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/5/248" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/5/247" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/5/246" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/5/245" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/5/244" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/4/243" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/4/242" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/4/241" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/4/240" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/4/239" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/4/238" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/4/237" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/4/236" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/4/235" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/4/234" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/4/233" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/4/231" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/4/232" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/4/230" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/4/229" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/4/227" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/4/228" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/4/225" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/4/226" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/4/224" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/4/223" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/4/222" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/4/221" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/4/220" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/4/219" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/4/218" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/4/217" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/4/216" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/4/215" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/4/214" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/4/213" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/4/212" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/4/211" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/4/210" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/4/209" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/4/208" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/4/207" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/4/206" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/4/205" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/4/204" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/4/203" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/4/202" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/4/201" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/4/200" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/4/199" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/4/198" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/4/197" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/4/196" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/4/195" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/4/194" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/3/193" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/3/192" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/3/191" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/3/190" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/3/189" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/3/188" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/3/187" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/3/186" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/3/185" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/3/184" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/3/183" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-7080/14/3/182" />
                    	</rdf:Seq>
		</items>
				<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/" />
	</channel>

        <item rdf:about="https://www.mdpi.com/2227-7080/14/5/281">

	<title>Technologies, Vol. 14, Pages 281: Enhancing Embedded Systems Education Through Remote Laboratories with Real-Time Firmware Observability</title>
	<link>https://www.mdpi.com/2227-7080/14/5/281</link>
	<description>Microcontroller programming is a key element in engineering education, enabling students to acquire practical skills in embedded systems. Despite its importance, limited access to physical laboratories at many institutions restricts hands-on learning experiences. Remote laboratories have been introduced as a solution; however, most rely solely on external visual feedback, which constrains observability and limits analysis of internal program behavior. This paper introduces a remote laboratory for microcontroller education utilizing an STM32 platform within a Cloud–IoT framework. The proposed system allows real-time monitoring of internal firmware variables through compilation artifacts, without requiring direct interaction with traditional debugging tools. A pilot study involving 40 students in a microcontrollers course, who engaged with the system remotely using standard development workflows, was conducted. Usability was measured via the System Usability Scale (SUS), yielding an average score of 75.19 and reflecting high satisfaction. The platform enables real-time observation of internal system states during program execution, facilitating program analysis while maintaining an experience similar to traditional on-site laboratories. These findings indicate that the system offers a robust and scalable alternative for embedded systems education, with improved observability over existing remote laboratory solutions.</description>
	<pubDate>2026-05-06</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 281: Enhancing Embedded Systems Education Through Remote Laboratories with Real-Time Firmware Observability</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/5/281">doi: 10.3390/technologies14050281</a></p>
	<p>Authors:
		Fabián García-Vázquez
		Héctor A. Guerrero-Osuna
		Luis F. Luque-Vega
		Jesús Antonio Nava-Pintor
		Jorge A. Lizarraga
		Salvador Castro-Tapia
		Mario Molina-Almaraz
		Ana G. Castañeda-Miranda
		Carlos A. Olvera-Olvera
		</p>
	<p>Microcontroller programming is a key element in engineering education, enabling students to acquire practical skills in embedded systems. Despite its importance, limited access to physical laboratories at many institutions restricts hands-on learning experiences. Remote laboratories have been introduced as a solution; however, most rely solely on external visual feedback, which constrains observability and limits analysis of internal program behavior. This paper introduces a remote laboratory for microcontroller education utilizing an STM32 platform within a Cloud–IoT framework. The proposed system allows real-time monitoring of internal firmware variables through compilation artifacts, without requiring direct interaction with traditional debugging tools. A pilot study involving 40 students in a microcontrollers course, who engaged with the system remotely using standard development workflows, was conducted. Usability was measured via the System Usability Scale (SUS), yielding an average score of 75.19 and reflecting high satisfaction. The platform enables real-time observation of internal system states during program execution, facilitating program analysis while maintaining an experience similar to traditional on-site laboratories. These findings indicate that the system offers a robust and scalable alternative for embedded systems education, with improved observability over existing remote laboratory solutions.</p>
	]]></content:encoded>

	<dc:title>Enhancing Embedded Systems Education Through Remote Laboratories with Real-Time Firmware Observability</dc:title>
			<dc:creator>Fabián García-Vázquez</dc:creator>
			<dc:creator>Héctor A. Guerrero-Osuna</dc:creator>
			<dc:creator>Luis F. Luque-Vega</dc:creator>
			<dc:creator>Jesús Antonio Nava-Pintor</dc:creator>
			<dc:creator>Jorge A. Lizarraga</dc:creator>
			<dc:creator>Salvador Castro-Tapia</dc:creator>
			<dc:creator>Mario Molina-Almaraz</dc:creator>
			<dc:creator>Ana G. Castañeda-Miranda</dc:creator>
			<dc:creator>Carlos A. Olvera-Olvera</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14050281</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-05-06</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-05-06</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>281</prism:startingPage>
		<prism:doi>10.3390/technologies14050281</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/5/281</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/5/280">

	<title>Technologies, Vol. 14, Pages 280: Overspray Containment Using an Air-Curtain Spray Hood in High-Pressure Airless Spray Coating with CFD Simulation and Experimental Validation</title>
	<link>https://www.mdpi.com/2227-7080/14/5/280</link>
	<description>High-pressure airless spray coating can atomize high-viscosity, high-solids coatings without compressed air and is widely used for large-scale anticorrosion applications, but robotic operation often produces substantial overspray that increases material waste, environmental burden, and lowers deposition efficiency. In this work, air-curtain blowing is investigated as an overspray control strategy for wall-climbing robotic airless spraying. A validated CFD framework was established using the realizable k–ε turbulence model coupled with a discrete-phase model (DPM) to simulate particle atomization, transport, impact, and escape, and to examine the effects of blowing angle and gap distance on the flow field and particle trajectories. Overspray performance was quantified using the wall deposition rate, hood collection rate, and particle escape rate. Experiments using a transparent spray hood with a mass collection system were conducted to validate the numerical predictions. The CFD results captured the measured trends in deposition and escape across the tested conditions. Among the evaluated parameters, a 60° blowing angle provided the most effective overspray reduction by redirecting particles toward the target surface. Overall, combining CFD analysis with experimental validation offers a practical methodology for designing and optimizing air-curtain systems to improve coating efficiency in automated high-pressure airless spray applications.</description>
	<pubDate>2026-05-04</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 280: Overspray Containment Using an Air-Curtain Spray Hood in High-Pressure Airless Spray Coating with CFD Simulation and Experimental Validation</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/5/280">doi: 10.3390/technologies14050280</a></p>
	<p>Authors:
		Yu-Hsien Chen
		Li-Ting Huang
		Sheng-Jye Hwang
		Hsueh-Hao Liao
		Chen-Han Hsien
		Wei-Ting Chang
		Ming-Chang Hsu
		Yi Huang
		Yu-Ting Chuang
		</p>
	<p>High-pressure airless spray coating can atomize high-viscosity, high-solids coatings without compressed air and is widely used for large-scale anticorrosion applications, but robotic operation often produces substantial overspray that increases material waste, environmental burden, and lowers deposition efficiency. In this work, air-curtain blowing is investigated as an overspray control strategy for wall-climbing robotic airless spraying. A validated CFD framework was established using the realizable k–ε turbulence model coupled with a discrete-phase model (DPM) to simulate particle atomization, transport, impact, and escape, and to examine the effects of blowing angle and gap distance on the flow field and particle trajectories. Overspray performance was quantified using the wall deposition rate, hood collection rate, and particle escape rate. Experiments using a transparent spray hood with a mass collection system were conducted to validate the numerical predictions. The CFD results captured the measured trends in deposition and escape across the tested conditions. Among the evaluated parameters, a 60° blowing angle provided the most effective overspray reduction by redirecting particles toward the target surface. Overall, combining CFD analysis with experimental validation offers a practical methodology for designing and optimizing air-curtain systems to improve coating efficiency in automated high-pressure airless spray applications.</p>
	]]></content:encoded>

	<dc:title>Overspray Containment Using an Air-Curtain Spray Hood in High-Pressure Airless Spray Coating with CFD Simulation and Experimental Validation</dc:title>
			<dc:creator>Yu-Hsien Chen</dc:creator>
			<dc:creator>Li-Ting Huang</dc:creator>
			<dc:creator>Sheng-Jye Hwang</dc:creator>
			<dc:creator>Hsueh-Hao Liao</dc:creator>
			<dc:creator>Chen-Han Hsien</dc:creator>
			<dc:creator>Wei-Ting Chang</dc:creator>
			<dc:creator>Ming-Chang Hsu</dc:creator>
			<dc:creator>Yi Huang</dc:creator>
			<dc:creator>Yu-Ting Chuang</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14050280</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-05-04</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-05-04</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>280</prism:startingPage>
		<prism:doi>10.3390/technologies14050280</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/5/280</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/5/279">

	<title>Technologies, Vol. 14, Pages 279: An Efficient Odor Source Localization Method for Wheeled Mobile Robots in Indoor Ventilated Environments</title>
	<link>https://www.mdpi.com/2227-7080/14/5/279</link>
	<description>Odor source localization (OSL) using mobile robots in indoor ventilated environments remains challenging due to turbulent dispersion, uneven concentration distribution, and weak robustness in conventional algorithms. This paper proposes an efficient OSL strategy for wheeled mobile robots by integrating time-varying smoke plume modeling, particle filtering (PF), and information entropy. A multi-sensor fusion perception system is developed, including an LDS-02 LiDAR, ultrasonic anemometer, and PMS5003 particle sensor. The proposed method employs a plume model to characterize odor particle propagation, uses particle filtering to estimate the posterior distribution of the source location, and introduces information entropy to quantify perceptual uncertainty and optimize robot path planning. Comparative simulations and real-world experiments are conducted in a 5 m × 3 m indoor ventilated environment against the traditional gradient–bionic hybrid algorithm. Results demonstrate that the proposed algorithm significantly reduces the average search time and improves the localization success rate. The long-distance localization success rate exceeds 90%, and the positioning error is controlled within 0.5 m. The proposed strategy provides a reliable and practical solution for OSL in indoor ventilation environments.</description>
	<pubDate>2026-05-04</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 279: An Efficient Odor Source Localization Method for Wheeled Mobile Robots in Indoor Ventilated Environments</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/5/279">doi: 10.3390/technologies14050279</a></p>
	<p>Authors:
		Xutong Ye
		Boxuan Guo
		Yujiao Gu
		Haifeng Jiu
		Shuo Pang
		</p>
	<p>Odor source localization (OSL) using mobile robots in indoor ventilated environments remains challenging due to turbulent dispersion, uneven concentration distribution, and weak robustness in conventional algorithms. This paper proposes an efficient OSL strategy for wheeled mobile robots by integrating time-varying smoke plume modeling, particle filtering (PF), and information entropy. A multi-sensor fusion perception system is developed, including an LDS-02 LiDAR, ultrasonic anemometer, and PMS5003 particle sensor. The proposed method employs a plume model to characterize odor particle propagation, uses particle filtering to estimate the posterior distribution of the source location, and introduces information entropy to quantify perceptual uncertainty and optimize robot path planning. Comparative simulations and real-world experiments are conducted in a 5 m × 3 m indoor ventilated environment against the traditional gradient–bionic hybrid algorithm. Results demonstrate that the proposed algorithm significantly reduces the average search time and improves the localization success rate. The long-distance localization success rate exceeds 90%, and the positioning error is controlled within 0.5 m. The proposed strategy provides a reliable and practical solution for OSL in indoor ventilation environments.</p>
	]]></content:encoded>

	<dc:title>An Efficient Odor Source Localization Method for Wheeled Mobile Robots in Indoor Ventilated Environments</dc:title>
			<dc:creator>Xutong Ye</dc:creator>
			<dc:creator>Boxuan Guo</dc:creator>
			<dc:creator>Yujiao Gu</dc:creator>
			<dc:creator>Haifeng Jiu</dc:creator>
			<dc:creator>Shuo Pang</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14050279</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-05-04</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-05-04</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>279</prism:startingPage>
		<prism:doi>10.3390/technologies14050279</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/5/279</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/5/278">

	<title>Technologies, Vol. 14, Pages 278: Defense-in-Depth Management of Radioactive Atmospheric Emissions in an Urban Medical Cyclotron Facility</title>
	<link>https://www.mdpi.com/2227-7080/14/5/278</link>
	<description>The operation of medical cyclotrons for PET radiopharmaceutical production presents significant radiological and environmental challenges that require systematic risk assessment and evidence-based mitigation strategies. In this study, an integrated framework combining Failure Mode and Effects Analysis (FMEA) with a quantitative Defense Effectiveness Factor (DEF) approach was developed to evaluate and reduce residual risk in a real urban cyclotron facility. High-criticality failure modes (Risk Priority Number ≥120) affecting HVAC systems, stack exhaust, and power supply were identified and validated through a Delphi expert consensus process. These modes were addressed with multi-layered defense-in-depth strategies: redundant systems (occurrence reduction, 60–80% effectiveness), real-time monitoring (detection reduction, 40–50% effectiveness), and design robustness (severity reduction, 70–85% effectiveness). The combined DEF yielded a 96–97% risk reduction. One-way sensitivity analysis confirmed the robustness of these results, with residual annual effective dose to the representative person remaining between 50–88 μSv/year (well below the IAEA 1 mSv/year public dose constraint) even under pessimistic scenarios. Primary exposure pathways were inhalation and cloud gamma from 18F and 41Ar during the early-morning production window, while secondary pathways were negligible due to the short half-lives of the radionuclides. These findings demonstrate that the integration of FMEA with DEF-based defense-in-depth and Gaussian plume modeling provides a transparent, robust, and regulatory-compliant framework for managing radioactive atmospheric emissions in urban medical cyclotron facilities.</description>
	<pubDate>2026-05-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 278: Defense-in-Depth Management of Radioactive Atmospheric Emissions in an Urban Medical Cyclotron Facility</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/5/278">doi: 10.3390/technologies14050278</a></p>
	<p>Authors:
		Frank Montero-Díaz
		Antonio Torres-Valle
		Ulises Jauregui-Haza
		</p>
	<p>The operation of medical cyclotrons for PET radiopharmaceutical production presents significant radiological and environmental challenges that require systematic risk assessment and evidence-based mitigation strategies. In this study, an integrated framework combining Failure Mode and Effects Analysis (FMEA) with a quantitative Defense Effectiveness Factor (DEF) approach was developed to evaluate and reduce residual risk in a real urban cyclotron facility. High-criticality failure modes (Risk Priority Number ≥120) affecting HVAC systems, stack exhaust, and power supply were identified and validated through a Delphi expert consensus process. These modes were addressed with multi-layered defense-in-depth strategies: redundant systems (occurrence reduction, 60–80% effectiveness), real-time monitoring (detection reduction, 40–50% effectiveness), and design robustness (severity reduction, 70–85% effectiveness). The combined DEF yielded a 96–97% risk reduction. One-way sensitivity analysis confirmed the robustness of these results, with residual annual effective dose to the representative person remaining between 50–88 μSv/year (well below the IAEA 1 mSv/year public dose constraint) even under pessimistic scenarios. Primary exposure pathways were inhalation and cloud gamma from 18F and 41Ar during the early-morning production window, while secondary pathways were negligible due to the short half-lives of the radionuclides. These findings demonstrate that the integration of FMEA with DEF-based defense-in-depth and Gaussian plume modeling provides a transparent, robust, and regulatory-compliant framework for managing radioactive atmospheric emissions in urban medical cyclotron facilities.</p>
	]]></content:encoded>

	<dc:title>Defense-in-Depth Management of Radioactive Atmospheric Emissions in an Urban Medical Cyclotron Facility</dc:title>
			<dc:creator>Frank Montero-Díaz</dc:creator>
			<dc:creator>Antonio Torres-Valle</dc:creator>
			<dc:creator>Ulises Jauregui-Haza</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14050278</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-05-02</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-05-02</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>278</prism:startingPage>
		<prism:doi>10.3390/technologies14050278</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/5/278</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/5/277">

	<title>Technologies, Vol. 14, Pages 277: Ensemble-Based Multimodal Deep Learning for Precise Skin Cancer Diagnosis: Integrating Clinical Imagery with Patient Metadata</title>
	<link>https://www.mdpi.com/2227-7080/14/5/277</link>
	<description>The rising incidence of skin cancer necessitates scalable and accurate diagnostic tools. While dermoscopy-based systems have achieved expert-level performance, clinical smartphone images pose challenges due to variability in lighting, resolution, and artifacts. Recent advances in multimodal deep learning have shown promise, yet most approaches rely on simple feature concatenation or single-model classifiers, limiting their ability to capture complex cross-modal interactions. This study aims to bridge the diagnostic gap in resource-limited settings by developing a robust multimodal framework that synergizes clinical smartphone images with structured patient metadata for automated skin cancer classification. We propose a novel hybrid architecture integrating a Swin Transformer V2 (SwinV2-Tiny) for hierarchical visual feature extraction and a Denoising Autoencoder (DAE) with PCA for robust metadata embedding. These heterogeneous modalities are fused via a Gated Attention Mechanism that dynamically weighs feature importance across streams. Classification is performed by a Heterogeneous Meta-Stack Ensemble comprising CatBoost, LightGBM, XGBoost, and Logistic Regression, designed to maximize calibration and generalization across imbalanced classes. Evaluated on the PAD-UFES-20 dataset (2298 clinical smartphone images, six diagnostic classes), the proposed framework achieves state-of-the-art performance with a macro-averaged F1-score of 0.977, accuracy of 0.978, and an AUC of 0.990. It significantly outperforms unimodal baselines and existing multimodal methods, demonstrating superior sensitivity (0.974) and precision (0.981), particularly for underrepresented malignant classes like Melanoma (F1: 0.995) and Squamous Cell Carcinoma (F1: 0.960). The integration of clinical metadata with advanced visual embeddings via gated attention significantly enhances diagnostic reliability. 
Comprehensive ablation studies confirm the contribution of each architectural component. This framework offers a viable pathway for deploying high-precision, AI-driven dermatological screening tools on standard smartphone devices.</description>
	<pubDate>2026-05-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 277: Ensemble-Based Multimodal Deep Learning for Precise Skin Cancer Diagnosis: Integrating Clinical Imagery with Patient Metadata</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/5/277">doi: 10.3390/technologies14050277</a></p>
	<p>Authors:
		Wyssem Fathallah
		M’hamed Abid
		Mourad Mars
		Hedi Sakli
		</p>
	<p>The rising incidence of skin cancer necessitates scalable and accurate diagnostic tools. While dermoscopy-based systems have achieved expert-level performance, clinical smartphone images pose challenges due to variability in lighting, resolution, and artifacts. Recent advances in multimodal deep learning have shown promise, yet most approaches rely on simple feature concatenation or single-model classifiers, limiting their ability to capture complex cross-modal interactions. This study aims to bridge the diagnostic gap in resource-limited settings by developing a robust multimodal framework that synergizes clinical smartphone images with structured patient metadata for automated skin cancer classification. We propose a novel hybrid architecture integrating a Swin Transformer V2 (SwinV2-Tiny) for hierarchical visual feature extraction and a Denoising Autoencoder (DAE) with PCA for robust metadata embedding. These heterogeneous modalities are fused via a Gated Attention Mechanism that dynamically weighs feature importance across streams. Classification is performed by a Heterogeneous Meta-Stack Ensemble comprising CatBoost, LightGBM, XGBoost, and Logistic Regression, designed to maximize calibration and generalization across imbalanced classes. Evaluated on the PAD-UFES-20 dataset (2298 clinical smartphone images, six diagnostic classes), the proposed framework achieves state-of-the-art performance with a macro-averaged F1-score of 0.977, accuracy of 0.978, and an AUC of 0.990. It significantly outperforms unimodal baselines and existing multimodal methods, demonstrating superior sensitivity (0.974) and precision (0.981), particularly for underrepresented malignant classes like Melanoma (F1: 0.995) and Squamous Cell Carcinoma (F1: 0.960). The integration of clinical metadata with advanced visual embeddings via gated attention significantly enhances diagnostic reliability. Comprehensive ablation studies confirm the contribution of each architectural component. 
This framework offers a viable pathway for deploying high-precision, AI-driven dermatological screening tools on standard smartphone devices.</p>
	]]></content:encoded>

	<dc:title>Ensemble-Based Multimodal Deep Learning for Precise Skin Cancer Diagnosis: Integrating Clinical Imagery with Patient Metadata</dc:title>
			<dc:creator>Wyssem Fathallah</dc:creator>
			<dc:creator>M’hamed Abid</dc:creator>
			<dc:creator>Mourad Mars</dc:creator>
			<dc:creator>Hedi Sakli</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14050277</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-05-02</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-05-02</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>277</prism:startingPage>
		<prism:doi>10.3390/technologies14050277</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/5/277</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/5/276">

	<title>Technologies, Vol. 14, Pages 276: A Hybrid Machine Learning Framework for Mechanistically Interpretable Latent Parameter Inference in a Spatiotemporal CAR-T Therapy Model for Solid Tumours</title>
	<link>https://www.mdpi.com/2227-7080/14/5/276</link>
	<description>CAR-T cell therapy remains ineffective in most solid tumours because effector cells infiltrate poorly, undergo exhaustion, and face antigen escape within an immunosuppressive microenvironment. To address this, we developed a hybrid framework that combines a mechanistic spatiotemporal model with machine learning for limited individual-level mechanistic personalisation under data constraints. At its core, we employed a reaction&amp;ndash;diffusion&amp;ndash;chemotaxis model describing functional and exhausted CAR-T cells, antigen-positive and antigen-negative tumour subpopulations, a chemoattractant, an immunosuppressive factor, and hypoxia. Gradient boosting combined with nested cross-validation was used to recover model-consistent latent-parameter pseudo-labels generated by a limited inverse problem. Within this surrogate-target setting, parameters characterising the tumour microenvironment and CAR-T cell exhaustion were reproduced most robustly, whereas antigen escape and individualised initial conditions were substantially less well constrained. As an auxiliary reference point, we also considered a direct empirical baseline for binary clinical outcomes. This baseline indicated that the observed clinical features contained a more stable signal for disease control than for objective response. A favourable response was associated with high CAR-T cell infiltration and cytotoxic potency, whereas resistance was linked to exhaustion, antigen escape, and a suppressive microenvironment. Overall, the proposed approach should be interpreted as an internally validated, hypothesis-generating proof-of-concept platform for mapping clinical features to mechanistically interpretable surrogate latent targets, rather than as evidence for validated recovery of true patient-specific biological parameters.</description>
	<pubDate>2026-05-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 276: A Hybrid Machine Learning Framework for Mechanistically Interpretable Latent Parameter Inference in a Spatiotemporal CAR-T Therapy Model for Solid Tumours</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/5/276">doi: 10.3390/technologies14050276</a></p>
	<p>Authors:
		Maxim Polyakov
		</p>
	<p>CAR-T cell therapy remains ineffective in most solid tumours because effector cells infiltrate poorly, undergo exhaustion, and face antigen escape within an immunosuppressive microenvironment. To address this, we developed a hybrid framework that combines a mechanistic spatiotemporal model with machine learning for limited individual-level mechanistic personalisation under data constraints. At its core, we employed a reaction&ndash;diffusion&ndash;chemotaxis model describing functional and exhausted CAR-T cells, antigen-positive and antigen-negative tumour subpopulations, a chemoattractant, an immunosuppressive factor, and hypoxia. Gradient boosting combined with nested cross-validation was used to recover model-consistent latent-parameter pseudo-labels generated by a limited inverse problem. Within this surrogate-target setting, parameters characterising the tumour microenvironment and CAR-T cell exhaustion were reproduced most robustly, whereas antigen escape and individualised initial conditions were substantially less well constrained. As an auxiliary reference point, we also considered a direct empirical baseline for binary clinical outcomes. This baseline indicated that the observed clinical features contained a more stable signal for disease control than for objective response. A favourable response was associated with high CAR-T cell infiltration and cytotoxic potency, whereas resistance was linked to exhaustion, antigen escape, and a suppressive microenvironment. Overall, the proposed approach should be interpreted as an internally validated, hypothesis-generating proof-of-concept platform for mapping clinical features to mechanistically interpretable surrogate latent targets, rather than as evidence for validated recovery of true patient-specific biological parameters.</p>
	]]></content:encoded>

	<dc:title>A Hybrid Machine Learning Framework for Mechanistically Interpretable Latent Parameter Inference in a Spatiotemporal CAR-T Therapy Model for Solid Tumours</dc:title>
			<dc:creator>Maxim Polyakov</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14050276</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-05-01</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-05-01</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>276</prism:startingPage>
		<prism:doi>10.3390/technologies14050276</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/5/276</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/5/275">

	<title>Technologies, Vol. 14, Pages 275: Multidisciplinary Breakthroughs in Short-Fiber Composites&amp;mdash;Properties, Microstructure, Topology Optimization and Embedded Elements</title>
	<link>https://www.mdpi.com/2227-7080/14/5/275</link>
	<description>Short-fiber-reinforced composite materials are increasingly being used in a wide variety of fields, including medicine, the automotive industry, and aviation. This growing demand has driven the development of new manufacturing technologies, numerical modeling and topology optimization methods, and techniques for assessing the internal structure of such products, among others. This review provides a comprehensive examination of the characteristics and mechanical properties of short-fiber-reinforced composite materials, focusing on the key aspects of their design and manufacturing processes. We analyze the consideration of anisotropy in material modeling, the methods for the non-destructive testing of material structure, and the multidisciplinary approach to product design. The review also addresses advanced design techniques, including topology optimization and bimaterial optimization for designing products with embedded lattice structures, as well as adhesion modeling. In contrast to existing reviews, this work presents an overview of multidisciplinary studies dedicated to all stages of designing and manufacturing structures from short-fiber-reinforced composites, unifying the engineering pipeline from material property and molding modeling to topology-optimized design and static analysis under operation loads.</description>
	<pubDate>2026-05-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 275: Multidisciplinary Breakthroughs in Short-Fiber Composites&mdash;Properties, Microstructure, Topology Optimization and Embedded Elements</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/5/275">doi: 10.3390/technologies14050275</a></p>
	<p>Authors:
		Evgenii I. Kurkin
		Vladislava O. Chertykovtseva
		Andrey V. Sedelnikov
		</p>
	<p>Short-fiber-reinforced composite materials are increasingly being used in a wide variety of fields, including medicine, the automotive industry, and aviation. This growing demand has driven the development of new manufacturing technologies, numerical modeling and topology optimization methods, and techniques for assessing the internal structure of such products, among others. This review provides a comprehensive examination of the characteristics and mechanical properties of short-fiber-reinforced composite materials, focusing on the key aspects of their design and manufacturing processes. We analyze the consideration of anisotropy in material modeling, the methods for the non-destructive testing of material structure, and the multidisciplinary approach to product design. The review also addresses advanced design techniques, including topology optimization and bimaterial optimization for designing products with embedded lattice structures, as well as adhesion modeling. In contrast to existing reviews, this work presents an overview of multidisciplinary studies dedicated to all stages of designing and manufacturing structures from short-fiber-reinforced composites, unifying the engineering pipeline from material property and molding modeling to topology-optimized design and static analysis under operation loads.</p>
	]]></content:encoded>

	<dc:title>Multidisciplinary Breakthroughs in Short-Fiber Composites&amp;mdash;Properties, Microstructure, Topology Optimization and Embedded Elements</dc:title>
			<dc:creator>Evgenii I. Kurkin</dc:creator>
			<dc:creator>Vladislava O. Chertykovtseva</dc:creator>
			<dc:creator>Andrey V. Sedelnikov</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14050275</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-05-01</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-05-01</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>275</prism:startingPage>
		<prism:doi>10.3390/technologies14050275</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/5/275</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/5/274">

	<title>Technologies, Vol. 14, Pages 274: An Enhanced XGBoost-Based Framework for Efficient Multi-Class Cyber Threat Detection in Industrial IoT Networks</title>
	<link>https://www.mdpi.com/2227-7080/14/5/274</link>
	<description>Securing Industrial IoT (IIoT) network environments remains a significant challenge due to the increasing complexity of interconnected sensors, actuators, gateways, and control systems, which are frequent targets of cyberattacks. These threats can lead to operational disruptions, financial losses, and safety risks. This paper proposes an efficient multi-stage intrusion detection framework based on an enhanced Extreme Gradient Boosting (XGBoost) model for IIoT environments. The proposed framework integrates data preprocessing, class imbalance handling, hyperparameter optimization, probability calibration, and class-specific decision thresholds within a unified pipeline. In addition, calibrated probability outputs are utilized as continuous indicators of prediction confidence, enabling more reliable and risk-aware decision-making. The hierarchical multi-stage design decomposes the detection task into progressively refined classification levels, improving discrimination among complex and overlapping attack categories. The framework is evaluated using the Edge-IIoTset benchmark dataset, which reflects realistic IIoT network traffic under both normal and malicious conditions. Experimental results demonstrate that the proposed approach achieved significant performance improvements, including up to 21% increase in recall and 15% improvement in macro F1 score compared to the baseline models. Furthermore, the model exhibits low inference latency and supports efficient deployment in time-sensitive IIoT monitoring scenarios. These results indicate that the proposed framework provides an effective and scalable solution for multi-class cyber threat detection in IIoT networks.</description>
	<pubDate>2026-05-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 274: An Enhanced XGBoost-Based Framework for Efficient Multi-Class Cyber Threat Detection in Industrial IoT Networks</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/5/274">doi: 10.3390/technologies14050274</a></p>
	<p>Authors:
		Adel A. Ahmed
		Talal A. A. Abdullah
		</p>
	<p>Securing Industrial IoT (IIoT) network environments remains a significant challenge due to the increasing complexity of interconnected sensors, actuators, gateways, and control systems, which are frequent targets of cyberattacks. These threats can lead to operational disruptions, financial losses, and safety risks. This paper proposes an efficient multi-stage intrusion detection framework based on an enhanced Extreme Gradient Boosting (XGBoost) model for IIoT environments. The proposed framework integrates data preprocessing, class imbalance handling, hyperparameter optimization, probability calibration, and class-specific decision thresholds within a unified pipeline. In addition, calibrated probability outputs are utilized as continuous indicators of prediction confidence, enabling more reliable and risk-aware decision-making. The hierarchical multi-stage design decomposes the detection task into progressively refined classification levels, improving discrimination among complex and overlapping attack categories. The framework is evaluated using the Edge-IIoTset benchmark dataset, which reflects realistic IIoT network traffic under both normal and malicious conditions. Experimental results demonstrate that the proposed approach achieved significant performance improvements, including up to 21% increase in recall and 15% improvement in macro F1 score compared to the baseline models. Furthermore, the model exhibits low inference latency and supports efficient deployment in time-sensitive IIoT monitoring scenarios. These results indicate that the proposed framework provides an effective and scalable solution for multi-class cyber threat detection in IIoT networks.</p>
	]]></content:encoded>

	<dc:title>An Enhanced XGBoost-Based Framework for Efficient Multi-Class Cyber Threat Detection in Industrial IoT Networks</dc:title>
			<dc:creator>Adel A. Ahmed</dc:creator>
			<dc:creator>Talal A. A. Abdullah</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14050274</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-05-01</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-05-01</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>274</prism:startingPage>
		<prism:doi>10.3390/technologies14050274</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/5/274</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/5/273">

	<title>Technologies, Vol. 14, Pages 273: A GPT-Based Assessment of Alignment Between Privacy Legal Frameworks &amp;amp; ISO/IEC 27701:2025: A Latin American Case Study</title>
	<link>https://www.mdpi.com/2227-7080/14/5/273</link>
	<description>The 2025 update of the International Organization for Standardization/International Electrotechnical Commission (ISO/IEC) 27701 standard offers a major advantage by enabling organizations to implement a Privacy Information Management System (PIMS) autonomously while maintaining alignment with the General Data Protection Regulation (GDPR). However, it remains unclear to what extent privacy legal frameworks in developing jurisdictions, particularly in Latin American countries, align with this new standard. At the same time, the traditional method for assessing the alignment between privacy legal frameworks and ISO/IEC 27701 continues to rely on manual mapping between the standard&amp;rsquo;s subclauses and privacy regulatory articles, a process that is time-consuming, costly, and error-prone. More critically, no method exists to quantitatively assess the reliability of such mappings, leaving alignment assessments largely subjective. To address these limitations, this paper proposes a novel method based on an OpenAI Generative Pre-trained Transformer (GPT) combined with a Chain-of-Thought (CoT) reasoning strategy to quantitatively assess the alignment between privacy legal frameworks and ISO/IEC 27701:2025. By leveraging GPT&amp;rsquo;s logarithmic probabilities (logprobs) and the standard&amp;rsquo;s subclause definitions as classification categories, the method enables confidence-based evaluation of legal&amp;ndash;standard alignment. The proposed method is then applied to analyze the privacy legal frameworks of Paraguay, Chile, Ecuador, M&amp;eacute;xico, Colombia, and Per&amp;uacute;, examining how effectively they promote the standard&amp;rsquo;s guidelines. A suitable confidence threshold is then selected by assessing the GDPR and comparing the results with the reference mappings reported in Annex D of the standard. 
Finally, the method identifies the number of compliant subclauses per clause, the regulatory articles influencing the resulting logprobs, and the underlying privacy gaps for reduced alignment across the analyzed privacy legal frameworks. Overall, our results indicate that while Latin American privacy legal frameworks mandate protective measures by promoting a suitable operation and continuous improvement of a PIMS, they do not explicitly demand adequate risk management and sufficient preventive safeguards for citizens&amp;rsquo; Personally Identifiable Information (PII) in dynamic contexts.</description>
	<pubDate>2026-04-30</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 273: A GPT-Based Assessment of Alignment Between Privacy Legal Frameworks &amp; ISO/IEC 27701:2025: A Latin American Case Study</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/5/273">doi: 10.3390/technologies14050273</a></p>
	<p>Authors:
		David Cevallos-Salas
		José Estrada-Jiménez
		Danny S. Guamán
		</p>
	<p>The 2025 update of the International Organization for Standardization/International Electrotechnical Commission (ISO/IEC) 27701 standard offers a major advantage by enabling organizations to implement a Privacy Information Management System (PIMS) autonomously while maintaining alignment with the General Data Protection Regulation (GDPR). However, it remains unclear to what extent privacy legal frameworks in developing jurisdictions, particularly in Latin American countries, align with this new standard. At the same time, the traditional method for assessing the alignment between privacy legal frameworks and ISO/IEC 27701 continues to rely on manual mapping between the standard&rsquo;s subclauses and privacy regulatory articles, a process that is time-consuming, costly, and error-prone. More critically, no method exists to quantitatively assess the reliability of such mappings, leaving alignment assessments largely subjective. To address these limitations, this paper proposes a novel method based on an OpenAI Generative Pre-trained Transformer (GPT) combined with a Chain-of-Thought (CoT) reasoning strategy to quantitatively assess the alignment between privacy legal frameworks and ISO/IEC 27701:2025. By leveraging GPT&rsquo;s logarithmic probabilities (logprobs) and the standard&rsquo;s subclause definitions as classification categories, the method enables confidence-based evaluation of legal&ndash;standard alignment. The proposed method is then applied to analyze the privacy legal frameworks of Paraguay, Chile, Ecuador, M&eacute;xico, Colombia, and Per&uacute;, examining how effectively they promote the standard&rsquo;s guidelines. A suitable confidence threshold is then selected by assessing the GDPR and comparing the results with the reference mappings reported in Annex D of the standard. 
Finally, the method identifies the number of compliant subclauses per clause, the regulatory articles influencing the resulting logprobs, and the underlying privacy gaps for reduced alignment across the analyzed privacy legal frameworks. Overall, our results indicate that while Latin American privacy legal frameworks mandate protective measures by promoting a suitable operation and continuous improvement of a PIMS, they do not explicitly demand adequate risk management and sufficient preventive safeguards for citizens&rsquo; Personally Identifiable Information (PII) in dynamic contexts.</p>
	]]></content:encoded>

	<dc:title>A GPT-Based Assessment of Alignment Between Privacy Legal Frameworks &amp;amp; ISO/IEC 27701:2025: A Latin American Case Study</dc:title>
			<dc:creator>David Cevallos-Salas</dc:creator>
			<dc:creator>José Estrada-Jiménez</dc:creator>
			<dc:creator>Danny S. Guamán</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14050273</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-30</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-30</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>273</prism:startingPage>
		<prism:doi>10.3390/technologies14050273</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/5/273</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/5/272">

	<title>Technologies, Vol. 14, Pages 272: Multi-Scale Hybrid Attention Temporal Network for Motionless Activity Using Smartphone Inertial Sensors</title>
	<link>https://www.mdpi.com/2227-7080/14/5/272</link>
	<description>Wearable sensor-based human activity recognition (HAR) has gained growing significance in healthcare monitoring and assisted living systems. Although considerable advances have been made in classifying dynamic movements, stationary activities&amp;mdash;such as sleeping, driving, and watching TV&amp;mdash;remain difficult to distinguish owing to their weak sensor signatures and limited discriminative cues. This paper presents the multi-scale hybrid attention temporal network (MHAT-Net), a deep learning framework whose key architectural novelty lies in the parallel (non-sequential) dual-pathway temporal modeling: a BiGRU branch and a transformer encoder branch operate simultaneously on the same spatially encoded representation, combined via a learnable attention-based fusion module. This design targets the underexplored problem of distinguishing stationary activities from weak inertial sensor signatures. The architecture is built upon three integrated components: (1) a multi-branch CNN with kernel sizes three, five, and seven combined with channel attention for adaptive spatial feature extraction across multiple temporal scales; (2) parallel bidirectional gated recurrent unit (BiGRU) and transformer encoder pathways for jointly capturing short-range sequential patterns and long-range temporal correlations; and (3) an attention-driven fusion module that adaptively weights the outputs of both temporal branches. The model was assessed on a publicly available benchmark comprising three motionless activity categories collected from 25 participants via smartphone sensors. In 5-fold cross-validation, MHAT-Net attained 97.42% (&amp;plusmn;4.69%) accuracy with accelerometer data and 92.31% (&amp;plusmn;0.31%) with gyroscope data, substantially exceeding the accuracies of five baseline architectures: CNN, LSTM, BiLSTM, GRU, and BiGRU. 
Ablation experiments identified multi-scale spatial feature extraction as the most influential module (2.21&amp;ndash;2.47% contribution), followed by the hybrid temporal modeling components. Cross-modality analysis confirmed that accelerometer signals yielded richer discriminative content for stationary activities, while MHAT-Net sustained consistent performance across both sensor types. The proposed integration of multi-scale spatial encoding, hybrid temporal modeling, and multi-level attention gives MHAT-Net the ability to reliably detect subtle activity-specific patterns, establishing a new benchmark in wearable sensor-based recognition for comprehensive daily behavior monitoring.</description>
	<pubDate>2026-04-30</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 272: Multi-Scale Hybrid Attention Temporal Network for Motionless Activity Using Smartphone Inertial Sensors</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/5/272">doi: 10.3390/technologies14050272</a></p>
	<p>Authors:
		Sakorn Mekruksavanich
		Anuchit Jitpattanakul
		</p>
	<p>Wearable sensor-based human activity recognition (HAR) has gained growing significance in healthcare monitoring and assisted living systems. Although considerable advances have been made in classifying dynamic movements, stationary activities&mdash;such as sleeping, driving, and watching TV&mdash;remain difficult to distinguish owing to their weak sensor signatures and limited discriminative cues. This paper presents the multi-scale hybrid attention temporal network (MHAT-Net), a deep learning framework whose key architectural novelty lies in the parallel (non-sequential) dual-pathway temporal modeling: a BiGRU branch and a transformer encoder branch operate simultaneously on the same spatially encoded representation, combined via a learnable attention-based fusion module. This design targets the underexplored problem of distinguishing stationary activities from weak inertial sensor signatures. The architecture is built upon three integrated components: (1) a multi-branch CNN with kernel sizes three, five, and seven combined with channel attention for adaptive spatial feature extraction across multiple temporal scales; (2) parallel bidirectional gated recurrent unit (BiGRU) and transformer encoder pathways for jointly capturing short-range sequential patterns and long-range temporal correlations; and (3) an attention-driven fusion module that adaptively weights the outputs of both temporal branches. The model was assessed on a publicly available benchmark comprising three motionless activity categories collected from 25 participants via smartphone sensors. In 5-fold cross-validation, MHAT-Net attained 97.42% (&plusmn;4.69%) accuracy with accelerometer data and 92.31% (&plusmn;0.31%) with gyroscope data, substantially exceeding the accuracies of five baseline architectures: CNN, LSTM, BiLSTM, GRU, and BiGRU. 
Ablation experiments identified multi-scale spatial feature extraction as the most influential module (2.21&ndash;2.47% contribution), followed by the hybrid temporal modeling components. Cross-modality analysis confirmed that accelerometer signals yielded richer discriminative content for stationary activities, while MHAT-Net sustained consistent performance across both sensor types. The proposed integration of multi-scale spatial encoding, hybrid temporal modeling, and multi-level attention gives MHAT-Net the ability to reliably detect subtle activity-specific patterns, establishing a new benchmark in wearable sensor-based recognition for comprehensive daily behavior monitoring.</p>
	]]></content:encoded>

	<dc:title>Multi-Scale Hybrid Attention Temporal Network for Motionless Activity Using Smartphone Inertial Sensors</dc:title>
			<dc:creator>Sakorn Mekruksavanich</dc:creator>
			<dc:creator>Anuchit Jitpattanakul</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14050272</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-30</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-30</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>272</prism:startingPage>
		<prism:doi>10.3390/technologies14050272</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/5/272</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/5/271">

	<title>Technologies, Vol. 14, Pages 271: Multi-Factor Statistical Analysis and Numerical Modeling of an Anode-Supported SOFC Fueled by Synthetic Diesel Using Taguchi Orthogonal Arrays</title>
	<link>https://www.mdpi.com/2227-7080/14/5/271</link>
	<description>The global transition toward carbon-neutral energy solutions has established Solid Oxide Fuel Cells (SOFCs) as a key technology for next-generation power generation. This work presents a comprehensive numerical study and multi-factor statistical analysis of an anode-supported SOFC fueled by synthetic diesel. A three-dimensional computational fluid dynamics model, validated against experimental data, was integrated with a Taguchi L27 orthogonal array to systematically evaluate the influence of six key parameters: temperature, fuel mass flow rate, operating pressure, current load, flow channel configuration, and methane molar fraction. Statistical analysis through the signal-to-noise ratio and analysis of variance identified the operating current as the most significant factor affecting cell voltage, followed by the fuel mass flow rate and temperature. The experiments showed that the highest levels of all factors (except for the current, which had the lowest level) maximize electrochemical performance while maintaining a steam-to-carbon ratio (S/C) within a range of 0.83 to 0.92, calculated based on total carbon content, ensuring sufficient humidification for internal reforming across all tested fuel compositions. Furthermore, a multiple linear regression model was developed as a computationally efficient surrogate, demonstrating exceptional predictive accuracy with an R2 of 0.9954 and a mean relative error of 1.76% across independent validation cases. These results provide a robust methodology for rapid design and sensitivity analysis of internal-reforming SOFCs, offering a precise tool for optimizing fuel utilization in high-temperature electrochemical systems.</description>
	<pubDate>2026-04-29</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 271: Multi-Factor Statistical Analysis and Numerical Modeling of an Anode-Supported SOFC Fueled by Synthetic Diesel Using Taguchi Orthogonal Arrays</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/5/271">doi: 10.3390/technologies14050271</a></p>
	<p>Authors:
		Alan Uriel Estrada-Herrera
		Ismael Urbina-Salas
		David Aaron Rodriguez-Alejandro
		José de Jesús Ramírez-Minguela
		Martin Valtierra-Rodriguez
		Francisco Elizalde-Blancas
		</p>
	<p>The global transition toward carbon-neutral energy solutions has established Solid Oxide Fuel Cells (SOFCs) as a key technology for next-generation power generation. This work presents a comprehensive numerical study and multi-factor statistical analysis of an anode-supported SOFC fueled by synthetic diesel. A three-dimensional computational fluid dynamics model, validated against experimental data, was integrated with a Taguchi L27 orthogonal array to systematically evaluate the influence of six key parameters: temperature, fuel mass flow rate, operating pressure, current load, flow channel configuration, and methane molar fraction. Statistical analysis through the signal-to-noise ratio and analysis of variance identified the operating current as the most significant factor affecting cell voltage, followed by the fuel mass flow rate and temperature. The experiments showed that the highest levels of all factors (except for the current, which had the lowest level) maximize electrochemical performance while maintaining a steam-to-carbon ratio (S/C) within a range of 0.83 to 0.92, calculated based on total carbon content, ensuring sufficient humidification for internal reforming across all tested fuel compositions. Furthermore, a multiple linear regression model was developed as a computationally efficient surrogate, demonstrating exceptional predictive accuracy with an R2 of 0.9954 and a mean relative error of 1.76% across independent validation cases. These results provide a robust methodology for rapid design and sensitivity analysis of internal-reforming SOFCs, offering a precise tool for optimizing fuel utilization in high-temperature electrochemical systems.</p>
	]]></content:encoded>

	<dc:title>Multi-Factor Statistical Analysis and Numerical Modeling of an Anode-Supported SOFC Fueled by Synthetic Diesel Using Taguchi Orthogonal Arrays</dc:title>
			<dc:creator>Alan Uriel Estrada-Herrera</dc:creator>
			<dc:creator>Ismael Urbina-Salas</dc:creator>
			<dc:creator>David Aaron Rodriguez-Alejandro</dc:creator>
			<dc:creator>José de Jesús Ramírez-Minguela</dc:creator>
			<dc:creator>Martin Valtierra-Rodriguez</dc:creator>
			<dc:creator>Francisco Elizalde-Blancas</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14050271</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-29</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-29</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>271</prism:startingPage>
		<prism:doi>10.3390/technologies14050271</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/5/271</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/5/270">

	<title>Technologies, Vol. 14, Pages 270: W-HiTS-Attention: A Unified Wavelet-Hierarchical Residual-Attention Framework for Accurate and Efficient Short-Term Wind Power Forecasting</title>
	<link>https://www.mdpi.com/2227-7080/14/5/270</link>
	<description>Short-term wind power forecasting is considered a critical challenge in smart grid management due to the nonlinear, unstable, and multi-scale noise characteristics of wind signals. Although recent advances in hybrid deep learning have improved the accuracy of short-term wind power forecasting, many state-of-the-art models usually consider signal denoising, residual decomposition, and attention mechanisms as independent modules without providing a unified solution. This paper proposes an end-to-end solution, W-HiTS-Attention (Wavelet Transform, N-stacked Hierarchical Interpolation for Time Series, Attention), which coherently integrates wavelet denoising, hierarchical residual learning from N-HiTS (Neural Hierarchical Interpolation), and an in-block self-attention mechanism. The proposed solution outperforms 21 benchmarks in accuracy, including state-of-the-art baselines such as N-BEATS, N-HiTS, TCN, Informer, Autoformer, LSTM, BiLSTM, GRU, and Prophet, achieving an RMSE of 55.56 W and an R2 of 0.9918. Furthermore, the results show that the proposed solution is efficient in terms of parameter count (0.033M), latency (0.0036 ms/sample), and training time, making it promising for low-latency inference in resource-constrained environments. The results show that the coherent integration of frequency preprocessing, hierarchical residual forecasting, and attention-based temporal refinement provides a robust, explainable, and deployable solution for short-term wind power forecasting.</description>
	<pubDate>2026-04-29</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 270: W-HiTS-Attention: A Unified Wavelet-Hierarchical Residual-Attention Framework for Accurate and Efficient Short-Term Wind Power Forecasting</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/5/270">doi: 10.3390/technologies14050270</a></p>
	<p>Authors:
		Kaoutar Ait Chaoui
		Hassan El Fadil
		Oumaima Choukai
		</p>
	<p>Short-term wind power forecasting is considered a critical challenge in smart grid management due to the nonlinear, unstable, and multi-scale noise characteristics of wind signals. Although recent advances in hybrid deep learning have improved the accuracy of short-term wind power forecasting, many state-of-the-art models usually consider signal denoising, residual decomposition, and attention mechanisms as independent modules without providing a unified solution. This paper proposes an end-to-end solution, W-HiTS-Attention (Wavelet Transform, N-stacked Hierarchical Interpolation for Time Series, Attention), which coherently integrates wavelet denoising, hierarchical residual learning from N-HiTS (Neural Hierarchical Interpolation), and an in-block self-attention mechanism. The proposed solution outperforms 21 benchmarks in accuracy, including state-of-the-art baselines such as N-BEATS, N-HiTS, TCN, Informer, Autoformer, LSTM, BiLSTM, GRU, and Prophet, achieving an RMSE of 55.56 W and an R2 of 0.9918. Furthermore, the results show that the proposed solution is efficient in terms of parameter count (0.033M), latency (0.0036 ms/sample), and training time, making it promising for low-latency inference in resource-constrained environments. The results show that the coherent integration of frequency preprocessing, hierarchical residual forecasting, and attention-based temporal refinement provides a robust, explainable, and deployable solution for short-term wind power forecasting.</p>
	]]></content:encoded>

	<dc:title>W-HiTS-Attention: A Unified Wavelet-Hierarchical Residual-Attention Framework for Accurate and Efficient Short-Term Wind Power Forecasting</dc:title>
			<dc:creator>Kaoutar Ait Chaoui</dc:creator>
			<dc:creator>Hassan El Fadil</dc:creator>
			<dc:creator>Oumaima Choukai</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14050270</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-29</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-29</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>270</prism:startingPage>
		<prism:doi>10.3390/technologies14050270</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/5/270</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/5/268">

	<title>Technologies, Vol. 14, Pages 268: Mapping the Industry 5.0 Landscape: Enabling Technologies, Human-Centered Systems, Sectoral Applications, and SDG Alignment&amp;mdash;A PRISMA-ScR Review</title>
	<link>https://www.mdpi.com/2227-7080/14/5/268</link>
	<description>Industry 5.0 is no longer understood merely as an extension of automation; it reflects a broader shift toward integrating technological advancement with human well-being, sustainability, and resilience. However, the literature reveals a fragmented landscape in which technological, industrial, and ecological dimensions are often treated separately, hindering a cohesive understanding of the paradigm. To address this gap, this study conducts a PRISMA-ScR-based review of 52 peer-reviewed studies (January 2021&amp;ndash;March 2026), structured around ten research questions that examine technologies, sectors, methods, human-centered design, sustainability alignment, and implementation barriers. The review demonstrates high reliability (Cohen&amp;rsquo;s &amp;kappa; = 0.981). Findings highlight artificial intelligence (86%), collaborative robotics (80%), IoT (71%), and digital twins (63%) as core technologies, typically integrated within human-in-the-loop systems. Manufacturing and healthcare lead adoption, reporting reduced physical workload and improved safety. Nonetheless, only 63% of studies explicitly align with sustainability frameworks, revealing a persistent gap. Thus, inclusive Industry 5.0 remains a promising yet still insufficiently consolidated concept.</description>
	<pubDate>2026-04-29</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 268: Mapping the Industry 5.0 Landscape: Enabling Technologies, Human-Centered Systems, Sectoral Applications, and SDG Alignment&mdash;A PRISMA-ScR Review</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/5/268">doi: 10.3390/technologies14050268</a></p>
	<p>Authors:
		Patricia Acosta-Vargas
		Luis Suarez
		Tomas Cuadrado
		Luis Salvador-Ullauri
		</p>
	<p>Industry 5.0 is no longer understood merely as an extension of automation; it reflects a broader shift toward integrating technological advancement with human well-being, sustainability, and resilience. However, the literature reveals a fragmented landscape in which technological, industrial, and ecological dimensions are often treated separately, hindering a cohesive understanding of the paradigm. To address this gap, this study conducts a PRISMA-ScR-based review of 52 peer-reviewed studies (January 2021&ndash;March 2026), structured around ten research questions that examine technologies, sectors, methods, human-centered design, sustainability alignment, and implementation barriers. The review demonstrates high reliability (Cohen&rsquo;s &kappa; = 0.981). Findings highlight artificial intelligence (86%), collaborative robotics (80%), IoT (71%), and digital twins (63%) as core technologies, typically integrated within human-in-the-loop systems. Manufacturing and healthcare lead adoption, reporting reduced physical workload and improved safety. Nonetheless, only 63% of studies explicitly align with sustainability frameworks, revealing a persistent gap. Thus, inclusive Industry 5.0 remains a promising yet still insufficiently consolidated concept.</p>
	]]></content:encoded>

	<dc:title>Mapping the Industry 5.0 Landscape: Enabling Technologies, Human-Centered Systems, Sectoral Applications, and SDG Alignment&amp;mdash;A PRISMA-ScR Review</dc:title>
			<dc:creator>Patricia Acosta-Vargas</dc:creator>
			<dc:creator>Luis Suarez</dc:creator>
			<dc:creator>Tomas Cuadrado</dc:creator>
			<dc:creator>Luis Salvador-Ullauri</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14050268</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-29</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-29</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>268</prism:startingPage>
		<prism:doi>10.3390/technologies14050268</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/5/268</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/5/269">

	<title>Technologies, Vol. 14, Pages 269: AI-Driven Digital Twins in Mining Operations: A Comprehensive Review</title>
	<link>https://www.mdpi.com/2227-7080/14/5/269</link>
	<description>The mining industry is going through a big digital change because of the use of new technologies that are meant to make work safer, more productive, and more environmentally friendly. AI-driven digital twins (AI-DTs) are one of these new ideas. They combine real-time data collection with smart analytics to make it possible for decisions to be made in a predictive, adaptive, and autonomous way. This paper provides a thorough systematic literature review (SLR) of AI-DT applications in mining operations, encompassing studies published from 2015 to 2025. According to the PRISMA method, 68 primary studies were chosen and looked at from many angles, such as publication trends, demographic analysis, research methods, data sources, mining domains, and the AI techniques that were used. The findings reveal an increasing scholarly interest in AI-DTs, characterized by a significant prevalence of machine learning and deep learning methodologies, alongside a preference for real-world sensory data to augment model accuracy. Most applications deal with physical assets, processing plants, and operational systems. Subsurface environments, on the other hand, are still not well understood. The review also points out some major problems with data integration, scalability, interoperability, and the fact that there has not been much large-scale industrial validation. Based on these findings, the paper points out important areas of research that need more work and suggests ways to move forward with the development and use of AI-DTs in mining. In conclusion, this study gives researchers and practitioners a clear plan for how to use AI-DTs to make mining operations more efficient, resilient, and long-lasting.</description>
	<pubDate>2026-04-29</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 269: AI-Driven Digital Twins in Mining Operations: A Comprehensive Review</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/5/269">doi: 10.3390/technologies14050269</a></p>
	<p>Authors:
		Shouki A. Ebad
		Aws I. Abueid
		Marwa Amara
		Rabie Ahmed
		</p>
	<p>The mining industry is going through a big digital change because of the use of new technologies that are meant to make work safer, more productive, and more environmentally friendly. AI-driven digital twins (AI-DTs) are one of these new ideas. They combine real-time data collection with smart analytics to make it possible for decisions to be made in a predictive, adaptive, and autonomous way. This paper provides a thorough systematic literature review (SLR) of AI-DT applications in mining operations, encompassing studies published from 2015 to 2025. According to the PRISMA method, 68 primary studies were chosen and looked at from many angles, such as publication trends, demographic analysis, research methods, data sources, mining domains, and the AI techniques that were used. The findings reveal an increasing scholarly interest in AI-DTs, characterized by a significant prevalence of machine learning and deep learning methodologies, alongside a preference for real-world sensory data to augment model accuracy. Most applications deal with physical assets, processing plants, and operational systems. Subsurface environments, on the other hand, are still not well understood. The review also points out some major problems with data integration, scalability, interoperability, and the fact that there has not been much large-scale industrial validation. Based on these findings, the paper points out important areas of research that need more work and suggests ways to move forward with the development and use of AI-DTs in mining. In conclusion, this study gives researchers and practitioners a clear plan for how to use AI-DTs to make mining operations more efficient, resilient, and long-lasting.</p>
	]]></content:encoded>

	<dc:title>AI-Driven Digital Twins in Mining Operations: A Comprehensive Review</dc:title>
			<dc:creator>Shouki A. Ebad</dc:creator>
			<dc:creator>Aws I. Abueid</dc:creator>
			<dc:creator>Marwa Amara</dc:creator>
			<dc:creator>Rabie Ahmed</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14050269</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-29</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-29</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Systematic Review</prism:section>
	<prism:startingPage>269</prism:startingPage>
		<prism:doi>10.3390/technologies14050269</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/5/269</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/5/267">

	<title>Technologies, Vol. 14, Pages 267: Green Computing for Critical Infrastructure: A Sustainability-First AI Framework for Energy-Efficient Anomaly Detection in Industrial Control Systems</title>
	<link>https://www.mdpi.com/2227-7080/14/5/267</link>
	<description>Industrial Control Systems (ICSs) face dual imperatives: protecting critical infrastructure from escalating cybersecurity threats while reducing the environmental impact of AI-powered defense mechanisms. Current deep learning anomaly detection approaches achieve security performance but consume substantial computational resources, creating an environmental paradox in which AI solutions designed to protect infrastructure contribute to carbon emissions at scale. This competition between cybersecurity effectiveness and sustainability objectives intensifies as regulatory frameworks increasingly mandate both security resilience and environmental accountability. This research presents Green-USAD, a sustainability-first AI framework that inverts traditional design paradigms by integrating energy efficiency as a primary architectural constraint from inception rather than applying compression retrospectively. The proposed approach advances green computing for critical infrastructure through four key contributions: (1) a compressed architecture with validation-guided convergence protocols achieving competitive detection performance with minimal computational overhead; (2) a multi-objective optimization framework using the Analytic Hierarchy Process to systematically balance security and sustainability requirements; (3) a hardware-validated energy measurement methodology addressing reproducibility challenges in green AI literature; and (4) a comprehensive evaluation demonstrating cross-dataset and edge-deployment viability. Validation on ICS benchmarks demonstrates that sustainability-first design achieves substantial energy reduction while maintaining operational detection accuracy, with measured training consumption below 1% of conventional approaches and proportional carbon emission reductions. Comparative analysis against post hoc compression baselines establishes fundamental advantages of design-from-inception over train-then-compress paradigms. 
Edge device deployment on resource-constrained hardware confirms real-world applicability for distributed industrial environments. Results establish that robust cybersecurity and environmental sustainability represent unified rather than competing objectives when intelligent systems are designed with sustainability as a foundational principle.</description>
	<pubDate>2026-04-29</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 267: Green Computing for Critical Infrastructure: A Sustainability-First AI Framework for Energy-Efficient Anomaly Detection in Industrial Control Systems</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/5/267">doi: 10.3390/technologies14050267</a></p>
	<p>Authors:
		Muhammad Muzamil Aslam
		Ali Tufail
		Yepeng Ding
		Liyanage Chandratilak De Silva
		Rosyzie Anna Awg Haji Mohd Apong
		Megat F. Zuhairi
		</p>
	<p>Industrial Control Systems (ICSs) face dual imperatives: protecting critical infrastructure from escalating cybersecurity threats while reducing the environmental impact of AI-powered defense mechanisms. Current deep learning anomaly detection approaches achieve security performance but consume substantial computational resources, creating an environmental paradox in which AI solutions designed to protect infrastructure contribute to carbon emissions at scale. This competition between cybersecurity effectiveness and sustainability objectives intensifies as regulatory frameworks increasingly mandate both security resilience and environmental accountability. This research presents Green-USAD, a sustainability-first AI framework that inverts traditional design paradigms by integrating energy efficiency as a primary architectural constraint from inception rather than applying compression retrospectively. The proposed approach advances green computing for critical infrastructure through four key contributions: (1) a compressed architecture with validation-guided convergence protocols achieving competitive detection performance with minimal computational overhead; (2) a multi-objective optimization framework using the Analytic Hierarchy Process to systematically balance security and sustainability requirements; (3) a hardware-validated energy measurement methodology addressing reproducibility challenges in green AI literature; and (4) a comprehensive evaluation demonstrating cross-dataset and edge-deployment viability. Validation on ICS benchmarks demonstrates that sustainability-first design achieves substantial energy reduction while maintaining operational detection accuracy, with measured training consumption below 1% of conventional approaches and proportional carbon emission reductions. Comparative analysis against post hoc compression baselines establishes fundamental advantages of design-from-inception over train-then-compress paradigms. 
Edge device deployment on resource-constrained hardware confirms real-world applicability for distributed industrial environments. Results establish that robust cybersecurity and environmental sustainability represent unified rather than competing objectives when intelligent systems are designed with sustainability as a foundational principle.</p>
	]]></content:encoded>

	<dc:title>Green Computing for Critical Infrastructure: A Sustainability-First AI Framework for Energy-Efficient Anomaly Detection in Industrial Control Systems</dc:title>
			<dc:creator>Muhammad Muzamil Aslam</dc:creator>
			<dc:creator>Ali Tufail</dc:creator>
			<dc:creator>Yepeng Ding</dc:creator>
			<dc:creator>Liyanage Chandratilak De Silva</dc:creator>
			<dc:creator>Rosyzie Anna Awg Haji Mohd Apong</dc:creator>
			<dc:creator>Megat F. Zuhairi</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14050267</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-29</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-29</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>267</prism:startingPage>
		<prism:doi>10.3390/technologies14050267</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/5/267</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/5/266">

	<title>Technologies, Vol. 14, Pages 266: Kuramoto Object-Centric Reinforcement Learning for Robotic Manipulation Tasks</title>
	<link>https://www.mdpi.com/2227-7080/14/5/266</link>
	<description>Model-based reinforcement learning (MBRL) is a promising approach for achieving high sample efficiency in learning control policies. The existing world models in MBRL typically represent the environment&amp;rsquo;s state as a single global latent vector. However, such representations limit the model&amp;rsquo;s ability to capture object interactions and reason about individual objects&amp;mdash;capabilities that are critical for visual object-oriented tasks&amp;mdash;and may lead to lower sample efficiency. To address this limitation, we propose Kuramoto Object-Centric Reinforcement Learning (KORL), a model-based agent that learns an object-centric world model. Our approach introduces a novel Kuramoto Slot Attention for Video (KSAVi) model that integrates Kuramoto oscillatory neurons with the Slot Attention module to robustly extract object representations. We design a world model that leverages these structured object-centric latents and predicts dynamics using graph neural networks, thereby incorporating an inductive bias for modeling object interactions. We evaluate KORL on a suite of visually diverse object-oriented robotic manipulation tasks and demonstrate that our method outperforms object-centric model-free and model-based approaches.</description>
	<pubDate>2026-04-28</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 266: Kuramoto Object-Centric Reinforcement Learning for Robotic Manipulation Tasks</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/5/266">doi: 10.3390/technologies14050266</a></p>
	<p>Authors:
		Leonid Ugadiarov
		Aleksandr Panov
		</p>
	<p>Model-based reinforcement learning (MBRL) is a promising approach for achieving high sample efficiency in learning control policies. The existing world models in MBRL typically represent the environment&rsquo;s state as a single global latent vector. However, such representations limit the model&rsquo;s ability to capture object interactions and reason about individual objects&mdash;capabilities that are critical for visual object-oriented tasks&mdash;and may lead to lower sample efficiency. To address this limitation, we propose Kuramoto Object-Centric Reinforcement Learning (KORL), a model-based agent that learns an object-centric world model. Our approach introduces a novel Kuramoto Slot Attention for Video (KSAVi) model that integrates Kuramoto oscillatory neurons with the Slot Attention module to robustly extract object representations. We design a world model that leverages these structured object-centric latents and predicts dynamics using graph neural networks, thereby incorporating an inductive bias for modeling object interactions. We evaluate KORL on a suite of visually diverse object-oriented robotic manipulation tasks and demonstrate that our method outperforms object-centric model-free and model-based approaches.</p>
	]]></content:encoded>

	<dc:title>Kuramoto Object-Centric Reinforcement Learning for Robotic Manipulation Tasks</dc:title>
			<dc:creator>Leonid Ugadiarov</dc:creator>
			<dc:creator>Aleksandr Panov</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14050266</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-28</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-28</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>266</prism:startingPage>
		<prism:doi>10.3390/technologies14050266</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/5/266</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/5/265">

	<title>Technologies, Vol. 14, Pages 265: Comparative Evaluation of Deep Learning Object Detectors for Embedded Weed Detection on Resource-Constrained Platforms</title>
	<link>https://www.mdpi.com/2227-7080/14/5/265</link>
	<description>Computer vision&amp;ndash;based weed detection plays a critical role in agricultural robotics, enabling accurate, selective weeding. These systems operate on resource-constrained embedded platforms, which introduces a significant trade-off between accuracy and efficiency. This study presents a comparative evaluation of six detection models (YOLOv11n, YOLOv11s, SSD-Lite, NanoDet, Faster R-CNN, RT-DETR) for agro-robotic applications, measuring precision, recall, mAP@0.5, and runtime on low-power hardware. NanoDet achieved the highest detection accuracy (precision 98.6%, recall 94.2%, mAP@0.5 97.7%). YOLOv11s demonstrated similar performance (mAP@0.5: 96.1%) but required more computation. YOLOv11n provides the most favourable balance between accuracy and throughput (mAP@0.5: 94.6%, 207 FPS on a workstation). On Raspberry Pi 5, light models achieved 3&amp;ndash;5 FPS. RT-DETR and Faster R-CNN exhibited high latency (3112&amp;ndash;6500 ms/frame), which prevents real-time operation. NanoDet excelled in detection, while YOLOv11n provides the best balance between accuracy and efficiency for limited devices.</description>
	<pubDate>2026-04-27</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 265: Comparative Evaluation of Deep Learning Object Detectors for Embedded Weed Detection on Resource-Constrained Platforms</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/5/265">doi: 10.3390/technologies14050265</a></p>
	<p>Authors:
		Nurtay Albanbay
		Yerik Nugman
		Mukhagali Sagyntay
		Azamat Mustafa
		Ramona Blanes
		Algazy Zhauyt
		Rustem Kaiyrov
		Nurgali Nurgozhayev
		</p>
	<p>Computer vision&ndash;based weed detection plays a critical role in agricultural robotics, enabling accurate, selective weeding. These systems operate on resource-constrained embedded platforms, which introduces a significant trade-off between accuracy and efficiency. This study presents a comparative evaluation of six detection models (YOLOv11n, YOLOv11s, SSD-Lite, NanoDet, Faster R-CNN, RT-DETR) for agro-robotic applications, measuring precision, recall, mAP@0.5, and runtime on low-power hardware. NanoDet achieved the highest detection accuracy (precision 98.6%, recall 94.2%, mAP@0.5 97.7%). YOLOv11s demonstrated similar performance (mAP@0.5: 96.1%) but required more computation. YOLOv11n provides the most favourable balance between accuracy and throughput (mAP@0.5: 94.6%, 207 FPS on a workstation). On Raspberry Pi 5, light models achieved 3&ndash;5 FPS. RT-DETR and Faster R-CNN exhibited high latency (3112&ndash;6500 ms/frame), which prevents real-time operation. NanoDet excelled in detection, while YOLOv11n provides the best balance between accuracy and efficiency for limited devices.</p>
	]]></content:encoded>

	<dc:title>Comparative Evaluation of Deep Learning Object Detectors for Embedded Weed Detection on Resource-Constrained Platforms</dc:title>
			<dc:creator>Nurtay Albanbay</dc:creator>
			<dc:creator>Yerik Nugman</dc:creator>
			<dc:creator>Mukhagali Sagyntay</dc:creator>
			<dc:creator>Azamat Mustafa</dc:creator>
			<dc:creator>Ramona Blanes</dc:creator>
			<dc:creator>Algazy Zhauyt</dc:creator>
			<dc:creator>Rustem Kaiyrov</dc:creator>
			<dc:creator>Nurgali Nurgozhayev</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14050265</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-27</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-27</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>265</prism:startingPage>
		<prism:doi>10.3390/technologies14050265</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/5/265</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/5/263">

	<title>Technologies, Vol. 14, Pages 263: Spiral-Loop Sequential-Phase-Fed Radial-Sector Patch CP Antenna with Metasurface Superstrate and Parasitic Elements for S-Band CubeSats</title>
	<link>https://www.mdpi.com/2227-7080/14/5/263</link>
	<description>This work presents a spiral-loop sequential-phase (SLSP)-fed radial-sector patch circularly polarized (CP) antenna for S-band CubeSat platforms. The architecture stacks three RO4003C substrates in an aluminum enclosure: a lower layer with tapered-blade parasitic elements, a middle layer with the SLSP feed and four radial-sector patches, and an upper tilted hexagonal metasurface superstrate separated by an air-gap. Characteristic mode analysis is used to realize an orthogonal modal pair. A prototype integrated on a CubeSat structure was measured in an anechoic chamber and validated under vibration and thermal-vacuum testing per ECSS/NASA practices. The antenna achieves a measured return loss bandwidth of 2&amp;ndash;2.34 GHz, an axial ratio bandwidth of 2.04&amp;ndash;2.25 GHz, and a maximum gain of 7.24 dBic at 2.18 GHz. The metasurface and parasitic elements enhance bandwidth while maintaining boresight CP. The novelty lies in the integration of SLSP-fed radial-sector patches with a tilted hexagonal metasurface superstrate and tapered-blade parasitic elements within a compact stacked configuration, making the proposed antenna well suited for CubeSat S-band applications.</description>
	<pubDate>2026-04-27</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 263: Spiral-Loop Sequential-Phase-Fed Radial-Sector Patch CP Antenna with Metasurface Superstrate and Parasitic Elements for S-Band CubeSats</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/5/263">doi: 10.3390/technologies14050263</a></p>
	<p>Authors:
		Apiwat Jirawattanaphol
		Nathapat Supreeyatitikul
		Kentaro Kitamura
		Mengu Cho
		</p>
	<p>This work presents a spiral-loop sequential-phase (SLSP)-fed radial-sector patch circularly polarized (CP) antenna for S-band CubeSat platforms. The architecture stacks three RO4003C substrates in an aluminum enclosure: a lower layer with tapered-blade parasitic elements, a middle layer with the SLSP feed and four radial-sector patches, and an upper tilted hexagonal metasurface superstrate separated by an air-gap. Characteristic mode analysis is used to realize an orthogonal modal pair. A prototype integrated on a CubeSat structure was measured in an anechoic chamber and validated under vibration and thermal-vacuum testing per ECSS/NASA practices. The antenna achieves a measured return loss bandwidth of 2&ndash;2.34 GHz, an axial ratio bandwidth of 2.04&ndash;2.25 GHz, and a maximum gain of 7.24 dBic at 2.18 GHz. The metasurface and parasitic elements enhance bandwidth while maintaining boresight CP. The novelty lies in the integration of SLSP-fed radial-sector patches with a tilted hexagonal metasurface superstrate and tapered-blade parasitic elements within a compact stacked configuration, making the proposed antenna well suited for CubeSat S-band applications.</p>
	]]></content:encoded>

	<dc:title>Spiral-Loop Sequential-Phase-Fed Radial-Sector Patch CP Antenna with Metasurface Superstrate and Parasitic Elements for S-Band CubeSats</dc:title>
			<dc:creator>Apiwat Jirawattanaphol</dc:creator>
			<dc:creator>Nathapat Supreeyatitikul</dc:creator>
			<dc:creator>Kentaro Kitamura</dc:creator>
			<dc:creator>Mengu Cho</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14050263</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-27</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-27</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>263</prism:startingPage>
		<prism:doi>10.3390/technologies14050263</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/5/263</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/5/264">

	<title>Technologies, Vol. 14, Pages 264: Behavioral Lifestyle Factors Versus Medical History in Determining the Predictive Power of Machine Learning-Based Obesity Classification</title>
	<link>https://www.mdpi.com/2227-7080/14/5/264</link>
	<description>Obesity represents a multifactorial health condition influenced by complex interactions among behavioral, environmental, and physiological factors, yet the relative predictive importance of lifestyle behaviors versus medical history indicators remains incompletely characterized. This investigation employed a three-phase machine learning approach to systematically compare the predictive power of behavioral lifestyle factors, medical history variables, and their integration for obesity classification. Phase A utilized a dedicated obesity dataset containing demographic, dietary, and lifestyle predictors to perform seven-category obesity classification, achieving 81.65% test accuracy with an optimized Random Forest ensemble and macro-averaged F1-score of 0.82. Phase B addressed binary obesity classification using health indicators from diabetes screening data, where a Gradient Boosting model with optimized decision threshold achieved 67.84% accuracy and AUC of 0.735, demonstrating substantially lower performance than behavioral predictors. Phase C integrated both feature sets into a unified model, where Gradient Boosting achieved 68.31% accuracy and AUC of 0.747, representing marginal improvement over medical history alone. Cross-validated performance comparisons revealed that behavioral lifestyle factors provided superior discriminative power compared to medical history indicators, with dedicated lifestyle predictors achieving 13.81 percentage points higher accuracy than medical indicators. Feature importance analysis confirmed that transportation mode, physical activity patterns, and dietary behaviors ranked among the most influential predictors in the combined model. These findings demonstrate that behavioral lifestyle factors constitute stronger obesity predictors than medical history variables, with implications for clinical screening strategies and public health intervention targeting that prioritize lifestyle assessment and modification programs.</description>
	<pubDate>2026-04-27</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 264: Behavioral Lifestyle Factors Versus Medical History in Determining the Predictive Power of Machine Learning-Based Obesity Classification</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/5/264">doi: 10.3390/technologies14050264</a></p>
	<p>Authors:
		Ann Murickan
		Milan Toma
		</p>
	<p>Obesity represents a multifactorial health condition influenced by complex interactions among behavioral, environmental, and physiological factors, yet the relative predictive importance of lifestyle behaviors versus medical history indicators remains incompletely characterized. This investigation employed a three-phase machine learning approach to systematically compare the predictive power of behavioral lifestyle factors, medical history variables, and their integration for obesity classification. Phase A utilized a dedicated obesity dataset containing demographic, dietary, and lifestyle predictors to perform seven-category obesity classification, achieving 81.65% test accuracy with an optimized Random Forest ensemble and macro-averaged F1-score of 0.82. Phase B addressed binary obesity classification using health indicators from diabetes screening data, where a Gradient Boosting model with optimized decision threshold achieved 67.84% accuracy and AUC of 0.735, demonstrating substantially lower performance than behavioral predictors. Phase C integrated both feature sets into a unified model, where Gradient Boosting achieved 68.31% accuracy and AUC of 0.747, representing marginal improvement over medical history alone. Cross-validated performance comparisons revealed that behavioral lifestyle factors provided superior discriminative power compared to medical history indicators, with dedicated lifestyle predictors achieving 13.81 percentage points higher accuracy than medical indicators. Feature importance analysis confirmed that transportation mode, physical activity patterns, and dietary behaviors ranked among the most influential predictors in the combined model. These findings demonstrate that behavioral lifestyle factors constitute stronger obesity predictors than medical history variables, with implications for clinical screening strategies and public health intervention targeting that prioritize lifestyle assessment and modification programs.</p>
	]]></content:encoded>

	<dc:title>Behavioral Lifestyle Factors Versus Medical History in Determining the Predictive Power of Machine Learning-Based Obesity Classification</dc:title>
			<dc:creator>Ann Murickan</dc:creator>
			<dc:creator>Milan Toma</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14050264</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-27</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-27</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>264</prism:startingPage>
		<prism:doi>10.3390/technologies14050264</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/5/264</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/5/262">

	<title>Technologies, Vol. 14, Pages 262: Software-Defined Radio Experimental Validation of an OTFS-Based ISAC for Velocity Estimation in an ARoF Setup</title>
	<link>https://www.mdpi.com/2227-7080/14/5/262</link>
	<description>OTFS, proposed for next-generation wireless communication systems such as 6G mobile networks, incorporates ISAC into DD-domain multiplexing, enabling simple detection of distance, velocity, and movement direction. This paper presents a SDR implementation of OTFS in an ARoF setup with wireless RF transmission. The main goal of this study is to validate and evaluate the implemented OTFS with static objects and to explore the quality of velocity and direction estimation in sensing scenarios involving moving objects. For the BER measurements, experiments were performed using a static object while varying the SDR transmitter power and introducing additional CFO. Experimental validation shows a minimum BER &#8804; 5 &#215; 10^-7 with 0 errors per 2 &#215; 10^6 bits. Data transmission at fractional Doppler yielded a BER &#8776; 0.09, which is attributed to the use of a LMMSE channel estimator, that is not optimal for channels with fractional Doppler. Estimation of the velocity of a mobile object with an absolute velocity of |v|=0.15 m/s yielded a RMSE = 0.0839 m/s.</description>
	<pubDate>2026-04-27</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 262: Software-Defined Radio Experimental Validation of an OTFS-Based ISAC for Velocity Estimation in an ARoF Setup</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/5/262">doi: 10.3390/technologies14050262</a></p>
	<p>Authors:
		Nikolajs Tihomorskis
		Sandis Migla
		Omid Abbassi Aghda
		Kristaps Rubuls
		Niks Krumins
		Olesja Novikova
		Janis Braunfelds
		Sandis Spolitis
		Oskars Ozolins
		Arturs Aboltins
		</p>
	<p>OTFS, proposed for next-generation wireless communication systems such as 6G mobile networks, incorporates ISAC into DD-domain multiplexing, enabling simple detection of distance, velocity, and movement direction. This paper presents a SDR implementation of OTFS in an ARoF setup with wireless RF transmission. The main goal of this study is to validate and evaluate the implemented OTFS with static objects and to explore the quality of velocity and direction estimation in sensing scenarios involving moving objects. For the BER measurements, experiments were performed using a static object while varying the SDR transmitter power and introducing additional CFO. Experimental validation shows a minimum BER &le; 5 &times; 10<sup>&minus;7</sup> with 0 errors per 2 &times; 10<sup>6</sup> bits. Data transmission at fractional Doppler yielded a BER &asymp; 0.09, which is attributed to the use of a LMMSE channel estimator, that is not optimal for channels with fractional Doppler. Estimation of the velocity of a mobile object with an absolute velocity of |v|=0.15 m/s yielded a RMSE = 0.0839 m/s.</p>
	]]></content:encoded>

	<dc:title>Software-Defined Radio Experimental Validation of an OTFS-Based ISAC for Velocity Estimation in an ARoF Setup</dc:title>
			<dc:creator>Nikolajs Tihomorskis</dc:creator>
			<dc:creator>Sandis Migla</dc:creator>
			<dc:creator>Omid Abbassi Aghda</dc:creator>
			<dc:creator>Kristaps Rubuls</dc:creator>
			<dc:creator>Niks Krumins</dc:creator>
			<dc:creator>Olesja Novikova</dc:creator>
			<dc:creator>Janis Braunfelds</dc:creator>
			<dc:creator>Sandis Spolitis</dc:creator>
			<dc:creator>Oskars Ozolins</dc:creator>
			<dc:creator>Arturs Aboltins</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14050262</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-27</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-27</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>262</prism:startingPage>
		<prism:doi>10.3390/technologies14050262</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/5/262</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/5/261">

	<title>Technologies, Vol. 14, Pages 261: Statistical Evaluation of Robot Trajectories in Automated Dimensional Measurements</title>
	<link>https://www.mdpi.com/2227-7080/14/5/261</link>
	<description>The influence of a robot&#8217;s manipulation can be observed in a robotic measurement system. Different robot end-effector trajectories yield different robot end-effector accuracy and repeatability errors. Trajectory parameters, robot motion type, velocity, and length of motion were identified as influential sources. A robot arm was used to insert measuring objects into the measurement device for dimensional measurements. In the first part, the measurement datasets for linear and joint robot motions were compared for three different velocities and four motion lengths. The influence of the number of active joints in the robot&#8217;s motion was compared for two velocities and four magnitudes of joint rotation. Dimensional measurement variability was analysed using measurement system analysis (MSA), and the statistical influence of trajectory parameters was further addressed by analysis of variance (ANOVA). All identified trajectory parameters have a statistically significant impact on measurement variability, reflecting the robot end-effector&#8217;s accuracy and repeatability errors. Linear motion provides higher measurement variability up to 20%, a velocity increase that is typically up to 25&#8211;35% and motion length that is typically up to 15&#8211;35%.</description>
	<pubDate>2026-04-26</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 261: Statistical Evaluation of Robot Trajectories in Automated Dimensional Measurements</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/5/261">doi: 10.3390/technologies14050261</a></p>
	<p>Authors:
		Aleš Zore
		Marko Munih
		</p>
	<p>The influence of a robot&rsquo;s manipulation can be observed in a robotic measurement system. Different robot end-effector trajectories yield different robot end-effector accuracy and repeatability errors. Trajectory parameters, robot motion type, velocity, and length of motion were identified as influential sources. A robot arm was used to insert measuring objects into the measurement device for dimensional measurements. In the first part, the measurement datasets for linear and joint robot motions were compared for three different velocities and four motion lengths. The influence of the number of active joints in the robot&rsquo;s motion was compared for two velocities and four magnitudes of joint rotation. Dimensional measurement variability was analysed using measurement system analysis (MSA), and the statistical influence of trajectory parameters was further addressed by analysis of variance (ANOVA). All identified trajectory parameters have a statistically significant impact on measurement variability, reflecting the robot end-effector&rsquo;s accuracy and repeatability errors. Linear motion provides higher measurement variability up to 20%, a velocity increase that is typically up to 25&ndash;35% and motion length that is typically up to 15&ndash;35%.</p>
	]]></content:encoded>

	<dc:title>Statistical Evaluation of Robot Trajectories in Automated Dimensional Measurements</dc:title>
			<dc:creator>Aleš Zore</dc:creator>
			<dc:creator>Marko Munih</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14050261</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-26</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-26</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>261</prism:startingPage>
		<prism:doi>10.3390/technologies14050261</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/5/261</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/5/260">

	<title>Technologies, Vol. 14, Pages 260: AI-Driven Approaches to System Requirements and Test Case Generation: A New Paradigm in Software Engineering</title>
	<link>https://www.mdpi.com/2227-7080/14/5/260</link>
	<description>Artificial intelligence (AI) is a new paradigm in software engineering that automates key phases of the development cycle. The methods of creating test cases and designing requirements are still mostly manual and prone to error. Unclear requirements can result in expensive rework and undiscovered defects in the development process. Scalability and dependability are crucial concerns in complex systems. These shortcomings highlight the need for improved methods to enhance accuracy and consistency throughout these critical phases. To generate well-organized system requirements, this article outlines a clear strategy that leverages Extended Finite State Machine models as formal inputs for large language models (LLMs). Five system models are used to assess the suggested framework. The comparison analysis evaluates the accuracy, completeness, test coverage, and runtime efficiency of the artifacts. Along with a comparison with a human-made reference standard, the study evaluates the performance of LLMs such as ChatGPT-5, Claude Sonnet 4.5, and DeepSeek V3.2. The findings demonstrate that AI models can achieve human-comparable accuracy by exceeding 90% with EFSM-based prompting. Claude Sonnet generated the most reliable findings, ChatGPT demonstrated exceptional flexibility, and DeepSeek demonstrated exceptional runtime economy. These findings show that human&#8211;AI workflows provide a new paradigm in scalable, traceable, and reproducible system engineering.</description>
	<pubDate>2026-04-25</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 260: AI-Driven Approaches to System Requirements and Test Case Generation: A New Paradigm in Software Engineering</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/5/260">doi: 10.3390/technologies14050260</a></p>
	<p>Authors:
		Ziad Salem
		Luay Tahat
		Yasmeen Humaidan
		Noor Tahat
		</p>
	<p>Artificial intelligence (AI) is a new paradigm in software engineering that automates key phases of the development cycle. The methods of creating test cases and designing requirements are still mostly manual and prone to error. Unclear requirements can result in expensive rework and undiscovered defects in the development process. Scalability and dependability are crucial concerns in complex systems. These shortcomings highlight the need for improved methods to enhance accuracy and consistency throughout these critical phases. To generate well-organized system requirements, this article outlines a clear strategy that leverages Extended Finite State Machine models as formal inputs for large language models (LLMs). Five system models are used to assess the suggested framework. The comparison analysis evaluates the accuracy, completeness, test coverage, and runtime efficiency of the artifacts. Along with a comparison with a human-made reference standard, the study evaluates the performance of LLMs such as ChatGPT-5, Claude Sonnet 4.5, and DeepSeek V3.2. The findings demonstrate that AI models can achieve human-comparable accuracy by exceeding 90% with EFSM-based prompting. Claude Sonnet generated the most reliable findings, ChatGPT demonstrated exceptional flexibility, and DeepSeek demonstrated exceptional runtime economy. These findings show that human&ndash;AI workflows provide a new paradigm in scalable, traceable, and reproducible system engineering.</p>
	]]></content:encoded>

	<dc:title>AI-Driven Approaches to System Requirements and Test Case Generation: A New Paradigm in Software Engineering</dc:title>
			<dc:creator>Ziad Salem</dc:creator>
			<dc:creator>Luay Tahat</dc:creator>
			<dc:creator>Yasmeen Humaidan</dc:creator>
			<dc:creator>Noor Tahat</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14050260</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-25</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-25</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>260</prism:startingPage>
		<prism:doi>10.3390/technologies14050260</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/5/260</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/5/257">

	<title>Technologies, Vol. 14, Pages 257: Charger/Discharger with a Limited Current Derivative and Regulated Bus Voltage: A Simultaneous Converter-Controller Design</title>
	<link>https://www.mdpi.com/2227-7080/14/5/257</link>
	<description>This paper proposes a co-design methodology for the power and control stages of a bidirectional battery charger/discharger based on a boost converter topology. The approach ensures safe operation by limiting the battery current derivative, preventing abrupt transients that could degrade battery lifespan. The control strategy combines a cascade structure with an inner sliding mode current controller (for robustness and fast response) and an outer adaptive PI voltage loop (to regulate the DC-link voltage under varying load conditions). Additionally, the design constrains the switching frequency to reduce power losses. Experimental validation on a prototype converter demonstrates the effectiveness of the co-design framework, showing precise current/voltage regulation, adherence to switching frequency limits, and compliance with battery charging/discharging requirements. The results highlight the methodology&#8217;s potential to enhance efficiency and reliability in energy storage systems. The dynamic restrictions, overshoot lower than 5%, settling time shorter than 5 ms, and a battery current limitation less than 50 A/ms were always met with SMC and, in some cases, with the PI controller, but the results with SMC were always better: lower overshoot, shorter settling time, and greater restriction on the derivative of the battery current. In addition, the SMC system was 2.5&#8211;5.0% more efficient than the PI controller.</description>
	<pubDate>2026-04-25</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 257: Charger/Discharger with a Limited Current Derivative and Regulated Bus Voltage: A Simultaneous Converter-Controller Design</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/5/257">doi: 10.3390/technologies14050257</a></p>
	<p>Authors:
		Carlos Andrés Ramos-Paja
		Elkin Edilberto Henao-Bravo
		Sergio Ignacio Serna-Garcés
		</p>
	<p>This paper proposes a co-design methodology for the power and control stages of a bidirectional battery charger/discharger based on a boost converter topology. The approach ensures safe operation by limiting the battery current derivative, preventing abrupt transients that could degrade battery lifespan. The control strategy combines a cascade structure with an inner sliding mode current controller (for robustness and fast response) and an outer adaptive PI voltage loop (to regulate the DC-link voltage under varying load conditions). Additionally, the design constrains the switching frequency to reduce power losses. Experimental validation on a prototype converter demonstrates the effectiveness of the co-design framework, showing precise current/voltage regulation, adherence to switching frequency limits, and compliance with battery charging/discharging requirements. The results highlight the methodology&rsquo;s potential to enhance efficiency and reliability in energy storage systems. The dynamic restrictions, overshoot lower than 5%, settling time shorter than 5 ms, and a battery current limitation less than 50 A/ms were always met with SMC and, in some cases, with the PI controller, but the results with SMC were always better: lower overshoot, shorter settling time, and greater restriction on the derivative of the battery current. In addition, the SMC system was 2.5&ndash;5.0% more efficient than the PI controller.</p>
	]]></content:encoded>

	<dc:title>Charger/Discharger with a Limited Current Derivative and Regulated Bus Voltage: A Simultaneous Converter-Controller Design</dc:title>
			<dc:creator>Carlos Andrés Ramos-Paja</dc:creator>
			<dc:creator>Elkin Edilberto Henao-Bravo</dc:creator>
			<dc:creator>Sergio Ignacio Serna-Garcés</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14050257</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-25</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-25</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>257</prism:startingPage>
		<prism:doi>10.3390/technologies14050257</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/5/257</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/5/259">

	<title>Technologies, Vol. 14, Pages 259: Antenna Performance and Effects of Concealment Within Building Structures: A Comprehensive Review</title>
	<link>https://www.mdpi.com/2227-7080/14/5/259</link>
	<description>The rapid expansion of wireless communication in urban environments requires antenna systems that balance high electromagnetic performance with stringent aesthetic and security constraints. This review examines recent advances in concealed antenna technologies integrated into building structures, with a focus on performance variation, material-induced attenuation, and emerging concealment strategies. Techniques such as transparent conductors on glass, structural embedding within walls, and camouflage-based designs are shown to significantly influence resonance behavior, radiation efficiency, and pattern characteristics compared to free-space operation. Despite these challenges, optimized solutions including transparent conductive oxide arrays, wideband embedded antenna geometries, and metasurface-enhanced window structures can partially recover performance while maintaining optical transparency above 70%. Material loading effects are found to induce resonant frequency shifts of approximately 10&#8211;44%, depending on dielectric properties and environmental conditions. Transparent antenna arrays achieve gains ranging from 0.34 to 13.2 dBi, while signal-transmissive wall systems demonstrate transmission improvements of up to 22 dB relative to untreated building materials. These technologies enable a wide range of applications, including 5G and beyond-5G cellular networks across sub-6 GHz and millimeter-wave bands, as well as Internet of Things systems and smart city infrastructure. However, key challenges remain, including the need for comprehensive characterization of building material electromagnetic properties, optimization of multilayer structural environments, and the development of standardized design and evaluation methodologies. This review provides a unified framework for understanding the tradeoffs associated with antenna concealment and identifies critical research directions for the development of building-integrated wireless systems in next-generation communication networks.</description>
	<pubDate>2026-04-25</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 259: Antenna Performance and Effects of Concealment Within Building Structures: A Comprehensive Review</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/5/259">doi: 10.3390/technologies14050259</a></p>
	<p>Authors:
		Mirza Farrukh Baig
		Ervina Efzan Mhd Noor
		</p>
	<p>The rapid expansion of wireless communication in urban environments requires antenna systems that balance high electromagnetic performance with stringent aesthetic and security constraints. This review examines recent advances in concealed antenna technologies integrated into building structures, with a focus on performance variation, material-induced attenuation, and emerging concealment strategies. Techniques such as transparent conductors on glass, structural embedding within walls, and camouflage-based designs are shown to significantly influence resonance behavior, radiation efficiency, and pattern characteristics compared to free-space operation. Despite these challenges, optimized solutions including transparent conductive oxide arrays, wideband embedded antenna geometries, and metasurface-enhanced window structures can partially recover performance while maintaining optical transparency above 70%. Material loading effects are found to induce resonant frequency shifts of approximately 10&ndash;44%, depending on dielectric properties and environmental conditions. Transparent antenna arrays achieve gains ranging from 0.34 to 13.2 dBi, while signal-transmissive wall systems demonstrate transmission improvements of up to 22 dB relative to untreated building materials. These technologies enable a wide range of applications, including 5G and beyond-5G cellular networks across sub-6 GHz and millimeter-wave bands, as well as Internet of Things systems and smart city infrastructure. However, key challenges remain, including the need for comprehensive characterization of building material electromagnetic properties, optimization of multilayer structural environments, and the development of standardized design and evaluation methodologies. This review provides a unified framework for understanding the tradeoffs associated with antenna concealment and identifies critical research directions for the development of building-integrated wireless systems in next-generation communication networks.</p>
	]]></content:encoded>

	<dc:title>Antenna Performance and Effects of Concealment Within Building Structures: A Comprehensive Review</dc:title>
			<dc:creator>Mirza Farrukh Baig</dc:creator>
			<dc:creator>Ervina Efzan Mhd Noor</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14050259</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-25</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-25</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>259</prism:startingPage>
		<prism:doi>10.3390/technologies14050259</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/5/259</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/5/258">

	<title>Technologies, Vol. 14, Pages 258: ESG-Graph: Hierarchical Residual Graph Attention Network with Analyst-Defined ESG Taxonomy</title>
	<link>https://www.mdpi.com/2227-7080/14/5/258</link>
	<description>Environmental, Social, and Governance (ESG) text classification is important for applications in sustainable finance. However, it remains a challenging task due to domain terminology and regulatory constraints. While transformer-based models achieve strong predictive performance, they often lead to high energy costs and provide limited interpretability. To address these limitations, we introduce ESG-Graph, a lightweight and interpretable graph-based framework for modeling ESG disclosures. In our approach, each sentence is represented as a token-level dependency graph augmented with virtual nodes initialized from a European Sustainability Reporting Standards (ESRS)-based taxonomy, enabling the addition of new ESG concepts without retraining. A multi-layer Graph Attention Network is used instead of transformer encoders, allowing grammatical structure and domain semantics to be modeled jointly. Experiments on three ESG benchmark datasets show that ESG-Graph achieves performance comparable to efficient transformer baselines while consuming up to 60&#215; less energy and using 10&#215; fewer parameters. Additional attribution and ablation studies suggest the method&#8217;s policy alignment, interpretability, and robustness.</description>
	<pubDate>2026-04-25</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 258: ESG-Graph: Hierarchical Residual Graph Attention Network with Analyst-Defined ESG Taxonomy</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/5/258">doi: 10.3390/technologies14050258</a></p>
	<p>Authors:
		Yasser Elouargui
		Abdellatif Sassioui
		Meriyem Chergui
		Rachid Benouini
		Mohamed Elkamili
		Elmehdi Benyoussef
		Mohammed Ouzzif
		</p>
	<p>Environmental, Social, and Governance (ESG) text classification is important for applications in sustainable finance. However, it remains a challenging task due to domain terminology and regulatory constraints. While transformer-based models achieve strong predictive performance, they often lead to high energy costs and provide limited interpretability. To address these limitations, we introduce ESG-Graph, a lightweight and interpretable graph-based framework for modeling ESG disclosures. In our approach, each sentence is represented as a token-level dependency graph augmented with virtual nodes initialized from a European Sustainability Reporting Standards (ESRS)-based taxonomy, enabling the addition of new ESG concepts without retraining. A multi-layer Graph Attention Network is used instead of transformer encoders, allowing grammatical structure and domain semantics to be modeled jointly. Experiments on three ESG benchmark datasets show that ESG-Graph achieves performance comparable to efficient transformer baselines while consuming up to 60&times; less energy and using 10&times; fewer parameters. Additional attribution and ablation studies suggest the method&rsquo;s policy alignment, interpretability, and robustness.</p>
	]]></content:encoded>

	<dc:title>ESG-Graph: Hierarchical Residual Graph Attention Network with Analyst-Defined ESG Taxonomy</dc:title>
			<dc:creator>Yasser Elouargui</dc:creator>
			<dc:creator>Abdellatif Sassioui</dc:creator>
			<dc:creator>Meriyem Chergui</dc:creator>
			<dc:creator>Rachid Benouini</dc:creator>
			<dc:creator>Mohamed Elkamili</dc:creator>
			<dc:creator>Elmehdi Benyoussef</dc:creator>
			<dc:creator>Mohammed Ouzzif</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14050258</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-25</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-25</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>258</prism:startingPage>
		<prism:doi>10.3390/technologies14050258</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/5/258</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/5/256">

	<title>Technologies, Vol. 14, Pages 256: Eco-Friendly Corrosion Inhibition of OLC45 Steel in H2SO4 Solution Using Rhus typhina L. Plant Extracts</title>
	<link>https://www.mdpi.com/2227-7080/14/5/256</link>
	<description>This study focuses on the evaluation of eco-friendly corrosion inhibitors derived from extracts of Rhus typhina L. leaves, collected in August during the summer season, on OLC45 metal surfaces in a 0.5 M H2SO4 corrosive environment. The extracts were obtained using the microwave extraction technique and characterized by HPLC. The protective properties of OLC45 coated with LESRT (leaf extract collected in summer from Rhus typhina L.) were examined by potentiostatic and potentiodynamic polarization procedures and electrochemical impedance spectroscopy (EIS) in 0.5 M H2SO4. The application of the Langmuir isotherm revealed high values of the adsorption constant and standard free energies (ΔG°ads), suggesting a possible mixed adsorption process with an increased tendency toward chemisorption. The influence of temperature on the electrochemical behavior of OLC45 samples in H2SO4, both in the absence and presence of two extracts derived from Rhus typhina leaves at a concentration of 1000 ppm, was investigated over the temperature range of 293–333 K. A comparison of the two inhibitors’ effectiveness revealed high inhibitory efficiency, up to 91% at 1000 ppm LESRT1 (methanol/double-distilled water (50%:50%, v/v)) and 92% for LESRT2 (ethanol/double-distilled water (50%:50%, v/v)) at 1000 ppm LESRT2.</description>
	<pubDate>2026-04-24</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 256: Eco-Friendly Corrosion Inhibition of OLC45 Steel in H2SO4 Solution Using Rhus typhina L. Plant Extracts</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/5/256">doi: 10.3390/technologies14050256</a></p>
	<p>Authors:
		Denisa-Ioana Răuță (Gheorghe)
		Florina Brânzoi
		Sorin Marius Avramescu
		Roxana-Doina Truşcă
		Ecaterina Matei
		</p>
	<p>This study focuses on the evaluation of eco-friendly corrosion inhibitors derived from extracts of Rhus typhina L. leaves, collected in August during the summer season, on OLC45 metal surfaces in a 0.5 M H2SO4 corrosive environment. The extracts were obtained using the microwave extraction technique and characterized by HPLC. The protective properties of OLC45 coated with LESRT (leaf extract collected in summer from Rhus typhina L.) were examined by potentiostatic and potentiodynamic polarization procedures and electrochemical impedance spectroscopy (EIS) in 0.5 M H2SO4. The application of the Langmuir isotherm revealed high values of the adsorption constant and standard free energies (ΔG°ads), suggesting a possible mixed adsorption process with an increased tendency toward chemisorption. The influence of temperature on the electrochemical behavior of OLC45 samples in H2SO4, both in the absence and presence of two extracts derived from Rhus typhina leaves at a concentration of 1000 ppm, was investigated over the temperature range of 293–333 K. A comparison of the two inhibitors’ effectiveness revealed high inhibitory efficiency, up to 91% at 1000 ppm LESRT1 (methanol/double-distilled water (50%:50%, v/v)) and 92% for LESRT2 (ethanol/double-distilled water (50%:50%, v/v)) at 1000 ppm LESRT2.</p>
	]]></content:encoded>

	<dc:title>Eco-Friendly Corrosion Inhibition of OLC45 Steel in H2SO4 Solution Using Rhus typhina L. Plant Extracts</dc:title>
			<dc:creator>Denisa-Ioana Răuță (Gheorghe)</dc:creator>
			<dc:creator>Florina Brânzoi</dc:creator>
			<dc:creator>Sorin Marius Avramescu</dc:creator>
			<dc:creator>Roxana-Doina Truşcă</dc:creator>
			<dc:creator>Ecaterina Matei</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14050256</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-24</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-24</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>256</prism:startingPage>
		<prism:doi>10.3390/technologies14050256</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/5/256</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/5/255">

	<title>Technologies, Vol. 14, Pages 255: Analysis of the Stranding Effect on the Surface Voltage Gradient of Transmission Line Conductors with Round Strands</title>
	<link>https://www.mdpi.com/2227-7080/14/5/255</link>
	<description>For high-voltage power transmission, the surface voltage gradient (SVG) of the conductor plays a crucial role in meeting corona performance requirements. The SVG is greatly impacted by the smoothness of the conductor’s surface. Under identical conditions, the SVG of smooth, round conductors differs from that of stranded conductors with the same outer radius. This paper uses Finite Element Analysis (FEA) to study the effect of different stranded conductor geometries and three-phase line topologies with stranded conductor bundles on the SVG. Although industry standards and the scientific literature often rely on simplified smooth-cylinder approximations, this research demonstrates that surface irregularities significantly increase electrical stress compared to idealized smooth surfaces. Through simulating various three-phase configurations, the study reveals a nearly constant field enhancement factor across diverse stranded designs. These results enable us to apply formulas developed for smooth conductors to more realistic power line applications involving stranded conductor bundles. Consequently, this FEA approach offers engineers a precise, versatile method for designing high-voltage transmission lines. The findings presented here facilitate a deeper understanding of the SVG surrounding stranded conductors, particularly with regard to its influence on corona phenomena.</description>
	<pubDate>2026-04-24</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 255: Analysis of the Stranding Effect on the Surface Voltage Gradient of Transmission Line Conductors with Round Strands</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/5/255">doi: 10.3390/technologies14050255</a></p>
	<p>Authors:
		Jordi-Roger Riba
		</p>
	<p>For high-voltage power transmission, the surface voltage gradient (SVG) of the conductor plays a crucial role in meeting corona performance requirements. The SVG is greatly impacted by the smoothness of the conductor’s surface. Under identical conditions, the SVG of smooth, round conductors differs from that of stranded conductors with the same outer radius. This paper uses Finite Element Analysis (FEA) to study the effect of different stranded conductor geometries and three-phase line topologies with stranded conductor bundles on the SVG. Although industry standards and the scientific literature often rely on simplified smooth-cylinder approximations, this research demonstrates that surface irregularities significantly increase electrical stress compared to idealized smooth surfaces. Through simulating various three-phase configurations, the study reveals a nearly constant field enhancement factor across diverse stranded designs. These results enable us to apply formulas developed for smooth conductors to more realistic power line applications involving stranded conductor bundles. Consequently, this FEA approach offers engineers a precise, versatile method for designing high-voltage transmission lines. The findings presented here facilitate a deeper understanding of the SVG surrounding stranded conductors, particularly with regard to its influence on corona phenomena.</p>
	]]></content:encoded>

	<dc:title>Analysis of the Stranding Effect on the Surface Voltage Gradient of Transmission Line Conductors with Round Strands</dc:title>
			<dc:creator>Jordi-Roger Riba</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14050255</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-24</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-24</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>255</prism:startingPage>
		<prism:doi>10.3390/technologies14050255</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/5/255</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/5/254">

	<title>Technologies, Vol. 14, Pages 254: Open-Source Design of Solar-Powered Picnic Table for Outdoor Device Charging</title>
	<link>https://www.mdpi.com/2227-7080/14/5/254</link>
	<description>The ubiquitous use of electronic devices requires outdoor charging capabilities. A successful approach uses solar photovoltaic (PV)-powered picnic tables, but the existing designs share several limitations including proprietary designs that limit replication/modification and high costs. This study addresses these limitations by presenting the design of a novel open-source solar-powered picnic table fabricated from reused, decommissioned PVs and recycled plastic lumber. The open-source solar-powered picnic table acts as a conventional picnic table and provides electrical charging that supports learning and connectivity by providing outdoor power. The system integrates a 320 W PV module, maximum power point charge controller, and 12 V LiFePO4 battery, enabling reliable off-grid power generation and storage. The device was validated under real outdoor operating conditions using everyday user loads, including smartphones, tablets, and laptops as individual and multiple connected devices at different times of the day and night. In addition to this functionality, the materials cost was &lt;USD 450, 90–95% less than commercially available options. The system, built using recycled and repurposed components, further enhances sustainability while maintaining durability for outdoor deployment. These results indicate that open-source solar furniture can provide an affordable and replicable approach for expanding renewable-powered charging access in outdoor environments.</description>
	<pubDate>2026-04-24</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 254: Open-Source Design of Solar-Powered Picnic Table for Outdoor Device Charging</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/5/254">doi: 10.3390/technologies14050254</a></p>
	<p>Authors:
		Sara Khan
		Joshua M. Pearce
		</p>
	<p>The ubiquitous use of electronic devices requires outdoor charging capabilities. A successful approach uses solar photovoltaic (PV)-powered picnic tables, but the existing designs share several limitations including proprietary designs that limit replication/modification and high costs. This study addresses these limitations by presenting the design of a novel open-source solar-powered picnic table fabricated from reused, decommissioned PVs and recycled plastic lumber. The open-source solar-powered picnic table acts as a conventional picnic table and provides electrical charging that supports learning and connectivity by providing outdoor power. The system integrates a 320 W PV module, maximum power point charge controller, and 12 V LiFePO4 battery, enabling reliable off-grid power generation and storage. The device was validated under real outdoor operating conditions using everyday user loads, including smartphones, tablets, and laptops as individual and multiple connected devices at different times of the day and night. In addition to this functionality, the materials cost was &lt;USD 450, 90–95% less than commercially available options. The system, built using recycled and repurposed components, further enhances sustainability while maintaining durability for outdoor deployment. These results indicate that open-source solar furniture can provide an affordable and replicable approach for expanding renewable-powered charging access in outdoor environments.</p>
	]]></content:encoded>

	<dc:title>Open-Source Design of Solar-Powered Picnic Table for Outdoor Device Charging</dc:title>
			<dc:creator>Sara Khan</dc:creator>
			<dc:creator>Joshua M. Pearce</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14050254</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-24</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-24</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>254</prism:startingPage>
		<prism:doi>10.3390/technologies14050254</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/5/254</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/5/253">

	<title>Technologies, Vol. 14, Pages 253: Neuro-Fuzzy Approach for Detecting DDoS Attacks in IoT Environments Applied to Biosignal Monitoring</title>
	<link>https://www.mdpi.com/2227-7080/14/5/253</link>
	<description>Distributed denial-of-service (DDoS) attacks pose a critical threat to the availability of the Internet of Medical Things (IoMT). This paper proposes an intrusion detection system (IDS) based on a hybrid neuro-fuzzy-inspired approach to identify DDoS attacks in IoMT environments. The architecture combines an ensemble of decision trees, a sigmoidal smoothing mechanism, and a multilayer neural meta-classifier, enabling the modeling of nonlinear relationships between legitimate and malicious traffic without requiring explicit fuzzy rules or a formal fuzzy inference mechanism. The evaluation was conducted using the public DoS/DDoS-MQTT-IoT dataset, which was extended by incorporating legitimate traffic generated by electrocardiography (ECG) monitoring devices to approximate real operational IoMT conditions. The model was validated using stratified cross-validation and bootstrap procedures. In the extended IoMT scenario including ECG traffic, the proposed approach achieved an area under the ROC curve (AUC) of 0.904 and an F1 score of 0.823. Finally, the IDS was integrated into an intrusion detection and prevention system (IDPS) capable of detecting anomalous traffic patterns within three seconds and automatically blocking malicious IP addresses after repeated detections.</description>
	<pubDate>2026-04-24</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 253: Neuro-Fuzzy Approach for Detecting DDoS Attacks in IoT Environments Applied to Biosignal Monitoring</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/5/253">doi: 10.3390/technologies14050253</a></p>
	<p>Authors:
		Angela M. Parra
		Marcia M. Bayas
		</p>
	<p>Distributed denial-of-service (DDoS) attacks pose a critical threat to the availability of the Internet of Medical Things (IoMT). This paper proposes an intrusion detection system (IDS) based on a hybrid neuro-fuzzy-inspired approach to identify DDoS attacks in IoMT environments. The architecture combines an ensemble of decision trees, a sigmoidal smoothing mechanism, and a multilayer neural meta-classifier, enabling the modeling of nonlinear relationships between legitimate and malicious traffic without requiring explicit fuzzy rules or a formal fuzzy inference mechanism. The evaluation was conducted using the public DoS/DDoS-MQTT-IoT dataset, which was extended by incorporating legitimate traffic generated by electrocardiography (ECG) monitoring devices to approximate real operational IoMT conditions. The model was validated using stratified cross-validation and bootstrap procedures. In the extended IoMT scenario including ECG traffic, the proposed approach achieved an area under the ROC curve (AUC) of 0.904 and an F1 score of 0.823. Finally, the IDS was integrated into an intrusion detection and prevention system (IDPS) capable of detecting anomalous traffic patterns within three seconds and automatically blocking malicious IP addresses after repeated detections.</p>
	]]></content:encoded>

	<dc:title>Neuro-Fuzzy Approach for Detecting DDoS Attacks in IoT Environments Applied to Biosignal Monitoring</dc:title>
			<dc:creator>Angela M. Parra</dc:creator>
			<dc:creator>Marcia M. Bayas</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14050253</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-24</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-24</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>253</prism:startingPage>
		<prism:doi>10.3390/technologies14050253</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/5/253</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/5/252">

	<title>Technologies, Vol. 14, Pages 252: Complexity and Performance Analysis of Supervised Machine Learning Models for Applied Technologies: An Experimental Study with Impulsive α-Stable Noise</title>
	<link>https://www.mdpi.com/2227-7080/14/5/252</link>
	<description>Impulsive alpha (α)-stable noise, characterized by heavy tails and intense outliers, is a key ingredient in simulating financial, medical, seismic, and digital communication technologies. It poses versatile challenges to conventional machine learning (ML) algorithms in predicting noise parameters for multidisciplinary artificial intelligence (AI)-embedded devices. In this study, we adopted a two-phase methodology to investigate the complexity and performance of supervised ML algorithms while classifying impulsive noise parameters. We generated synthetic datasets of α-stable noise distributions for experimentation in a controlled environment. It was followed by experimental evaluation to derive the complexity and performance of ML classifiers—k-nearest neighbors (KNN), Support Vector Machine (SVM), Naïve Bayes (NB), Decision Tree (DT), and Random Forest (RF). Moreover, we employed a very high channel noise level of −15 dB in the test datasets to ensure that the derived analysis applies to real-world devices. The results demonstrate the high performance of DT and RF in structured binary classification of the α regime and the sign of skewness, while incurring satisfactory computational costs. However, SVM and kNN are comparatively more robust for multi-class classification, albeit with higher memory and training costs. On the contrary, NB fails to address the skewed and impulsive behavior of α-stable noise. We observed that even the most effective classifiers struggle to achieve perfect accuracy in multi-class classification. Overall, the experimental results reveal significant trade-off relationships between the complexity and performance of ML classifiers. Conclusively, simple models are well-suited for coarse-grained tasks, such as α-approximation and sign-of-skewness classification. 
In contrast, sophisticated models can be deployed to predict noise parameters to some extent. Our study provides a clear set of trade-offs for future applied AI devices that address adversarial and impulsive noise.</description>
	<pubDate>2026-04-23</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 252: Complexity and Performance Analysis of Supervised Machine Learning Models for Applied Technologies: An Experimental Study with Impulsive α-Stable Noise</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/5/252">doi: 10.3390/technologies14050252</a></p>
	<p>Authors:
		Areeb Ahmed
		Zoran Bosnić
		</p>
	<p>Impulsive alpha (α)-stable noise, characterized by heavy tails and intense outliers, is a key ingredient in simulating financial, medical, seismic, and digital communication technologies. It poses versatile challenges to conventional machine learning (ML) algorithms in predicting noise parameters for multidisciplinary artificial intelligence (AI)-embedded devices. In this study, we adopted a two-phase methodology to investigate the complexity and performance of supervised ML algorithms while classifying impulsive noise parameters. We generated synthetic datasets of α-stable noise distributions for experimentation in a controlled environment. It was followed by experimental evaluation to derive the complexity and performance of ML classifiers—k-nearest neighbors (KNN), Support Vector Machine (SVM), Naïve Bayes (NB), Decision Tree (DT), and Random Forest (RF). Moreover, we employed a very high channel noise level of −15 dB in the test datasets to ensure that the derived analysis applies to real-world devices. The results demonstrate the high performance of DT and RF in structured binary classification of the α regime and the sign of skewness, while incurring satisfactory computational costs. However, SVM and kNN are comparatively more robust for multi-class classification, albeit with higher memory and training costs. On the contrary, NB fails to address the skewed and impulsive behavior of α-stable noise. We observed that even the most effective classifiers struggle to achieve perfect accuracy in multi-class classification. Overall, the experimental results reveal significant trade-off relationships between the complexity and performance of ML classifiers. Conclusively, simple models are well-suited for coarse-grained tasks, such as α-approximation and sign-of-skewness classification. 
In contrast, sophisticated models can be deployed to predict noise parameters to some extent. Our study provides a clear set of trade-offs for future applied AI devices that address adversarial and impulsive noise.</p>
	]]></content:encoded>

	<dc:title>Complexity and Performance Analysis of Supervised Machine Learning Models for Applied Technologies: An Experimental Study with Impulsive α-Stable Noise</dc:title>
			<dc:creator>Areeb Ahmed</dc:creator>
			<dc:creator>Zoran Bosnić</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14050252</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-23</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-23</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>252</prism:startingPage>
		<prism:doi>10.3390/technologies14050252</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/5/252</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/5/250">

	<title>Technologies, Vol. 14, Pages 250: AMD-Proj: Adaptive Memory-Driven Selective Gradient Projection for Continual Learning in Document Understanding</title>
	<link>https://www.mdpi.com/2227-7080/14/5/250</link>
	<description>Visually rich document understanding (VrDU) models rely on tightly coupled textual, layout, and visual representations. In real-world deployments, these models must continuously adapt to new document domains over time. However, naïve sequential fine-tuning leads to severe catastrophic forgetting due to shared parameters and strong cross-task interference. Existing continual learning approaches either constrain parameter updates, preserve output distributions, or uniformly suppress gradient directions associated with previous tasks. While effective in limited settings, these strategies fail to balance stability and plasticity in large multimodal transformers. We propose AMD-Proj, an adaptive memory-driven selective gradient projection framework for continual learning in document understanding. It models task knowledge using specific gradient subspaces and adaptively modulates incoming gradients based on their alignment with this memory, selectively blocking interfering directions while reinforcing reusable ones. An efficient truncated SVD mechanism with online subspace merging ensures bounded memory usage and scalability to large transformer-based architectures. We evaluate AMD-Proj on four VrDU benchmarks (FUNSD, SROIE, CORD, and BuDDIE) under a task-incremental learning setting using LayoutLMv2 and LayoutLMv3 backbones. Results show that AMD-Proj reduces catastrophic forgetting and improves F1-based stability over EWC, GPM, LwF, OWM, CUBER, TRGP and parameter-efficient fine-tuning methods. Extensive mechanistic analyses, including gradient spectrum decomposition and layer-wise reuse versus block dynamics, provide insight into how selective gradient projection controls optimization geometry during continual adaptation. These findings establish selective gradient projection as a principled and interpretable approach for continual learning in visually rich document understanding.</description>
	<pubDate>2026-04-23</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 250: AMD-Proj: Adaptive Memory-Driven Selective Gradient Projection for Continual Learning in Document Understanding</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/5/250">doi: 10.3390/technologies14050250</a></p>
	<p>Authors:
		Abdellatif Sassioui
		Yasser Elouargui
		Mohamed El Kamili
		Rachid Benouini
		El Mehdi Benyoussef
		Meriyem Chergui
		Mohammed Ouzzif
		</p>
	<p>Visually rich document understanding (VrDU) models rely on tightly coupled textual, layout, and visual representations. In real-world deployments, these models must continuously adapt to new document domains over time. However, naïve sequential fine-tuning leads to severe catastrophic forgetting due to shared parameters and strong cross-task interference. Existing continual learning approaches either constrain parameter updates, preserve output distributions, or uniformly suppress gradient directions associated with previous tasks. While effective in limited settings, these strategies fail to balance stability and plasticity in large multimodal transformers. We propose AMD-Proj, an adaptive memory-driven selective gradient projection framework for continual learning in document understanding. It models task knowledge using specific gradient subspaces and adaptively modulates incoming gradients based on their alignment with this memory, selectively blocking interfering directions while reinforcing reusable ones. An efficient truncated SVD mechanism with online subspace merging ensures bounded memory usage and scalability to large transformer-based architectures. We evaluate AMD-Proj on four VrDU benchmarks (FUNSD, SROIE, CORD, and BuDDIE) under a task-incremental learning setting using LayoutLMv2 and LayoutLMv3 backbones. Results show that AMD-Proj reduces catastrophic forgetting and improves F1-based stability over EWC, GPM, LwF, OWM, CUBER, TRGP and parameter-efficient fine-tuning methods. Extensive mechanistic analyses, including gradient spectrum decomposition and layer-wise reuse versus block dynamics, provide insight into how selective gradient projection controls optimization geometry during continual adaptation. These findings establish selective gradient projection as a principled and interpretable approach for continual learning in visually rich document understanding.</p>
	]]></content:encoded>

	<dc:title>AMD-Proj: Adaptive Memory-Driven Selective Gradient Projection for Continual Learning in Document Understanding</dc:title>
			<dc:creator>Abdellatif Sassioui</dc:creator>
			<dc:creator>Yasser Elouargui</dc:creator>
			<dc:creator>Mohamed El Kamili</dc:creator>
			<dc:creator>Rachid Benouini</dc:creator>
			<dc:creator>El Mehdi Benyoussef</dc:creator>
			<dc:creator>Meriyem Chergui</dc:creator>
			<dc:creator>Mohammed Ouzzif</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14050250</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-23</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-23</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>250</prism:startingPage>
		<prism:doi>10.3390/technologies14050250</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/5/250</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/5/251">

	<title>Technologies, Vol. 14, Pages 251: Reproducibility of 3D-Printed Breast Phantoms in Mammography and Breast Tomosynthesis</title>
	<link>https://www.mdpi.com/2227-7080/14/5/251</link>
	<description>The development of realistic breast phantoms is critical for the evaluation of imaging systems and quantitative image analysis methods. In this work, breast samples derived from the same digital model were produced using 3D printing technology and evaluated for structural similarity and reproducibility. Four independently manufactured phantoms were imaged using mammography and breast tomosynthesis. Radiomic features were extracted from regions of interest in order to assess inter-phantom variability. The results showed very good agreement between the four printed phantoms. Most first-order and GLCM radiomic features exhibited very low inter-phantom variability, indicating consistent structural and intensity characteristics. Neighborhood-based texture features showed slightly higher variability, reflecting their sensitivity to local structural differences. Fractal and power spectrum analyses also confirmed the high structural similarity of the phantoms. These results indicate that the proposed manufacturing approach can produce reproducible breast imaging phantoms suitable for mammography and tomosynthesis imaging studies, with potential applications in imaging system evaluation and radiomic research.</description>
	<pubDate>2026-04-23</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 251: Reproducibility of 3D-Printed Breast Phantoms in Mammography and Breast Tomosynthesis</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/5/251">doi: 10.3390/technologies14050251</a></p>
	<p>Authors:
		Kristina Bliznakova
		Vencislav Nastev
		Nikolay Dukov
		Ivan Buliev
		Zhivko Bliznakov
		Valentina Dobreva
		Chavdar Bachvarov
		Georgi Todorov
		Deyan Grancharov
		</p>
	<p>The development of realistic breast phantoms is critical for the evaluation of imaging systems and quantitative image analysis methods. In this work, breast samples derived from the same digital model were produced using 3D printing technology and evaluated for structural similarity and reproducibility. Four independently manufactured phantoms were imaged using mammography and breast tomosynthesis. Radiomic features were extracted from regions of interest in order to assess inter-phantom variability. The results showed very good agreement between the four printed phantoms. Most first-order and GLCM radiomic features exhibited very low inter-phantom variability, indicating consistent structural and intensity characteristics. Neighborhood-based texture features showed slightly higher variability, reflecting their sensitivity to local structural differences. Fractal and power spectrum analyses also confirmed the high structural similarity of the phantoms. These results indicate that the proposed manufacturing approach can produce reproducible breast imaging phantoms suitable for mammography and tomosynthesis imaging studies, with potential applications in imaging system evaluation and radiomic research.</p>
	]]></content:encoded>

	<dc:title>Reproducibility of 3D-Printed Breast Phantoms in Mammography and Breast Tomosynthesis</dc:title>
			<dc:creator>Kristina Bliznakova</dc:creator>
			<dc:creator>Vencislav Nastev</dc:creator>
			<dc:creator>Nikolay Dukov</dc:creator>
			<dc:creator>Ivan Buliev</dc:creator>
			<dc:creator>Zhivko Bliznakov</dc:creator>
			<dc:creator>Valentina Dobreva</dc:creator>
			<dc:creator>Chavdar Bachvarov</dc:creator>
			<dc:creator>Georgi Todorov</dc:creator>
			<dc:creator>Deyan Grancharov</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14050251</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-23</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-23</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>251</prism:startingPage>
		<prism:doi>10.3390/technologies14050251</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/5/251</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/5/249">

	<title>Technologies, Vol. 14, Pages 249: Automatic Diagnosis of Colorectal Cancer Based on Histopathological Images Using Artificial Intelligence Models</title>
	<link>https://www.mdpi.com/2227-7080/14/5/249</link>
	<description>Early diagnosis of colorectal cancer is considered a key factor in reducing the complications of the disease before it spreads throughout the body, which raises survival and increases the chance of patient recovery. This research introduces machine learning and deep learning classifiers trained on a publicly available histopathological dataset with four magnification levels (40×, 100×, 200×, 400×) and uses stratified k-fold cross-validation to classify colorectal cancer into five classes. Features are extracted in two ways: manually and automatically using a Convolutional Neural Network (CNN). Both machine learning and deep learning techniques performed well on the test data. In summary, machine learning using an automatic feature extraction method achieved an accuracy of 89%, Precision of 88%, Recall of 87%, and F1-Score of 87% using a Support Vector Machine (SVM) classifier, while machine learning using a manual feature extraction method achieved an accuracy, Precision, Recall, and F1-Score of 79% using an Extreme Gradient Boosting (XG-Boost) classifier at 200× magnification. On the other hand, the deep learning model ResNet-50 achieved the best result with 97% accuracy, Precision, Recall, and F1-Score using 100× magnification.</description>
	<pubDate>2026-04-23</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 249: Automatic Diagnosis of Colorectal Cancer Based on Histopathological Images Using Artificial Intelligence Models</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/5/249">doi: 10.3390/technologies14050249</a></p>
	<p>Authors:
		Asmaa Al-Damen
		Yazan Al-Issa
		Hiam Alquran
		</p>
	<p>Early diagnosis of colorectal cancer is considered a key factor in reducing the complications of the disease before it spreads throughout the body, which raises survival and increases the chance of patient recovery. This research introduces machine learning and deep learning classifiers trained on a publicly available histopathological dataset with four magnification levels (40×, 100×, 200×, 400×) and uses stratified k-fold cross-validation to classify colorectal cancer into five classes. Features are extracted in two ways: manually and automatically using a Convolutional Neural Network (CNN). Both machine learning and deep learning techniques performed well on the test data. In summary, machine learning using an automatic feature extraction method achieved an accuracy of 89%, Precision of 88%, Recall of 87%, and F1-Score of 87% using a Support Vector Machine (SVM) classifier, while machine learning using a manual feature extraction method achieved an accuracy, Precision, Recall, and F1-Score of 79% using an Extreme Gradient Boosting (XG-Boost) classifier at 200× magnification. On the other hand, the deep learning model ResNet-50 achieved the best result with 97% accuracy, Precision, Recall, and F1-Score using 100× magnification.</p>
	]]></content:encoded>

	<dc:title>Automatic Diagnosis of Colorectal Cancer Based on Histopathological Images Using Artificial Intelligence Models</dc:title>
			<dc:creator>Asmaa Al-Damen</dc:creator>
			<dc:creator>Yazan Al-Issa</dc:creator>
			<dc:creator>Hiam Alquran</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14050249</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-23</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-23</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>249</prism:startingPage>
		<prism:doi>10.3390/technologies14050249</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/5/249</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/5/248">

	<title>Technologies, Vol. 14, Pages 248: A Safety-Constrained Multi-Objective Optimization Framework for Autonomous Mining Systems: Statistical Validation in Surface and Underground Environments</title>
	<link>https://www.mdpi.com/2227-7080/14/5/248</link>
	<description>The incorporation of artificial intelligence, multi-sensor perception, and cyber-physical control into mining operations offers tremendous opportunities for increasing productivity, safety, and sustainability. However, present frameworks focus on discrete subsystems rather than providing a unified, safety-constrained optimization method that has been verified in both surface and underground environments. This paper describes a scalable, hierarchical autonomous mining architecture that incorporates sensor fusion, edge intelligence, fleet coordination, and digital twin-based decision support. It is designed to operate in GNSS-denied conditions and extreme climatic constraints common to Nordic mining environments. A mathematical modeling approach formalizes vehicle dynamics, drilling mechanics, and multi-agent fleet coordination inside a safety-constrained multi-objective optimization formulation. The framework is validated using Monte Carlo simulation with uncertainty measurement, sensitivity analysis, and statistical hypothesis testing. The preliminary results show improvements over a typical baseline, with productivity increasing by approximately 24.3% ± 3.2%, energy consumption decreasing by 12.8% ± 2.5%, and safety risk decreasing by 48.6% ± 4.1%. A sensitivity study identifies localization accuracy, communication delay, and optimization weighting as the primary system performance drivers. The suggested framework serves as a reproducible and transferable reference model for next-generation intelligent mining systems, having direct applications to both industrial deployment and future research in autonomous resource extraction.</description>
	<pubDate>2026-04-22</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 248: A Safety-Constrained Multi-Objective Optimization Framework for Autonomous Mining Systems: Statistical Validation in Surface and Underground Environments</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/5/248">doi: 10.3390/technologies14050248</a></p>
	<p>Authors:
		Rajesh Patil
		Magnus Löfstrand
		</p>
	<p>The incorporation of artificial intelligence, multi-sensor perception, and cyber-physical control into mining operations offers tremendous opportunities for increasing productivity, safety, and sustainability. However, present frameworks focus on discrete subsystems rather than providing a unified, safety-constrained optimization method that has been verified in both surface and underground environments. This paper describes a scalable, hierarchical autonomous mining architecture that incorporates sensor fusion, edge intelligence, fleet coordination, and digital twin-based decision support. It is designed to operate in GNSS-denied conditions and extreme climatic constraints common to Nordic mining environments. A mathematical modeling approach formalizes vehicle dynamics, drilling mechanics, and multi-agent fleet coordination inside a safety-constrained multi-objective optimization formulation. The framework is validated using Monte Carlo simulation with uncertainty measurement, sensitivity analysis, and statistical hypothesis testing. The preliminary results show improvements over a typical baseline, with productivity increasing by approximately 24.3% ± 3.2%, energy consumption decreasing by 12.8% ± 2.5%, and safety risk decreasing by 48.6% ± 4.1%. A sensitivity study identifies localization accuracy, communication delay, and optimization weighting as the primary system performance drivers. The suggested framework serves as a reproducible and transferable reference model for next-generation intelligent mining systems, having direct applications to both industrial deployment and future research in autonomous resource extraction.</p>
	]]></content:encoded>

	<dc:title>A Safety-Constrained Multi-Objective Optimization Framework for Autonomous Mining Systems: Statistical Validation in Surface and Underground Environments</dc:title>
			<dc:creator>Rajesh Patil</dc:creator>
			<dc:creator>Magnus Löfstrand</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14050248</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-22</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-22</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>248</prism:startingPage>
		<prism:doi>10.3390/technologies14050248</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/5/248</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/5/247">

	<title>Technologies, Vol. 14, Pages 247: Design and Deployment of an IoT-Based Digital Agriculture System in a Hydroponic Plant Factory</title>
	<link>https://www.mdpi.com/2227-7080/14/5/247</link>
	<description>The incorporation of the Internet of Things (IoT) in indoor agricultural systems has become an essential tool for monitoring and analyzing environmental variables, contributing to more efficient decision-making. This article presents the design and implementation of an IoT-based digital agriculture system applied to a Plant Factory (PF) for hydroponic vegetable cultivation using the Nutrient Film Technique (NFT). The objective of this study was to develop a system capable of effectively monitoring and controlling the environmental variables that directly influence the microclimate of a closed agricultural environment. The proposed system integrates a four-layer IoT architecture based on a MODBUS RS-485 communication bus, which allows for continuous data acquisition and the operation of multiple sensors and controlled devices. Additionally, user-oriented tools such as a human–machine interface (HMI), a web application, a mobile application and an automatic alert module were incorporated, enhancing accessibility and remote supervision. Experimental results showed stable control performance of ambient temperature (TA), relative humidity (RH), photoperiod, and photosynthetic photon flux density (PPFD), along with continuous monitoring of CO2 concentration. A 30-day validation experiment using Swiss chard (Beta vulgaris L. var. cicla) under controlled conditions was conducted. The results showed progressive plant development, with leaf area increasing from 15.17 cm2 to 690.39 cm2, plant height from 7 cm to 31 cm, fresh weight from 23 g to 171 g, and the number of leaves from 9 to 20. These results support the functional validity of the proposed system as a reliable platform for environmental monitoring and control in controlled-environment agriculture.</description>
	<pubDate>2026-04-22</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 247: Design and Deployment of an IoT-Based Digital Agriculture System in a Hydroponic Plant Factory</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/5/247">doi: 10.3390/technologies14050247</a></p>
	<p>Authors:
		Herrera-Arroyo Raul Omar
		Moreno-Aguilera Cristal Yoselin
		Coral Martinez-Nolasco
		Víctor Sámano-Ortega
		Mauro Santoyo-Mora
		Martínez-Nolasco Juan José
		</p>
	<p>The incorporation of the Internet of Things (IoT) in indoor agricultural systems has become an essential tool for monitoring and analyzing environmental variables, contributing to more efficient decision-making. This article presents the design and implementation of an IoT-based digital agriculture system applied to a Plant Factory (PF) for hydroponic vegetable cultivation using the Nutrient Film Technique (NFT). The objective of this study was to develop a system capable of effectively monitoring and controlling the environmental variables that directly influence the microclimate of a closed agricultural environment. The proposed system integrates a four-layer IoT architecture based on a MODBUS RS-485 communication bus, which allows for continuous data acquisition and the operation of multiple sensors and controlled devices. Additionally, user-oriented tools such as a human–machine interface (HMI), a web application, a mobile application and an automatic alert module were incorporated, enhancing accessibility and remote supervision. Experimental results showed stable control performance of ambient temperature (TA), relative humidity (RH), photoperiod, and photosynthetic photon flux density (PPFD), along with continuous monitoring of CO2 concentration. A 30-day validation experiment using Swiss chard (Beta vulgaris L. var. cicla) under controlled conditions was conducted. The results showed progressive plant development, with leaf area increasing from 15.17 cm2 to 690.39 cm2, plant height from 7 cm to 31 cm, fresh weight from 23 g to 171 g, and the number of leaves from 9 to 20. These results support the functional validity of the proposed system as a reliable platform for environmental monitoring and control in controlled-environment agriculture.</p>
	]]></content:encoded>

	<dc:title>Design and Deployment of an IoT-Based Digital Agriculture System in a Hydroponic Plant Factory</dc:title>
			<dc:creator>Herrera-Arroyo Raul Omar</dc:creator>
			<dc:creator>Moreno-Aguilera Cristal Yoselin</dc:creator>
			<dc:creator>Coral Martinez-Nolasco</dc:creator>
			<dc:creator>Víctor Sámano-Ortega</dc:creator>
			<dc:creator>Mauro Santoyo-Mora</dc:creator>
			<dc:creator>Martínez-Nolasco Juan José</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14050247</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-22</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-22</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>247</prism:startingPage>
		<prism:doi>10.3390/technologies14050247</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/5/247</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/5/246">

	<title>Technologies, Vol. 14, Pages 246: Edge-Based Multi-Scale Predator Detection for Stingless Bee Protection Using Attention-Integrated YOLOv11</title>
	<link>https://www.mdpi.com/2227-7080/14/5/246</link>
	<description>Stingless bee colonies are vulnerable to predators of widely varying sizes, and repeated intrusions can cause stress, reduce productivity, and trigger colony absconding. Existing automated surveillance systems detect only a limited range of predators and often struggle with multi-scale object detection in high-resolution images. This study proposes a real-time predator monitoring system that integrates a Multi-Scale Attention module into the YOLOv11-nano architecture (MSYOLO11) to enhance detection performance across both small and large predators. The proposed model combines convolutional features with an attention mechanism to improve global–local feature fusion. Experimental evaluation shows that MSYOLO11 increases overall Recall from 0.830 to 0.853 compared to YOLOv11-nano, with substantial improvements for small-object classes such as ants (+0.096), humans (+0.083), and H. itama (+0.026), while maintaining comparable Precision (0.868 vs 0.842) and mAP50 (0.898 vs 0.896) at a nearly identical computational cost (6.3 GFLOPs). The system operates at 5 FPS on a Jetson Orin Nano, with an end-to-end latency of 181 ms. A Firebase-integrated mobile application delivers instant push notifications, displays detected predators with bounding boxes, and provides real-time data synchronization. The results demonstrate that MSYOLO11 offers a practical and efficient solution for multi-scale predator detection, supporting continuous hive surveillance and timely beekeeper intervention.</description>
	<pubDate>2026-04-22</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 246: Edge-Based Multi-Scale Predator Detection for Stingless Bee Protection Using Attention-Integrated YOLOv11</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/5/246">doi: 10.3390/technologies14050246</a></p>
	<p>Authors:
		Ashan Milinda Bandara Ratnayake
		Marha Sahirah Majid
		Hartini Yasin
		Abdul Ghani Naim
		Pg Emeroylariffion Abas
		</p>
	<p>Stingless bee colonies are vulnerable to predators of widely varying sizes, and repeated intrusions can cause stress, reduce productivity, and trigger colony absconding. Existing automated surveillance systems detect only a limited range of predators and often struggle with multi-scale object detection in high-resolution images. This study proposes a real-time predator monitoring system that integrates a Multi-Scale Attention module into the YOLOv11-nano architecture (MSYOLO11) to enhance detection performance across both small and large predators. The proposed model combines convolutional features with an attention mechanism to improve global–local feature fusion. Experimental evaluation shows that MSYOLO11 increases overall Recall from 0.830 to 0.853 compared to YOLOv11-nano, with substantial improvements for small-object classes such as ants (+0.096), humans (+0.083), and H. itama (+0.026), while maintaining comparable Precision (0.868 vs 0.842) and mAP50 (0.898 vs 0.896) at a nearly identical computational cost (6.3 GFLOPs). The system operates at 5 FPS on a Jetson Orin Nano, with an end-to-end latency of 181 ms. A Firebase-integrated mobile application delivers instant push notifications, displays detected predators with bounding boxes, and provides real-time data synchronization. The results demonstrate that MSYOLO11 offers a practical and efficient solution for multi-scale predator detection, supporting continuous hive surveillance and timely beekeeper intervention.</p>
	]]></content:encoded>

	<dc:title>Edge-Based Multi-Scale Predator Detection for Stingless Bee Protection Using Attention-Integrated YOLOv11</dc:title>
			<dc:creator>Ashan Milinda Bandara Ratnayake</dc:creator>
			<dc:creator>Marha Sahirah Majid</dc:creator>
			<dc:creator>Hartini Yasin</dc:creator>
			<dc:creator>Abdul Ghani Naim</dc:creator>
			<dc:creator>Pg Emeroylariffion Abas</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14050246</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-22</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-22</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>246</prism:startingPage>
		<prism:doi>10.3390/technologies14050246</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/5/246</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/5/245">

	<title>Technologies, Vol. 14, Pages 245: Predictive Models of Soil Electrical Resistivity Based on Environmental Parameters: A Systematic Review of Modeling Approaches, Influencing Factors and Applications</title>
	<link>https://www.mdpi.com/2227-7080/14/5/245</link>
	<description>Soil electrical resistivity (SER) is widely used as an indirect indicator of soil physical, chemical, and hydrological properties and plays an important role in applications such as grounding system design, geotechnical site characterization, agricultural soil monitoring, and environmental contamination assessment. However, SER is strongly influenced by environmental variables including soil moisture content, temperature, salinity, and soil texture, which makes accurate prediction challenging under heterogeneous field conditions. A systematic review was conducted following the PRISMA 2020 protocol using the Scopus database to identify peer-reviewed studies published between 2018 and 2026 related to predictive models of soil electrical resistivity based on environmental parameters. After applying defined inclusion and exclusion criteria, a set of relevant studies was selected for qualitative and comparative analysis. The reviewed studies consistently identify soil moisture content as the most frequently reported influential factor affecting SER, followed by temperature, salinity, and soil texture. This observation reflects the predominant focus of the analyzed literature within the selected time frame rather than a definitive representation of all controlling physical processes. Similarly, the reviewed literature suggests that empirical and statistical models remain valuable due to their simplicity and interpretability, whereas machine learning approaches such as artificial neural networks, support vector regression, and ensemble methods are often reported to achieve higher predictive accuracy in complex soil environments. The predictive SER modeling represents a rapidly evolving research field, and future work should focus on hybrid physics-informed machine learning models, the development of standardized datasets, and the integration of predictive algorithms with emerging sensing technologies and IoT-based monitoring systems.</description>
	<pubDate>2026-04-22</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 245: Predictive Models of Soil Electrical Resistivity Based on Environmental Parameters: A Systematic Review of Modeling Approaches, Influencing Factors and Applications</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/5/245">doi: 10.3390/technologies14050245</a></p>
	<p>Authors:
		Cesar Augusto Navarro Rubio
		Hugo Martínez Ángeles
		Mario Trejo Perea
		Roberto Valentín Carrillo-Serrano
		Saúl Obregón-Biosca
		Mariano Garduño Aparicio
		José Luis Reyes Araiza
		José Gabriel Ríos Moreno
		</p>
	<p>Soil electrical resistivity (SER) is widely used as an indirect indicator of soil physical, chemical, and hydrological properties and plays an important role in applications such as grounding system design, geotechnical site characterization, agricultural soil monitoring, and environmental contamination assessment. However, SER is strongly influenced by environmental variables including soil moisture content, temperature, salinity, and soil texture, which makes accurate prediction challenging under heterogeneous field conditions. A systematic review was conducted following the PRISMA 2020 protocol using the Scopus database to identify peer-reviewed studies published between 2018 and 2026 related to predictive models of soil electrical resistivity based on environmental parameters. After applying defined inclusion and exclusion criteria, a set of relevant studies was selected for qualitative and comparative analysis. The reviewed studies consistently identify soil moisture content as the most frequently reported influential factor affecting SER, followed by temperature, salinity, and soil texture. This observation reflects the predominant focus of the analyzed literature within the selected time frame rather than a definitive representation of all controlling physical processes. Similarly, the reviewed literature suggests that empirical and statistical models remain valuable due to their simplicity and interpretability, whereas machine learning approaches such as artificial neural networks, support vector regression, and ensemble methods are often reported to achieve higher predictive accuracy in complex soil environments. The predictive SER modeling represents a rapidly evolving research field, and future work should focus on hybrid physics-informed machine learning models, the development of standardized datasets, and the integration of predictive algorithms with emerging sensing technologies and IoT-based monitoring systems.</p>
	]]></content:encoded>

	<dc:title>Predictive Models of Soil Electrical Resistivity Based on Environmental Parameters: A Systematic Review of Modeling Approaches, Influencing Factors and Applications</dc:title>
			<dc:creator>Cesar Augusto Navarro Rubio</dc:creator>
			<dc:creator>Hugo Martínez Ángeles</dc:creator>
			<dc:creator>Mario Trejo Perea</dc:creator>
			<dc:creator>Roberto Valentín Carrillo-Serrano</dc:creator>
			<dc:creator>Saúl Obregón-Biosca</dc:creator>
			<dc:creator>Mariano Garduño Aparicio</dc:creator>
			<dc:creator>José Luis Reyes Araiza</dc:creator>
			<dc:creator>José Gabriel Ríos Moreno</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14050245</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-22</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-22</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Systematic Review</prism:section>
	<prism:startingPage>245</prism:startingPage>
		<prism:doi>10.3390/technologies14050245</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/5/245</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/5/244">

	<title>Technologies, Vol. 14, Pages 244: Modified&#160;and&#160;Standard Touch Screen Technology to Help People with Intellectual and Developmental Disabilities Access Leisure Events and Complete Match-to-Sample Tasks: A Case Series Study</title>
	<link>https://www.mdpi.com/2227-7080/14/5/244</link>
	<description>While touch screen technology is largely available, people with intellectual and developmental disabilities might have difficulties using it due to erratic touch responses. This study included six adults with intellectual and developmental disabilities who were managing touch screen technology modified via an input adaptation module. Such a module allowed them to activate the screen and access leisure events and complete match-to-sample tasks with various response configurations (e.g., precise clicks, heavy/prolonged touch, and taps). The first question of the study was whether successful history with the modified technology would facilitate the participants’ use of standard touch screen technology. A second (subordinate) question was whether practice with standard touch screen technology would improve their use of it. To address these questions, the participants were presented with series of sessions with standard technology in alternation with series of sessions with the modified technology. The results showed that the participants were highly successful with the modified technology and partially (or minimally) successful with the standard technology. Only three of them seemed to improve their performance with the standard technology following practice. In conclusion, the modified touch screen technology was consistently effective in helping participants with intellectual and developmental disabilities who continued to have difficulties in using standard touch screen technology.</description>
	<pubDate>2026-04-22</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 244: Modified&nbsp;and&nbsp;Standard Touch Screen Technology to Help People with Intellectual and Developmental Disabilities Access Leisure Events and Complete Match-to-Sample Tasks: A Case Series Study</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/5/244">doi: 10.3390/technologies14050244</a></p>
	<p>Authors:
		Giulio E. Lancioni
		Gloria Alberti
		Chiara Filippini
		Simone Draghi
		Nirbhay N. Singh
		Mark F. O’Reilly
		Jeff Sigafoos
		Lorenzo Desideri
		</p>
	<p>While touch screen technology is largely available, people with intellectual and developmental disabilities might have difficulties using it due to erratic touch responses. This study included six adults with intellectual and developmental disabilities who were managing touch screen technology modified via an input adaptation module. Such a module allowed them to activate the screen and access leisure events and complete match-to-sample tasks with various response configurations (e.g., precise clicks, heavy/prolonged touch, and taps). The first question of the study was whether successful history with the modified technology would facilitate the participants’ use of standard touch screen technology. A second (subordinate) question was whether practice with standard touch screen technology would improve their use of it. To address these questions, the participants were presented with series of sessions with standard technology in alternation with series of sessions with the modified technology. The results showed that the participants were highly successful with the modified technology and partially (or minimally) successful with the standard technology. Only three of them seemed to improve their performance with the standard technology following practice. In conclusion, the modified touch screen technology was consistently effective in helping participants with intellectual and developmental disabilities who continued to have difficulties in using standard touch screen technology.</p>
	]]></content:encoded>

	<dc:title>Modified&#160;and&#160;Standard Touch Screen Technology to Help People with Intellectual and Developmental Disabilities Access Leisure Events and Complete Match-to-Sample Tasks: A Case Series Study</dc:title>
			<dc:creator>Giulio E. Lancioni</dc:creator>
			<dc:creator>Gloria Alberti</dc:creator>
			<dc:creator>Chiara Filippini</dc:creator>
			<dc:creator>Simone Draghi</dc:creator>
			<dc:creator>Nirbhay N. Singh</dc:creator>
			<dc:creator>Mark F. O’Reilly</dc:creator>
			<dc:creator>Jeff Sigafoos</dc:creator>
			<dc:creator>Lorenzo Desideri</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14050244</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-22</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-22</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>244</prism:startingPage>
		<prism:doi>10.3390/technologies14050244</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/5/244</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/4/243">

	<title>Technologies, Vol. 14, Pages 243: Application of a Linear Hash Function in Adaptive Image Steganography</title>
	<link>https://www.mdpi.com/2227-7080/14/4/243</link>
	<description>This paper discusses an adaptive method of image steganography issues based on the application of a linear hash function over the GF (2) field to control the embedding process. The method uses staggered splitting of an image into 8 × 8-pixel blocks to provide blind steganography. Classification thresholds are defined as the percentiles of the distribution of gradients throughout the image, allowing for efficient load distribution between textured and smooth areas. Experiments on the BOSSBase, SIPI and Kaggle kits show that the method provides an actual capacity of up to 0.7 bpp at PSNR 47–50 dB and is resistant to statistical tests and RS analysis. At the same time, like other approaches based on modification of pixel differences, it remains vulnerable to modern stegoanalysis based on spatial rich models (SRMs). However, thanks to the modular structure of embedding control based on linear hash function, the proposed architecture allows direct integration with many modern adaptive strategies aimed at minimizing statistical anomalies.</description>
	<pubDate>2026-04-21</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 243: Application of a Linear Hash Function in Adaptive Image Steganography</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/4/243">doi: 10.3390/technologies14040243</a></p>
	<p>Authors:
		Elmira Daiyrbayeva
		Ekaterina Merzlyakova
		Aigerim Yerimbetova
		Lyailya Cherikbayeva
		Bekturgan Akhmetov
		Nurzhigit Smailov
		Gulmira Shangytbayeva
		</p>
	<p>This paper discusses an adaptive method of image steganography issues based on the application of a linear hash function over the GF (2) field to control the embedding process. The method uses staggered splitting of an image into 8 × 8-pixel blocks to provide blind steganography. Classification thresholds are defined as the percentiles of the distribution of gradients throughout the image, allowing for efficient load distribution between textured and smooth areas. Experiments on the BOSSBase, SIPI and Kaggle kits show that the method provides an actual capacity of up to 0.7 bpp at PSNR 47–50 dB and is resistant to statistical tests and RS analysis. At the same time, like other approaches based on modification of pixel differences, it remains vulnerable to modern stegoanalysis based on spatial rich models (SRMs). However, thanks to the modular structure of embedding control based on linear hash function, the proposed architecture allows direct integration with many modern adaptive strategies aimed at minimizing statistical anomalies.</p>
	]]></content:encoded>

	<dc:title>Application of a Linear Hash Function in Adaptive Image Steganography</dc:title>
			<dc:creator>Elmira Daiyrbayeva</dc:creator>
			<dc:creator>Ekaterina Merzlyakova</dc:creator>
			<dc:creator>Aigerim Yerimbetova</dc:creator>
			<dc:creator>Lyailya Cherikbayeva</dc:creator>
			<dc:creator>Bekturgan Akhmetov</dc:creator>
			<dc:creator>Nurzhigit Smailov</dc:creator>
			<dc:creator>Gulmira Shangytbayeva</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14040243</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-21</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-21</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>243</prism:startingPage>
		<prism:doi>10.3390/technologies14040243</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/4/243</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/4/242">

	<title>Technologies, Vol. 14, Pages 242: A Novel Energy-Selective Surface Endowed with High Shielding Effectiveness by Using a Shape Memory Alloy</title>
	<link>https://www.mdpi.com/2227-7080/14/4/242</link>
	<description>In this paper, a novel high-shielding-effectiveness energy-selective surface (HSE–ESS) is proposed. In previous solutions regarding energy-selective surfaces (ESSs) presented in the literature, PIN diodes are usually employed as nonlinear transmission components; however, these diodes may be burnt by powerful high-power microwave (HPM) beams, causing ESSs to lose their shielding effectiveness (SE). To date, no studies have focused on maintaining the SE performance of ESSs after PIN diode failure. To address these limitations, we introduce shape memory alloys (SMAs) into ESS design. The consequences of PIN diode failure are offset by the physical deformation of SMA components caused by high-amplitude-current heating. This characteristic, featuring 30 dB SE, can be defined as high shielding effectiveness (HSE). After completing the design and performing accurate numerical simulations, we fabricated a prototype using PCB technology and characterized it in an anechoic environment, verifying the overall method. In particular, the SMA components proved to be an effective medium for guaranteeing electrical continuity under thermal stress conditions, thus paving the way for their extended adoption in ESSs by substituting or acting as a back-up for PIN diodes. Overall, this approach enhances the reliability and SE of ESSs by adding SMA components.</description>
	<pubDate>2026-04-21</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 242: A Novel Energy-Selective Surface Endowed with High Shielding Effectiveness by Using a Shape Memory Alloy</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/4/242">doi: 10.3390/technologies14040242</a></p>
	<p>Authors:
		Zongze Li
		Hang Yuan
		Wenxing Li
		Danilo Brizi
		Agostino Monorchio
		</p>
	<p>In this paper, a novel high-shielding-effectiveness energy-selective surface (HSE–ESS) is proposed. In previous solutions regarding energy-selective surfaces (ESSs) presented in the literature, PIN diodes are usually employed as nonlinear transmission components; however, these diodes may be burnt by powerful high-power microwave (HPM) beams, causing ESSs to lose their shielding effectiveness (SE). To date, no studies have focused on maintaining the SE performance of ESSs after PIN diode failure. To address these limitations, we introduce shape memory alloys (SMAs) into ESS design. The consequences of PIN diode failure are offset by the physical deformation of SMA components caused by high-amplitude-current heating. This characteristic, featuring 30 dB SE, can be defined as high shielding effectiveness (HSE). After completing the design and performing accurate numerical simulations, we fabricated a prototype using PCB technology and characterized it in an anechoic environment, verifying the overall method. In particular, the SMA components proved to be an effective medium for guaranteeing electrical continuity under thermal stress conditions, thus paving the way for their extended adoption in ESSs by substituting or acting as a back-up for PIN diodes. Overall, this approach enhances the reliability and SE of ESSs by adding SMA components.</p>
	]]></content:encoded>

	<dc:title>A Novel Energy-Selective Surface Endowed with High Shielding Effectiveness by Using a Shape Memory Alloy</dc:title>
			<dc:creator>Zongze Li</dc:creator>
			<dc:creator>Hang Yuan</dc:creator>
			<dc:creator>Wenxing Li</dc:creator>
			<dc:creator>Danilo Brizi</dc:creator>
			<dc:creator>Agostino Monorchio</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14040242</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-21</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-21</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>242</prism:startingPage>
		<prism:doi>10.3390/technologies14040242</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/4/242</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/4/241">

	<title>Technologies, Vol. 14, Pages 241: Machine Learning in Personalized Medication Regimen Design for the Geriatric Population: Integrating Pharmacokinetic and Pharmacodynamic Modeling with Clinical Decision-Making</title>
	<link>https://www.mdpi.com/2227-7080/14/4/241</link>
	<description>Geriatric pharmacotherapy is usually challenged by physiological senescence. For instance, progressive declines in organ function and alterations in body composition can complicate drug disposition. However, conventional pharmacometrics models commonly have limited capacity to map these high-dimensional, nonlinear relationships. In this review, we are examining the recent shift toward integrating machine learning (ML) with mechanistic pharmacokinetic (PK)/pharmacodynamic (PD) models to improve the accuracy and precision of dosing. Machine learning approaches like Random Forest and XGBoost consistently provided more accurate exposure predictions and significantly more efficient computational workflows than conventional methods. Nevertheless, concerns such as “black box” transparency and the potential of algorithmic bias toward specific patient demographics are challenging. It is important to incorporate explainability tools like SHAP, and adopting FAIR data principles is crucial for achieving professional trust and ensuring site-specific generalizability.</description>
	<pubDate>2026-04-21</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 241: Machine Learning in Personalized Medication Regimen Design for the Geriatric Population: Integrating Pharmacokinetic and Pharmacodynamic Modeling with Clinical Decision-Making</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/4/241">doi: 10.3390/technologies14040241</a></p>
	<p>Authors:
		Ahmad R. Alsayed
		Mohanad Al-Darraji
		Mohannad Al-Qaiseiah
		Anas Samara
		Mustafa Al-Bayati
		</p>
	<p>Geriatric pharmacotherapy is usually challenged by physiological senescence. For instance, progressive declines in organ function and alterations in body composition can complicate drug disposition. However, conventional pharmacometrics models commonly have limited capacity to map these high-dimensional, nonlinear relationships. In this review, we are examining the recent shift toward integrating machine learning (ML) with mechanistic pharmacokinetic (PK)/pharmacodynamic (PD) models to improve the accuracy and precision of dosing. Machine learning approaches like Random Forest and XGBoost consistently provided more accurate exposure predictions and significantly more efficient computational workflows than conventional methods. Nevertheless, concerns such as “black box” transparency and the potential of algorithmic bias toward specific patient demographics are challenging. It is important to incorporate explainability tools like SHAP, and adopting FAIR data principles is crucial for achieving professional trust and ensuring site-specific generalizability.</p>
	]]></content:encoded>

	<dc:title>Machine Learning in Personalized Medication Regimen Design for the Geriatric Population: Integrating Pharmacokinetic and Pharmacodynamic Modeling with Clinical Decision-Making</dc:title>
			<dc:creator>Ahmad R. Alsayed</dc:creator>
			<dc:creator>Mohanad Al-Darraji</dc:creator>
			<dc:creator>Mohannad Al-Qaiseiah</dc:creator>
			<dc:creator>Anas Samara</dc:creator>
			<dc:creator>Mustafa Al-Bayati</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14040241</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-21</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-21</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>241</prism:startingPage>
		<prism:doi>10.3390/technologies14040241</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/4/241</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/4/240">

	<title>Technologies, Vol. 14, Pages 240: EX-CCCII with Controlled Current Gain and Its Applications</title>
	<link>https://www.mdpi.com/2227-7080/14/4/240</link>
	<description>This paper presents a novel extra-X second-generation current-controlled conveyor (EX-CCCII) with controllable current gain. Unlike the conventional EX-CCCII, the proposed EX-CCCII provides a controllable current gain between the x- and z-terminals. To demonstrate the advantages of the EX-CCCII with the controllable current gain, the proposed EX-CCCII is employed to realize a universal current-mode filter and a three-phase current-mode oscillator. The universal filter can realize five standard filtering responses (low-pass, high-pass, band-pass, band-stop, and all-pass) using the same topology. The current gains of these filters can be controlled through the current gain of the EX-CCCII, while the natural frequency of the universal filter can be electronically tuned via the intrinsic resistance at the x-terminal. When the proposed EX-CCCII is used to implement the three-phase oscillator, the condition of oscillation can be adjusted through the current gain of the EX-CCCII, whereas the oscillation frequency can be tuned using the parasitic resistance of the x-terminals. The proposed EX-CCCII and its applications were verified through SPICE simulations using the transistor model parameters NR100N (NPN) and PR100N (PNP) of the bipolar array ALA400-CBIC-R from AT&amp;T to confirm the functionality and feasibility of the proposed topologies. Furthermore, experimental verification of the EX-CCCII and its integration into a three-phase oscillator further substantiates the proposed concept and demonstrates its practical viability.</description>
	<pubDate>2026-04-21</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 240: EX-CCCII with Controlled Current Gain and Its Applications</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/4/240">doi: 10.3390/technologies14040240</a></p>
	<p>Authors:
		Siraphop Tooprakai
		Fabian Khateb
		Tomasz Kulej
		Thanat Nonthaputha
		Jiri Vavra
		Montree Kumngern
		</p>
	<p>This paper presents a novel extra-X second-generation current-controlled conveyor (EX-CCCII) with controllable current gain. Unlike the conventional EX-CCCII, the proposed EX-CCCII provides a controllable current gain between the x- and z-terminals. To demonstrate the advantages of the EX-CCCII with the controllable current gain, the proposed EX-CCCII is employed to realize a universal current-mode filter and a three-phase current-mode oscillator. The universal filter can realize five standard filtering responses (low-pass, high-pass, band-pass, band-stop, and all-pass) using the same topology. The current gains of these filters can be controlled through the current gain of the EX-CCCII, while the natural frequency of the universal filter can be electronically tuned via the intrinsic resistance at the x-terminal. When the proposed EX-CCCII is used to implement the three-phase oscillator, the condition of oscillation can be adjusted through the current gain of the EX-CCCII, whereas the oscillation frequency can be tuned using the parasitic resistance of the x-terminals. The proposed EX-CCCII and its applications were verified through SPICE simulations using the transistor model parameters NR100N (NPN) and PR100N (PNP) of the bipolar array ALA400-CBIC-R from AT&amp;T to confirm the functionality and feasibility of the proposed topologies. Furthermore, experimental verification of the EX-CCCII and its integration into a three-phase oscillator further substantiates the proposed concept and demonstrates its practical viability.</p>
	]]></content:encoded>

	<dc:title>EX-CCCII with Controlled Current Gain and Its Applications</dc:title>
			<dc:creator>Siraphop Tooprakai</dc:creator>
			<dc:creator>Fabian Khateb</dc:creator>
			<dc:creator>Tomasz Kulej</dc:creator>
			<dc:creator>Thanat Nonthaputha</dc:creator>
			<dc:creator>Jiri Vavra</dc:creator>
			<dc:creator>Montree Kumngern</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14040240</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-21</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-21</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>240</prism:startingPage>
		<prism:doi>10.3390/technologies14040240</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/4/240</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/4/239">

	<title>Technologies, Vol. 14, Pages 239: Deep Reinforcement Learning-Based Dual-Loop Adaptive Control Method and Simulation for Loitering Munition Fuze</title>
	<link>https://www.mdpi.com/2227-7080/14/4/239</link>
	<description>To address the poor adaptability and rigid initiation modes of the loitering munition fuze in complex environments and the inadequacy of single fuzzy control against strong interference, this paper proposes a dual-loop adaptive reconfiguration control method. The architecture integrates the Twin Delayed Deep Deterministic Policy Gradient (TD3) algorithm with fuzzy logic. The inner loop uses TD3 to dynamically optimize fuzzy scaling factors based on real-time interference and state deviations. Concurrently, the outer loop utilizes a Fuze Readiness Index (FRI) and a finite state machine to manage real-time multi-modal mission switching (e.g., proximity, delay, and airburst) and reverse safety-state conversions. Co-simulations under non-stationary composite interference show that the proposed method reduces the burst height RMSE by 82.4% and 61.6% compared with the fixed-threshold and standard fuzzy baselines under the considered non-stationary composite interference setting, respectively. The false alarm rate (FAR) is reduced to 0.15%, and the reconfiguration response time under sudden interference is shortened to 12 ms. Even under extreme conditions, such as 400 ms sensor signal loss, the relative error remains within 5%. These simulation results demonstrate the potential of the proposed architecture to improve precision, responsiveness, and robustness under dynamic interference conditions and show good robustness to intermittent observation loss within the simulated operating envelope.</description>
	<pubDate>2026-04-20</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 239: Deep Reinforcement Learning-Based Dual-Loop Adaptive Control Method and Simulation for Loitering Munition Fuze</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/4/239">doi: 10.3390/technologies14040239</a></p>
	<p>Authors:
		Lingyun Zhang
		Haojie Li
		Chuanhao Zhang
		Yuan Zhao
		Shixiang Qiao
		Hang Yu
		</p>
	<p>To address the poor adaptability and rigid initiation modes of the loitering munition fuze in complex environments and the inadequacy of single fuzzy control against strong interference, this paper proposes a dual-loop adaptive reconfiguration control method. The architecture integrates the Twin Delayed Deep Deterministic Policy Gradient (TD3) algorithm with fuzzy logic. The inner loop uses TD3 to dynamically optimize fuzzy scaling factors based on real-time interference and state deviations. Concurrently, the outer loop utilizes a Fuze Readiness Index (FRI) and a finite state machine to manage real-time multi-modal mission switching (e.g., proximity, delay, and airburst) and reverse safety-state conversions. Co-simulations under non-stationary composite interference show that the proposed method reduces the burst height RMSE by 82.4% and 61.6% compared with the fixed-threshold and standard fuzzy baselines under the considered non-stationary composite interference setting, respectively. The false alarm rate (FAR) is reduced to 0.15%, and the reconfiguration response time under sudden interference is shortened to 12 ms. Even under extreme conditions, such as 400 ms sensor signal loss, the relative error remains within 5%. These simulation results demonstrate the potential of the proposed architecture to improve precision, responsiveness, and robustness under dynamic interference conditions and show good robustness to intermittent observation loss within the simulated operating envelope.</p>
	]]></content:encoded>

	<dc:title>Deep Reinforcement Learning-Based Dual-Loop Adaptive Control Method and Simulation for Loitering Munition Fuze</dc:title>
			<dc:creator>Lingyun Zhang</dc:creator>
			<dc:creator>Haojie Li</dc:creator>
			<dc:creator>Chuanhao Zhang</dc:creator>
			<dc:creator>Yuan Zhao</dc:creator>
			<dc:creator>Shixiang Qiao</dc:creator>
			<dc:creator>Hang Yu</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14040239</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-20</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-20</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>239</prism:startingPage>
		<prism:doi>10.3390/technologies14040239</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/4/239</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/4/238">

	<title>Technologies, Vol. 14, Pages 238: Multi-Scale Spectral Recurrent Network Based on Random Fourier Features for Wind Speed Forecasting</title>
	<link>https://www.mdpi.com/2227-7080/14/4/238</link>
	<description>Accurate wind speed forecasting is critical for reliable wind-power integration, yet it remains challenging due to the strongly non-stationary and inherently multi-scale nature of atmospheric processes. While deep learning models—such as LSTM, GRU, and Transformer architectures—achieve competitive short- and medium-term performance, they frequently suffer from spectral bias, hyperparameter sensitivity, and reduced generalization under heterogeneous operating regimes. To address these limitations, we propose a multi-scale spectral–recurrent framework, termed RFF-RNN, which integrates multi-band Random Fourier Feature (RFF) encodings with parameterizable recurrent backbones. A key innovation of our approach is the deliberate relaxation of strict shift-invariance constraints; by jointly optimizing spectral frequencies, phase biases, and bandwidth scales alongside the neural weights, the framework dynamically shapes a fully data-driven spectral embedding. To ensure robust adaptation, we employ a two-stage optimization strategy combining gradient-based inner-loop learning with outer-loop Bayesian hyperparameter tuning. Our extensive evaluations on a controlled synthetic benchmark and six geographically diverse real-world wind datasets (spanning the USA, China, and the Netherlands) demonstrate the superiority of the proposed framework. Statistical validation via the Friedman test confirms that RFF-enhanced models—particularly RFF-GRU and RFF-LSTM—systematically outperform standard recurrent networks and state-of-the-art Transformer architectures (Autoformer and FEDformer). The proposed approach yields significantly lower error metrics (MAE and RMSE) and higher explained variance (R2), while exhibiting remarkable resilience against error accumulation at extended forecasting horizons.</description>
	<pubDate>2026-04-18</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 238: Multi-Scale Spectral Recurrent Network Based on Random Fourier Features for Wind Speed Forecasting</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/4/238">doi: 10.3390/technologies14040238</a></p>
	<p>Authors:
		Eder Arley Leon-Gomez
		Víctor Elvira
		Jorge Iván Montes-Monsalve
		Andrés Marino Álvarez-Meza
		Alvaro Orozco-Gutierrez
		German Castellanos-Dominguez
		</p>
	<p>Accurate wind speed forecasting is critical for reliable wind-power integration, yet it remains challenging due to the strongly non-stationary and inherently multi-scale nature of atmospheric processes. While deep learning models—such as LSTM, GRU, and Transformer architectures—achieve competitive short- and medium-term performance, they frequently suffer from spectral bias, hyperparameter sensitivity, and reduced generalization under heterogeneous operating regimes. To address these limitations, we propose a multi-scale spectral–recurrent framework, termed RFF-RNN, which integrates multi-band Random Fourier Feature (RFF) encodings with parameterizable recurrent backbones. A key innovation of our approach is the deliberate relaxation of strict shift-invariance constraints; by jointly optimizing spectral frequencies, phase biases, and bandwidth scales alongside the neural weights, the framework dynamically shapes a fully data-driven spectral embedding. To ensure robust adaptation, we employ a two-stage optimization strategy combining gradient-based inner-loop learning with outer-loop Bayesian hyperparameter tuning. Our extensive evaluations on a controlled synthetic benchmark and six geographically diverse real-world wind datasets (spanning the USA, China, and the Netherlands) demonstrate the superiority of the proposed framework. Statistical validation via the Friedman test confirms that RFF-enhanced models—particularly RFF-GRU and RFF-LSTM—systematically outperform standard recurrent networks and state-of-the-art Transformer architectures (Autoformer and FEDformer). The proposed approach yields significantly lower error metrics (MAE and RMSE) and higher explained variance (R2), while exhibiting remarkable resilience against error accumulation at extended forecasting horizons.</p>
	]]></content:encoded>

	<dc:title>Multi-Scale Spectral Recurrent Network Based on Random Fourier Features for Wind Speed Forecasting</dc:title>
			<dc:creator>Eder Arley Leon-Gomez</dc:creator>
			<dc:creator>Víctor Elvira</dc:creator>
			<dc:creator>Jorge Iván Montes-Monsalve</dc:creator>
			<dc:creator>Andrés Marino Álvarez-Meza</dc:creator>
			<dc:creator>Alvaro Orozco-Gutierrez</dc:creator>
			<dc:creator>German Castellanos-Dominguez</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14040238</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-18</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-18</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>238</prism:startingPage>
		<prism:doi>10.3390/technologies14040238</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/4/238</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/4/237">

	<title>Technologies, Vol. 14, Pages 237: Optimized Reconfigurable Intelligent Surfaces Configuration in Multiuser Wireless Networks via Fuzzy-Enhanced Pied Kingfisher Strategy</title>
	<link>https://www.mdpi.com/2227-7080/14/4/237</link>
	<description>This paper proposes a new fuzzified multi-objective wireless communication optimization model that maximizes the quantity and placement of Reconfigurable Intelligent Surfaces (RISs). In order to meet realistic deployment constraints like non-overlapping and acceptable location, the model aims to decrease the number of deployed RISs while raising the achievable rate. The Modified Pied Kingfisher Optimization Algorithm (MPKOA) is suggested as a solution to this intricate optimization issue. MPKOA features many significant improvements over the traditional Pied Kingfisher Optimization Algorithm (PKOA), such as energy-based motion control, adaptive subgrouping, flock cooperation, and memory-driven re-perching. These techniques speed up convergence, improve solution precision, reduce computation time, and balance exploration and exploitation. MPKOA performs better than standard PKOA, Enhanced version of PKOA (EPKO), Differential Evolution (DE), Grey Wolf Optimizer (GWO), and other existing algorithms, according to extensive comparisons. MPKOA can achieve up to 20% higher optimization values and 30% faster convergence, according to simulation data. In addition, the proposed MPKOA reduces computational complexity and runtime by about 50% when compared to standard PKOA-based approaches since it only requires single fitness evaluation per iteration. This enables the deployment of fewer RISs while still achieving higher communication rates. In multiuser wireless systems, MPKOA offers a robust and effective approach to RIS placement optimization, which helps to boost capacity and provide more energy-efficient 6G communication networks.</description>
	<pubDate>2026-04-17</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 237: Optimized Reconfigurable Intelligent Surfaces Configuration in Multiuser Wireless Networks via Fuzzy-Enhanced Pied Kingfisher Strategy</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/4/237">doi: 10.3390/technologies14040237</a></p>
	<p>Authors:
		Mona Gafar
		Shahenda Sarhan
		Abdullah M. Shaheen
		Ahmed S. Alwakeel
		</p>
	<p>This paper proposes a new fuzzified multi-objective wireless communication optimization model that maximizes the quantity and placement of Reconfigurable Intelligent Surfaces (RISs). In order to meet realistic deployment constraints like non-overlapping and acceptable location, the model aims to decrease the number of deployed RISs while raising the achievable rate. The Modified Pied Kingfisher Optimization Algorithm (MPKOA) is suggested as a solution to this intricate optimization issue. MPKOA features many significant improvements over the traditional Pied Kingfisher Optimization Algorithm (PKOA), such as energy-based motion control, adaptive subgrouping, flock cooperation, and memory-driven re-perching. These techniques speed up convergence, improve solution precision, reduce computation time, and balance exploration and exploitation. MPKOA performs better than standard PKOA, Enhanced version of PKOA (EPKO), Differential Evolution (DE), Grey Wolf Optimizer (GWO), and other existing algorithms, according to extensive comparisons. MPKOA can achieve up to 20% higher optimization values and 30% faster convergence, according to simulation data. In addition, the proposed MPKOA reduces computational complexity and runtime by about 50% when compared to standard PKOA-based approaches since it only requires single fitness evaluation per iteration. This enables the deployment of fewer RISs while still achieving higher communication rates. In multiuser wireless systems, MPKOA offers a robust and effective approach to RIS placement optimization, which helps to boost capacity and provide more energy-efficient 6G communication networks.</p>
	]]></content:encoded>

	<dc:title>Optimized Reconfigurable Intelligent Surfaces Configuration in Multiuser Wireless Networks via Fuzzy-Enhanced Pied Kingfisher Strategy</dc:title>
			<dc:creator>Mona Gafar</dc:creator>
			<dc:creator>Shahenda Sarhan</dc:creator>
			<dc:creator>Abdullah M. Shaheen</dc:creator>
			<dc:creator>Ahmed S. Alwakeel</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14040237</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-17</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-17</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>237</prism:startingPage>
		<prism:doi>10.3390/technologies14040237</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/4/237</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/4/236">

	<title>Technologies, Vol. 14, Pages 236: Reproducible System Innovation in DICOM Mammography Processing with Pixel-Monotonic Dynamic Range Control</title>
	<link>https://www.mdpi.com/2227-7080/14/4/236</link>
	<description>This paper presents a reproducible system innovation for processing Digital Imaging and Communications in Medicine (DICOM) mammography images based on pixel-monotonic dynamic range management and engineering-verifiable intensity transformations. Standard DICOM conversion schemes to 8-bit representation often result in irreversible luminance-range compression, locality-dependent contrast distortions, and reduced robustness of deep learning models. The proposed framework preserves the physical consistency of the Modality LUT and photometric polarity, performs breast-aware robust Winsor normalization, and applies strictly monotonic global tone mapping while preserving the 16-bit depth of the training data. System validation was performed using architecture-independent metrics. Compared to standard processing, the median value of normalized mutual information increased from 0.878 to 0.892, the effective number of bits increased from 7.88 to 10.11 (+2.25), the representation entropy increased by 1.42 bits, and the clipping rate was reduced to almost zero. Experiments with the Faster R-CNN detector showed stable or improved calcification localization at Intersection over Union (IoU) &amp;ge; 0.5 under controlled augmentation conditions. The results confirm that pixel-monotonic dynamic range control provides a reproducible, engineering-verifiable basis for AI-based mammography analysis within the evaluated dataset and experimental setting.</description>
	<pubDate>2026-04-17</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 236: Reproducible System Innovation in DICOM Mammography Processing with Pixel-Monotonic Dynamic Range Control</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/4/236">doi: 10.3390/technologies14040236</a></p>
	<p>Authors:
		Gulzira Abdikerimova
		Moldir Yessenova
		Ainur Shekerbek
		Ainur Orynbayeva
		Balkiya Zhylanbaeva
		Gulbarshin Rakhimbayeva
		Aisulu Ismailova
		Kuanysh Kadirkulov
		Zhanat Manbetova
		</p>
	<p>This paper presents a reproducible system innovation for processing Digital Imaging and Communications in Medicine (DICOM) mammography images based on pixel-monotonic dynamic range management and engineering-verifiable intensity transformations. Standard DICOM conversion schemes to 8-bit representation often result in irreversible luminance-range compression, locality-dependent contrast distortions, and reduced robustness of deep learning models. The proposed framework preserves the physical consistency of the Modality LUT and photometric polarity, performs breast-aware robust Winsor normalization, and applies strictly monotonic global tone mapping while preserving the 16-bit depth of the training data. System validation was performed using architecture-independent metrics. Compared to standard processing, the median value of normalized mutual information increased from 0.878 to 0.892, the effective number of bits increased from 7.88 to 10.11 (+2.25), the representation entropy increased by 1.42 bits, and the clipping rate was reduced to almost zero. Experiments with the Faster R-CNN detector showed stable or improved calcification localization at Intersection over Union (IoU) &amp;ge; 0.5 under controlled augmentation conditions. The results confirm that pixel-monotonic dynamic range control provides a reproducible, engineering-verifiable basis for AI-based mammography analysis within the evaluated dataset and experimental setting.</p>
	]]></content:encoded>

	<dc:title>Reproducible System Innovation in DICOM Mammography Processing with Pixel-Monotonic Dynamic Range Control</dc:title>
			<dc:creator>Gulzira Abdikerimova</dc:creator>
			<dc:creator>Moldir Yessenova</dc:creator>
			<dc:creator>Ainur Shekerbek</dc:creator>
			<dc:creator>Ainur Orynbayeva</dc:creator>
			<dc:creator>Balkiya Zhylanbaeva</dc:creator>
			<dc:creator>Gulbarshin Rakhimbayeva</dc:creator>
			<dc:creator>Aisulu Ismailova</dc:creator>
			<dc:creator>Kuanysh Kadirkulov</dc:creator>
			<dc:creator>Zhanat Manbetova</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14040236</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-17</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-17</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>236</prism:startingPage>
		<prism:doi>10.3390/technologies14040236</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/4/236</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/4/235">

	<title>Technologies, Vol. 14, Pages 235: Lossless Reversible Color Image Encryption Using Multilayer Hybrid Chaos with Gram&amp;ndash;Schmidt Orthogonalization and ChaCha20-HMAC-Authenticated Transport</title>
	<link>https://www.mdpi.com/2227-7080/14/4/235</link>
	<description>In this study, a hybrid multi-layer scheme for reversible color image encryption is proposed, ensuring lossless reconstruction and strong cryptographic security concurrently. This method consists of three main stages. First, session-specific keys are generated using HKDF-SHA256 along with a timestamp-based mechanism to prevent replay attacks and support dynamic key management. Second, a four-layer confusion&amp;ndash;diffusion structure is applied. It uses Gram&amp;ndash;Schmidt orthogonal matrices, integer-based PWLCM chaotic mapping, the Hill cipher, and dynamically created S-Boxes. These operations rely on integer modular arithmetic &amp;#8484;256 and Q16.16 fixed-point precision. Finally, ChaCha20 stream encryption with HMAC-SHA256 authentication is used to secure data transmission in distributed environments. Experimental tests conducted on standard images show strong cryptographic performance, including near-ideal entropy (7.9993 bits), a significant avalanche effect (NPCR&amp;nbsp;99.6%, UACI&amp;nbsp;33.4%), and very low pixel correlation. The method achieves perfect lossless reconstruction and provides an effective key space &amp;ge;2128. These results confirm the suitability of the proposed scheme for secure image protection in applications requiring bit-exact recovery, such as medical imaging, digital forensics, and satellite communications.</description>
	<pubDate>2026-04-16</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 235: Lossless Reversible Color Image Encryption Using Multilayer Hybrid Chaos with Gram&amp;ndash;Schmidt Orthogonalization and ChaCha20-HMAC-Authenticated Transport</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/4/235">doi: 10.3390/technologies14040235</a></p>
	<p>Authors:
		Saadia Drissi
		Faiq Gmira
		Meriyem Chergui
		</p>
	<p>In this study, a hybrid multi-layer scheme for reversible color image encryption is proposed, ensuring lossless reconstruction and strong cryptographic security concurrently. This method consists of three main stages. First, session-specific keys are generated using HKDF-SHA256 along with a timestamp-based mechanism to prevent replay attacks and support dynamic key management. Second, a four-layer confusion&amp;ndash;diffusion structure is applied. It uses Gram&amp;ndash;Schmidt orthogonal matrices, integer-based PWLCM chaotic mapping, the Hill cipher, and dynamically created S-Boxes. These operations rely on integer modular arithmetic &amp;#8484;256 and Q16.16 fixed-point precision. Finally, ChaCha20 stream encryption with HMAC-SHA256 authentication is used to secure data transmission in distributed environments. Experimental tests conducted on standard images show strong cryptographic performance, including near-ideal entropy (7.9993 bits), a significant avalanche effect (NPCR&amp;nbsp;99.6%, UACI&amp;nbsp;33.4%), and very low pixel correlation. The method achieves perfect lossless reconstruction and provides an effective key space &amp;ge;2128. These results confirm the suitability of the proposed scheme for secure image protection in applications requiring bit-exact recovery, such as medical imaging, digital forensics, and satellite communications.</p>
	]]></content:encoded>

	<dc:title>Lossless Reversible Color Image Encryption Using Multilayer Hybrid Chaos with Gram&amp;ndash;Schmidt Orthogonalization and ChaCha20-HMAC-Authenticated Transport</dc:title>
			<dc:creator>Saadia Drissi</dc:creator>
			<dc:creator>Faiq Gmira</dc:creator>
			<dc:creator>Meriyem Chergui</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14040235</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-16</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-16</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>235</prism:startingPage>
		<prism:doi>10.3390/technologies14040235</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/4/235</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/4/234">

	<title>Technologies, Vol. 14, Pages 234: Enhancing Distribution Network Performance with Coordinated PV and D-STATCOM Compensation Under Fixed and Variable Reactive Power Modes</title>
	<link>https://www.mdpi.com/2227-7080/14/4/234</link>
	<description>This paper addresses the optimal management of photovoltaic (PV) systems and distribution static synchronous compensators (D-STATCOMs) in modern electrical distribution networks. A mixed-integer nonlinear programming (MINLP) model is formulated which co-optimizes device placement, sizing, and multi-period dispatch to minimize the total annualized system costs while satisfying AC power flow and operational constraints. To solve this challenging problem, a decomposition methodology is proposed, wherein the binary location decisions for the PVs and D-STATCOMs are treated as predefined inputs, upon the basis of site selections commonly reported in the literature. With the integer variables fixed, the problem is reduced to a continuous nonlinear programming (NLP) subproblem for optimal capacity sizing and operational scheduling, which is solved using the interior point optimizer (IPOPT) via the Julia/JuMP environment. The core contribution of this work lies in its comprehensive demonstration of the economic superiority of variable reactive power injection over conventional fixed compensation schemes. Through numerical validation on standard 33- and 69-bus test systems, it is shown that a variable D-STATCOM operation yields substantial and consistent economic gains. Compared to optimized fixed-injection solutions, variable injection provides additional annual savings averaging USD 120,516 (33-bus feeder) and USD 125,620 (69-bus grid), corresponding to a further 3.4% reduction in total costs. These benefits prove robust across different device location sets identified by various metaheuristic algorithms, and they scale effectively to larger network topologies. The results demonstrate that transitioning to variable power injection is not merely an incremental improvement but a fundamental advancement for achieving techno-economic optimality in distribution system planning. 
The proposed methodology provides utilities with a computationally efficient framework for determining near-optimal PV and D-STATCOM management strategies by first fixing deployment locations based on established planning insights and then rigorously optimizing sizing and dispatch, in order to maximize economic returns while ensuring reliable network operation.</description>
	<pubDate>2026-04-16</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 234: Enhancing Distribution Network Performance with Coordinated PV and D-STATCOM Compensation Under Fixed and Variable Reactive Power Modes</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/4/234">doi: 10.3390/technologies14040234</a></p>
	<p>Authors:
		Oscar Danilo Montoya
		Luis Fernando Grisales-Noreña
		Diego Armando Giral-Ramírez
		</p>
	<p>This paper addresses the optimal management of photovoltaic (PV) systems and distribution static synchronous compensators (D-STATCOMs) in modern electrical distribution networks. A mixed-integer nonlinear programming (MINLP) model is formulated which co-optimizes device placement, sizing, and multi-period dispatch to minimize the total annualized system costs while satisfying AC power flow and operational constraints. To solve this challenging problem, a decomposition methodology is proposed, wherein the binary location decisions for the PVs and D-STATCOMs are treated as predefined inputs, upon the basis of site selections commonly reported in the literature. With the integer variables fixed, the problem is reduced to a continuous nonlinear programming (NLP) subproblem for optimal capacity sizing and operational scheduling, which is solved using the interior point optimizer (IPOPT) via the Julia/JuMP environment. The core contribution of this work lies in its comprehensive demonstration of the economic superiority of variable reactive power injection over conventional fixed compensation schemes. Through numerical validation on standard 33- and 69-bus test systems, it is shown that a variable D-STATCOM operation yields substantial and consistent economic gains. Compared to optimized fixed-injection solutions, variable injection provides additional annual savings averaging USD 120,516 (33-bus feeder) and USD 125,620 (69-bus grid), corresponding to a further 3.4% reduction in total costs. These benefits prove robust across different device location sets identified by various metaheuristic algorithms, and they scale effectively to larger network topologies. The results demonstrate that transitioning to variable power injection is not merely an incremental improvement but a fundamental advancement for achieving techno-economic optimality in distribution system planning. 
The proposed methodology provides utilities with a computationally efficient framework for determining near-optimal PV and D-STATCOM management strategies by first fixing deployment locations based on established planning insights and then rigorously optimizing sizing and dispatch, in order to maximize economic returns while ensuring reliable network operation.</p>
	]]></content:encoded>

	<dc:title>Enhancing Distribution Network Performance with Coordinated PV and D-STATCOM Compensation Under Fixed and Variable Reactive Power Modes</dc:title>
			<dc:creator>Oscar Danilo Montoya</dc:creator>
			<dc:creator>Luis Fernando Grisales-Noreña</dc:creator>
			<dc:creator>Diego Armando Giral-Ramírez</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14040234</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-16</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-16</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>234</prism:startingPage>
		<prism:doi>10.3390/technologies14040234</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/4/234</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/4/233">

	<title>Technologies, Vol. 14, Pages 233: CAPG: Context-Aware Perturbation Generation for Multi-Label Adversarial Attacks</title>
	<link>https://www.mdpi.com/2227-7080/14/4/233</link>
	<description>Multi-label deep learning models are widely used in real-world applications where predictions depend on the joint presence of several semantically correlated labels. However, existing adversarial attacks largely overlook these inter-label dependencies, often perturbing outputs indiscriminately and producing structurally implausible or easily detectable changes. This paper presents CAPG (Context-Aware Perturbation Generation), a white-box, label-space targeted adversarial framework for generating selective and contextually consistent perturbations in multi-label settings. CAPG incorporates correlation-weighted regularization into the adversarial objective, enabling targeted manipulation of specific labels while preserving the contextual integrity of non-target outputs. Using the Pascal VOC 2012 dataset and a ResNet-101 multi-label classifier, we show that CAPG achieves higher Attack Success Rates (ASR) and substantially improved Contextual Consistency Scores (CCSs) than FGSM, PGD, CW, and DeepFool under identical perturbation budgets. CAPG also produces lower perceptual distortion, yielding adversarial examples that better preserve contextual structure. These results highlight the importance of correlation-aware adversarial evaluation for assessing the robustness of modern multi-label deep learning systems.</description>
	<pubDate>2026-04-16</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 233: CAPG: Context-Aware Perturbation Generation for Multi-Label Adversarial Attacks</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/4/233">doi: 10.3390/technologies14040233</a></p>
	<p>Authors:
		Aidos Askhatuly
		Dinara Berdysheva
		Azamat Berdyshev
		Aigul Adamova
		Didar Yedilkhan
		</p>
	<p>Multi-label deep learning models are widely used in real-world applications where predictions depend on the joint presence of several semantically correlated labels. However, existing adversarial attacks largely overlook these inter-label dependencies, often perturbing outputs indiscriminately and producing structurally implausible or easily detectable changes. This paper presents CAPG (Context-Aware Perturbation Generation), a white-box, label-space targeted adversarial framework for generating selective and contextually consistent perturbations in multi-label settings. CAPG incorporates correlation-weighted regularization into the adversarial objective, enabling targeted manipulation of specific labels while preserving the contextual integrity of non-target outputs. Using the Pascal VOC 2012 dataset and a ResNet-101 multi-label classifier, we show that CAPG achieves higher Attack Success Rates (ASR) and substantially improved Contextual Consistency Scores (CCSs) than FGSM, PGD, CW, and DeepFool under identical perturbation budgets. CAPG also produces lower perceptual distortion, yielding adversarial examples that better preserve contextual structure. These results highlight the importance of correlation-aware adversarial evaluation for assessing the robustness of modern multi-label deep learning systems.</p>
	]]></content:encoded>

	<dc:title>CAPG: Context-Aware Perturbation Generation for Multi-Label Adversarial Attacks</dc:title>
			<dc:creator>Aidos Askhatuly</dc:creator>
			<dc:creator>Dinara Berdysheva</dc:creator>
			<dc:creator>Azamat Berdyshev</dc:creator>
			<dc:creator>Aigul Adamova</dc:creator>
			<dc:creator>Didar Yedilkhan</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14040233</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-16</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-16</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>233</prism:startingPage>
		<prism:doi>10.3390/technologies14040233</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/4/233</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/4/231">

	<title>Technologies, Vol. 14, Pages 231: Understanding Maritime Traffic Complexity: A Comprehensive Concept Development Review</title>
	<link>https://www.mdpi.com/2227-7080/14/4/231</link>
	<description>Maritime traffic complexity (MTC) is a term that has gained increased importance in the last decade in the maritime safety domain. It is a concept for understanding navigational safety and operational challenges in congested maritime environments. Although research interest in MTC has grown, it is a concept that remains fragmented, with various interpretations of definitions, indicators, and modeling approaches present in the literature. This study presents a comprehensive literature review and bibliometric analysis to synthesize the current state of research on MTC as a scientific construct and clarify its conceptual foundations from an analytical perspective. In accordance with PRISMA guidelines and systematic literature review (SLR) methodology, relevant studies were identified and screened across major scientific databases. A detailed analysis was conducted on 40 scientific publications. The findings indicate that most existing MTC models rely mainly on Automatic Identification System (AIS) data and corresponding derived metrics. MTC is primarily assessed through geometric vessel&amp;ndash;vessel interactions, relative motion parameters, and collision-risk indicators. Bibliometric analysis demonstrates a rapid increase in scientific interest in this topic since 2015, with research concentrated in several leading journals. The study identifies a significant methodological limitation in current frameworks, which often overlook the heterogeneity of marine traffic, environmental conditions, vessel reliability, and human factors. Therefore, this study highlights the need for a more comprehensive MTC evaluation framework that incorporates operational, geographical constraint-based, environmental, and behavioral variables alongside traditional AIS-based metrics.</description>
	<pubDate>2026-04-16</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 231: Understanding Maritime Traffic Complexity: A Comprehensive Concept Development Review</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/4/231">doi: 10.3390/technologies14040231</a></p>
	<p>Authors:
		Vice Milin
		Branko Lalić
		Tatjana Stanivuk
		Matko Maleš
		</p>
	<p>Maritime traffic complexity (MTC) is a term that has gained increased importance in the last decade in the maritime safety domain. It is a concept for understanding navigational safety and operational challenges in congested maritime environments. Although research interest in MTC has grown, it is a concept that remains fragmented, with various interpretations of definitions, indicators, and modeling approaches present in the literature. This study presents a comprehensive literature review and bibliometric analysis to synthesize the current state of research on MTC as a scientific construct and clarify its conceptual foundations from an analytical perspective. In accordance with PRISMA guidelines and systematic literature review (SLR) methodology, relevant studies were identified and screened across major scientific databases. A detailed analysis was conducted on 40 scientific publications. The findings indicate that most existing MTC models rely mainly on Automatic Identification System (AIS) data and corresponding derived metrics. MTC is primarily assessed through geometric vessel&amp;ndash;vessel interactions, relative motion parameters, and collision-risk indicators. Bibliometric analysis demonstrates a rapid increase in scientific interest in this topic since 2015, with research concentrated in several leading journals. The study identifies a significant methodological limitation in current frameworks, which often overlook the heterogeneity of marine traffic, environmental conditions, vessel reliability, and human factors. Therefore, this study highlights the need for a more comprehensive MTC evaluation framework that incorporates operational, geographical constraint-based, environmental, and behavioral variables alongside traditional AIS-based metrics.</p>
	]]></content:encoded>

	<dc:title>Understanding Maritime Traffic Complexity: A Comprehensive Concept Development Review</dc:title>
			<dc:creator>Vice Milin</dc:creator>
			<dc:creator>Branko Lalić</dc:creator>
			<dc:creator>Tatjana Stanivuk</dc:creator>
			<dc:creator>Matko Maleš</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14040231</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-16</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-16</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>231</prism:startingPage>
		<prism:doi>10.3390/technologies14040231</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/4/231</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/4/232">

	<title>Technologies, Vol. 14, Pages 232: Conformal Predictions for Visual Animal Identification</title>
	<link>https://www.mdpi.com/2227-7080/14/4/232</link>
	<description>Neural network-based visual identification of animals has significant potential for livestock farming and herd management. Real farm environments rarely provide controlled visual conditions for high-quality dataset collection, which often leads to reduced model performance on out-of-distribution inputs and makes confidence estimation essential for reliable application. This work introduces a conformal prediction framework for animal identification based on pretrained neural network embeddings (ResNet-50 and Swin Transformer), enabling the generation of prediction sets with formal confidence guarantees. By calibrating a nonconformity score derived from cosine distances in the embedding space, the method ensures that the true identity is included in the prediction set at a user-defined confidence level. Three nonconformity scoring functions are evaluated to determine which produces the most compact prediction sets. Experiments on cow and goat datasets demonstrate that the framework achieves empirical coverage close to the target confidence levels across different embedding models. The ratio-based nonconformity measure consistently outperforms others, reducing mean set sizes by up to 79% compared to alternative measures. Swin-T embeddings outperform ResNet-50 by up to 14 percentage points in singleton prediction rate. The proposed framework preserves formal validity guarantees, improving robustness and interpretability in practical livestock applications where standard identification methods return only the nearest match without reliability estimates.</description>
	<pubDate>2026-04-16</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 232: Conformal Predictions for Visual Animal Identification</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/4/232">doi: 10.3390/technologies14040232</a></p>
	<p>Authors:
		Alexander Marazov
		Gergana Balieva
		Dimitar Tanchev
		Ivanka Lazarova
		Ralitsa Rankova
		</p>
	<p>Neural network-based visual identification of animals has significant potential for livestock farming and herd management. Real farm environments rarely provide controlled visual conditions for high-quality dataset collection, which often leads to reduced model performance on out-of-distribution inputs and makes confidence estimation essential for reliable application. This work introduces a conformal prediction framework for animal identification based on pretrained neural network embeddings (ResNet-50 and Swin Transformer), enabling the generation of prediction sets with formal confidence guarantees. By calibrating a nonconformity score derived from cosine distances in the embedding space, the method ensures that the true identity is included in the prediction set at a user-defined confidence level. Three nonconformity scoring functions are evaluated to determine which produces the most compact prediction sets. Experiments on cow and goat datasets demonstrate that the framework achieves empirical coverage close to the target confidence levels across different embedding models. The ratio-based nonconformity measure consistently outperforms others, reducing mean set sizes by up to 79% compared to alternative measures. Swin-T embeddings outperform ResNet-50 by up to 14 percentage points in singleton prediction rate. The proposed framework preserves formal validity guarantees, improving robustness and interpretability in practical livestock applications where standard identification methods return only the nearest match without reliability estimates.</p>
	]]></content:encoded>

	<dc:title>Conformal Predictions for Visual Animal Identification</dc:title>
			<dc:creator>Alexander Marazov</dc:creator>
			<dc:creator>Gergana Balieva</dc:creator>
			<dc:creator>Dimitar Tanchev</dc:creator>
			<dc:creator>Ivanka Lazarova</dc:creator>
			<dc:creator>Ralitsa Rankova</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14040232</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-16</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-16</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>232</prism:startingPage>
		<prism:doi>10.3390/technologies14040232</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/4/232</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/4/230">

	<title>Technologies, Vol. 14, Pages 230: Data Science and Big Data in Biology, Physical Science and Engineering&amp;mdash;2nd Edition</title>
	<link>https://www.mdpi.com/2227-7080/14/4/230</link>
	<description>Big Data analysis [...]</description>
	<pubDate>2026-04-15</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 230: Data Science and Big Data in Biology, Physical Science and Engineering&amp;mdash;2nd Edition</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/4/230">doi: 10.3390/technologies14040230</a></p>
	<p>Authors:
		Mohammed Mahmoud
		</p>
	<p>Big Data analysis [...]</p>
	]]></content:encoded>

	<dc:title>Data Science and Big Data in Biology, Physical Science and Engineering&amp;mdash;2nd Edition</dc:title>
			<dc:creator>Mohammed Mahmoud</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14040230</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-15</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-15</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Editorial</prism:section>
	<prism:startingPage>230</prism:startingPage>
		<prism:doi>10.3390/technologies14040230</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/4/230</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/4/229">

	<title>Technologies, Vol. 14, Pages 229: The Ethical Double-Edged Sword: A Framework for Dignity-by-Design in Gerontological Assistive Technologies</title>
	<link>https://www.mdpi.com/2227-7080/14/4/229</link>
	<description>The institutional drive to deploy digital assistive technologies&amp;mdash;from IoT monitoring to AI companions&amp;mdash;as a solution to the aging care crisis functions as an ethical double-edged sword. This article argues that beyond isolated risks, these technologies introduce a systemic tension where gains in safety and efficiency often come at the cost of autonomy, human connection, and equity. We propose a critical framework that diagnoses four interconnected dimensions of this tension: (1) the erosion of privacy and autonomy through pervasive surveillance; (2) the risk of dehumanization in high-tech, low-touch interactions; (3) the &amp;ldquo;digital grey divide&amp;rdquo; as a social determinant of health; and (4) the perpetuation of &amp;ldquo;coded ageism&amp;rdquo; through algorithmic bias. To bridge the gap between ethical principle and practice, the framework translates this diagnosis into a practical roadmap for &amp;ldquo;Dignity-by-Design.&amp;rdquo; It operationalizes person-centered care through three actionable shifts: moving from compliance to commitment, replacing static consent with dynamic engagement, and establishing the lived experience of older adults and caregivers as a core design standard via participatory action research. Ultimately, this work provides a critical tool for researchers, developers, and policymakers to guide the ethically aligned implementation of technologies that truly enhance autonomy, foster trust, and uphold dignity in geriatric care.</description>
	<pubDate>2026-04-14</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 229: The Ethical Double-Edged Sword: A Framework for Dignity-by-Design in Gerontological Assistive Technologies</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/4/229">doi: 10.3390/technologies14040229</a></p>
	<p>Authors:
		Francisco Nieto-Escamez
		Cleiton Ferreira
		</p>
	<p>The institutional drive to deploy digital assistive technologies&amp;mdash;from IoT monitoring to AI companions&amp;mdash;as a solution to the aging care crisis functions as an ethical double-edged sword. This article argues that beyond isolated risks, these technologies introduce a systemic tension where gains in safety and efficiency often come at the cost of autonomy, human connection, and equity. We propose a critical framework that diagnoses four interconnected dimensions of this tension: (1) the erosion of privacy and autonomy through pervasive surveillance; (2) the risk of dehumanization in high-tech, low-touch interactions; (3) the &amp;ldquo;digital grey divide&amp;rdquo; as a social determinant of health; and (4) the perpetuation of &amp;ldquo;coded ageism&amp;rdquo; through algorithmic bias. To bridge the gap between ethical principle and practice, the framework translates this diagnosis into a practical roadmap for &amp;ldquo;Dignity-by-Design.&amp;rdquo; It operationalizes person-centered care through three actionable shifts: moving from compliance to commitment, replacing static consent with dynamic engagement, and establishing the lived experience of older adults and caregivers as a core design standard via participatory action research. Ultimately, this work provides a critical tool for researchers, developers, and policymakers to guide the ethically aligned implementation of technologies that truly enhance autonomy, foster trust, and uphold dignity in geriatric care.</p>
	]]></content:encoded>

	<dc:title>The Ethical Double-Edged Sword: A Framework for Dignity-by-Design in Gerontological Assistive Technologies</dc:title>
			<dc:creator>Francisco Nieto-Escamez</dc:creator>
			<dc:creator>Cleiton Ferreira</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14040229</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-14</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-14</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>229</prism:startingPage>
		<prism:doi>10.3390/technologies14040229</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/4/229</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/4/227">

	<title>Technologies, Vol. 14, Pages 227: Selecting a Cybersecurity Risk Analysis Methodology for MSMEs Using a Multi-Criteria Method (AHP)</title>
	<link>https://www.mdpi.com/2227-7080/14/4/227</link>
	<description>In the current context of digital transformation, Micro-, Small-, and Medium-Sized Enterprises (MSMEs) are increasingly exposed to cybersecurity risks. This exposure is intensified by the limited adoption of international standards for identifying impacts, low budgets, and shortages of trained personnel, which collectively result in the absence of structured control plans for mitigating cyber risks. (1) This study proposes a mechanism for selecting a cybersecurity risk analysis and management methodology suited to Colombian MSMEs by applying the multi-criteria Analytic Hierarchy Process (AHP) method. (2) The employed approach is qualitative and follows the AHP procedure to select the most suitable option that can be applied to cybersecurity. This selection process evaluated different criteria in five standards: ISO/IEC 27005:2022, NIST SP 800-30, OCTAVE-S, MAGERIT, and EBIOS-RM. (3) The AHP method enabled, in a practical manner, the selection of OCTAVE-S as the primary methodology, complemented with elements from other standards. Finally, the proposed methodology was implemented in a cloud-based web application called the Risk Analysis Module, integrated into the Keru IT security platform. It is concluded that the multi-criteria AHP method is effective and allows organizations to select the standards most appropriate to their needs, with potential applicability to other types of decisions.</description>
	<pubDate>2026-04-14</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 227: Selecting a Cybersecurity Risk Analysis Methodology for MSMEs Using a Multi-Criteria Method (AHP)</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/4/227">doi: 10.3390/technologies14040227</a></p>
	<p>Authors:
		Gabriel Enrique Taborda Blandon
		Juan Fernando Hurtado Rivera
		Javier Mauricio Durán Vásquez
		Maria José Monsalve Ruiz
		Marco Tulio Silva Castillo
		Hector Fernando Vargas Montoya
		</p>
	<p>In the current context of digital transformation, Micro-, Small-, and Medium-Sized Enterprises (MSMEs) are increasingly exposed to cybersecurity risks. This exposure is intensified by the limited adoption of international standards for identifying impacts, low budgets, and shortages of trained personnel, which collectively result in the absence of structured control plans for mitigating cyber risks. (1) This study proposes a mechanism for selecting a cybersecurity risk analysis and management methodology suited to Colombian MSMEs by applying the multi-criteria Analytic Hierarchy Process (AHP) method. (2) The employed approach is qualitative and follows the AHP procedure to select the most suitable option that can be applied to cybersecurity. This selection process evaluated different criteria in five standards: ISO/IEC 27005:2022, NIST SP 800-30, OCTAVE-S, MAGERIT, and EBIOS-RM. (3) The AHP method enabled, in a practical manner, the selection of OCTAVE-S as the primary methodology, complemented with elements from other standards. Finally, the proposed methodology was implemented in a cloud-based web application called the Risk Analysis Module, integrated into the Keru IT security platform. It is concluded that the multi-criteria AHP method is effective and allows organizations to select the standards most appropriate to their needs, with potential applicability to other types of decisions.</p>
	]]></content:encoded>

	<dc:title>Selecting a Cybersecurity Risk Analysis Methodology for MSMEs Using a Multi-Criteria Method (AHP)</dc:title>
			<dc:creator>Gabriel Enrique Taborda Blandon</dc:creator>
			<dc:creator>Juan Fernando Hurtado Rivera</dc:creator>
			<dc:creator>Javier Mauricio Durán Vásquez</dc:creator>
			<dc:creator>Maria José Monsalve Ruiz</dc:creator>
			<dc:creator>Marco Tulio Silva Castillo</dc:creator>
			<dc:creator>Hector Fernando Vargas Montoya</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14040227</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-14</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-14</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>227</prism:startingPage>
		<prism:doi>10.3390/technologies14040227</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/4/227</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/4/228">

	<title>Technologies, Vol. 14, Pages 228: A Human&amp;ndash;AI Collaborative Pipeline for Decision Support in Urban Development Projects Based on Large-Scale Social Media Text Analysis</title>
	<link>https://www.mdpi.com/2227-7080/14/4/228</link>
	<description>The rapid growth of digital communication platforms has generated vast volumes of user-generated textual data and digital footprints, creating growing demand for scalable artificial intelligence systems capable of supporting evidence-based decision-making. This study proposes and evaluates a human&amp;ndash;AI collaborative analytical pipeline for multi-class sentiment and aggression analysis of large-scale social media data (N = 15,064 messages) related to an urban infrastructure project. The proposed framework integrates standard NLP preprocessing, machine learning-based classifiers, temporal aggregation, and controlled large language model (LLM)-assisted classification within a structured analytical workflow that incorporates expert validation and oversight. A stratified manual validation procedure (n = 301) demonstrated substantial inter-annotator agreement (&amp;kappa; = 0.70) and stable multi-class classification accuracy (80%). The results indicate that combining sentiment polarity and aggression detection as complementary linguistic indicators improves sensitivity to shifts in discourse dynamics and enables early identification of emerging social tension. The study demonstrates the potential of human&amp;ndash;AI collaborative analytical frameworks for transparent, interpretable, and predictive large-scale social media analysis in decision-support contexts.</description>
	<pubDate>2026-04-14</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 228: A Human&amp;ndash;AI Collaborative Pipeline for Decision Support in Urban Development Projects Based on Large-Scale Social Media Text Analysis</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/4/228">doi: 10.3390/technologies14040228</a></p>
	<p>Authors:
		Alexander A. Kharlamov
		Maria Pilgun
		</p>
	<p>The rapid growth of digital communication platforms has generated vast volumes of user-generated textual data and digital footprints, creating growing demand for scalable artificial intelligence systems capable of supporting evidence-based decision-making. This study proposes and evaluates a human&amp;ndash;AI collaborative analytical pipeline for multi-class sentiment and aggression analysis of large-scale social media data (N = 15,064 messages) related to an urban infrastructure project. The proposed framework integrates standard NLP preprocessing, machine learning-based classifiers, temporal aggregation, and controlled large language model (LLM)-assisted classification within a structured analytical workflow that incorporates expert validation and oversight. A stratified manual validation procedure (n = 301) demonstrated substantial inter-annotator agreement (&amp;kappa; = 0.70) and stable multi-class classification accuracy (80%). The results indicate that combining sentiment polarity and aggression detection as complementary linguistic indicators improves sensitivity to shifts in discourse dynamics and enables early identification of emerging social tension. The study demonstrates the potential of human&amp;ndash;AI collaborative analytical frameworks for transparent, interpretable, and predictive large-scale social media analysis in decision-support contexts.</p>
	]]></content:encoded>

	<dc:title>A Human&amp;ndash;AI Collaborative Pipeline for Decision Support in Urban Development Projects Based on Large-Scale Social Media Text Analysis</dc:title>
			<dc:creator>Alexander A. Kharlamov</dc:creator>
			<dc:creator>Maria Pilgun</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14040228</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-14</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-14</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>228</prism:startingPage>
		<prism:doi>10.3390/technologies14040228</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/4/228</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/4/225">

	<title>Technologies, Vol. 14, Pages 225: Robotic Process Automation in Business Process Management: A Systematic Literature Review and an Integrated Framework</title>
	<link>https://www.mdpi.com/2227-7080/14/4/225</link>
	<description>Robotic Process Automation (RPA) has emerged as a significant technology within Business Process Management (BPM), yet the academic literature remains fragmented. This paper presents a systematic literature review (SLR) of RPA in BPM, conducted following PRISMA 2020 guidelines and synthesizing 83 peer-reviewed journal articles published between 2015 and 2025. Systematic content analysis identifies six thematic dimensions: (1) process identification and selection; (2) implementation and life cycle management; (3) benefits and performance outcomes; (4) challenges and barriers; (5) technology integration; and (6) governance and organizational impact. These dimensions are synthesized into a conceptual integrated framework comprising three pillars&amp;mdash;strategic alignment, operational execution, and continuous improvement&amp;mdash;grounded in BPM theory and dynamic capabilities. The framework, while not empirically validated in this study, provides a theoretical foundation for future research and practice. Emerging themes include expanding sectoral breadth, empirical adoption model validation, and heightened focus on governance and sociotechnical work transformation. This review contributes a comprehensive thematic synthesis of RPA within BPM and offers a theory-grounded conceptual framework to guide researchers and practitioners in navigating intelligent process automation.</description>
	<pubDate>2026-04-14</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 225: Robotic Process Automation in Business Process Management: A Systematic Literature Review and an Integrated Framework</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/4/225">doi: 10.3390/technologies14040225</a></p>
	<p>Authors:
		Sommai Khantong
		Pankom Sriboonlue
		</p>
	<p>Robotic Process Automation (RPA) has emerged as a significant technology within Business Process Management (BPM), yet the academic literature remains fragmented. This paper presents a systematic literature review (SLR) of RPA in BPM, conducted following PRISMA 2020 guidelines and synthesizing 83 peer-reviewed journal articles published between 2015 and 2025. Systematic content analysis identifies six thematic dimensions: (1) process identification and selection; (2) implementation and life cycle management; (3) benefits and performance outcomes; (4) challenges and barriers; (5) technology integration; and (6) governance and organizational impact. These dimensions are synthesized into a conceptual integrated framework comprising three pillars&amp;mdash;strategic alignment, operational execution, and continuous improvement&amp;mdash;grounded in BPM theory and dynamic capabilities. The framework, while not empirically validated in this study, provides a theoretical foundation for future research and practice. Emerging themes include expanding sectoral breadth, empirical adoption model validation, and heightened focus on governance and sociotechnical work transformation. This review contributes a comprehensive thematic synthesis of RPA within BPM and offers a theory-grounded conceptual framework to guide researchers and practitioners in navigating intelligent process automation.</p>
	]]></content:encoded>

	<dc:title>Robotic Process Automation in Business Process Management: A Systematic Literature Review and an Integrated Framework</dc:title>
			<dc:creator>Sommai Khantong</dc:creator>
			<dc:creator>Pankom Sriboonlue</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14040225</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-14</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-14</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Systematic Review</prism:section>
	<prism:startingPage>225</prism:startingPage>
		<prism:doi>10.3390/technologies14040225</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/4/225</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/4/226">

	<title>Technologies, Vol. 14, Pages 226: Formation of Piezoelectric Coatings on Titanium by Laser Processing with TiO2/SrCO3 Powder</title>
	<link>https://www.mdpi.com/2227-7080/14/4/226</link>
	<description>Titanium and its alloys are widely used in orthopedic and dental implantology for their corrosion resistance and biocompatibility supporting osseointegration; however, their usage is accompanied by release of wear debris that may induce inflammatory responses. The necessity of formation of multifunctional coatings that accelerate osseointegration and provide long-term mechanical stability of titanium implants remains highly relevant. We propose a new simple and scalable coating method based on the laser shock processing technique, with TiO2 and SrCO3 powder mix used as an absorption layer. Our results show that this treatment created an approximately 158.3 &amp;plusmn; 35.8 &amp;mu;m thick coating consisting of a mixed SrTiO3-TiO2 phase. The hardness of this coating evaluated by Vickers microhardness measurements showed a hardness increase of 3.3 times compared to the initial titanium substrate. Piezoelectric force microscopy (PFM) analysis revealed the presence of a reverse piezoelectric effect in the obtained structure confirming the highly likely successful synthesis of coating impregnated with SrTiO3. This piezoelectric coating can be readily deposited onto titanium substrates using the proposed method, enabling exploration of potential biomedical applications in future research.</description>
	<pubDate>2026-04-13</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 226: Formation of Piezoelectric Coatings on Titanium by Laser Processing with TiO2/SrCO3 Powder</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/4/226">doi: 10.3390/technologies14040226</a></p>
	<p>Authors:
		Xenia A. Egorova
		Fedor A. Gorensky
		Olesya E. Mayorova
		Anton S. Loshachenko
		Mikhail V. Zhukov
		Evgeniia M. Khairullina
		Dmitry A. Sinev
		</p>
	<p>Titanium and its alloys are widely used in orthopedic and dental implantology for their corrosion resistance and biocompatibility supporting osseointegration; however, their usage is accompanied by release of wear debris that may induce inflammatory responses. The necessity of formation of multifunctional coatings that accelerate osseointegration and provide long-term mechanical stability of titanium implants remains highly relevant. We propose a new simple and scalable coating method based on the laser shock processing technique, with TiO2 and SrCO3 powder mix used as an absorption layer. Our results show that this treatment created an approximately 158.3 &amp;plusmn; 35.8 &amp;mu;m thick coating consisting of a mixed SrTiO3-TiO2 phase. The hardness of this coating evaluated by Vickers microhardness measurements showed a hardness increase of 3.3 times compared to the initial titanium substrate. Piezoelectric force microscopy (PFM) analysis revealed the presence of a reverse piezoelectric effect in the obtained structure confirming the highly likely successful synthesis of coating impregnated with SrTiO3. This piezoelectric coating can be readily deposited onto titanium substrates using the proposed method, enabling exploration of potential biomedical applications in future research.</p>
	]]></content:encoded>

	<dc:title>Formation of Piezoelectric Coatings on Titanium by Laser Processing with TiO2/SrCO3 Powder</dc:title>
			<dc:creator>Xenia A. Egorova</dc:creator>
			<dc:creator>Fedor A. Gorensky</dc:creator>
			<dc:creator>Olesya E. Mayorova</dc:creator>
			<dc:creator>Anton S. Loshachenko</dc:creator>
			<dc:creator>Mikhail V. Zhukov</dc:creator>
			<dc:creator>Evgeniia M. Khairullina</dc:creator>
			<dc:creator>Dmitry A. Sinev</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14040226</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-13</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-13</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>226</prism:startingPage>
		<prism:doi>10.3390/technologies14040226</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/4/226</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/4/224">

	<title>Technologies, Vol. 14, Pages 224: Cold-Forging Die Optimization Using Experimental and Finite Element Analysis</title>
	<link>https://www.mdpi.com/2227-7080/14/4/224</link>
	<description>This study presents an integrated technological approach for improving the service life and operational stability of a P6 die used in the cold-forging production of automotive brake connectors. The work was conducted in an industrial environment characterized by high production volumes and recurrent premature die failure. A hybrid methodology combining Shainin&amp;rsquo;s dominant-variable methodology with controlled experimentation and finite element analysis (FEA) was implemented to identify and optimize the dominant process variables affecting die durability. The attack angle, chamfer length, and machine rotational speed were determined to be the primary factors influencing stress distribution and fatigue behavior. The optimized configuration (16&amp;deg; attack angle, 1.4 mm chamfer length, and 88 RPM) increased die service life by 416%, improving production throughput from approximately 60,000 to over 250,000 parts per cycle. Numerical simulations confirmed that the geometric redesign effectively reduced localized Von Mises stress concentrations, contributing to enhanced structural reliability. The results demonstrate that integrating empirical industrial methodologies with numerical modeling provides a practical and replicable framework for technological improvement in high-volume cold-forging operations. The proposed approach is transferable to similar tooling optimization challenges in the automotive manufacturing sector.</description>
	<pubDate>2026-04-13</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 224: Cold-Forging Die Optimization Using Experimental and Finite Element Analysis</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/4/224">doi: 10.3390/technologies14040224</a></p>
	<p>Authors:
		Deivi Damián-Sánchez
		Pedro Yáñez-Contreras
		Benito Aguilar-Juárez
		Martín Alberto Chimal-Cruz
		Francisco Javier Santander-Bastida
		</p>
	<p>This study presents an integrated technological approach for improving the service life and operational stability of a P6 die used in the cold-forging production of automotive brake connectors. The work was conducted in an industrial environment characterized by high production volumes and recurrent premature die failure. A hybrid methodology combining Shainin&amp;rsquo;s dominant-variable methodology with controlled experimentation and finite element analysis (FEA) was implemented to identify and optimize the dominant process variables affecting die durability. The attack angle, chamfer length, and machine rotational speed were determined to be the primary factors influencing stress distribution and fatigue behavior. The optimized configuration (16&amp;deg; attack angle, 1.4 mm chamfer length, and 88 RPM) increased die service life by 416%, improving production throughput from approximately 60,000 to over 250,000 parts per cycle. Numerical simulations confirmed that the geometric redesign effectively reduced localized Von Mises stress concentrations, contributing to enhanced structural reliability. The results demonstrate that integrating empirical industrial methodologies with numerical modeling provides a practical and replicable framework for technological improvement in high-volume cold-forging operations. The proposed approach is transferable to similar tooling optimization challenges in the automotive manufacturing sector.</p>
	]]></content:encoded>

	<dc:title>Cold-Forging Die Optimization Using Experimental and Finite Element Analysis</dc:title>
			<dc:creator>Deivi Damián-Sánchez</dc:creator>
			<dc:creator>Pedro Yáñez-Contreras</dc:creator>
			<dc:creator>Benito Aguilar-Juárez</dc:creator>
			<dc:creator>Martín Alberto Chimal-Cruz</dc:creator>
			<dc:creator>Francisco Javier Santander-Bastida</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14040224</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-13</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-13</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>224</prism:startingPage>
		<prism:doi>10.3390/technologies14040224</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/4/224</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/4/223">

	<title>Technologies, Vol. 14, Pages 223: A Feasibility Study of IoT-Based Classification of Residential Water-Use Activities in Storage Tank Systems: A Comparative Analysis of Decision Trees, Random Forest, SVM, KNN, and Neural Networks</title>
	<link>https://www.mdpi.com/2227-7080/14/4/223</link>
	<description>The increasing scarcity of urban water resources, particularly in regions with intermittent supply and household water storage tanks, demands monitoring approaches capable of identifying end-use consumption patterns beyond aggregated volume measurements. Framed primarily as a feasibility study, this research presents an IoT-based framework for the automated classification of residential water consumption activities using water-level dynamics and supervised machine learning. A non-intrusive sensing architecture based on hydrostatic pressure measurements was deployed in a domestic water tank and integrated with a cloud-based data acquisition and processing platform. Five representative household states and activities were considered: tank refilling, stable state, toilet flushing, washing clothes, and taking a bath. A labeled dataset comprising 4396 consumption events was used to train and evaluate Decision Tree, Random Forest, Support Vector Machine (SVM), k-Nearest Neighbors, and Recurrent Neural Network (LSTM) models using features derived from water-level variations. All models achieved high performance, with accuracies above 0.92 and weighted F1-scores up to 0.93. The evaluated models showed highly comparable results, with the SVM (RBF) achieving a slightly higher accuracy (0.9307) in this evaluation setting, while ROC analysis showed AUC values between 0.97 and 1.00 across all classes, indicating strong discriminative capability. Additionally, specific activities such as washing clothes and tank refilling achieved precision and recall values above 0.95. These findings confirm that hydrostatic pressure-based sensing, combined with machine learning, enables reliable identification of domestic water-use events under intermittent supply conditions. The proposed approach provides actionable insights for demand management, leak detection, and user awareness, supporting more efficient and sustainable residential water consumption strategies.</description>
	<pubDate>2026-04-13</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 223: A Feasibility Study of IoT-Based Classification of Residential Water-Use Activities in Storage Tank Systems: A Comparative Analysis of Decision Trees, Random Forest, SVM, KNN, and Neural Networks</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/4/223">doi: 10.3390/technologies14040223</a></p>
	<p>Authors:
		Iván Neftalí Chávez-Flores
		Héctor A. Guerrero-Osuna
		Jesús Antonio Nava-Pintor
		Fabián García-Vázquez
		Luis F. Luque-Vega
		Rocío Carrasco-Navarro
		Marcela E. Mata-Romero
		Jorge A. Lizarraga
		Salvador Castro-Tapia
		</p>
	<p>The increasing scarcity of urban water resources, particularly in regions with intermittent supply and household water storage tanks, demands monitoring approaches capable of identifying end-use consumption patterns beyond aggregated volume measurements. Framed primarily as a feasibility study, this research presents an IoT-based framework for the automated classification of residential water consumption activities using water-level dynamics and supervised machine learning. A non-intrusive sensing architecture based on hydrostatic pressure measurements was deployed in a domestic water tank and integrated with a cloud-based data acquisition and processing platform. Five representative household states and activities were considered: tank refilling, stable state, toilet flushing, washing clothes, and taking a bath. A labeled dataset comprising 4396 consumption events was used to train and evaluate Decision Tree, Random Forest, Support Vector Machine (SVM), k-Nearest Neighbors, and Recurrent Neural Network (LSTM) models using features derived from water-level variations. All models achieved high performance, with accuracies above 0.92 and weighted F1-scores up to 0.93. The evaluated models showed highly comparable results, with the SVM (RBF) achieving a slightly higher accuracy (0.9307) in this evaluation setting, while ROC analysis showed AUC values between 0.97 and 1.00 across all classes, indicating strong discriminative capability. Additionally, specific activities such as washing clothes and tank refilling achieved precision and recall values above 0.95. These findings confirm that hydrostatic pressure-based sensing, combined with machine learning, enables reliable identification of domestic water-use events under intermittent supply conditions. The proposed approach provides actionable insights for demand management, leak detection, and user awareness, supporting more efficient and sustainable residential water consumption strategies.</p>
	]]></content:encoded>

	<dc:title>A Feasibility Study of IoT-Based Classification of Residential Water-Use Activities in Storage Tank Systems: A Comparative Analysis of Decision Trees, Random Forest, SVM, KNN, and Neural Networks</dc:title>
			<dc:creator>Iván Neftalí Chávez-Flores</dc:creator>
			<dc:creator>Héctor A. Guerrero-Osuna</dc:creator>
			<dc:creator>Jesús Antonio Nava-Pintor</dc:creator>
			<dc:creator>Fabián García-Vázquez</dc:creator>
			<dc:creator>Luis F. Luque-Vega</dc:creator>
			<dc:creator>Rocío Carrasco-Navarro</dc:creator>
			<dc:creator>Marcela E. Mata-Romero</dc:creator>
			<dc:creator>Jorge A. Lizarraga</dc:creator>
			<dc:creator>Salvador Castro-Tapia</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14040223</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-13</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-13</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>223</prism:startingPage>
		<prism:doi>10.3390/technologies14040223</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/4/223</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/4/222">

	<title>Technologies, Vol. 14, Pages 222: Novel Ceramic and Refractory Composites for Masonry Bricks and Blocks: A Systematic Review of Materials, Properties, and Sustainability</title>
	<link>https://www.mdpi.com/2227-7080/14/4/222</link>
	<description>Masonry bricks and blocks are among the most widely used construction materials worldwide; however, their conventional production relies on energy-intensive firing processes and virgin raw materials, leading to significant environmental impacts. In response to increasing sustainability and decarbonization demands in the construction sector, numerous novel ceramic and refractory materials have been proposed for masonry applications. This systematic review provides a comprehensive assessment of recent advances in ceramic and refractory materials for masonry bricks and blocks, focusing on material classification, processing routes, microstructure&amp;ndash;property relationships, and sustainability performance. Following the PRISMA 2020 guidelines, the peer-reviewed literature published between 2018 and 2025 was systematically identified, screened, and analyzed. An analytical framework based on well-established relationships from ceramic science was adopted to support consistent comparison of mechanical, thermal, acoustic, durability, and sustainability-related properties across heterogeneous material systems. Conventional fired ceramics, waste-derived ceramics, lightweight and porous systems, alkali-activated and unfired materials, and advanced engineered ceramics were comparatively evaluated. The results reveal a clear shift from dense traditional fired ceramics toward materials incorporating industrial and agricultural residues, engineered porosity, and low-temperature or unfired processing routes. Waste-derived and geopolymer-based systems demonstrate significant potential for reducing CO2 emissions and energy consumption while maintaining functional performance suitable for masonry applications. Lightweight and porous ceramics exhibit enhanced thermal and acoustic behavior, often accompanied by reduced mechanical strength, highlighting application-dependent trade-offs. 
Overall, this review provides an integrated perspective linking composition, processing, microstructure, performance, and environmental impact, identifying key research trends and knowledge gaps relevant to sustainable masonry construction.</description>
	<pubDate>2026-04-11</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 222: Novel Ceramic and Refractory Composites for Masonry Bricks and Blocks: A Systematic Review of Materials, Properties, and Sustainability</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/4/222">doi: 10.3390/technologies14040222</a></p>
	<p>Authors:
		Hugo Martínez Ángeles
		Cesar Augusto Navarro Rubio
		Margarita G. García-Barajas
		José Gabriel Ríos Moreno
		Luis Angel Iturralde Carrera
		Leonel Díaz-Tato
		Saúl Obregón-Biosca
		Roberto Valentín Carrillo-Serrano
		Mario Trejo Perea
		</p>
	<p>Masonry bricks and blocks are among the most widely used construction materials worldwide; however, their conventional production relies on energy-intensive firing processes and virgin raw materials, leading to significant environmental impacts. In response to increasing sustainability and decarbonization demands in the construction sector, numerous novel ceramic and refractory materials have been proposed for masonry applications. This systematic review provides a comprehensive assessment of recent advances in ceramic and refractory materials for masonry bricks and blocks, focusing on material classification, processing routes, microstructure&amp;ndash;property relationships, and sustainability performance. Following the PRISMA 2020 guidelines, the peer-reviewed literature published between 2018 and 2025 was systematically identified, screened, and analyzed. An analytical framework based on well-established relationships from ceramic science was adopted to support consistent comparison of mechanical, thermal, acoustic, durability, and sustainability-related properties across heterogeneous material systems. Conventional fired ceramics, waste-derived ceramics, lightweight and porous systems, alkali-activated and unfired materials, and advanced engineered ceramics were comparatively evaluated. The results reveal a clear shift from dense traditional fired ceramics toward materials incorporating industrial and agricultural residues, engineered porosity, and low-temperature or unfired processing routes. Waste-derived and geopolymer-based systems demonstrate significant potential for reducing CO2 emissions and energy consumption while maintaining functional performance suitable for masonry applications. Lightweight and porous ceramics exhibit enhanced thermal and acoustic behavior, often accompanied by reduced mechanical strength, highlighting application-dependent trade-offs. 
Overall, this review provides an integrated perspective linking composition, processing, microstructure, performance, and environmental impact, identifying key research trends and knowledge gaps relevant to sustainable masonry construction.</p>
	]]></content:encoded>

	<dc:title>Novel Ceramic and Refractory Composites for Masonry Bricks and Blocks: A Systematic Review of Materials, Properties, and Sustainability</dc:title>
			<dc:creator>Hugo Martínez Ángeles</dc:creator>
			<dc:creator>Cesar Augusto Navarro Rubio</dc:creator>
			<dc:creator>Margarita G. García-Barajas</dc:creator>
			<dc:creator>José Gabriel Ríos Moreno</dc:creator>
			<dc:creator>Luis Angel Iturralde Carrera</dc:creator>
			<dc:creator>Leonel Díaz-Tato</dc:creator>
			<dc:creator>Saúl Obregón-Biosca</dc:creator>
			<dc:creator>Roberto Valentín Carrillo-Serrano</dc:creator>
			<dc:creator>Mario Trejo Perea</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14040222</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-11</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-11</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Systematic Review</prism:section>
	<prism:startingPage>222</prism:startingPage>
		<prism:doi>10.3390/technologies14040222</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/4/222</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/4/221">

	<title>Technologies, Vol. 14, Pages 221: Early Crop Type Classification Based on Seasonal Spectral Features and Machine Learning Methods</title>
	<link>https://www.mdpi.com/2227-7080/14/4/221</link>
	<description>This paper explores the feasibility of early-season crop classification based on Sentinel-2-time series using the TimeSen2Crop dataset (&amp;asymp;1 million pixels, 16 crops). The aim of the study was to evaluate the spectral-phenological separability of crops during the season and compare the performance of classical tabular algorithms, deep sequence models, and a seasonally oriented hybrid stacking scheme. Based on multispectral observations, a feature set was formed from 9 optical channels and 13 vegetation indices for 30 dates. F-criteria were calculated, confirming a sharp increase in interclass separability during the active vegetative growth phase and substantiating three time series truncation scenarios (early, early + mid-season, and full season). Random Forest (macro-F1: 0.46/0.74/0.75) was used as the base tabular model. LSTM, BiLSTM, GRU, 1D-CNN, and Transformer were trained in parallel, with Transformer showing the best results among the deep architectures (0.42/0.68/0.78). The main contribution of the work is a hybrid multi-layer stacking scheme combining heterogeneous base algorithms and OOF meta-features, which provides the highest quality (0.51/0.83/0.86) in all scenarios. The obtained results confirm the effectiveness of phenology-oriented selection of time windows, informative indices, and hybrid ensemble learning for improving the accuracy of early-season crop monitoring.</description>
	<pubDate>2026-04-10</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 221: Early Crop Type Classification Based on Seasonal Spectral Features and Machine Learning Methods</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/4/221">doi: 10.3390/technologies14040221</a></p>
	<p>Authors:
		Ainagul Alimagambetova
		Moldir Yessenova
		Assem Konyrkhanova
		Ten Tatyana
		Aliya Beissegul
		Zhuldyz Tashenova
		Kuanysh Kadirkulov
		Aitimova Ulzada
		Gulalem Mauina
		</p>
	<p>This paper explores the feasibility of early-season crop classification based on Sentinel-2-time series using the TimeSen2Crop dataset (&amp;asymp;1 million pixels, 16 crops). The aim of the study was to evaluate the spectral-phenological separability of crops during the season and compare the performance of classical tabular algorithms, deep sequence models, and a seasonally oriented hybrid stacking scheme. Based on multispectral observations, a feature set was formed from 9 optical channels and 13 vegetation indices for 30 dates. F-criteria were calculated, confirming a sharp increase in interclass separability during the active vegetative growth phase and substantiating three time series truncation scenarios (early, early + mid-season, and full season). Random Forest (macro-F1: 0.46/0.74/0.75) was used as the base tabular model. LSTM, BiLSTM, GRU, 1D-CNN, and Transformer were trained in parallel, with Transformer showing the best results among the deep architectures (0.42/0.68/0.78). The main contribution of the work is a hybrid multi-layer stacking scheme combining heterogeneous base algorithms and OOF meta-features, which provides the highest quality (0.51/0.83/0.86) in all scenarios. The obtained results confirm the effectiveness of phenology-oriented selection of time windows, informative indices, and hybrid ensemble learning for improving the accuracy of early-season crop monitoring.</p>
	]]></content:encoded>

	<dc:title>Early Crop Type Classification Based on Seasonal Spectral Features and Machine Learning Methods</dc:title>
			<dc:creator>Ainagul Alimagambetova</dc:creator>
			<dc:creator>Moldir Yessenova</dc:creator>
			<dc:creator>Assem Konyrkhanova</dc:creator>
			<dc:creator>Ten Tatyana</dc:creator>
			<dc:creator>Aliya Beissegul</dc:creator>
			<dc:creator>Zhuldyz Tashenova</dc:creator>
			<dc:creator>Kuanysh Kadirkulov</dc:creator>
			<dc:creator>Aitimova Ulzada</dc:creator>
			<dc:creator>Gulalem Mauina</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14040221</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-10</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-10</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>221</prism:startingPage>
		<prism:doi>10.3390/technologies14040221</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/4/221</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/4/220">

	<title>Technologies, Vol. 14, Pages 220: ISRU and ISFR Science and Technology&amp;mdash;A Review of the Last 15 Years</title>
	<link>https://www.mdpi.com/2227-7080/14/4/220</link>
	<description>In situ resource utilization (ISRU) and in situ fabrication and repair (ISFR) are critical research and technological paradigms for future space exploration. They aim to reduce reliance on Earth-supplied materials by utilizing resources available on celestial bodies, while enabling on-site fabrication and repair through the use and processing of local resources. ISRU and ISFR are strongly interconnected, with the shared objective of enabling more sustainable and autonomous long-duration missions to the Moon, Mars, and beyond. This work presents a comprehensive and critical review of scientific and patent literature published primarily between 2010 and 2025, complemented by selected earlier seminal contributions for context. The analysis provides an integrated perspective on major technological developments, key challenges, and emerging research directions in low-gravity and microgravity environments.</description>
	<pubDate>2026-04-10</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 220: ISRU and ISFR Science and Technology&amp;mdash;A Review of the Last 15 Years</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/4/220">doi: 10.3390/technologies14040220</a></p>
	<p>Authors:
		Giacomo Cao
		Alberto Cincotti
		Alessandro Concas
		Antonio Depau
		Giacomo Fais
		Nicola Lai
		Roberta Licheri
		Antonio Mario Locci
		Selena Montinaro
		Roberto Orrù
		Gabriele Traversari
		</p>
	<p>In situ resource utilization (ISRU) and in situ fabrication and repair (ISFR) are critical research and technological paradigms for future space exploration. They aim to reduce reliance on Earth-supplied materials by utilizing resources available on celestial bodies, while enabling on-site fabrication and repair through the use and processing of local resources. ISRU and ISFR are strongly interconnected, with the shared objective of enabling more sustainable and autonomous long-duration missions to the Moon, Mars, and beyond. This work presents a comprehensive and critical review of scientific and patent literature published primarily between 2010 and 2025, complemented by selected earlier seminal contributions for context. The analysis provides an integrated perspective on major technological developments, key challenges, and emerging research directions in low-gravity and microgravity environments.</p>
	]]></content:encoded>

	<dc:title>ISRU and ISFR Science and Technology&amp;mdash;A Review of the Last 15 Years</dc:title>
			<dc:creator>Giacomo Cao</dc:creator>
			<dc:creator>Alberto Cincotti</dc:creator>
			<dc:creator>Alessandro Concas</dc:creator>
			<dc:creator>Antonio Depau</dc:creator>
			<dc:creator>Giacomo Fais</dc:creator>
			<dc:creator>Nicola Lai</dc:creator>
			<dc:creator>Roberta Licheri</dc:creator>
			<dc:creator>Antonio Mario Locci</dc:creator>
			<dc:creator>Selena Montinaro</dc:creator>
			<dc:creator>Roberto Orrù</dc:creator>
			<dc:creator>Gabriele Traversari</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14040220</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-10</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-10</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>220</prism:startingPage>
		<prism:doi>10.3390/technologies14040220</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/4/220</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/4/219">

	<title>Technologies, Vol. 14, Pages 219: Wind Resource Assessment and Layout Optimization in the Isthmus of Tehuantepec, Mexico: A Microscale Modeling and Parametric Analysis Approach</title>
	<link>https://www.mdpi.com/2227-7080/14/4/219</link>
	<description>This wind farm study provides a detailed and deep investigation into numerous aspects of both wind dynamics and the associated wind turbine performance via a wind data analysis utilizing an extrapolated timeframe of 50 years. The major wind characteristics assessed included wind speed and direction, flow inclination, turbulence intensity, and wind speed (average based on extremes) over the entire duration of the evaluated data set. A majority of study results indicated only narrow wind speed ranges (6.3 m/s to 7.0 m/s) for turbine operation within the wind farm. Higher turbine operation speeds than the average measured wind speed may significantly increase turbine energy output. Turbines were evaluated across numerous geographic locations, resulting in average flow inclination (&amp;minus;4.12&amp;deg; to 1.57&amp;deg;) from the vertical to horizontal directions. The variation in flow inclination indicates that there is a geographic component that likely creates a localized terrain impact on turbine performance. Similarly, the measurement of turbulence intensity was also assessed, which indicated elevated levels of turbine mechanical stress and additional requirements for turbine maintenance. Energy production analyses from each turbine in the wind farm exhibited various regions of energy loss, with the highest energy losses associated with select turbines.</description>
	<pubDate>2026-04-09</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 219: Wind Resource Assessment and Layout Optimization in the Isthmus of Tehuantepec, Mexico: A Microscale Modeling and Parametric Analysis Approach</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/4/219">doi: 10.3390/technologies14040219</a></p>
	<p>Authors:
		Brenda Mendoza
		José Rafael Dorrego-Portela
		Alida Ramirez-Jimenez
		Jesus Alejandro Franco
		Alberto-Jesus Perea-Moreno
		David Muñoz-Rodriguez
		Dante Ruiz-Robles
		Araceli Peña-Fernández
		Quetzalcoatl Hernandez-Escobedo
		</p>
	<p>This wind farm study provides a detailed and deep investigation into numerous aspects of both wind dynamics and the associated wind turbine performance via a wind data analysis utilizing an extrapolated timeframe of 50 years. The major wind characteristics assessed included wind speed and direction, flow inclination, turbulence intensity, and wind speed (average based on extremes) over the entire duration of the evaluated data set. A majority of study results indicated only narrow wind speed ranges (6.3 m/s to 7.0 m/s) for turbine operation within the wind farm. Higher turbine operation speeds than the average measured wind speed may significantly increase turbine energy output. Turbines were evaluated across numerous geographic locations, resulting in average flow inclination (&amp;minus;4.12&amp;deg; to 1.57&amp;deg;) from the vertical to horizontal directions. The variation in flow inclination indicates that there is a geographic component that likely creates a localized terrain impact on turbine performance. Similarly, the measurement of turbulence intensity was also assessed, which indicated elevated levels of turbine mechanical stress and additional requirements for turbine maintenance. Energy production analyses from each turbine in the wind farm exhibited various regions of energy loss, with the highest energy losses associated with select turbines.</p>
	]]></content:encoded>

	<dc:title>Wind Resource Assessment and Layout Optimization in the Isthmus of Tehuantepec, Mexico: A Microscale Modeling and Parametric Analysis Approach</dc:title>
			<dc:creator>Brenda Mendoza</dc:creator>
			<dc:creator>José Rafael Dorrego-Portela</dc:creator>
			<dc:creator>Alida Ramirez-Jimenez</dc:creator>
			<dc:creator>Jesus Alejandro Franco</dc:creator>
			<dc:creator>Alberto-Jesus Perea-Moreno</dc:creator>
			<dc:creator>David Muñoz-Rodriguez</dc:creator>
			<dc:creator>Dante Ruiz-Robles</dc:creator>
			<dc:creator>Araceli Peña-Fernández</dc:creator>
			<dc:creator>Quetzalcoatl Hernandez-Escobedo</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14040219</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-09</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-09</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>219</prism:startingPage>
		<prism:doi>10.3390/technologies14040219</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/4/219</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/4/218">

	<title>Technologies, Vol. 14, Pages 218: From Mechanics to Machine Learning in Additive Manufacturing: A Review of Deformation, Fatigue, and Fracture</title>
	<link>https://www.mdpi.com/2227-7080/14/4/218</link>
	<description>Additive manufacturing (AM) enables a level of design flexibility that is difficult to achieve with conventional techniques, yet it inherently yields materials marked by significant variability, anisotropy, and sensitivity to defects that challenge classical mechanics-of-materials assumptions. Process-driven microstructural heterogeneity, stochastic defect populations, and residual stresses strongly influence deformation, fatigue, and fracture behavior, often outweighing nominal material properties and constraining the predictive capability of traditional constitutive and fracture mechanics models. Machine learning (ML) has emerged as a powerful means of handling the complexity of AM data; however, many current approaches depend on black-box models that lack physical transparency, extrapolate poorly, and treat uncertainty inadequately. This review contends that ML should augment&amp;mdash;rather than replace&amp;mdash;mechanics-based modeling, and that dependable prediction of AM material behavior requires mechanics-informed ML frameworks. We critically analyze the central mechanics challenges in AM and evaluate established modeling strategies alongside emerging ML methods relevant to deformation, damage, fatigue, and fracture. Particular emphasis is given to physics-informed and hybrid ML approaches that explicitly incorporate anisotropy, defect sensitivity, residual stress effects, and uncertainty quantification within learning architectures. Recent progress in ML-assisted constitutive modeling, fatigue and fracture prediction, and digital twin development is synthesized, and the implications for qualification, certification, and structural deployment of AM components are discussed.</description>
	<pubDate>2026-04-09</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 218: From Mechanics to Machine Learning in Additive Manufacturing: A Review of Deformation, Fatigue, and Fracture</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/4/218">doi: 10.3390/technologies14040218</a></p>
	<p>Authors:
		Murat Demiral
		Murat Otkur
		</p>
	<p>Additive manufacturing (AM) enables a level of design flexibility that is difficult to achieve with conventional techniques, yet it inherently yields materials marked by significant variability, anisotropy, and sensitivity to defects that challenge classical mechanics-of-materials assumptions. Process-driven microstructural heterogeneity, stochastic defect populations, and residual stresses strongly influence deformation, fatigue, and fracture behavior, often outweighing nominal material properties and constraining the predictive capability of traditional constitutive and fracture mechanics models. Machine learning (ML) has emerged as a powerful means of handling the complexity of AM data; however, many current approaches depend on black-box models that lack physical transparency, extrapolate poorly, and treat uncertainty inadequately. This review contends that ML should augment&amp;mdash;rather than replace&amp;mdash;mechanics-based modeling, and that dependable prediction of AM material behavior requires mechanics-informed ML frameworks. We critically analyze the central mechanics challenges in AM and evaluate established modeling strategies alongside emerging ML methods relevant to deformation, damage, fatigue, and fracture. Particular emphasis is given to physics-informed and hybrid ML approaches that explicitly incorporate anisotropy, defect sensitivity, residual stress effects, and uncertainty quantification within learning architectures. Recent progress in ML-assisted constitutive modeling, fatigue and fracture prediction, and digital twin development is synthesized, and the implications for qualification, certification, and structural deployment of AM components are discussed.</p>
	]]></content:encoded>

	<dc:title>From Mechanics to Machine Learning in Additive Manufacturing: A Review of Deformation, Fatigue, and Fracture</dc:title>
			<dc:creator>Murat Demiral</dc:creator>
			<dc:creator>Murat Otkur</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14040218</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-09</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-09</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>218</prism:startingPage>
		<prism:doi>10.3390/technologies14040218</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/4/218</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/4/217">

	<title>Technologies, Vol. 14, Pages 217: Memory Cueing and Augmented Sensory Feedback in Virtual Reality as an Assistive Technology for Enhancing Hand Motor Performance</title>
	<link>https://www.mdpi.com/2227-7080/14/4/217</link>
	<description>Neurological injuries and disorders affecting hand motor control can severely impair the ability to perform activities of daily living and substantially reduce quality of life. Technologies such as virtual reality (VR) are increasingly used to address fundamental challenges in therapy, including motivation and engagement; further, programmable features of digital interfaces offer additional opportunities to personalize and optimize motor training. In this proof-of-concept study, we developed and evaluated a novel VR-based training framework to support improved dexterity and hand function using physiological (sensory-driven) and cognitive (memory) cues designed to promote greater task-relevant neural engagement. The proposed approach leverages the integration of augmented sensory feedback (ASF) with memory-anchored cues for motor learning of target hand gestures. Using a within-subjects design, thirteen neurotypical adults completed four training conditions: (1) control (baseline gesture-matching in VR), (2) visual ASF (enhanced visualization and feedback of gesture accuracy), (3) memory-anchored cues (associating gestures with semantically meaningful entities, loosely analogous to American Sign Language), and (4) hybrid multimodal (visual ASF + memory-anchored cues). Training with the hybrid condition produced the fastest skill acquisition (9.3 trials to reach an 80% accuracy threshold) and the steepest initial learning slope (1.86 &amp;plusmn; 0.12%/trial), with all conditions differing significantly in initial slope (all p &amp;lt; 0.002). Post-training assessment showed that the hybrid condition achieved the highest gesture accuracy (95.2%), greatest normalized post-training accuracy gain (14.3% above baseline), fastest execution time to target gesture (1.14 s), and lowest variability in gestural kinematics (SD = 3.9%). 
Both ASF and memory-anchored cue conditions each also independently outperformed the control condition on gesture accuracy (both p &amp;le; 0.002), with omnibus ANOVAs indicating significant condition effects across metrics. Together, these findings suggest that pairing ASF cues with memory-based cognitive scaffolding can yield additive benefits for motor skill acquisition and stability. Pending validation in clinical populations, such approaches may inform the design of VR-based motor training frameworks for rehabilitation.</description>
	<pubDate>2026-04-08</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 217: Memory Cueing and Augmented Sensory Feedback in Virtual Reality as an Assistive Technology for Enhancing Hand Motor Performance</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/4/217">doi: 10.3390/technologies14040217</a></p>
	<p>Authors:
		Zachary Marvin
		Sophie Dewil
		Yu Shi
		Noam Y. Harel
		Raviraj Nataraj
		</p>
	<p>Neurological injuries and disorders affecting hand motor control can severely impair the ability to perform activities of daily living and substantially reduce quality of life. Technologies such as virtual reality (VR) are increasingly used to address fundamental challenges in therapy, including motivation and engagement; further, programmable features of digital interfaces offer additional opportunities to personalize and optimize motor training. In this proof-of-concept study, we developed and evaluated a novel VR-based training framework to support improved dexterity and hand function using physiological (sensory-driven) and cognitive (memory) cues designed to promote greater task-relevant neural engagement. The proposed approach leverages the integration of augmented sensory feedback (ASF) with memory-anchored cues for motor learning of target hand gestures. Using a within-subjects design, thirteen neurotypical adults completed four training conditions: (1) control (baseline gesture-matching in VR), (2) visual ASF (enhanced visualization and feedback of gesture accuracy), (3) memory-anchored cues (associating gestures with semantically meaningful entities, loosely analogous to American Sign Language), and (4) hybrid multimodal (visual ASF + memory-anchored cues). Training with the hybrid condition produced the fastest skill acquisition (9.3 trials to reach an 80% accuracy threshold) and the steepest initial learning slope (1.86 &amp;plusmn; 0.12%/trial), with all conditions differing significantly in initial slope (all p &amp;lt; 0.002). Post-training assessment showed that the hybrid condition achieved the highest gesture accuracy (95.2%), greatest normalized post-training accuracy gain (14.3% above baseline), fastest execution time to target gesture (1.14 s), and lowest variability in gestural kinematics (SD = 3.9%). 
Both ASF and memory-anchored cue conditions each also independently outperformed the control condition on gesture accuracy (both p &amp;le; 0.002), with omnibus ANOVAs indicating significant condition effects across metrics. Together, these findings suggest that pairing ASF cues with memory-based cognitive scaffolding can yield additive benefits for motor skill acquisition and stability. Pending validation in clinical populations, such approaches may inform the design of VR-based motor training frameworks for rehabilitation.</p>
	]]></content:encoded>

	<dc:title>Memory Cueing and Augmented Sensory Feedback in Virtual Reality as an Assistive Technology for Enhancing Hand Motor Performance</dc:title>
			<dc:creator>Zachary Marvin</dc:creator>
			<dc:creator>Sophie Dewil</dc:creator>
			<dc:creator>Yu Shi</dc:creator>
			<dc:creator>Noam Y. Harel</dc:creator>
			<dc:creator>Raviraj Nataraj</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14040217</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-08</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-08</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>217</prism:startingPage>
		<prism:doi>10.3390/technologies14040217</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/4/217</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/4/216">

	<title>Technologies, Vol. 14, Pages 216: Numerical Simulation Study on Region Tracking of Jet Formation and Armor-Piercing Process of Zirconium Alloy Shaped Charge Liner</title>
	<link>https://www.mdpi.com/2227-7080/14/4/216</link>
	<description>Zr alloy-shaped charge liners (SCLs) offer broad application prospects due to their multiple post-penetration damage effects. However, research on these liners is still in its early stages. The mechanisms of jet formation and penetration for Zr alloys SCL remain unclear, and the specific contribution of different liner regions to the penetration process is not yet understood. This gap in knowledge has limited their structural design to a black-box correlation between global structural parameters and macroscopic penetration efficiency. To address this gap, a region-tracing Smoothed Particle Hydrodynamics (SPH) simulation was employed. Following a strategy of &amp;ldquo;wall thickness layering + axial segmentation,&amp;rdquo; the Zr alloy liner was partitioned into ten characteristic regions. This methodology facilitated the tracking of material transport from each region during jet formation and penetration into an AISI 1045 steel target. The contribution of each region to the penetration depth was then quantitatively assessed via post-processing. For the first time, the &amp;ldquo;critical region&amp;rdquo; contributing most to penetration depth was identified, and the influence of the liner&amp;rsquo;s cone angle and wall thickness on the contribution of each region was revealed. This study enhances the theoretical framework for understanding the damage effects of Zr alloy shaped charge liners. It not only advances the fundamental understanding of jet penetration mechanisms but also provides a theoretical basis for the refined design and performance optimization of these liners.</description>
	<pubDate>2026-04-08</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 216: Numerical Simulation Study on Region Tracking of Jet Formation and Armor-Piercing Process of Zirconium Alloy Shaped Charge Liner</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/4/216">doi: 10.3390/technologies14040216</a></p>
	<p>Authors:
		Yan Wang
		Yifan Du
		Xingwei Liu
		Jinxu Liu
		</p>
	<p>Zr alloy-shaped charge liners (SCLs) offer broad application prospects due to their multiple post-penetration damage effects. However, research on these liners is still in its early stages. The mechanisms of jet formation and penetration for Zr alloys SCL remain unclear, and the specific contribution of different liner regions to the penetration process is not yet understood. This gap in knowledge has limited their structural design to a black-box correlation between global structural parameters and macroscopic penetration efficiency. To address this gap, a region-tracing Smoothed Particle Hydrodynamics (SPH) simulation was employed. Following a strategy of &amp;ldquo;wall thickness layering + axial segmentation,&amp;rdquo; the Zr alloy liner was partitioned into ten characteristic regions. This methodology facilitated the tracking of material transport from each region during jet formation and penetration into an AISI 1045 steel target. The contribution of each region to the penetration depth was then quantitatively assessed via post-processing. For the first time, the &amp;ldquo;critical region&amp;rdquo; contributing most to penetration depth was identified, and the influence of the liner&amp;rsquo;s cone angle and wall thickness on the contribution of each region was revealed. This study enhances the theoretical framework for understanding the damage effects of Zr alloy shaped charge liners. It not only advances the fundamental understanding of jet penetration mechanisms but also provides a theoretical basis for the refined design and performance optimization of these liners.</p>
	]]></content:encoded>

	<dc:title>Numerical Simulation Study on Region Tracking of Jet Formation and Armor-Piercing Process of Zirconium Alloy Shaped Charge Liner</dc:title>
			<dc:creator>Yan Wang</dc:creator>
			<dc:creator>Yifan Du</dc:creator>
			<dc:creator>Xingwei Liu</dc:creator>
			<dc:creator>Jinxu Liu</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14040216</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-08</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-08</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>216</prism:startingPage>
		<prism:doi>10.3390/technologies14040216</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/4/216</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/4/215">

	<title>Technologies, Vol. 14, Pages 215: TinyML for Sustainable Edge Intelligence: Practical Optimization Under Extreme Resource Constraints</title>
	<link>https://www.mdpi.com/2227-7080/14/4/215</link>
	<description>Deep learning has emerged as an effective tool for automatic waste classification, supporting cleaner cities and more sustainable recycling systems. Because environmental protection is central to the United Nations Sustainable Development Goals (SDGs), improving the sorting and processing of everyday waste is a practical step toward this broader objective. In many real-world settings, however, waste is still sorted manually, which is slow, labor-intensive, and prone to human error. Although convolutional neural networks (CNNs) can automate this task with high accuracy, many state-of-the-art models remain too large and computationally demanding for low-cost edge devices intended for deployment in homes, schools, and small recycling facilities. In this work, we investigate lightweight waste-classification models suitable for TinyML deployment while preserving competitive accuracy. We first benchmark multiple CNN architectures to establish a strong baseline, then apply complementary compression strategies including quantization, pruning, singular value decomposition (SVD) low-rank approximation, and knowledge distillation. In addition, we evaluate an RL-guided multi-teacher selection benchmark that adaptively chooses one teacher per minibatch during distillation to improve student training stability, achieving up to 85% accuracy with only 0.496 M parameters (FP32 &amp;amp;asymp; 1.89 MB; INT8 &amp;amp;asymp; 0.47 MB). Across all experiments, the best accuracy&amp;amp;ndash;size trade-off is obtained by combining knowledge distillation with post-training quantization, reducing the model footprint from approximately 16 MB to 281 KB while maintaining 82% accuracy. The resulting model is feasible for deployment on mobile applications and resource-constrained embedded devices based on model size and TensorFlow Lite Micro compatibility.</description>
	<pubDate>2026-04-07</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 215: TinyML for Sustainable Edge Intelligence: Practical Optimization Under Extreme Resource Constraints</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/4/215">doi: 10.3390/technologies14040215</a></p>
	<p>Authors:
		Mohamed Echchidmi
		Anas Bouayad
		</p>
	<p>Deep learning has emerged as an effective tool for automatic waste classification, supporting cleaner cities and more sustainable recycling systems. Because environmental protection is central to the United Nations Sustainable Development Goals (SDGs), improving the sorting and processing of everyday waste is a practical step toward this broader objective. In many real-world settings, however, waste is still sorted manually, which is slow, labor-intensive, and prone to human error. Although convolutional neural networks (CNNs) can automate this task with high accuracy, many state-of-the-art models remain too large and computationally demanding for low-cost edge devices intended for deployment in homes, schools, and small recycling facilities. In this work, we investigate lightweight waste-classification models suitable for TinyML deployment while preserving competitive accuracy. We first benchmark multiple CNN architectures to establish a strong baseline, then apply complementary compression strategies including quantization, pruning, singular value decomposition (SVD) low-rank approximation, and knowledge distillation. In addition, we evaluate an RL-guided multi-teacher selection benchmark that adaptively chooses one teacher per minibatch during distillation to improve student training stability, achieving up to 85% accuracy with only 0.496 M parameters (FP32 &amp;amp;asymp; 1.89 MB; INT8 &amp;amp;asymp; 0.47 MB). Across all experiments, the best accuracy&amp;amp;ndash;size trade-off is obtained by combining knowledge distillation with post-training quantization, reducing the model footprint from approximately 16 MB to 281 KB while maintaining 82% accuracy. The resulting model is feasible for deployment on mobile applications and resource-constrained embedded devices based on model size and TensorFlow Lite Micro compatibility.</p>
	]]></content:encoded>

	<dc:title>TinyML for Sustainable Edge Intelligence: Practical Optimization Under Extreme Resource Constraints</dc:title>
			<dc:creator>Mohamed Echchidmi</dc:creator>
			<dc:creator>Anas Bouayad</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14040215</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-07</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-07</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>215</prism:startingPage>
		<prism:doi>10.3390/technologies14040215</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/4/215</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/4/214">

	<title>Technologies, Vol. 14, Pages 214: Novel Technologies for Diagnosis of Conveyor Belt Looseness via Motor Current Signature Analysis</title>
	<link>https://www.mdpi.com/2227-7080/14/4/214</link>
	<description>This paper proposes and investigates two novel worldwide non-invasive, low-cost, online automatic diagnostic technologies for conveyor belt looseness by motor current signature analysis. Belt looseness causes impulsive transient spikes due to intermittent belt&amp;amp;ndash;motor engagement, which are captured and essentially enhanced using spectral kurtosis (SK). Two diagnostic technologies are as follows: Cross-Correlations of Spectral Moduli of orders three and four to extract supply frequency harmonic cross-correlations from SK-filtered current signals, and Consolidated Spectral Kurtosis, a band-independent technology, which enables effective diagnosis by summing essential spectral kurtosis values across the entire frequency range. Comprehensive experimental trials on an industrial grain belt conveyor system demonstrate that the proposed technologies are effective for conveyor belt looseness diagnosis. The Cross-Correlations of Spectral Moduli technologies achieved a maximum total probability of correct diagnosis value of 98%. The Consolidated Spectral Kurtosis technology captures overall impulsive energy across the whole frequency range, achieving a maximum total probability of correct diagnosis value of 99.6%. This study highlights the diagnostic effectiveness and computational efficiency of the proposed technologies for the reliable diagnosis of conveyor belt looseness. Experimental comparison of the proposed technologies is undertaken.</description>
	<pubDate>2026-04-07</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 214: Novel Technologies for Diagnosis of Conveyor Belt Looseness via Motor Current Signature Analysis</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/4/214">doi: 10.3390/technologies14040214</a></p>
	<p>Authors:
		Len Gelman
		Debanjan Mondal
		Dean Wright
		</p>
	<p>This paper proposes and investigates two novel worldwide non-invasive, low-cost, online automatic diagnostic technologies for conveyor belt looseness by motor current signature analysis. Belt looseness causes impulsive transient spikes due to intermittent belt&amp;amp;ndash;motor engagement, which are captured and essentially enhanced using spectral kurtosis (SK). Two diagnostic technologies are as follows: Cross-Correlations of Spectral Moduli of orders three and four to extract supply frequency harmonic cross-correlations from SK-filtered current signals, and Consolidated Spectral Kurtosis, a band-independent technology, which enables effective diagnosis by summing essential spectral kurtosis values across the entire frequency range. Comprehensive experimental trials on an industrial grain belt conveyor system demonstrate that the proposed technologies are effective for conveyor belt looseness diagnosis. The Cross-Correlations of Spectral Moduli technologies achieved a maximum total probability of correct diagnosis value of 98%. The Consolidated Spectral Kurtosis technology captures overall impulsive energy across the whole frequency range, achieving a maximum total probability of correct diagnosis value of 99.6%. This study highlights the diagnostic effectiveness and computational efficiency of the proposed technologies for the reliable diagnosis of conveyor belt looseness. Experimental comparison of the proposed technologies is undertaken.</p>
	]]></content:encoded>

	<dc:title>Novel Technologies for Diagnosis of Conveyor Belt Looseness via Motor Current Signature Analysis</dc:title>
			<dc:creator>Len Gelman</dc:creator>
			<dc:creator>Debanjan Mondal</dc:creator>
			<dc:creator>Dean Wright</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14040214</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-07</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-07</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>214</prism:startingPage>
		<prism:doi>10.3390/technologies14040214</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/4/214</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/4/213">

	<title>Technologies, Vol. 14, Pages 213: Vibration Response Analysis Method for an Underground Pedestrian Passage Crossing a Subway Tunnel and Orthogonally Sharing a Slab with a Vehicle Tunnel</title>
	<link>https://www.mdpi.com/2227-7080/14/4/213</link>
	<description>With the rapid urbanization in China, the spatial interaction between newly constructed underground structures and existing transportation tunnels has become increasingly frequent and complex. However, studies on the dynamic response characteristics of underground pedestrian passages subjected to the combined effects of metro- and vehicle-induced vibrations remain relatively limited. This study takes the newly constructed underground pedestrian passage at Want Want Hospital in Hunan Province as the engineering background. The pedestrian passage features a unique structural configuration, in which it is jointly constructed with an overlying vehicular tunnel through a shared slab and simultaneously crosses above an existing metro tunnel. To explore the vibration research methods for this unique structure, a three-dimensional finite element model was developed using ABAQUS and validated through in situ vibration measurements. Based on the validated model, the dynamic response of the pedestrian passage was systematically investigated from two perspectives: traffic loading conditions and shared slab thickness. The results show that metro-induced loads dominate the vibration response of the pedestrian passage. Bidirectional (reversible) train operation produces significantly greater vibration levels than unidirectional operation, and the Z-direction vibration level increases with train speed, with local exceedances occurring at 80 km/h. Under vehicle loading, the vibration response of the passage exhibits a non-monotonic trend, first increasing and then decreasing within the speed range of 30&amp;amp;ndash;40 km/h. When metro and vehicle loads act simultaneously, the vibration level is further amplified and exceeds the allowable limit. 
In addition, a pronounced vibration energy concentration zone is identified on the pedestrian passage bottom slab directly beneath the tunnel sidewalls, highlighting the necessity for targeted vibration mitigation in this region. Parametric analysis demonstrates that appropriately increasing the thickness of the vehicular tunnel bottom slab does not effectively reduce the vibration response. The findings of this study provide a reliable numerical analysis framework and practical design guidance for vibration control of complex overlapping underground structures in urban environments.</description>
	<pubDate>2026-04-05</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 213: Vibration Response Analysis Method for an Underground Pedestrian Passage Crossing a Subway Tunnel and Orthogonally Sharing a Slab with a Vehicle Tunnel</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/4/213">doi: 10.3390/technologies14040213</a></p>
	<p>Authors:
		Shuquan Peng
		Yue Li
		Ling Fan
		Zangnan Yu
		Feixiang Xie
		Yan Zhou
		</p>
	<p>With the rapid urbanization in China, the spatial interaction between newly constructed underground structures and existing transportation tunnels has become increasingly frequent and complex. However, studies on the dynamic response characteristics of underground pedestrian passages subjected to the combined effects of metro- and vehicle-induced vibrations remain relatively limited. This study takes the newly constructed underground pedestrian passage at Want Want Hospital in Hunan Province as the engineering background. The pedestrian passage features a unique structural configuration, in which it is jointly constructed with an overlying vehicular tunnel through a shared slab and simultaneously crosses above an existing metro tunnel. To explore the vibration research methods for this unique structure, a three-dimensional finite element model was developed using ABAQUS and validated through in situ vibration measurements. Based on the validated model, the dynamic response of the pedestrian passage was systematically investigated from two perspectives: traffic loading conditions and shared slab thickness. The results show that metro-induced loads dominate the vibration response of the pedestrian passage. Bidirectional (reversible) train operation produces significantly greater vibration levels than unidirectional operation, and the Z-direction vibration level increases with train speed, with local exceedances occurring at 80 km/h. Under vehicle loading, the vibration response of the passage exhibits a non-monotonic trend, first increasing and then decreasing within the speed range of 30&amp;amp;ndash;40 km/h. When metro and vehicle loads act simultaneously, the vibration level is further amplified and exceeds the allowable limit. In addition, a pronounced vibration energy concentration zone is identified on the pedestrian passage bottom slab directly beneath the tunnel sidewalls, highlighting the necessity for targeted vibration mitigation in this region. 
Parametric analysis demonstrates that appropriately increasing the thickness of the vehicular tunnel bottom slab does not effectively reduce the vibration response. The findings of this study provide a reliable numerical analysis framework and practical design guidance for vibration control of complex overlapping underground structures in urban environments.</p>
	]]></content:encoded>

	<dc:title>Vibration Response Analysis Method for an Underground Pedestrian Passage Crossing a Subway Tunnel and Orthogonally Sharing a Slab with a Vehicle Tunnel</dc:title>
			<dc:creator>Shuquan Peng</dc:creator>
			<dc:creator>Yue Li</dc:creator>
			<dc:creator>Ling Fan</dc:creator>
			<dc:creator>Zangnan Yu</dc:creator>
			<dc:creator>Feixiang Xie</dc:creator>
			<dc:creator>Yan Zhou</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14040213</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-05</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-05</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>213</prism:startingPage>
		<prism:doi>10.3390/technologies14040213</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/4/213</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/4/212">

	<title>Technologies, Vol. 14, Pages 212: Integrating Model Explainability and Uncertainty Quantification for Trustworthy Fraud Detection</title>
	<link>https://www.mdpi.com/2227-7080/14/4/212</link>
	<description>Financial fraud and money laundering continue to challenge financial stability and regulatory oversight, motivating the widespread adoption of machine learning models for transaction monitoring. Although ensemble models such as Random Forest and XGBoost achieve strong predictive performance, their deployment in high-stakes financial environments is constrained by limited interpretability, overconfident predictions, and the absence of principled mechanisms for expressing decision uncertainty. Emerging regulatory expectations increasingly emphasise transparency, accountability, and operational reliability, underscoring the need for evaluation frameworks that extend beyond predictive accuracy. This study proposes the Integrated Transparency and Confidence Framework (ITCF), a deployment-oriented approach that unifies model explainability, statistically valid uncertainty quantification, and operational decision support for fraud detection. ITCF combines instance-level explanations generated via Local Interpretable Model-Agnostic Explanations (LIME) with distribution-free uncertainty estimation using split conformal prediction. The framework incorporates selective explainability, abstention-based routing, and uncertainty-driven triage to support human-in-the-loop review. Using the PaySim dataset of 6,362,620 mobile-money transactions, Random Forest and XGBoost models are evaluated under extreme class imbalance using F1-score, AUC&amp;amp;ndash;ROC, and Matthews Correlation Coefficient (MCC). At a target coverage level of 90% (&amp;amp;alpha;=0.1), both models achieve empirical coverage close to the target level, with XGBoost producing smaller prediction sets and superior recall, MCC, and latency. ITCF provides transaction-level explanations for uncertain cases and specifies an auditable workflow that is intended to support transparency, traceability, and risk-aware human review, thereby enabling defensible human decision-making in regulated environments. 
Overall, this study illustrates how explainability and uncertainty quantification can be combined in a deployment-oriented evaluation workflow while noting that real-world validation remains a future endeavour.</description>
	<pubDate>2026-04-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 212: Integrating Model Explainability and Uncertainty Quantification for Trustworthy Fraud Detection</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/4/212">doi: 10.3390/technologies14040212</a></p>
	<p>Authors:
		Tebogo Forster Mapaila
		Makhamisa Senekane
		</p>
	<p>Financial fraud and money laundering continue to challenge financial stability and regulatory oversight, motivating the widespread adoption of machine learning models for transaction monitoring. Although ensemble models such as Random Forest and XGBoost achieve strong predictive performance, their deployment in high-stakes financial environments is constrained by limited interpretability, overconfident predictions, and the absence of principled mechanisms for expressing decision uncertainty. Emerging regulatory expectations increasingly emphasise transparency, accountability, and operational reliability, underscoring the need for evaluation frameworks that extend beyond predictive accuracy. This study proposes the Integrated Transparency and Confidence Framework (ITCF), a deployment-oriented approach that unifies model explainability, statistically valid uncertainty quantification, and operational decision support for fraud detection. ITCF combines instance-level explanations generated via Local Interpretable Model-Agnostic Explanations (LIME) with distribution-free uncertainty estimation using split conformal prediction. The framework incorporates selective explainability, abstention-based routing, and uncertainty-driven triage to support human-in-the-loop review. Using the PaySim dataset of 6,362,620 mobile-money transactions, Random Forest and XGBoost models are evaluated under extreme class imbalance using F1-score, AUC&amp;amp;ndash;ROC, and Matthews Correlation Coefficient (MCC). At a target coverage level of 90% (&amp;amp;alpha;=0.1), both models achieve empirical coverage close to the target level, with XGBoost producing smaller prediction sets and superior recall, MCC, and latency. ITCF provides transaction-level explanations for uncertain cases and specifies an auditable workflow that is intended to support transparency, traceability, and risk-aware human review, thereby enabling defensible human decision-making in regulated environments. 
Overall, this study illustrates how explainability and uncertainty quantification can be combined in a deployment-oriented evaluation workflow while noting that real-world validation remains a future endeavour.</p>
	]]></content:encoded>

	<dc:title>Integrating Model Explainability and Uncertainty Quantification for Trustworthy Fraud Detection</dc:title>
			<dc:creator>Tebogo Forster Mapaila</dc:creator>
			<dc:creator>Makhamisa Senekane</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14040212</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-03</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-03</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>212</prism:startingPage>
		<prism:doi>10.3390/technologies14040212</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/4/212</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/4/211">

	<title>Technologies, Vol. 14, Pages 211: A Lightweight Python Recovery Tool for Waveform Gap Recovery in Seismic&amp;ndash;Volcanic Monitoring Networks</title>
	<link>https://www.mdpi.com/2227-7080/14/4/211</link>
	<description>Seismic&amp;amp;ndash;volcanic monitoring networks often operate in remote areas over heterogeneous links (e.g., microwave radio and cellular). During event-driven seismic episodes, sustained multi-station waveform streams can stress both last-mile connectivity and data acquisition systems, yielding discontinuities in center-side archives even when stations keep recording locally. This paper presents the Python Recovery Tool (PRT), a lightweight command-line artifact that retrieves buffered waveform files after reconnection and rebuilds daily archives that can be ingested by the monitoring center without hardware upgrades. PRT detects archive gaps from daily (Julian day) file partitions and embedded timestamps, and reduces recovery traffic by selectively fetching only the files needed to backfill missing intervals. We evaluated PRT on five event-driven recovery cases using operational file-based evidence from station and center listings complemented with a simple bandwidth-based recovery-time model. Across the cases, PRT restored archive continuity while reducing download volume by 4.43&amp;amp;ndash;93.75% relative to naive bulk retrieval, with modeled catch-up times ranging from 0.79 to 207.59 min, depending on station-side packaging granularity and bottleneck link capacity. These results support a practical retrofit path to improve archive completeness under constrained links and heterogeneous deployments.</description>
	<pubDate>2026-04-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 211: A Lightweight Python Recovery Tool for Waveform Gap Recovery in Seismic&amp;ndash;Volcanic Monitoring Networks</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/4/211">doi: 10.3390/technologies14040211</a></p>
	<p>Authors:
		Santiago Arrais
		Paola Nazate-Burgos
		Nathaly Orozco Garzón
		Ángel Leonardo Valdivieso Caraguay
		Luis Urquiza-Aguiar
		</p>
	<p>Seismic&amp;amp;ndash;volcanic monitoring networks often operate in remote areas over heterogeneous links (e.g., microwave radio and cellular). During event-driven seismic episodes, sustained multi-station waveform streams can stress both last-mile connectivity and data acquisition systems, yielding discontinuities in center-side archives even when stations keep recording locally. This paper presents the Python Recovery Tool (PRT), a lightweight command-line artifact that retrieves buffered waveform files after reconnection and rebuilds daily archives that can be ingested by the monitoring center without hardware upgrades. PRT detects archive gaps from daily (Julian day) file partitions and embedded timestamps, and reduces recovery traffic by selectively fetching only the files needed to backfill missing intervals. We evaluated PRT on five event-driven recovery cases using operational file-based evidence from station and center listings complemented with a simple bandwidth-based recovery-time model. Across the cases, PRT restored archive continuity while reducing download volume by 4.43&amp;amp;ndash;93.75% relative to naive bulk retrieval, with modeled catch-up times ranging from 0.79 to 207.59 min, depending on station-side packaging granularity and bottleneck link capacity. These results support a practical retrofit path to improve archive completeness under constrained links and heterogeneous deployments.</p>
	]]></content:encoded>

	<dc:title>A Lightweight Python Recovery Tool for Waveform Gap Recovery in Seismic&amp;ndash;Volcanic Monitoring Networks</dc:title>
			<dc:creator>Santiago Arrais</dc:creator>
			<dc:creator>Paola Nazate-Burgos</dc:creator>
			<dc:creator>Nathaly Orozco Garzón</dc:creator>
			<dc:creator>Ángel Leonardo Valdivieso Caraguay</dc:creator>
			<dc:creator>Luis Urquiza-Aguiar</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14040211</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-02</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-02</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>211</prism:startingPage>
		<prism:doi>10.3390/technologies14040211</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/4/211</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/4/210">

	<title>Technologies, Vol. 14, Pages 210: Motion Planning and Control of Mobile Manipulators for Grasping-on-the-Move Tasks</title>
	<link>https://www.mdpi.com/2227-7080/14/4/210</link>
	<description>Currently, most mobile manipulators employ a &amp;amp;ldquo;Stop-and-Grasp&amp;amp;rdquo; strategy, where the base of the manipulator stops before the arm executes the grasp. However, achieving &amp;amp;ldquo;Grasping-on-the-Move&amp;amp;rdquo; actions&amp;amp;mdash;where the robot grasps a target while the base is in motion&amp;amp;mdash;remains a significant challenge due to the coupling of base and arm dynamics. To address this, we propose a two-phase collaborative motion planning framework. In the first phase (long-range approach), we introduce a spatially constrained visual servoing (SC-VS) method. By establishing a dynamic safety corridor based on the chassis path, this method ensures robust target tracking and obstacle avoidance for the arm during base motion. In the second phase (close-range grasping), to seize the brief grasping opportunity, we propose a Constrained-Sampling RRT-Connect (CSR-RRT-Connect) algorithm. By restricting the sampling region based on target prediction, this algorithm significantly reduces planning time. Comparative experiments demonstrate that our method achieves a 92% success rate at a base speed of 0.3 m/s, significantly outperforming the 46% success rate of baseline methods, while exhibiting superior robustness against dynamic operational disturbances and perception noise.</description>
	<pubDate>2026-04-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 210: Motion Planning and Control of Mobile Manipulators for Grasping-on-the-Move Tasks</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/4/210">doi: 10.3390/technologies14040210</a></p>
	<p>Authors:
		Zegang Sun
		Shanlin Zuo
		Qiang Jiang
		Peng Zhang
		Jiping Yu
		</p>
	<p>Currently, most mobile manipulators employ a &amp;amp;ldquo;Stop-and-Grasp&amp;amp;rdquo; strategy, where the base of the manipulator stops before the arm executes the grasp. However, achieving &amp;amp;ldquo;Grasping-on-the-Move&amp;amp;rdquo; actions&amp;amp;mdash;where the robot grasps a target while the base is in motion&amp;amp;mdash;remains a significant challenge due to the coupling of base and arm dynamics. To address this, we propose a two-phase collaborative motion planning framework. In the first phase (long-range approach), we introduce a spatially constrained visual servoing (SC-VS) method. By establishing a dynamic safety corridor based on the chassis path, this method ensures robust target tracking and obstacle avoidance for the arm during base motion. In the second phase (close-range grasping), to seize the brief grasping opportunity, we propose a Constrained-Sampling RRT-Connect (CSR-RRT-Connect) algorithm. By restricting the sampling region based on target prediction, this algorithm significantly reduces planning time. Comparative experiments demonstrate that our method achieves a 92% success rate at a base speed of 0.3 m/s, significantly outperforming the 46% success rate of baseline methods, while exhibiting superior robustness against dynamic operational disturbances and perception noise.</p>
	]]></content:encoded>

	<dc:title>Motion Planning and Control of Mobile Manipulators for Grasping-on-the-Move Tasks</dc:title>
			<dc:creator>Zegang Sun</dc:creator>
			<dc:creator>Shanlin Zuo</dc:creator>
			<dc:creator>Qiang Jiang</dc:creator>
			<dc:creator>Peng Zhang</dc:creator>
			<dc:creator>Jiping Yu</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14040210</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-04-02</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-04-02</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>210</prism:startingPage>
		<prism:doi>10.3390/technologies14040210</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/4/210</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/4/209">

	<title>Technologies, Vol. 14, Pages 209: EEG-Based Emotion Dynamics Recognition Using Hybrid AI Models for Cybersecurity</title>
	<link>https://www.mdpi.com/2227-7080/14/4/209</link>
	<description>The effectiveness of social engineering schemes, such as phishing, depends significantly on the victim&amp;amp;rsquo;s emotional state, which is intentionally moved by the attacker toward fear, sadness, and disgust through time pressure, threats, or messages about potential losses, which weaken cognitive control. EEG datasets that simultaneously contain basic emotions and realistic phishing scenarios are lacking. Therefore, in some cases, stress-based biophysiological datasets obtained using the Trier Social Stress Test (TSST) are used for neurophishing modeling. The TSST exhibits phasic dynamics: a transition from a neutral state to a peak in fear, followed by an increase in sadness and a partial recovery to a neutral state, highlighting fear and sadness as key components of social stress. The interval of maximum fear probability is interpreted as the window of greatest vulnerability to phishing, when it is critical to consciously pause, verify information across independent channels, and avoid impulsive actions. The suggested hybrid neural network model, WS-KAN-EEGNet, is trained on five emotions and applied to these recordings, generating temporal trajectories of state probabilities with high accuracy, forming a reliable basis for future industrial solutions to ensure a secure digital space.</description>
	<pubDate>2026-03-31</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 209: EEG-Based Emotion Dynamics Recognition Using Hybrid AI Models for Cybersecurity</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/4/209">doi: 10.3390/technologies14040209</a></p>
	<p>Authors:
		Ekaterina Pleshakova
		Aleksey Osipov
		Alexander Yudin
		Sergey Gataullin
		</p>
	<p>The effectiveness of social engineering schemes, such as phishing, depends significantly on the victim&amp;amp;rsquo;s emotional state, which is intentionally moved by the attacker toward fear, sadness, and disgust through time pressure, threats, or messages about potential losses, which weaken cognitive control. EEG datasets that simultaneously contain basic emotions and realistic phishing scenarios are lacking. Therefore, in some cases, stress-based biophysiological datasets obtained using the Trier Social Stress Test (TSST) are used for neurophishing modeling. The TSST exhibits phasic dynamics: a transition from a neutral state to a peak in fear, followed by an increase in sadness and a partial recovery to a neutral state, highlighting fear and sadness as key components of social stress. The interval of maximum fear probability is interpreted as the window of greatest vulnerability to phishing, when it is critical to consciously pause, verify information across independent channels, and avoid impulsive actions. The suggested hybrid neural network model, WS-KAN-EEGNet, is trained on five emotions and applied to these recordings, generating temporal trajectories of state probabilities with high accuracy, forming a reliable basis for future industrial solutions to ensure a secure digital space.</p>
	]]></content:encoded>

	<dc:title>EEG-Based Emotion Dynamics Recognition Using Hybrid AI Models for Cybersecurity</dc:title>
			<dc:creator>Ekaterina Pleshakova</dc:creator>
			<dc:creator>Aleksey Osipov</dc:creator>
			<dc:creator>Alexander Yudin</dc:creator>
			<dc:creator>Sergey Gataullin</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14040209</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-03-31</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-03-31</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>209</prism:startingPage>
		<prism:doi>10.3390/technologies14040209</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/4/209</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/4/208">

	<title>Technologies, Vol. 14, Pages 208: Optimization of the Diamond Roller Dressing Parameters of Grinding Wheels to Improve the Ground Surface Quality</title>
	<link>https://www.mdpi.com/2227-7080/14/4/208</link>
	<description>The quality of ground surfaces depends largely on the topography of the active surface of the grinding wheel, which, in turn, is determined both by the structure of the grinding wheel and by the conditions of the dressing process. This article proposes a new approach to improving the quality of ground surfaces by optimizing the dressing conditions with diamond rollers, based on the correlation between the roughness of the ground surfaces, the roughness of the cutting surface of the grinding wheel, and the parameters of the dressing process. A comprehensive theoretical&amp;amp;ndash;experimental study and modeling of the microgeometry of electrocorundum grinding wheels and the roughness of ground surfaces, depending on the dressing conditions with diamond dressing rolls made of medium- and high-strength synthetic diamonds with a mixed grit size, has been carried out. A complex quality indicator has been defined, determined as the ratio between the roughness of the ground surfaces and the roughness of the cutting surface of the grinding wheels, and models have been constructed for its determination, depending on the dressing conditions. By applying a genetic algorithm, optimal conditions for uni-directional and counter-directional dressing (dressing speed ratio, radial feed rate, the dress-out time and the ratio between the grit sizes of the diamond roller dresser and grinding wheel) have been determined, which ensure a minimum value of the complex quality indicator in combination with minimum roughness of the ground surfaces.</description>
	<pubDate>2026-03-31</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 208: Optimization of the Diamond Roller Dressing Parameters of Grinding Wheels to Improve the Ground Surface Quality</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/4/208">doi: 10.3390/technologies14040208</a></p>
	<p>Authors:
		Irina Aleksandrova
		Hristian Mitev
		</p>
	<p>The quality of ground surfaces depends largely on the topography of the active surface of the grinding wheel, which, in turn, is determined both by the structure of the grinding wheel and by the conditions of the dressing process. This article proposes a new approach to improving the quality of ground surfaces by optimizing the dressing conditions with diamond rollers, based on the correlation between the roughness of the ground surfaces, the roughness of the cutting surface of the grinding wheel, and the parameters of the dressing process. A comprehensive theoretical&amp;amp;ndash;experimental study and modeling of the microgeometry of electrocorundum grinding wheels and the roughness of ground surfaces, depending on the dressing conditions with diamond dressing rolls made of medium- and high-strength synthetic diamonds with a mixed grit size, has been carried out. A complex quality indicator has been defined, determined as the ratio between the roughness of the ground surfaces and the roughness of the cutting surface of the grinding wheels, and models have been constructed for its determination, depending on the dressing conditions. By applying a genetic algorithm, optimal conditions for uni-directional and counter-directional dressing (dressing speed ratio, radial feed rate, the dress-out time and the ratio between the grit sizes of the diamond roller dresser and grinding wheel) have been determined, which ensure a minimum value of the complex quality indicator in combination with minimum roughness of the ground surfaces.</p>
	]]></content:encoded>

	<dc:title>Optimization of the Diamond Roller Dressing Parameters of Grinding Wheels to Improve the Ground Surface Quality</dc:title>
			<dc:creator>Irina Aleksandrova</dc:creator>
			<dc:creator>Hristian Mitev</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14040208</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-03-31</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-03-31</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>208</prism:startingPage>
		<prism:doi>10.3390/technologies14040208</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/4/208</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/4/207">

	<title>Technologies, Vol. 14, Pages 207: A Lightweight Feature-Grouped Gated Fusion Network for Parkinson&amp;rsquo;s Disease Gait Screening Using Force-Plate GRFs</title>
	<link>https://www.mdpi.com/2227-7080/14/4/207</link>
	<description>Parkinson&amp;amp;rsquo;s disease (PD) is associated with characteristic gait impairment, motivating objective screening methods based on biomechanical signals. This study presents a lightweight, physics-informed framework for PD gait screening using ground reaction force (GRF) signals acquired from force plates, together with a prototype acquisition-and-analysis system for practical screening workflows. Continuous GRF recordings are segmented into complete gait cycles, from which bilateral physics-informed features are constructed, including normalized force, dynamics-derived acceleration and velocity, and friction-related descriptors. The resulting feature tensor is then standardized and used as input to a Feature-Grouped Gated Fusion Network (FGGF-Net). The proposed model separately encodes force&amp;amp;ndash;acceleration features and velocity&amp;amp;ndash;ratio features using low-order nonlinear and linear pathways, respectively, and integrates them via gated fusion with a residual baseline pathway. Under subject-wise five-fold cross-validation, FGGF-Net achieves strong subject-level performance, reaching 94.8% accuracy, 92.9% F1-score, and 0.974 AUC, while consistently outperforming representative baselines. Ablation studies further verify the effectiveness of feature grouping and gated fusion. In addition, the trained model remains compact (1.09 M parameters, 4.16 MB) and supports millisecond-level CPU inference, making the proposed framework promising for practical force-plate screening workflows.</description>
	<pubDate>2026-03-31</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 207: A Lightweight Feature-Grouped Gated Fusion Network for Parkinson&amp;rsquo;s Disease Gait Screening Using Force-Plate GRFs</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/4/207">doi: 10.3390/technologies14040207</a></p>
	<p>Authors:
		Jinxuan Wang
		Hua Huo
		Chen Zhang
		</p>
	<p>Parkinson&amp;amp;rsquo;s disease (PD) is associated with characteristic gait impairment, motivating objective screening methods based on biomechanical signals. This study presents a lightweight, physics-informed framework for PD gait screening using ground reaction force (GRF) signals acquired from force plates, together with a prototype acquisition-and-analysis system for practical screening workflows. Continuous GRF recordings are segmented into complete gait cycles, from which bilateral physics-informed features are constructed, including normalized force, dynamics-derived acceleration and velocity, and friction-related descriptors. The resulting feature tensor is then standardized and used as input to a Feature-Grouped Gated Fusion Network (FGGF-Net). The proposed model separately encodes force&amp;amp;ndash;acceleration features and velocity&amp;amp;ndash;ratio features using low-order nonlinear and linear pathways, respectively, and integrates them via gated fusion with a residual baseline pathway. Under subject-wise five-fold cross-validation, FGGF-Net achieves strong subject-level performance, reaching 94.8% accuracy, 92.9% F1-score, and 0.974 AUC, while consistently outperforming representative baselines. Ablation studies further verify the effectiveness of feature grouping and gated fusion. In addition, the trained model remains compact (1.09 M parameters, 4.16 MB) and supports millisecond-level CPU inference, making the proposed framework promising for practical force-plate screening workflows.</p>
	]]></content:encoded>

	<dc:title>A Lightweight Feature-Grouped Gated Fusion Network for Parkinson&amp;rsquo;s Disease Gait Screening Using Force-Plate GRFs</dc:title>
			<dc:creator>Jinxuan Wang</dc:creator>
			<dc:creator>Hua Huo</dc:creator>
			<dc:creator>Chen Zhang</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14040207</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-03-31</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-03-31</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>207</prism:startingPage>
		<prism:doi>10.3390/technologies14040207</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/4/207</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/4/206">

	<title>Technologies, Vol. 14, Pages 206: Use of Machine Learning for Solar Power Generation Prediction in the Field of Alternative Renewable Energy Sources</title>
	<link>https://www.mdpi.com/2227-7080/14/4/206</link>
	<description>This study focused on the application of supervised learning in the field of renewable energy, specifically for predicting daily solar irradiance in Neiva, department of Huila, Colombia. To this end, decision tree and artificial neural network (DT and ANN, respectively) models were trained and tested using the online tool Google Colab. The main objective was based on the need to optimize energy planning processes at local and regional levels, motivated by the increase in demand for the integration of non-conventional energy sources and the spatial&amp;amp;ndash;temporal variability in solar resources in the country. A dataset consisting of 366 daily records for the year 2024 was obtained from the NASA POWER database at the geographic coordinates (2.930079, &amp;amp;minus;75.255650) and used for training and evaluating the proposed models. Statistical and cleaning techniques were used, including the treatment of outliers using the moving-window median for the latter. Metrics, such as mean absolute error (MAE), root mean square error (RMSE), and coefficient of determination (R2), were used to evaluate the models. Data inclusion and exclusion criteria were applied to ensure the quality and validity of the observations. Model performance was evaluated using a randomized Hold-Out validation strategy (90% training and 10% testing), which was repeated across multiple iterations. The performance metrics reported corresponded to the 10th iteration of the validation process after outlier treatment. Under this configuration, the DT model achieved a higher predictive performance (R2 = 0.8882) compared with the ANN model (R2 = 0.7679), demonstrating its effectiveness as a reliable approach for estimating daily solar irradiance under the studied conditions. This result was also confirmed by the decreased MAE and RMSE for the DT model, which indicated that this model performed better in predicting the real values than the ANN model. 
Finally, the added value of the study is to consolidate national evidence and open access tools to facilitate the development of sustainable energy policies in intermediate cities such as Neiva.</description>
	<pubDate>2026-03-31</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 206: Use of Machine Learning for Solar Power Generation Prediction in the Field of Alternative Renewable Energy Sources</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/4/206">doi: 10.3390/technologies14040206</a></p>
	<p>Authors:
		Juan D. Parra-Quintero
		Daniel Ovalle-Cerquera
		Edwin Chica
		Ainhoa Rubio-Clemente
		</p>
	<p>This study focused on the application of supervised learning in the field of renewable energy, specifically for predicting daily solar irradiance in Neiva, department of Huila, Colombia. To this end, decision tree and artificial neural network (DT and ANN, respectively) models were trained and tested using the online tool Google Colab. The main objective was based on the need to optimize energy planning processes at local and regional levels, motivated by the increase in demand for the integration of non-conventional energy sources and the spatial&amp;amp;ndash;temporal variability in solar resources in the country. A dataset consisting of 366 daily records for the year 2024 was obtained from the NASA POWER database at the geographic coordinates (2.930079, &amp;amp;minus;75.255650) and used for training and evaluating the proposed models. Statistical and cleaning techniques were used, including the treatment of outliers using the moving-window median for the latter. Metrics, such as mean absolute error (MAE), root mean square error (RMSE), and coefficient of determination (R2), were used to evaluate the models. Data inclusion and exclusion criteria were applied to ensure the quality and validity of the observations. Model performance was evaluated using a randomized Hold-Out validation strategy (90% training and 10% testing), which was repeated across multiple iterations. The performance metrics reported corresponded to the 10th iteration of the validation process after outlier treatment. Under this configuration, the DT model achieved a higher predictive performance (R2 = 0.8882) compared with the ANN model (R2 = 0.7679), demonstrating its effectiveness as a reliable approach for estimating daily solar irradiance under the studied conditions. This result was also confirmed by the decreased MAE and RMSE for the DT model, which indicated that this model performed better in predicting the real values than the ANN model. 
Finally, the added value of the study is to consolidate national evidence and open access tools to facilitate the development of sustainable energy policies in intermediate cities such as Neiva.</p>
	]]></content:encoded>

	<dc:title>Use of Machine Learning for Solar Power Generation Prediction in the Field of Alternative Renewable Energy Sources</dc:title>
			<dc:creator>Juan D. Parra-Quintero</dc:creator>
			<dc:creator>Daniel Ovalle-Cerquera</dc:creator>
			<dc:creator>Edwin Chica</dc:creator>
			<dc:creator>Ainhoa Rubio-Clemente</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14040206</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-03-31</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-03-31</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>206</prism:startingPage>
		<prism:doi>10.3390/technologies14040206</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/4/206</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/4/205">

	<title>Technologies, Vol. 14, Pages 205: Comparative Analysis of Spectrogram-Based Transformations for Acoustic Classification of SMAW Weld Quality Using Machine Learning</title>
	<link>https://www.mdpi.com/2227-7080/14/4/205</link>
	<description>This study evaluates the feasibility of acoustic signal analysis using different spectrographic transformation methods as a tool for assessing the quality of welding beads produced through the Shielded Metal Arc Welding (SMAW) process. Acoustic emissions were recorded during manual welding operations under controlled experimental conditions, using E6013 electrodes on A36 carbon steel plates. From the acoustic recordings of 400 welding samples, previously classified as accepted or rejected, two fundamental acoustic descriptors were extracted: the fundamental frequency (F0) and the harmonic-to-noise ratio (HNR). These were analysed using parametric and non-parametric metrics to evaluate their discriminative capability. In addition, multiple supervised classifiers were trained and validated using stratified eight-fold cross-validation. The proposed framework enables a systematic comparison of different signal transformations and classification models for the evaluation of SMAW welding quality. Among the evaluated models (SVC, Gradient Boosting, and Extra Trees), precision rates of 90&amp;amp;ndash;95% were observed using Spectral Contrast, MEL, and CQT transformations. The results demonstrate that the implementation of various acoustic signal-based models and transformations for welding inspection offers a scalable and cost-effective solution for industrial quality control.</description>
	<pubDate>2026-03-31</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 205: Comparative Analysis of Spectrogram-Based Transformations for Acoustic Classification of SMAW Weld Quality Using Machine Learning</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/4/205">doi: 10.3390/technologies14040205</a></p>
	<p>Authors:
		Alejandro García Rodríguez
		Sergio Eduardo Lara Munevar
		Héctor Fabio Montaño Morales
		Christian Camilo Barriga Castellanos
		</p>
	<p>This study evaluates the feasibility of acoustic signal analysis using different spectrographic transformation methods as a tool for assessing the quality of welding beads produced through the Shielded Metal Arc Welding (SMAW) process. Acoustic emissions were recorded during manual welding operations under controlled experimental conditions, using E6013 electrodes on A36 carbon steel plates. From the acoustic recordings of 400 welding samples, previously classified as accepted or rejected, two fundamental acoustic descriptors were extracted: the fundamental frequency (F0) and the harmonic-to-noise ratio (HNR). These were analysed using parametric and non-parametric metrics to evaluate their discriminative capability. In addition, multiple supervised classifiers were trained and validated using stratified eight-fold cross-validation. The proposed framework enables a systematic comparison of different signal transformations and classification models for the evaluation of SMAW welding quality. Among the evaluated models (SVC, Gradient Boosting, and Extra Trees), precision rates of 90&amp;amp;ndash;95% were observed using Spectral Contrast, MEL, and CQT transformations. The results demonstrate that the implementation of various acoustic signal-based models and transformations for welding inspection offers a scalable and cost-effective solution for industrial quality control.</p>
	]]></content:encoded>

	<dc:title>Comparative Analysis of Spectrogram-Based Transformations for Acoustic Classification of SMAW Weld Quality Using Machine Learning</dc:title>
			<dc:creator>Alejandro García Rodríguez</dc:creator>
			<dc:creator>Sergio Eduardo Lara Munevar</dc:creator>
			<dc:creator>Héctor Fabio Montaño Morales</dc:creator>
			<dc:creator>Christian Camilo Barriga Castellanos</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14040205</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-03-31</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-03-31</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>205</prism:startingPage>
		<prism:doi>10.3390/technologies14040205</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/4/205</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/4/204">

	<title>Technologies, Vol. 14, Pages 204: Operational Management of Multi-Vendor Wi Fi Networks in Smart Campus Environments</title>
	<link>https://www.mdpi.com/2227-7080/14/4/204</link>
	<description>Digital transformation in higher education increasingly hinges on the robustness and governability of Information and Communication Technology (ICT) infrastructures, with campus Wi-Fi networks serving as the operational backbone of digital learning, research collaboration, and administrative services. In large universities, these networks typically evolve into heterogeneous, multi-vendor environments, introducing ongoing challenges in monitoring coherence, configuration governance, and cross-platform performance diagnosis. Despite the centrality of these issues, smart campus scholarship has paid limited attention to day-to-day operational management. This study examines the design and operational performance of a dual-platform Wi-Fi network management architecture implemented at Mahasarakham University, Thailand. The architecture strategically integrates SolarWinds and LibreNMS to combine centralized network-wide visibility with fine-grained, device-level diagnostics across a multi-vendor infrastructure. An engineering-oriented mixed-method approach was employed, drawing on production monitoring logs and semi-structured interviews with campus network engineers. Findings indicate that SolarWinds strengthens configuration oversight and campus-level situational awareness, whereas LibreNMS enhances detailed performance analytics and accelerates fault isolation. Their coordinated deployment improves operational stability, diagnostic clarity, and long-term maintainability of campus Wi-Fi systems. The study provides practical architectural guidance for managing heterogeneous ICT infrastructures in smart campus and enterprise-scale environments.</description>
	<pubDate>2026-03-30</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 204: Operational Management of Multi-Vendor Wi Fi Networks in Smart Campus Environments</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/4/204">doi: 10.3390/technologies14040204</a></p>
	<p>Authors:
		Weerapatr Ta-Armart
		Charuay Savithi
		</p>
	<p>Digital transformation in higher education increasingly hinges on the robustness and governability of Information and Communication Technology (ICT) infrastructures, with campus Wi-Fi networks serving as the operational backbone of digital learning, research collaboration, and administrative services. In large universities, these networks typically evolve into heterogeneous, multi-vendor environments, introducing ongoing challenges in monitoring coherence, configuration governance, and cross-platform performance diagnosis. Despite the centrality of these issues, smart campus scholarship has paid limited attention to day-to-day operational management. This study examines the design and operational performance of a dual-platform Wi-Fi network management architecture implemented at Mahasarakham University, Thailand. The architecture strategically integrates SolarWinds and LibreNMS to combine centralized network-wide visibility with fine-grained, device-level diagnostics across a multi-vendor infrastructure. An engineering-oriented mixed-method approach was employed, drawing on production monitoring logs and semi-structured interviews with campus network engineers. Findings indicate that SolarWinds strengthens configuration oversight and campus-level situational awareness, whereas LibreNMS enhances detailed performance analytics and accelerates fault isolation. Their coordinated deployment improves operational stability, diagnostic clarity, and long-term maintainability of campus Wi-Fi systems. The study provides practical architectural guidance for managing heterogeneous ICT infrastructures in smart campus and enterprise-scale environments.</p>
	]]></content:encoded>

	<dc:title>Operational Management of Multi-Vendor Wi Fi Networks in Smart Campus Environments</dc:title>
			<dc:creator>Weerapatr Ta-Armart</dc:creator>
			<dc:creator>Charuay Savithi</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14040204</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-03-30</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-03-30</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>204</prism:startingPage>
		<prism:doi>10.3390/technologies14040204</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/4/204</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/4/203">

	<title>Technologies, Vol. 14, Pages 203: Engineering Optimisation of Combined Soil Preparation for Ridge-Based Peanut Production and Residue Biodegradation</title>
	<link>https://www.mdpi.com/2227-7080/14/4/203</link>
	<description>Sustainable ridge-based peanut production following winter wheat requires soil preparation technologies capable of simultaneously ensuring precise ridge formation, reduced energy consumption and efficient in situ utilisation of crop residues. This study aimed to develop and experimentally validate a combined soil preparation technology integrating shallow tillage, deep loosening and ridge formation within a single field pass, and to quantify its technological and biological performance. Field experiments were conducted using a prototype combined machine with analytically justified geometric parameters of the working tools, followed by multifactor optimisation and statistical modelling. Technological performance was assessed by soil fragmentation degree and draft resistance, while biological effects were evaluated using residue incorporation (Pz), biodegradation coefficient after 60 days (k60) and dehydrogenase activity after 30 days (DHA30). The results showed statistically significant nonlinear relationships between tool parameters and technological responses, with coefficients of determination exceeding 0.94 for soil fragmentation and 0.97 for draft resistance. The proposed technology increased residue incorporation efficiency by 15&amp;amp;ndash;20%, enhanced biodegradation intensity (k60) by up to 18%, and reduced energy consumption due to single-pass operation compared with conventional multi-pass systems. A strong relationship between Pz and biological indicators confirmed the key role of residue placement in controlling microbial processes. These findings demonstrate that integrated control of soil processing and residue placement enables energy-efficient single-pass technologies for ridge-based peanut production systems.</description>
	<pubDate>2026-03-29</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 203: Engineering Optimisation of Combined Soil Preparation for Ridge-Based Peanut Production and Residue Biodegradation</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/4/203">doi: 10.3390/technologies14040203</a></p>
	<p>Authors:
		Farmon M. Mamatov
		Fakhriddin U. Karshiev
		Nargiza B. Ravshanova
		Sanjar Zh. Toshtemirov
		Uchkun Kodirov
		Nurbek Sh. Rashidov
		Golib D. Shodmonov
		Nodir I. Saidov
		Mokhichekhra F. Begimkulova
		Allamurod Ismatov
		</p>
	<p>Sustainable ridge-based peanut production following winter wheat requires soil preparation technologies capable of simultaneously ensuring precise ridge formation, reduced energy consumption and efficient in situ utilisation of crop residues. This study aimed to develop and experimentally validate a combined soil preparation technology integrating shallow tillage, deep loosening and ridge formation within a single field pass, and to quantify its technological and biological performance. Field experiments were conducted using a prototype combined machine with analytically justified geometric parameters of the working tools, followed by multifactor optimisation and statistical modelling. Technological performance was assessed by soil fragmentation degree and draft resistance, while biological effects were evaluated using residue incorporation (Pz), biodegradation coefficient after 60 days (k60) and dehydrogenase activity after 30 days (DHA30). The results showed statistically significant nonlinear relationships between tool parameters and technological responses, with coefficients of determination exceeding 0.94 for soil fragmentation and 0.97 for draft resistance. The proposed technology increased residue incorporation efficiency by 15&amp;amp;ndash;20%, enhanced biodegradation intensity (k60) by up to 18%, and reduced energy consumption due to single-pass operation compared with conventional multi-pass systems. A strong relationship between Pz and biological indicators confirmed the key role of residue placement in controlling microbial processes. These findings demonstrate that integrated control of soil processing and residue placement enables energy-efficient single-pass technologies for ridge-based peanut production systems.</p>
	]]></content:encoded>

	<dc:title>Engineering Optimisation of Combined Soil Preparation for Ridge-Based Peanut Production and Residue Biodegradation</dc:title>
			<dc:creator>Farmon M. Mamatov</dc:creator>
			<dc:creator>Fakhriddin U. Karshiev</dc:creator>
			<dc:creator>Nargiza B. Ravshanova</dc:creator>
			<dc:creator>Sanjar Zh. Toshtemirov</dc:creator>
			<dc:creator>Uchkun Kodirov</dc:creator>
			<dc:creator>Nurbek Sh. Rashidov</dc:creator>
			<dc:creator>Golib D. Shodmonov</dc:creator>
			<dc:creator>Nodir I. Saidov</dc:creator>
			<dc:creator>Mokhichekhra F. Begimkulova</dc:creator>
			<dc:creator>Allamurod Ismatov</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14040203</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-03-29</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-03-29</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>203</prism:startingPage>
		<prism:doi>10.3390/technologies14040203</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/4/203</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/4/202">

	<title>Technologies, Vol. 14, Pages 202: Benchmarking MARL for UAV-Assisted Mobile Edge Computing Under Realistic 3D Collision Avoidance Navigation Constraints for Periodic Task Offloading</title>
	<link>https://www.mdpi.com/2227-7080/14/4/202</link>
	<description>The rapid growth of Internet of Things (IoT) and Industrial IoT applications has intensified the demand for low-latency and reliable computation support for deadline-constrained periodic real-time tasks. While unmanned aerial vehicles (UAVs) enabling mobile edge computing (MEC) can reduce latency by bringing compute closer to data sources, terrestrial MEC deployments often suffer from limited coverage and poor adaptability to spatially heterogeneous demand. In this paper, we study a multiple-UAV-assisted MEC system serving cluster-based IoT networks, where cluster heads generate deadline-constrained periodic tasks for offloading under strict deadlines. To ensure practical feasibility in dense urban environments, we benchmark UAV mobility using a realistic 3D collision avoidance navigation graph with shortest-path execution, rather than assuming unconstrained continuous UAV motion in free space. On top of this benchmark, we systematically compare three multi-agent reinforcement learning (MARL) paradigms for joint navigation and periodic task offloading: (i) continuous 3D control MARL that outputs motion commands directly; (ii) discrete graph-based MARL that selects collision-free shortest paths; and (iii) asynchronous macro-action MARL. Using a high-fidelity 3D digital twin of San Francisco, we evaluate these paradigms under a unified protocol in terms of offloading success, end-to-end latency, and energy consumption. The results reveal clear performance trade-offs induced by realistic 3D collision avoidance constraints and provide actionable insights for designing UAV-assisted MEC systems supporting periodic real-time task offloading.</description>
	<pubDate>2026-03-27</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 202: Benchmarking MARL for UAV-Assisted Mobile Edge Computing Under Realistic 3D Collision Avoidance Navigation Constraints for Periodic Task Offloading</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/4/202">doi: 10.3390/technologies14040202</a></p>
	<p>Authors:
		Jiacheng Gu
		Qingxu Meng
		Qiurui Sun
		Bing Zhu
		Songnan Zhao
		Shaode Yu
		</p>
	<p>The rapid growth of Internet of Things (IoT) and Industrial IoT applications has intensified the demand for low-latency and reliable computation support for deadline-constrained periodic real-time tasks. While unmanned aerial vehicles (UAVs) enabling mobile edge computing (MEC) can reduce latency by bringing compute closer to data sources, terrestrial MEC deployments often suffer from limited coverage and poor adaptability to spatially heterogeneous demand. In this paper, we study a multiple-UAV-assisted MEC system serving cluster-based IoT networks, where cluster heads generate deadline-constrained periodic tasks for offloading under strict deadlines. To ensure practical feasibility in dense urban environments, we benchmark UAV mobility using a realistic 3D collision avoidance navigation graph with shortest-path execution, rather than assuming unconstrained continuous UAV motion in free space. On top of this benchmark, we systematically compare three multi-agent reinforcement learning (MARL) paradigms for joint navigation and periodic task offloading: (i) continuous 3D control MARL that outputs motion commands directly; (ii) discrete graph-based MARL that selects collision-free shortest paths; and (iii) asynchronous macro-action MARL. Using a high-fidelity 3D digital twin of San Francisco, we evaluate these paradigms under a unified protocol in terms of offloading success, end-to-end latency, and energy consumption. The results reveal clear performance trade-offs induced by realistic 3D collision avoidance constraints and provide actionable insights for designing UAV-assisted MEC systems supporting periodic real-time task offloading.</p>
	]]></content:encoded>

	<dc:title>Benchmarking MARL for UAV-Assisted Mobile Edge Computing Under Realistic 3D Collision Avoidance Navigation Constraints for Periodic Task Offloading</dc:title>
			<dc:creator>Jiacheng Gu</dc:creator>
			<dc:creator>Qingxu Meng</dc:creator>
			<dc:creator>Qiurui Sun</dc:creator>
			<dc:creator>Bing Zhu</dc:creator>
			<dc:creator>Songnan Zhao</dc:creator>
			<dc:creator>Shaode Yu</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14040202</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-03-27</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-03-27</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>202</prism:startingPage>
		<prism:doi>10.3390/technologies14040202</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/4/202</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/4/201">

	<title>Technologies, Vol. 14, Pages 201: Dual-Polarized Beam-Steerable Filtering Patch Antenna</title>
	<link>https://www.mdpi.com/2227-7080/14/4/201</link>
	<description>A compact dual-polarized beam-steerable patch antenna with filtering characteristics is proposed in this paper. By digging two orthogonal coupling slots on the ground plate, dual polarization is achieved while ensuring the isolation between the ports. By constructing properly arranged parallel microstrip resonators and open-circuited stubs, the effect of suppressing a broad stopband is produced. The beam steering characteristic is accomplished through the integration of a driven patch antenna with two dual-element metallic walls, each incorporating PIN diodes for electronic tuning. A prototype antenna has been fabricated to substantiate the efficacy of the proposed methodology. The simulated and measured results agree well, demonstrating good performance in terms of impedance bandwidth, stopband suppression, isolation and beam-steering capability. Under six radiation states, the proposed antenna operates from 2.3 GHz to 2.5 GHz with isolation exceeding 20 dB. Additionally, the antenna gain remains below &amp;amp;minus;10 dBi over the 2.6 GHz to 10 GHz band, achieving out-of-band suppression greater than 15.8 dB within the wide stopband. When port 1 is excited, the antenna generates three distinct radiation patterns, enabling beam scanning at 0&amp;amp;deg; and &amp;amp;plusmn;30&amp;amp;deg; in the yoz plane. Similarly, exciting port 2 yields three radiation patterns, allowing beam scanning at 0&amp;amp;deg; and &amp;amp;plusmn;30&amp;amp;deg; in the xoz plane. This work presents the first integration of dual-polarized, beam-steering, and filtering characteristics into a single compact antenna.</description>
	<pubDate>2026-03-27</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 201: Dual-Polarized Beam-Steerable Filtering Patch Antenna</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/4/201">doi: 10.3390/technologies14040201</a></p>
	<p>Authors:
		Tian-Gui Huang
		Zheng Gan
		Kai-Ran Xiang
		Wen-Feng Zeng
		Fu-Chang Chen
		</p>
	<p>A compact dual-polarized beam-steerable patch antenna with filtering characteristics is proposed in this paper. By digging two orthogonal coupling slots on the ground plate, dual polarization is achieved while ensuring the isolation between the ports. By constructing properly arranged parallel microstrip resonators and open-circuited stubs, the effect of suppressing a broad stopband is produced. The beam steering characteristic is accomplished through the integration of a driven patch antenna with two dual-element metallic walls, each incorporating PIN diodes for electronic tuning. A prototype antenna has been fabricated to substantiate the efficacy of the proposed methodology. The simulated and measured results agree well, demonstrating good performance in terms of impedance bandwidth, stopband suppression, isolation and beam-steering capability. Under six radiation states, the proposed antenna operates from 2.3 GHz to 2.5 GHz with isolation exceeding 20 dB. Additionally, the antenna gain remains below &amp;amp;minus;10 dBi over the 2.6 GHz to 10 GHz band, achieving out-of-band suppression greater than 15.8 dB within the wide stopband. When port 1 is excited, the antenna generates three distinct radiation patterns, enabling beam scanning at 0&amp;amp;deg; and &amp;amp;plusmn;30&amp;amp;deg; in the yoz plane. Similarly, exciting port 2 yields three radiation patterns, allowing beam scanning at 0&amp;amp;deg; and &amp;amp;plusmn;30&amp;amp;deg; in the xoz plane. This work presents the first integration of dual-polarized, beam-steering, and filtering characteristics into a single compact antenna.</p>
	]]></content:encoded>

	<dc:title>Dual-Polarized Beam-Steerable Filtering Patch Antenna</dc:title>
			<dc:creator>Tian-Gui Huang</dc:creator>
			<dc:creator>Zheng Gan</dc:creator>
			<dc:creator>Kai-Ran Xiang</dc:creator>
			<dc:creator>Wen-Feng Zeng</dc:creator>
			<dc:creator>Fu-Chang Chen</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14040201</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-03-27</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-03-27</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>201</prism:startingPage>
		<prism:doi>10.3390/technologies14040201</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/4/201</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/4/200">

	<title>Technologies, Vol. 14, Pages 200: Intelligent and Automated Technologies for Textile Recycling Pre-Processing: A Systematic Literature Review</title>
	<link>https://www.mdpi.com/2227-7080/14/4/200</link>
	<description>Textile-to-textile recycling is strongly constrained by upstream pre-processing, where post-consumer clothing must be identified, separated, and prepared under high variability in materials, appearance, and contamination. This paper presents a Preferred Reporting Items for Systematic Reviews and Meta-Analyses (PRISMA)-guided systematic literature review of intelligent and automated technologies for textile recycling pre-processing covering the interval from 2015 to 2025. After screening and quality assessment, 21 primary studies published between 2020 and 2025 were included. The literature is synthesized across three task families: (i) identification of fiber/material, composition, or color; (ii) sorting, considered only when explicit separation strategies are defined to operationalize identification outcomes into routing actions or output streams; and (iii) contaminant detection and/or removal, targeting non-recyclable items. Results show that identification dominates the field (19/21 studies), supported by Red&amp;amp;ndash;Green&amp;amp;ndash;Blue (RGB) and red&amp;amp;ndash;green&amp;amp;ndash;blue plus depth (RGB-D) imaging and material-signature sensing, including near-infrared (NIR) spectroscopy, hyperspectral imaging (HSI), and Raman spectroscopy. In contrast, sorting as a defined separation stage is less frequent (4/21), and contaminant-related automation remains sparse (3/21). Most studies are validated in laboratory conditions, with limited semi-industrial evidence, highlighting a persistent perception-to-action gap. Overall, the review indicates that robust separation strategies, representative datasets, and end-to-end system integration remain key bottlenecks for scalable automated textile recycling pre-processing.</description>
	<pubDate>2026-03-27</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 200: Intelligent and Automated Technologies for Textile Recycling Pre-Processing: A Systematic Literature Review</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/4/200">doi: 10.3390/technologies14040200</a></p>
	<p>Authors:
		Daniel Lopes
		Eduardo J. Solteiro Pires
		Vítor Filipe
		Manuel F. Silva
		Luís F. Rocha
		</p>
	<p>Textile-to-textile recycling is strongly constrained by upstream pre-processing, where post-consumer clothing must be identified, separated, and prepared under high variability in materials, appearance, and contamination. This paper presents a Preferred Reporting Items for Systematic Reviews and Meta-Analyses (PRISMA)-guided systematic literature review of intelligent and automated technologies for textile recycling pre-processing covering the interval from 2015 to 2025. After screening and quality assessment, 21 primary studies published between 2020 and 2025 were included. The literature is synthesized across three task families: (i) identification of fiber/material, composition, or color; (ii) sorting, considered only when explicit separation strategies are defined to operationalize identification outcomes into routing actions or output streams; and (iii) contaminant detection and/or removal, targeting non-recyclable items. Results show that identification dominates the field (19/21 studies), supported by Red&amp;amp;ndash;Green&amp;amp;ndash;Blue (RGB) and red&amp;amp;ndash;green&amp;amp;ndash;blue plus depth (RGB-D) imaging and material-signature sensing, including near-infrared (NIR) spectroscopy, hyperspectral imaging (HSI), and Raman spectroscopy. In contrast, sorting as a defined separation stage is less frequent (4/21), and contaminant-related automation remains sparse (3/21). Most studies are validated in laboratory conditions, with limited semi-industrial evidence, highlighting a persistent perception-to-action gap. Overall, the review indicates that robust separation strategies, representative datasets, and end-to-end system integration remain key bottlenecks for scalable automated textile recycling pre-processing.</p>
	]]></content:encoded>

	<dc:title>Intelligent and Automated Technologies for Textile Recycling Pre-Processing: A Systematic Literature Review</dc:title>
			<dc:creator>Daniel Lopes</dc:creator>
			<dc:creator>Eduardo J. Solteiro Pires</dc:creator>
			<dc:creator>Vítor Filipe</dc:creator>
			<dc:creator>Manuel F. Silva</dc:creator>
			<dc:creator>Luís F. Rocha</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14040200</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-03-27</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-03-27</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Systematic Review</prism:section>
	<prism:startingPage>200</prism:startingPage>
		<prism:doi>10.3390/technologies14040200</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/4/200</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/4/199">

	<title>Technologies, Vol. 14, Pages 199: A Bi-LSTM Attention Mechanism for Monitoring Seismic Events&amp;mdash;Solving the Issue of Noise &amp;amp; Interpretability</title>
	<link>https://www.mdpi.com/2227-7080/14/4/199</link>
	<description>The nonlinearity and the extreme variability of seismic signals make the detection of earthquakes difficult. Although the conventional deep-learning models can be used to extract useful features, they cannot be used in early-warning systems due to their non-interpretability. In this study, a Bidirectional Long Short-Term Memory network with an attention system (Bi-LSTM-Attn) is proposed to detect seismic events using the ConvNetQuake dataset. To improve the quality of data, the entire preprocessing pipeline, such as signal filtering, segmentation, normalization, and noise reduction is employed. The model was optimized using hyperparameter tuning of sequence length, learning rates, and attention weighting to achieve the best number of true-positive detections and a minimum number of false alarms. The accuracy, precision and recall, F1-score, MSE, and ROC curves were used to assess the performance and the attention weight visualization allowed interpreting the model. It is proven through experiments that the Bi-LSTM-Attn model provides more credible and comprehensible forecasting in relation to baseline LSTM and GRU models. Making the high-accuracy classification and the transparent decision behavior, the approach will increase the level of trust in the automated seismic surveillance, as well as help to build the reliable global networks of earthquake early-warnings.</description>
	<pubDate>2026-03-26</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 199: A Bi-LSTM Attention Mechanism for Monitoring Seismic Events&amp;mdash;Solving the Issue of Noise &amp;amp; Interpretability</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/4/199">doi: 10.3390/technologies14040199</a></p>
	<p>Authors:
		Nimra Iqbal
		Izzatdin Bin Abdul Aziz
		Muhammad Faisal Raza
		</p>
	<p>The nonlinearity and the extreme variability of seismic signals make the detection of earthquakes difficult. Although the conventional deep-learning models can be used to extract useful features, they cannot be used in early-warning systems due to their non-interpretability. In this study, a Bidirectional Long Short-Term Memory network with an attention system (Bi-LSTM-Attn) is proposed to detect seismic events using the ConvNetQuake dataset. To improve the quality of data, the entire preprocessing pipeline, such as signal filtering, segmentation, normalization, and noise reduction is employed. The model was optimized using hyperparameter tuning of sequence length, learning rates, and attention weighting to achieve the best number of true-positive detections and a minimum number of false alarms. The accuracy, precision and recall, F1-score, MSE, and ROC curves were used to assess the performance and the attention weight visualization allowed interpreting the model. It is proven through experiments that the Bi-LSTM-Attn model provides more credible and comprehensible forecasting in relation to baseline LSTM and GRU models. Making the high-accuracy classification and the transparent decision behavior, the approach will increase the level of trust in the automated seismic surveillance, as well as help to build the reliable global networks of earthquake early-warnings.</p>
	]]></content:encoded>

	<dc:title>A Bi-LSTM Attention Mechanism for Monitoring Seismic Events&amp;amp;mdash;Solving the Issue of Noise &amp;amp;amp; Interpretability</dc:title>
			<dc:creator>Nimra Iqbal</dc:creator>
			<dc:creator>Izzatdin Bin Abdul Aziz</dc:creator>
			<dc:creator>Muhammad Faisal Raza</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14040199</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-03-26</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-03-26</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>199</prism:startingPage>
		<prism:doi>10.3390/technologies14040199</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/4/199</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/4/198">

	<title>Technologies, Vol. 14, Pages 198: Beyond Short-Frame Acoustic Features: Capturing Long-Term Speech Patterns for Depression Detection</title>
	<link>https://www.mdpi.com/2227-7080/14/4/198</link>
	<description>Speech-based depression detection is promising for objective mental health assessment. However, conventional methods relying on short-frame acoustic features often fail to capture long-term temporal and behavioral characteristics of speech essential for modeling depression-specific speaking patterns. Herein, four novel acoustic feature sets extracted from long-term speech are proposed: utterance interval feature set (UIFS), pause interval feature set (PIFS), response interval feature set (RIFS), and speech density (SD). These features explicitly characterize temporal structures and session-level speech behaviors beyond short-frame analysis. These features are combined with conventional acoustic features, including standard features extracted using openSMILE and voice level features, and evaluated using support vector machines under subject-independent conditions for the binary classification of depressed and nondepressed speakers. Incorporating the proposed features improves classification performance compared with baseline features (accuracy: 0.54 for openSMILE and 0.52 for openSMILE + voice level features). The configuration integrating all four proposed feature sets achieves an accuracy of 0.58, a precision of 0.56, a recall of 0.58, and a specificity of 0.58, indicating consistent performance gains under subject-independent and strictly controlled evaluation conditions. Thus, depression-related speech patterns can be captured by explicitly modeling temporal and behavioral speech characteristics across entire dialog sessions. This study contributes to advancing acoustic feature design for speech-based depression detection and developing clinically supportive screening and monitoring technologies.</description>
	<pubDate>2026-03-25</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 198: Beyond Short-Frame Acoustic Features: Capturing Long-Term Speech Patterns for Depression Detection</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/4/198">doi: 10.3390/technologies14040198</a></p>
	<p>Authors:
		Shizuku Fushimi
		Mohammad Aiman Azani
		Mizuto Chiba
		Yoshifumi Okada
		</p>
	<p>Speech-based depression detection is promising for objective mental health assessment. However, conventional methods relying on short-frame acoustic features often fail to capture long-term temporal and behavioral characteristics of speech essential for modeling depression-specific speaking patterns. Herein, four novel acoustic feature sets extracted from long-term speech are proposed: utterance interval feature set (UIFS), pause interval feature set (PIFS), response interval feature set (RIFS), and speech density (SD). These features explicitly characterize temporal structures and session-level speech behaviors beyond short-frame analysis. These features are combined with conventional acoustic features, including standard features extracted using openSMILE and voice level features, and evaluated using support vector machines under subject-independent conditions for the binary classification of depressed and nondepressed speakers. Incorporating the proposed features improves classification performance compared with baseline features (accuracy: 0.54 for openSMILE and 0.52 for openSMILE + voice level features). The configuration integrating all four proposed feature sets achieves an accuracy of 0.58, a precision of 0.56, a recall of 0.58, and a specificity of 0.58, indicating consistent performance gains under subject-independent and strictly controlled evaluation conditions. Thus, depression-related speech patterns can be captured by explicitly modeling temporal and behavioral speech characteristics across entire dialog sessions. This study contributes to advancing acoustic feature design for speech-based depression detection and developing clinically supportive screening and monitoring technologies.</p>
	]]></content:encoded>

	<dc:title>Beyond Short-Frame Acoustic Features: Capturing Long-Term Speech Patterns for Depression Detection</dc:title>
			<dc:creator>Shizuku Fushimi</dc:creator>
			<dc:creator>Mohammad Aiman Azani</dc:creator>
			<dc:creator>Mizuto Chiba</dc:creator>
			<dc:creator>Yoshifumi Okada</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14040198</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-03-25</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-03-25</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>198</prism:startingPage>
		<prism:doi>10.3390/technologies14040198</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/4/198</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/4/197">

	<title>Technologies, Vol. 14, Pages 197: Safety-Enforcing and Occlusion-Aware Camera View Planning for Full-Body Imaging</title>
	<link>https://www.mdpi.com/2227-7080/14/4/197</link>
	<description>Most camera view planning algorithms are employed in exploration tasks that maximise information gain, but few address the specific challenge of observing targeted surface areas with optimal image quality. This paper presents a novel camera view planning algorithm designed for dermoscopic mole mapping, which is crucial for early melanoma detection. Traditional full-body scanners, though beneficial, suffer from fixed camera positions that can compromise image quality due to varying body contours and patient sizes. Our algorithm addresses this limitation by dynamically optimizing the camera position on a set of collaborative robot (cobot) arms to enhance image resolution, safety, and viewing angles during skin examinations. The proposed method formulates the problem as a non-linear least-squares optimisation that ensures no camera occlusion and a safe distance from the end effector encapsulating the camera to the patient while adjusting the pose of the camera based on the topography of the body. This approach not only maintains optimal imaging conditions by considering resolution and angle of incidence but also prioritises patient safety by preventing physical contact between the camera and the patient. Extensive testing demonstrates that our algorithm adapts effectively to different body shapes and sizes, ensuring high-resolution images across various patient demographics. Moreover, the integration of our camera view planning algorithm into an intelligent dermoscopy system has shown promising results in improving the efficiency and geometric quality of dermoscopic image acquisition, which could lead to more reliable and faster diagnoses. This technology holds significant potential to transform melanoma screening and diagnosis, providing a scalable, safer, and more precise approach to dermatological imaging.</description>
	<pubDate>2026-03-24</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 197: Safety-Enforcing and Occlusion-Aware Camera View Planning for Full-Body Imaging</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/4/197">doi: 10.3390/technologies14040197</a></p>
	<p>Authors:
		Valerio Franchi
		Ricard Campos
		Josep Quintana
		Nuno Gracias
		Rafael Garcia
		</p>
	<p>Most camera view planning algorithms are employed in exploration tasks that maximise information gain, but few address the specific challenge of observing targeted surface areas with optimal image quality. This paper presents a novel camera view planning algorithm designed for dermoscopic mole mapping, which is crucial for early melanoma detection. Traditional full-body scanners, though beneficial, suffer from fixed camera positions that can compromise image quality due to varying body contours and patient sizes. Our algorithm addresses this limitation by dynamically optimizing the camera position on a set of collaborative robot (cobot) arms to enhance image resolution, safety, and viewing angles during skin examinations. The proposed method formulates the problem as a non-linear least-squares optimisation that ensures no camera occlusion and a safe distance from the end effector encapsulating the camera to the patient while adjusting the pose of the camera based on the topography of the body. This approach not only maintains optimal imaging conditions by considering resolution and angle of incidence but also prioritises patient safety by preventing physical contact between the camera and the patient. Extensive testing demonstrates that our algorithm adapts effectively to different body shapes and sizes, ensuring high-resolution images across various patient demographics. Moreover, the integration of our camera view planning algorithm into an intelligent dermoscopy system has shown promising results in improving the efficiency and geometric quality of dermoscopic image acquisition, which could lead to more reliable and faster diagnoses. This technology holds significant potential to transform melanoma screening and diagnosis, providing a scalable, safer, and more precise approach to dermatological imaging.</p>
	]]></content:encoded>

	<dc:title>Safety-Enforcing and Occlusion-Aware Camera View Planning for Full-Body Imaging</dc:title>
			<dc:creator>Valerio Franchi</dc:creator>
			<dc:creator>Ricard Campos</dc:creator>
			<dc:creator>Josep Quintana</dc:creator>
			<dc:creator>Nuno Gracias</dc:creator>
			<dc:creator>Rafael Garcia</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14040197</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-03-24</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-03-24</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>197</prism:startingPage>
		<prism:doi>10.3390/technologies14040197</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/4/197</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/4/196">

	<title>Technologies, Vol. 14, Pages 196: The Future of Africa&amp;rsquo;s Digitalisation: Evidence from Phillips&amp;ndash;Sul Convergence Clubbing and Predictive ML Models</title>
	<link>https://www.mdpi.com/2227-7080/14/4/196</link>
	<description>Globalisation, accompanied by the rapid advancement of digital technologies, has become the bedrock of contemporary economies. However, the global digital divide has hindered many economies from enjoying the benefits of enhanced digitalisation. This study addresses the question: to what extent is there evidence of digital convergence or divergence among global economies, and what specific patterns of club clustering exist within the African continent? Employing a quantitative research design with secondary panel data from 123 countries (38 African), the study applies the Phillips and Sul convergence and club clustering algorithm to analyse digitalisation trends. The findings reveal that African countries exhibit significantly stronger within-club convergence dynamics than broader developing country groups, with Africa&amp;amp;rsquo;s adjustment speed (&amp;amp;sigma; = 2.5624) exceeding the Global South average (&amp;amp;sigma; = 0.8394) by more than threefold. This indicates that African nations are following a similar ICT development trajectory and catching up with other global regions at an accelerated rate. However, the study identifies substantial digital inequality within Africa itself, as countries fail to converge to a single steady state, instead forming distinct convergence clubs. These results underscore that digitalisation follows a systematic and continuous process determined by both technological advancement and countries&amp;amp;rsquo; absorptive capacity to adopt these technologies.</description>
	<pubDate>2026-03-24</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 196: The Future of Africa&amp;rsquo;s Digitalisation: Evidence from Phillips&amp;ndash;Sul Convergence Clubbing and Predictive ML Models</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/4/196">doi: 10.3390/technologies14040196</a></p>
	<p>Authors:
		Thapelo Chauke
		Olalekan Oladipo David
		Afees Oluwashina Noah
		</p>
	<p>Globalisation, accompanied by the rapid advancement of digital technologies, has become the bedrock of contemporary economies. However, the global digital divide has hindered many economies from enjoying the benefits of enhanced digitalisation. This study addresses the question: to what extent is there evidence of digital convergence or divergence among global economies, and what specific patterns of club clustering exist within the African continent? Employing a quantitative research design with secondary panel data from 123 countries (38 African), the study applies the Phillips and Sul convergence and club clustering algorithm to analyse digitalisation trends. The findings reveal that African countries exhibit significantly stronger within-club convergence dynamics than broader developing country groups, with Africa&amp;rsquo;s adjustment speed (&amp;sigma; = 2.5624) exceeding the Global South average (&amp;sigma; = 0.8394) by more than threefold. This indicates that African nations are following a similar ICT development trajectory and catching up with other global regions at an accelerated rate. However, the study identifies substantial digital inequality within Africa itself, as countries fail to converge to a single steady state, instead forming distinct convergence clubs. These results underscore that digitalisation follows a systematic and continuous process determined by both technological advancement and countries&amp;rsquo; absorptive capacity to adopt these technologies.</p>
	]]></content:encoded>

	<dc:title>The Future of Africa&amp;rsquo;s Digitalisation: Evidence from Phillips&amp;ndash;Sul Convergence Clubbing and Predictive ML Models</dc:title>
			<dc:creator>Thapelo Chauke</dc:creator>
			<dc:creator>Olalekan Oladipo David</dc:creator>
			<dc:creator>Afees Oluwashina Noah</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14040196</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-03-24</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-03-24</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>196</prism:startingPage>
		<prism:doi>10.3390/technologies14040196</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/4/196</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/4/195">

	<title>Technologies, Vol. 14, Pages 195: High-Precision Force Tracking Under Uncertainty: A Fuzzy-Adaptive Sliding-Mode Impedance Control Approach</title>
	<link>https://www.mdpi.com/2227-7080/14/4/195</link>
	<description>Achieving high-precision force tracking in robotic physical interaction remains challenging in the presence of environmental and dynamic model uncertainties. Conventional impedance control strategies often exhibit excessive force overshoot at contact onset and persistent steady-state errors under uncertain or time-varying interaction conditions. To overcome these limitations, this paper proposes a fuzzy-adaptive sliding-mode impedance control approach. During the initial contact phase, a tracking differentiator (TD) is employed to generate a smooth and dynamically feasible force reference, effectively suppressing impulsive force transients without requiring explicit contact detection. Furthermore, a fuzzy-logic-modulated adaptive law is developed to adjust online the adaptation gains of the impedance controller, thereby asymptotically eliminating steady-state tracking errors while preserving Lyapunov stability. In addition, a composite PD&amp;ndash;suboptimal sliding-mode control law is embedded within the impedance loop to enhance robustness against external disturbances while ensuring continuous, chattering-free control action. The proposed architecture requires no prior knowledge of environmental stiffness and is provably robust to model inaccuracies and unstructured disturbance. Simulation and experimental results conducted on a 6-DOF robotic manipulator demonstrate that, under realistic uncertain contact scenarios and in comparison with three benchmark methods, the proposed approach reduces overshoot by 26%, shortens settling time by 30%, and decreases steady-state error by 48%.</description>
	<pubDate>2026-03-24</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 195: High-Precision Force Tracking Under Uncertainty: A Fuzzy-Adaptive Sliding-Mode Impedance Control Approach</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/4/195">doi: 10.3390/technologies14040195</a></p>
	<p>Authors:
		Zengpeng Lu
		Jiarui Li
		Jianlei Fan
		Xirui Fan
		</p>
	<p>Achieving high-precision force tracking in robotic physical interaction remains challenging in the presence of environmental and dynamic model uncertainties. Conventional impedance control strategies often exhibit excessive force overshoot at contact onset and persistent steady-state errors under uncertain or time-varying interaction conditions. To overcome these limitations, this paper proposes a fuzzy-adaptive sliding-mode impedance control approach. During the initial contact phase, a tracking differentiator (TD) is employed to generate a smooth and dynamically feasible force reference, effectively suppressing impulsive force transients without requiring explicit contact detection. Furthermore, a fuzzy-logic-modulated adaptive law is developed to adjust online the adaptation gains of the impedance controller, thereby asymptotically eliminating steady-state tracking errors while preserving Lyapunov stability. In addition, a composite PD&amp;ndash;suboptimal sliding-mode control law is embedded within the impedance loop to enhance robustness against external disturbances while ensuring continuous, chattering-free control action. The proposed architecture requires no prior knowledge of environmental stiffness and is provably robust to model inaccuracies and unstructured disturbance. Simulation and experimental results conducted on a 6-DOF robotic manipulator demonstrate that, under realistic uncertain contact scenarios and in comparison with three benchmark methods, the proposed approach reduces overshoot by 26%, shortens settling time by 30%, and decreases steady-state error by 48%.</p>
	]]></content:encoded>

	<dc:title>High-Precision Force Tracking Under Uncertainty: A Fuzzy-Adaptive Sliding-Mode Impedance Control Approach</dc:title>
			<dc:creator>Zengpeng Lu</dc:creator>
			<dc:creator>Jiarui Li</dc:creator>
			<dc:creator>Jianlei Fan</dc:creator>
			<dc:creator>Xirui Fan</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14040195</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-03-24</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-03-24</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>195</prism:startingPage>
		<prism:doi>10.3390/technologies14040195</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/4/195</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/4/194">

	<title>Technologies, Vol. 14, Pages 194: Integrating Machine Learning and Business Intelligence into Supply Chain Risk Management for a Comprehensive Cybersecurity Framework: A Systematic Literature Review</title>
	<link>https://www.mdpi.com/2227-7080/14/4/194</link>
	<description>Supply chain cybersecurity is a growing concern for businesses as they utilize increasingly interconnected digital systems. This systematic literature review examines how machine learning (ML) and business intelligence (BI) may be used in conjunction to improve supply chain cyber security risk management. This review followed PRISMA guidelines. A quality evaluation was performed based on CASP to evaluate 35 peer-reviewed articles published in 2016&amp;ndash;2025. The review analysis indicates that although ML has been extensively utilized for threat detection, BI utilization is fragmented. Additionally, there is a lack of integrated ML-BI frameworks, specifically for small&amp;ndash;medium enterprises (SMEs) and developing economies. As such, this literature review provides a conceptual four-layer framework of predictive and analytical capabilities for threat detection, risk assessment, and decision-making. It also identifies a structured research agenda with which to advance the field of research.</description>
	<pubDate>2026-03-24</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 194: Integrating Machine Learning and Business Intelligence into Supply Chain Risk Management for a Comprehensive Cybersecurity Framework: A Systematic Literature Review</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/4/194">doi: 10.3390/technologies14040194</a></p>
	<p>Authors:
		Rasha Aljaafreh
		Firas Al-Doghman
		Farookh Hussain
		Fazlullah Khan
		Ali Aljaafreh
		</p>
	<p>Supply chain cybersecurity is a growing concern for businesses as they utilize increasingly interconnected digital systems. This systematic literature review examines how machine learning (ML) and business intelligence (BI) may be used in conjunction to improve supply chain cyber security risk management. This review followed PRISMA guidelines. A quality evaluation was performed based on CASP to evaluate 35 peer-reviewed articles published in 2016&amp;ndash;2025. The review analysis indicates that although ML has been extensively utilized for threat detection, BI utilization is fragmented. Additionally, there is a lack of integrated ML-BI frameworks, specifically for small&amp;ndash;medium enterprises (SMEs) and developing economies. As such, this literature review provides a conceptual four-layer framework of predictive and analytical capabilities for threat detection, risk assessment, and decision-making. It also identifies a structured research agenda with which to advance the field of research.</p>
	]]></content:encoded>

	<dc:title>Integrating Machine Learning and Business Intelligence into Supply Chain Risk Management for a Comprehensive Cybersecurity Framework: A Systematic Literature Review</dc:title>
			<dc:creator>Rasha Aljaafreh</dc:creator>
			<dc:creator>Firas Al-Doghman</dc:creator>
			<dc:creator>Farookh Hussain</dc:creator>
			<dc:creator>Fazlullah Khan</dc:creator>
			<dc:creator>Ali Aljaafreh</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14040194</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-03-24</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-03-24</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Systematic Review</prism:section>
	<prism:startingPage>194</prism:startingPage>
		<prism:doi>10.3390/technologies14040194</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/4/194</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/3/193">

	<title>Technologies, Vol. 14, Pages 193: Adopting MOD-API in a Modern Dataset Catalog Platform: Opportunities, Challenges and Limitations</title>
	<link>https://www.mdpi.com/2227-7080/14/3/193</link>
	<description>As data exploitation continues to demonstrate its value, ontologies, thesauri, and other semantic datasets are increasingly recognized for enabling semantically meaningful data integration across disparate domains. With the proliferation of dataset catalogs, the MOD ontology (Metadata for Ontology Description and publication) was adopted, and an associated API was developed to support the future European Open Science Cloud (EOSC). Their aim is to harmonize catalogs of semantic datasets with respect to metadata vocabularies and access mechanisms, thereby ensuring compliance with the FAIR principles. Within an implementation action involving developers of prominent dataset catalogs, we were selected to integrate the MOD-API into ShowVoc, our platform for publishing and consuming ontologies, thesauri, lexicons, and other Semantic Web datasets. However, ShowVoc already relied on an expressive metadata model, the MDR (acronym for &amp;ldquo;Metadata Registry&amp;rdquo;), named after the component responsible for managing the platform&amp;rsquo;s internal catalog. Due to precise dissemination requirements, the MDR provides multiple abstraction levels and detailed specifications concerning the distributions and formats in which a dataset may be made available. In this article, we report on the challenges that we faced and the trade-offs that we made while reconciling these metadata models, highlighting limitations in the current MOD standard that may inform future enhancements.</description>
	<pubDate>2026-03-23</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 193: Adopting MOD-API in a Modern Dataset Catalog Platform: Opportunities, Challenges and Limitations</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/3/193">doi: 10.3390/technologies14030193</a></p>
	<p>Authors:
		Manuel Fiorelli
		Paolo Bocciarelli
		Armando Stellato
		</p>
	<p>As data exploitation continues to demonstrate its value, ontologies, thesauri, and other semantic datasets are increasingly recognized for enabling semantically meaningful data integration across disparate domains. With the proliferation of dataset catalogs, the MOD ontology (Metadata for Ontology Description and publication) was adopted, and an associated API was developed to support the future European Open Science Cloud (EOSC). Their aim is to harmonize catalogs of semantic datasets with respect to metadata vocabularies and access mechanisms, thereby ensuring compliance with the FAIR principles. Within an implementation action involving developers of prominent dataset catalogs, we were selected to integrate the MOD-API into ShowVoc, our platform for publishing and consuming ontologies, thesauri, lexicons, and other Semantic Web datasets. However, ShowVoc already relied on an expressive metadata model, the MDR (acronym for &amp;ldquo;Metadata Registry&amp;rdquo;), named after the component responsible for managing the platform&amp;rsquo;s internal catalog. Due to precise dissemination requirements, the MDR provides multiple abstraction levels and detailed specifications concerning the distributions and formats in which a dataset may be made available. In this article, we report on the challenges that we faced and the trade-offs that we made while reconciling these metadata models, highlighting limitations in the current MOD standard that may inform future enhancements.</p>
	]]></content:encoded>

	<dc:title>Adopting MOD-API in a Modern Dataset Catalog Platform: Opportunities, Challenges and Limitations</dc:title>
			<dc:creator>Manuel Fiorelli</dc:creator>
			<dc:creator>Paolo Bocciarelli</dc:creator>
			<dc:creator>Armando Stellato</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14030193</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-03-23</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-03-23</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>193</prism:startingPage>
		<prism:doi>10.3390/technologies14030193</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/3/193</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/3/192">

	<title>Technologies, Vol. 14, Pages 192: WASO as a Stage-Resolved Window for Detectable HRV Differences in Paradoxical Insomnia</title>
	<link>https://www.mdpi.com/2227-7080/14/3/192</link>
	<description>Paradoxical insomnia (PI) is characterized by a discrepancy between subjective sleep complaints and objectively preserved sleep, yet its autonomic mechanisms remain poorly understood. This study examined stage-specific autonomic characteristics of PI using heart rate variability (HRV) analyses in a large population-based cohort. HRV features were extracted from non-overlapping five-minute windows across non-rapid eye movement (NREM) sleep, rapid eye movement (REM) sleep, and wake after sleep onset (WASO). Group differences were evaluated using FDR-corrected univariate analysis, multivariate embedding, and supervised machine learning. Whole-night, NREM, and REM features showed substantial overlap among groups. In contrast, the most consistent between-group differences emerged during WASO. Multivariate analysis showed the greatest group displacement during WASO, with UMAP centroid distances exceeding those observed during NREM and REM sleep. Supervised models trained on WASO-specific features achieved the highest classification performance, yielding an accuracy of 0.629 and an F1-score of 0.683 for PI versus normal sleep. Taken together, these findings suggest that WASO is the stage in which between-group HRV differences are most consistently detectable across complementary analyses, although several dispersion-based findings were substantially influenced by WASO window count.</description>
	<pubDate>2026-03-22</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 192: WASO as a Stage-Resolved Window for Detectable HRV Differences in Paradoxical Insomnia</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/3/192">doi: 10.3390/technologies14030192</a></p>
	<p>Authors:
		Ye Eun Kong
		A Hyun Jung
		Se Dong Min
		</p>
	<p>Paradoxical insomnia (PI) is characterized by a discrepancy between subjective sleep complaints and objectively preserved sleep, yet its autonomic mechanisms remain poorly understood. This study examined stage-specific autonomic characteristics of PI using heart rate variability (HRV) analyses in a large population-based cohort. HRV features were extracted from non-overlapping five-minute windows across non-rapid eye movement (NREM) sleep, rapid eye movement (REM) sleep, and wake after sleep onset (WASO). Group differences were evaluated using FDR-corrected univariate analysis, multivariate embedding, and supervised machine learning. Whole-night, NREM, and REM features showed substantial overlap among groups. In contrast, the most consistent between-group differences emerged during WASO. Multivariate analysis showed the greatest group displacement during WASO, with UMAP centroid distances exceeding those observed during NREM and REM sleep. Supervised models trained on WASO-specific features achieved the highest classification performance, yielding an accuracy of 0.629 and an F1-score of 0.683 for PI versus normal sleep. Taken together, these findings suggest that WASO is the stage in which between-group HRV differences are most consistently detectable across complementary analyses, although several dispersion-based findings were substantially influenced by WASO window count.</p>
	]]></content:encoded>

	<dc:title>WASO as a Stage-Resolved Window for Detectable HRV Differences in Paradoxical Insomnia</dc:title>
			<dc:creator>Ye Eun Kong</dc:creator>
			<dc:creator>A Hyun Jung</dc:creator>
			<dc:creator>Se Dong Min</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14030192</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-03-22</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-03-22</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>192</prism:startingPage>
		<prism:doi>10.3390/technologies14030192</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/3/192</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/3/191">

	<title>Technologies, Vol. 14, Pages 191: Total Variational Indoor Localization Algorithm for Signal Manifolds in the Energy Domain</title>
	<link>https://www.mdpi.com/2227-7080/14/3/191</link>
	<description>To address the topological mismatch between signal space and physical space caused by uneven signal feature distribution in indoor non-line-of-sight and complex topological environments, this paper proposes an indoor positioning algorithm based on Energy-domain Fingerprint Manifold Graph Total Variation (EFM-GTV). To mitigate neighborhood distortion caused by uneven high-dimensional signal feature distribution, a UMAP manifold topology graph construction method based on fuzzy simplicial sets is designed to establish a graph basis consistent with physical space topology. To reduce false matching risks in global search, a physical topology pruning strategy combining Jaccard similarity is proposed, effectively eliminating pseudo-connections. Building upon this foundation, we introduced an optimization model based on graph total variation, reformulating the positioning problem as a graph signal recovery task. This approach effectively overcomes signal fluctuation interference in complex topologies like U-shaped corridors, achieving robust position estimation. Experiments demonstrate that this algorithm effectively leverages manifold structure constraints to correct NLOS errors. On real-world field test datasets, compared to traditional weighted algorithms, the average positioning accuracy improves to 1.4267 m, with maximum positioning error reduced by over 50%, achieving high-precision robust positioning.</description>
	<pubDate>2026-03-21</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 191: Total Variational Indoor Localization Algorithm for Signal Manifolds in the Energy Domain</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/3/191">doi: 10.3390/technologies14030191</a></p>
	<p>Authors:
		Yunliang Wang
		Ningning Qin
		Shunyuan Sun
		</p>
	<p>To address the topological mismatch between signal space and physical space caused by uneven signal feature distribution in indoor non-line-of-sight and complex topological environments, this paper proposes an indoor positioning algorithm based on Energy-domain Fingerprint Manifold Graph Total Variation (EFM-GTV). To mitigate neighborhood distortion caused by uneven high-dimensional signal feature distribution, a UMAP manifold topology graph construction method based on fuzzy simplicial sets is designed to establish a graph basis consistent with physical space topology. To reduce false matching risks in global search, a physical topology pruning strategy combining Jaccard similarity is proposed, effectively eliminating pseudo-connections. Building upon this foundation, we introduced an optimization model based on graph total variation, reformulating the positioning problem as a graph signal recovery task. This approach effectively overcomes signal fluctuation interference in complex topologies like U-shaped corridors, achieving robust position estimation. Experiments demonstrate that this algorithm effectively leverages manifold structure constraints to correct NLOS errors. On real-world field test datasets, compared to traditional weighted algorithms, the average positioning accuracy improves to 1.4267 m, with maximum positioning error reduced by over 50%, achieving high-precision robust positioning.</p>
	]]></content:encoded>

	<dc:title>Total Variational Indoor Localization Algorithm for Signal Manifolds in the Energy Domain</dc:title>
			<dc:creator>Yunliang Wang</dc:creator>
			<dc:creator>Ningning Qin</dc:creator>
			<dc:creator>Shunyuan Sun</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14030191</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-03-21</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-03-21</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>191</prism:startingPage>
		<prism:doi>10.3390/technologies14030191</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/3/191</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/3/190">

	<title>Technologies, Vol. 14, Pages 190: A Sensitivity Study on the Effect of Voxel Human Model Deformation and Radionuclide Accumulation for Internal Dose Assessment in Nuclear Emergency</title>
	<link>https://www.mdpi.com/2227-7080/14/3/190</link>
	<description>Current internal dose assessments in nuclear emergencies rely on static, upright voxel phantoms, often neglecting realistic human postures and physiological factors&amp;mdash;such as breathing rates specific to emergency scenarios&amp;mdash;that influence radionuclide intake and biokinetics. We present a voxel deformation method based on an improved as-rigid-as-possible (ARAP) algorithm incorporating a novel smoothing term to generate anatomically consistent stooping and swivelling models. Coupled with Geant4 Monte Carlo simulations using the full decay spectra of radionuclides relevant to simulated nuclear accident scenarios (i.e., 131I and 137Cs), and incorporating scenario-specific respiratory parameters into activity calculations, our results demonstrate that body posture significantly influences internal dose distributions: for 137Cs, the specific absorbed fraction (SAF) of the liver increases by up to 24.9% in the stooping posture, while swivelling induces variations of up to 15.1%. In contrast, dose metrics for 131I show minimal sensitivity to posture (&amp;lt;5%). These findings highlight the importance of incorporating realistic postures and context-aware physiological parameters into emergency dosimetry. Our method enables behaviorally realistic internal dose reconstruction and provides a robust foundation for integrating human motion and respiratory data into rapid triage and risk assessment protocols.</description>
	<pubDate>2026-03-21</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 190: A Sensitivity Study on the Effect of Voxel Human Model Deformation and Radionuclide Accumulation for Internal Dose Assessment in Nuclear Emergency</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/3/190">doi: 10.3390/technologies14030190</a></p>
	<p>Authors:
		Chenze He
		Chunhua Chen
		Qing Luo
		Yi Li
		Yuan Cheng
		Liwei Chen
		Fang Ruan
		</p>
	<p>Current internal dose assessments in nuclear emergencies rely on static, upright voxel phantoms, often neglecting realistic human postures and physiological factors&amp;mdash;such as breathing rates specific to emergency scenarios&amp;mdash;that influence radionuclide intake and biokinetics. We present a voxel deformation method based on an improved as-rigid-as-possible (ARAP) algorithm incorporating a novel smoothing term to generate anatomically consistent stooping and swivelling models. Coupled with Geant4 Monte Carlo simulations using the full decay spectra of radionuclides relevant to simulated nuclear accident scenarios (i.e., 131I and 137Cs), and incorporating scenario-specific respiratory parameters into activity calculations, our results demonstrate that body posture significantly influences internal dose distributions: for 137Cs, the specific absorbed fraction (SAF) of the liver increases by up to 24.9% in the stooping posture, while swivelling induces variations of up to 15.1%. In contrast, dose metrics for 131I show minimal sensitivity to posture (&amp;lt;5%). These findings highlight the importance of incorporating realistic postures and context-aware physiological parameters into emergency dosimetry. Our method enables behaviorally realistic internal dose reconstruction and provides a robust foundation for integrating human motion and respiratory data into rapid triage and risk assessment protocols.</p>
	]]></content:encoded>

	<dc:title>A Sensitivity Study on the Effect of Voxel Human Model Deformation and Radionuclide Accumulation for Internal Dose Assessment in Nuclear Emergency</dc:title>
			<dc:creator>Chenze He</dc:creator>
			<dc:creator>Chunhua Chen</dc:creator>
			<dc:creator>Qing Luo</dc:creator>
			<dc:creator>Yi Li</dc:creator>
			<dc:creator>Yuan Cheng</dc:creator>
			<dc:creator>Liwei Chen</dc:creator>
			<dc:creator>Fang Ruan</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14030190</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-03-21</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-03-21</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>190</prism:startingPage>
		<prism:doi>10.3390/technologies14030190</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/3/190</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/3/189">

	<title>Technologies, Vol. 14, Pages 189: Design and Pilot Feasibility of a Low-Cost Wearable for Mexican Sign Language in Inclusive Higher Education</title>
	<link>https://www.mdpi.com/2227-7080/14/3/189</link>
	<description>A substantial number of students with hearing impairments are enrolled in higher education, motivating the development of inclusive assistive technologies that reduce communication barriers. This study developed and evaluated a prototype electronic glove that translates Mexican Sign Language (LSM) signs into Spanish text using machine learning. Eight participants (four deaf and four hearing with LSM proficiency) completed four sessions involving 12 signs; three sessions (S1&amp;ndash;S3) were used for model development and one session (T) was held out for evaluation. Models were trained on S1&amp;ndash;S3 and tested on T using a session-level split without window mixing across sessions; therefore, results represent a speaker-dependent, inter-session pilot assessment rather than a speaker-independent generalization test.&amp;nbsp;The glove integrates flex sensors and an inertial measurement unit IMU MPU6050 connected to an ESP32-C3 SuperMini microcontroller. These components were selected due to their low cost, availability, and ease of integration, making them suitable for the development of accessible wearable assistive technologies.&amp;nbsp;Under this protocol, the system achieved a window-level overall test accuracy of 97.0% (95% CI computed at the window level: 96.00&amp;ndash;97.00), with higher performance for the dynamic subset (98.0%) than for the static subset (95.0%), and an algorithmic decision delay of 1.2 s. Usability and acceptance were evaluated using the System Usability Scale (SUS) and a Technology Acceptance Model (TAM)-based questionnaire. The mean SUS score was 50.6 &amp;plusmn; 1.8 (marginal usability), while participants reported positive perceptions across TAM constructs. 
Overall, findings demonstrate technical feasibility under controlled inter-session conditions and provide a foundation for iterative user-centered refinement, followed by strict speaker-independent validation and classroom deployment studies in future work.</description>
	<pubDate>2026-03-20</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 189: Design and Pilot Feasibility of a Low-Cost Wearable for Mexican Sign Language in Inclusive Higher Education</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/3/189">doi: 10.3390/technologies14030189</a></p>
	<p>Authors:
		Juan Carlos Ramírez-Vázquez
		Guadalupe Esmeralda Rivera-García
		Marco Antonio Gómez-Guzmán
		Marco Antonio Díaz-Martínez
		Miriam Janet Cervantes-López
		Mariel Abigail Cruz-Nájera
		</p>
	<p>A substantial number of students with hearing impairments are enrolled in higher education, motivating the development of inclusive assistive technologies that reduce communication barriers. This study developed and evaluated a prototype electronic glove that translates Mexican Sign Language (LSM) signs into Spanish text using machine learning. Eight participants (four deaf and four hearing with LSM proficiency) completed four sessions involving 12 signs; three sessions (S1&ndash;S3) were used for model development and one session (T) was held out for evaluation. Models were trained on S1&ndash;S3 and tested on T using a session-level split without window mixing across sessions; therefore, results represent a speaker-dependent, inter-session pilot assessment rather than a speaker-independent generalization test.&nbsp;The glove integrates flex sensors and an inertial measurement unit IMU MPU6050 connected to an ESP32-C3 SuperMini microcontroller. These components were selected due to their low cost, availability, and ease of integration, making them suitable for the development of accessible wearable assistive technologies.&nbsp;Under this protocol, the system achieved a window-level overall test accuracy of 97.0% (95% CI computed at the window level: 96.00&ndash;97.00), with higher performance for the dynamic subset (98.0%) than for the static subset (95.0%), and an algorithmic decision delay of 1.2 s. Usability and acceptance were evaluated using the System Usability Scale (SUS) and a Technology Acceptance Model (TAM)-based questionnaire. The mean SUS score was 50.6 &plusmn; 1.8 (marginal usability), while participants reported positive perceptions across TAM constructs. 
Overall, findings demonstrate technical feasibility under controlled inter-session conditions and provide a foundation for iterative user-centered refinement, followed by strict speaker-independent validation and classroom deployment studies in future work.</p>
	]]></content:encoded>

	<dc:title>Design and Pilot Feasibility of a Low-Cost Wearable for Mexican Sign Language in Inclusive Higher Education</dc:title>
			<dc:creator>Juan Carlos Ramírez-Vázquez</dc:creator>
			<dc:creator>Guadalupe Esmeralda Rivera-García</dc:creator>
			<dc:creator>Marco Antonio Gómez-Guzmán</dc:creator>
			<dc:creator>Marco Antonio Díaz-Martínez</dc:creator>
			<dc:creator>Miriam Janet Cervantes-López</dc:creator>
			<dc:creator>Mariel Abigail Cruz-Nájera</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14030189</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-03-20</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-03-20</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>189</prism:startingPage>
		<prism:doi>10.3390/technologies14030189</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/3/189</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/3/188">

	<title>Technologies, Vol. 14, Pages 188: Affective Intelligent Systems in Healthcare: A Systematic Review</title>
	<link>https://www.mdpi.com/2227-7080/14/3/188</link>
	<description>Objectives: To investigate the current state of affective computing in healthcare, focusing on its application contexts, algorithmic trends, and the technical&amp;ndash;ethical duality involving data privacy and security. Methods and Results: A systematic review was conducted in two phases (2013&amp;ndash;2025) following PRISMA guidelines. A total of 170 peer-reviewed articles were selected from PubMed, IEEE Xplore, Scopus, and Web of Science based on predefined inclusion and exclusion criteria, with the sample restricted to full-text studies in English addressing affective computing in healthcare. No formal risk-of-bias tool was applied due to the computational nature of the studies, and the findings were synthesized descriptively. Discussion: The findings reveal a clear shift from classical machine learning (e.g., SVM, k-NN) toward deep learning and hybrid architectures such as CNN&amp;ndash;LSTM and attention-based models for processing complex physiological signals. Recent years have shown a growing interest in multimodal data fusion and privacy-preserving mechanisms such as homomorphic encryption. Evidence remains limited by methodological heterogeneity and inconsistent reporting across studies. A significant gap persists in regulatory compliance, as 57% of recent publications do not adequately address data security or ethical risks associated with sensitive biometric footprints. Conclusions: Although affective computing has reached a certain level of technical maturity, future research must prioritize lightweight, secure, and privacy-by-design architectures to enable ethically aligned and trustworthy deployment in real-world healthcare scenarios.</description>
	<pubDate>2026-03-20</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 188: Affective Intelligent Systems in Healthcare: A Systematic Review</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/3/188">doi: 10.3390/technologies14030188</a></p>
	<p>Authors:
		Analúcia Schiaffino Morales
		Thiago de Luca Reis
		Alison R. Panisson
		Fabrício Ourique
		Iwens G. Sene
		</p>
	<p>Objectives: To investigate the current state of affective computing in healthcare, focusing on its application contexts, algorithmic trends, and the technical&ndash;ethical duality involving data privacy and security. Methods and Results: A systematic review was conducted in two phases (2013&ndash;2025) following PRISMA guidelines. A total of 170 peer-reviewed articles were selected from PubMed, IEEE Xplore, Scopus, and Web of Science based on predefined inclusion and exclusion criteria, with the sample restricted to full-text studies in English addressing affective computing in healthcare. No formal risk-of-bias tool was applied due to the computational nature of the studies, and the findings were synthesized descriptively. Discussion: The findings reveal a clear shift from classical machine learning (e.g., SVM, k-NN) toward deep learning and hybrid architectures such as CNN&ndash;LSTM and attention-based models for processing complex physiological signals. Recent years have shown a growing interest in multimodal data fusion and privacy-preserving mechanisms such as homomorphic encryption. Evidence remains limited by methodological heterogeneity and inconsistent reporting across studies. A significant gap persists in regulatory compliance, as 57% of recent publications do not adequately address data security or ethical risks associated with sensitive biometric footprints. Conclusions: Although affective computing has reached a certain level of technical maturity, future research must prioritize lightweight, secure, and privacy-by-design architectures to enable ethically aligned and trustworthy deployment in real-world healthcare scenarios.</p>
	]]></content:encoded>

	<dc:title>Affective Intelligent Systems in Healthcare: A Systematic Review</dc:title>
			<dc:creator>Analúcia Schiaffino Morales</dc:creator>
			<dc:creator>Thiago de Luca Reis</dc:creator>
			<dc:creator>Alison R. Panisson</dc:creator>
			<dc:creator>Fabrício Ourique</dc:creator>
			<dc:creator>Iwens G. Sene</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14030188</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-03-20</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-03-20</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Systematic Review</prism:section>
	<prism:startingPage>188</prism:startingPage>
		<prism:doi>10.3390/technologies14030188</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/3/188</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/3/187">

	<title>Technologies, Vol. 14, Pages 187: Multipoint Temperature-Based Depth Analysis of a U-Tube Borehole Heat Exchanger</title>
	<link>https://www.mdpi.com/2227-7080/14/3/187</link>
	<description>In ground-source heat-pump (GSHP) systems equipped with a single U-tube borehole heat exchanger (BHE), the heat-carrier fluid in the return leg may release heat to the surrounding ground in the shallow part of the borehole. From a fluid energy balance perspective, this is an exothermic process; however, it is detrimental during heating operation: It lowers the effective source temperature available to the heat pump and therefore degrades the overall coefficient of performance (COP). This study proposes a measurement-driven procedure to determine the exothermic transition depth z&amp;uarr;* from temperature profiles recorded at multiple depths along the ascending (return) pipe. The borehole is discretized into axial segments and, assuming a constant mass flow rate, the linear heat-exchange rate is estimated from the segment-wise enthalpy change. Time integration yields the segment-wise net energy exchange Q&amp;uarr;,i, which is then classified as exothermic or endothermic using an uncertainty-based threshold derived from the standard uncertainty of the temperature sensors. The exothermic transition depth z&amp;uarr;* is defined as the first statistically stable sign change in the integrated segment energy (from exothermic to endothermic) and is obtained by linear interpolation between adjacent segment centres. By summing the exothermic energy exchange and the corresponding average loss power, an equivalent change in source-side outlet temperature &amp;#8710;Tout is estimated and interpreted in terms of COP impact using a Carnot-scaled surrogate model. For two representative operating conditions, z&amp;uarr;* was found at 31.17&amp;nbsp;m and 24.01&amp;nbsp;m, respectively, while the average exothermic loss power remained approximately 0.48 kW. The estimated &amp;#8710;Tout ranged from 0.52 to 0.75&amp;nbsp;K, corresponding to a diagnostic COP improvement if this parasitic exothermic exchange could be mitigated. 
The present results should therefore be interpreted as a case study-based demonstration of the method on one instrumented borehole rather than as a universal quantitative prediction for other sites or borehole fields.</description>
	<pubDate>2026-03-20</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 187: Multipoint Temperature-Based Depth Analysis of a U-Tube Borehole Heat Exchanger</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/3/187">doi: 10.3390/technologies14030187</a></p>
	<p>Authors:
		Viktor Zonai
		Laszlo Garbai
		Robert Santa
		</p>
	<p>In ground-source heat-pump (GSHP) systems equipped with a single U-tube borehole heat exchanger (BHE), the heat-carrier fluid in the return leg may release heat to the surrounding ground in the shallow part of the borehole. From a fluid energy balance perspective, this is an exothermic process; however, it is detrimental during heating operation: It lowers the effective source temperature available to the heat pump and therefore degrades the overall coefficient of performance (COP). This study proposes a measurement-driven procedure to determine the exothermic transition depth z&uarr;* from temperature profiles recorded at multiple depths along the ascending (return) pipe. The borehole is discretized into axial segments and, assuming a constant mass flow rate, the linear heat-exchange rate is estimated from the segment-wise enthalpy change. Time integration yields the segment-wise net energy exchange Q&uarr;,i, which is then classified as exothermic or endothermic using an uncertainty-based threshold derived from the standard uncertainty of the temperature sensors. The exothermic transition depth z&uarr;* is defined as the first statistically stable sign change in the integrated segment energy (from exothermic to endothermic) and is obtained by linear interpolation between adjacent segment centres. By summing the exothermic energy exchange and the corresponding average loss power, an equivalent change in source-side outlet temperature &#8710;Tout is estimated and interpreted in terms of COP impact using a Carnot-scaled surrogate model. For two representative operating conditions, z&uarr;* was found at 31.17&nbsp;m and 24.01&nbsp;m, respectively, while the average exothermic loss power remained approximately 0.48 kW. The estimated &#8710;Tout ranged from 0.52 to 0.75&nbsp;K, corresponding to a diagnostic COP improvement if this parasitic exothermic exchange could be mitigated. 
The present results should therefore be interpreted as a case study-based demonstration of the method on one instrumented borehole rather than as a universal quantitative prediction for other sites or borehole fields.</p>
	]]></content:encoded>

	<dc:title>Multipoint Temperature-Based Depth Analysis of a U-Tube Borehole Heat Exchanger</dc:title>
			<dc:creator>Viktor Zonai</dc:creator>
			<dc:creator>Laszlo Garbai</dc:creator>
			<dc:creator>Robert Santa</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14030187</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-03-20</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-03-20</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>187</prism:startingPage>
		<prism:doi>10.3390/technologies14030187</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/3/187</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/3/186">

	<title>Technologies, Vol. 14, Pages 186: Seamlessly Natural: Image Stitching with Natural Appearance Preservation</title>
	<link>https://www.mdpi.com/2227-7080/14/3/186</link>
	<description>Conventional image stitching pipelines predominantly rely on homographic alignment, whose planar assumption often breaks down in dual-camera configurations capturing non-planar scenes, producing geometric warping, bulging, and structural distortion. To address these limitations, this paper presents SENA (Seamlessly Natural), a geometry-driven image stitching approach with three complementary contributions. First, we propose a hierarchical affine-based warping strategy that combines global affine initialization, local affine refinement, and a smooth free-form deformation field regulated by seamguard adaptive smoothing. This multi-scale design preserves local shape, parallelism, and aspect ratios, thereby reducing the hallucinated distortions commonly associated with homography-based models. Second, SENA incorporates a geometry-driven adequate zone detection mechanism that identifies regions with reduced parallax directly from the disparity consistency of correspondences filtered by RANSAC, without relying on semantic segmentation or depth estimation. Third, within this zone, anchor-based seamline cutting and segmentation enforce one-to-one geometric correspondence between image pairs, reducing ghosting and smearing artifacts. Extensive experiments demonstrate that SENA achieves 26.2 dB PSNR and 0.84 SSIM, obtains the lowest BRISQUE score (33.4) among compared methods, and reduces runtime by 79% on average across resolutions. These results confirm improved structural fidelity and computational efficiency while maintaining competitive alignment accuracy.</description>
	<pubDate>2026-03-19</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 186: Seamlessly Natural: Image Stitching with Natural Appearance Preservation</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/3/186">doi: 10.3390/technologies14030186</a></p>
	<p>Authors:
		Gaetane Lorna N. Tchana
		Damaris Belle M. Fotso
		Antonio Hendricks
		Christophe Bobda
		</p>
	<p>Conventional image stitching pipelines predominantly rely on homographic alignment, whose planar assumption often breaks down in dual-camera configurations capturing non-planar scenes, producing geometric warping, bulging, and structural distortion. To address these limitations, this paper presents SENA (Seamlessly Natural), a geometry-driven image stitching approach with three complementary contributions. First, we propose a hierarchical affine-based warping strategy that combines global affine initialization, local affine refinement, and a smooth free-form deformation field regulated by seamguard adaptive smoothing. This multi-scale design preserves local shape, parallelism, and aspect ratios, thereby reducing the hallucinated distortions commonly associated with homography-based models. Second, SENA incorporates a geometry-driven adequate zone detection mechanism that identifies regions with reduced parallax directly from the disparity consistency of correspondences filtered by RANSAC, without relying on semantic segmentation or depth estimation. Third, within this zone, anchor-based seamline cutting and segmentation enforce one-to-one geometric correspondence between image pairs, reducing ghosting and smearing artifacts. Extensive experiments demonstrate that SENA achieves 26.2 dB PSNR and 0.84 SSIM, obtains the lowest BRISQUE score (33.4) among compared methods, and reduces runtime by 79% on average across resolutions. These results confirm improved structural fidelity and computational efficiency while maintaining competitive alignment accuracy.</p>
	]]></content:encoded>

	<dc:title>Seamlessly Natural: Image Stitching with Natural Appearance Preservation</dc:title>
			<dc:creator>Gaetane Lorna N. Tchana</dc:creator>
			<dc:creator>Damaris Belle M. Fotso</dc:creator>
			<dc:creator>Antonio Hendricks</dc:creator>
			<dc:creator>Christophe Bobda</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14030186</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-03-19</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-03-19</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>186</prism:startingPage>
		<prism:doi>10.3390/technologies14030186</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/3/186</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/3/185">

	<title>Technologies, Vol. 14, Pages 185: An Integrated Forecasting and Scheduling Energy Management Framework for Renewable-Supported Grids with Aggregated Electric Vehicles</title>
	<link>https://www.mdpi.com/2227-7080/14/3/185</link>
	<description>The global transition towards sustainable and resilient energy systems has emphasized the need for efficient utilization of renewable energy sources (RESs) and rapid electrification of transportation. However, smart grids must address the intermittency of solar and wind power while accommodating the growing demand from electric vehicles (EVs). Hence, in this paper, a data-driven energy management system (EMS) is proposed that combines multivariable forecasting, generation scheduling, and EV charging coordination in a dual-level decentralized framework to increase the efficiency, reliability, and scalability of modern power grids. First, short-term forecasts of solar irradiance, wind speed, and load demand are addressed via five machine learning models ranging from nonlinear to ensemble models. Accordingly, a unified CatBoost-based platform for forecasting these three variables is selected because of its better performance and accuracy. These forecasts are subsequently utilized in a mixed-integer linear programming (MILP) framework for optimal generation scheduling in the considered network, fulfilling load demand at reduced electricity and emission costs while maintaining grid stability. Finally, a priority-based scheme is proposed for charging/discharging coordination of the aggregated EVs, minimizing demand variability while fulfilling vehicles&amp;rsquo; charging needs and maintaining their batteries&amp;rsquo; lifetime. The superiority of the proposed method lies in integrating a multivariable forecasting pipeline, linear MILP generation scheduling, and battery-health-aware V2G coordination in a unified decoupled framework, unlike many recent frontier works that treat these capabilities independently. 
Simulation results, under different scenarios, confirm that the proposed intelligent EMS can significantly reduce operational fluctuations, satisfy load and EV demands, optimize RES utilization, and support system cost-effectiveness, sustainability, and resilience.</description>
	<pubDate>2026-03-19</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 185: An Integrated Forecasting and Scheduling Energy Management Framework for Renewable-Supported Grids with Aggregated Electric Vehicles</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/3/185">doi: 10.3390/technologies14030185</a></p>
	<p>Authors:
		Rania A. Ibrahim
		Ahmed M. Abdelrahim
		Abdelaziz Elwakil
		Nahla E. Zakzouk
		</p>
	<p>The global transition towards sustainable and resilient energy systems has emphasized the need for efficient utilization of renewable energy sources (RESs) and rapid electrification of transportation. However, smart grids must address the intermittency of solar and wind power while accommodating the growing demand from electric vehicles (EVs). Hence, in this paper, a data-driven energy management system (EMS) is proposed that combines multivariable forecasting, generation scheduling, and EV charging coordination in a dual-level decentralized framework to increase the efficiency, reliability, and scalability of modern power grids. First, short-term forecasts of solar irradiance, wind speed, and load demand are addressed via five machine learning models ranging from nonlinear to ensemble models. Accordingly, a unified CatBoost-based platform for forecasting these three variables is selected because of its better performance and accuracy. These forecasts are subsequently utilized in a mixed-integer linear programming (MILP) framework for optimal generation scheduling in the considered network, fulfilling load demand at reduced electricity and emission costs while maintaining grid stability. Finally, a priority-based scheme is proposed for charging/discharging coordination of the aggregated EVs, minimizing demand variability while fulfilling vehicles&rsquo; charging needs and maintaining their batteries&rsquo; lifetime. The superiority of the proposed method lies in integrating a multivariable forecasting pipeline, linear MILP generation scheduling, and battery-health-aware V2G coordination in a unified decoupled framework, unlike many recent frontier works that treat these capabilities independently. 
Simulation results, under different scenarios, confirm that the proposed intelligent EMS can significantly reduce operational fluctuations, satisfy load and EV demands, optimize RES utilization, and support system cost-effectiveness, sustainability, and resilience.</p>
	]]></content:encoded>

	<dc:title>An Integrated Forecasting and Scheduling Energy Management Framework for Renewable-Supported Grids with Aggregated Electric Vehicles</dc:title>
			<dc:creator>Rania A. Ibrahim</dc:creator>
			<dc:creator>Ahmed M. Abdelrahim</dc:creator>
			<dc:creator>Abdelaziz Elwakil</dc:creator>
			<dc:creator>Nahla E. Zakzouk</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14030185</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-03-19</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-03-19</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>185</prism:startingPage>
		<prism:doi>10.3390/technologies14030185</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/3/185</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/3/184">

	<title>Technologies, Vol. 14, Pages 184: Artificial Intelligence in the Diagnosis and Prognosis of Osteosarcoma: A Decade of Progress and Future Directions</title>
	<link>https://www.mdpi.com/2227-7080/14/3/184</link>
	<description>Osteosarcoma is the most frequent cause of primary malignant bone tumors in childhood and adolescence. It is aggressive and may be associated with early metastasis, making patient management difficult. In this research, modern AI models for the diagnosis and prognosis of osteosarcoma were screened and analyzed. Our review searched for articles that used AI for the diagnosis and prognosis of osteosarcoma over the past 10 years, including AI in predicting the staging of tumors, predicting chemotherapy response, identifying prognostic biomarkers and assessing risk of metastasis. The models performed well based on AUC and C-index, with considerable discriminatory power, and were superior to the classical clinical methods analyzed. Through the identification of already existing deficiencies in the literature, this review pointed out a need for future research trends to explore with respect to prospective validation, multimodal data fusion and translation of AI tools into clinical routine.</description>
	<pubDate>2026-03-19</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 184: Artificial Intelligence in the Diagnosis and Prognosis of Osteosarcoma: A Decade of Progress and Future Directions</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/3/184">doi: 10.3390/technologies14030184</a></p>
	<p>Authors:
		Ralph Abou Ghayda
		Karim Kalout
		Joudy Eter
		Mario Abdelnour
		Hilda E. Ghadieh
		Sami Azar
		Frederic Harb
		</p>
	<p>Osteosarcoma is the most frequent cause of primary malignant bone tumors in childhood and adolescence. It is aggressive and may be associated with early metastasis, making patient management difficult. In this research, modern AI models for the diagnosis and prognosis of osteosarcoma were screened and analyzed. Our review searched for articles that used AI for the diagnosis and prognosis of osteosarcoma over the past 10 years, including AI in predicting the staging of tumors, predicting chemotherapy response, identifying prognostic biomarkers and assessing risk of metastasis. The models performed well based on AUC and C-index, with considerable discriminatory power, and were superior to the classical clinical methods analyzed. Through the identification of already existing deficiencies in the literature, this review pointed out a need for future research trends to explore with respect to prospective validation, multimodal data fusion and translation of AI tools into clinical routine.</p>
	]]></content:encoded>

	<dc:title>Artificial Intelligence in the Diagnosis and Prognosis of Osteosarcoma: A Decade of Progress and Future Directions</dc:title>
			<dc:creator>Ralph Abou Ghayda</dc:creator>
			<dc:creator>Karim Kalout</dc:creator>
			<dc:creator>Joudy Eter</dc:creator>
			<dc:creator>Mario Abdelnour</dc:creator>
			<dc:creator>Hilda E. Ghadieh</dc:creator>
			<dc:creator>Sami Azar</dc:creator>
			<dc:creator>Frederic Harb</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14030184</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-03-19</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-03-19</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>184</prism:startingPage>
		<prism:doi>10.3390/technologies14030184</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/3/184</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/3/183">

	<title>Technologies, Vol. 14, Pages 183: The Role of Artificial Intelligence in the Detection and Diagnosis of Neurocognitive Disorders: A Systematic Review</title>
	<link>https://www.mdpi.com/2227-7080/14/3/183</link>
	<description>Dementia represents a major healthcare challenge, as pathological changes often occur years before overt symptoms. Early manifestations such as mild cognitive impairment (MCI) and subjective cognitive decline (SCD) represent critical transitional stages between normal aging and dementia. Thus, distinguishing these conditions (i.e., MCI and SCD) and determining their potential evolution into dementia remains crucial. However, current clinical tools, mainly neuroimaging and neuropsychological assessments, are not always clearly interpretable and are often resource-intensive. In recent years, artificial intelligence (AI), including machine learning (ML) and deep learning (DL), has demonstrated promising potential in early detection, progression prediction, and differential diagnosis of neurocognitive disorders. This systematic review aims to synthesize current evidence on the application of AI-based approaches to improve diagnostic accuracy and prognostic assessments in dementia. A comprehensive literature search of studies published between 2015 and 2025 was conducted across PubMed/MEDLINE, Scopus, and Web of Science, following PRISMA 2020 guidelines. Studies were evaluated for data modality, methodological rigor, performance metrics, and clinical applicability. Seventeen (17) studies, of which twelve (12) are primary studies and five (5) are secondary studies, examining AI applications in detecting and diagnosing neurocognitive disorders (NCDs) in adults with dementia, MCI, or SCD were included. Results indicate that AI models, particularly DL applied to neuroimaging, electrophysiological data, speech and language features, biomarkers, and digital behavioral data, achieve high diagnostic accuracy in distinguishing MCI, Alzheimer&amp;rsquo;s disease, and healthy aging. Predictive models also show potential in forecasting conversion from MCI to dementia and monitoring cognitive trajectories via wearable or smart-home technologies. 
Nonetheless, heterogeneity, limited external validation, and methodological inconsistencies hinder clinical translation. In conclusion, AI represents a rapidly evolving and promising tool for early detection and monitoring of neurocognitive disorders. Collectively, the reviewed studies underscore the need for standardized pipelines, larger multicenter datasets, and explainable AI frameworks to enable effective clinical implementation.</description>
	<pubDate>2026-03-18</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 183: The Role of Artificial Intelligence in the Detection and Diagnosis of Neurocognitive Disorders: A Systematic Review</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/3/183">doi: 10.3390/technologies14030183</a></p>
	<p>Authors:
		Pasqualina Perna
		Alessandra Claudi
		Fabrizio Stasolla
		Raffaele Nappo
		</p>
	<p>Dementia represents a major healthcare challenge, as pathological changes often occur years before overt symptoms. Early manifestations such as mild cognitive impairment (MCI) and subjective cognitive decline (SCD) represent critical transitional stages between normal aging and dementia. Thus, distinguishing these conditions (i.e., MCI and SCD) and determining their potential evolution into dementia remains crucial. However, current clinical tools, mainly neuroimaging and neuropsychological assessments, are not always clearly interpretable and are often resource-intensive. In recent years, artificial intelligence (AI), including machine learning (ML) and deep learning (DL), has demonstrated promising potential in early detection, progression prediction, and differential diagnosis of neurocognitive disorders. This systematic review aims to synthesize current evidence on the application of AI-based approaches to improve diagnostic accuracy and prognostic assessments in dementia. A comprehensive literature search of studies published between 2015 and 2025 was conducted across PubMed/MEDLINE, Scopus, and Web of Science, following PRISMA 2020 guidelines. Studies were evaluated for data modality, methodological rigor, performance metrics, and clinical applicability. Seventeen (17) studies, of which twelve (12) are primary studies and five (5) are secondary studies, examining AI applications in detecting and diagnosing neurocognitive disorders (NCDs) in adults with dementia, MCI, or SCD were included. Results indicate that AI models, particularly DL applied to neuroimaging, electrophysiological data, speech and language features, biomarkers, and digital behavioral data, achieve high diagnostic accuracy in distinguishing MCI, Alzheimer&rsquo;s disease, and healthy aging. Predictive models also show potential in forecasting conversion from MCI to dementia and monitoring cognitive trajectories via wearable or smart-home technologies. 
Nonetheless, heterogeneity, limited external validation, and methodological inconsistencies hinder clinical translation. In conclusion, AI represents a rapidly evolving and promising tool for early detection and monitoring of neurocognitive disorders. Collectively, the reviewed studies underscore the need for standardized pipelines, larger multicenter datasets, and explainable AI frameworks to enable effective clinical implementation.</p>
	]]></content:encoded>

	<dc:title>The Role of Artificial Intelligence in the Detection and Diagnosis of Neurocognitive Disorders: A Systematic Review</dc:title>
			<dc:creator>Pasqualina Perna</dc:creator>
			<dc:creator>Alessandra Claudi</dc:creator>
			<dc:creator>Fabrizio Stasolla</dc:creator>
			<dc:creator>Raffaele Nappo</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14030183</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-03-18</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-03-18</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Systematic Review</prism:section>
	<prism:startingPage>183</prism:startingPage>
		<prism:doi>10.3390/technologies14030183</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/3/183</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-7080/14/3/182">

	<title>Technologies, Vol. 14, Pages 182: Artificial Intelligence Applications for Cleaner Production and Sustainable Development in Southeast Asia: A Systematic Review and Future Research Directions</title>
	<link>https://www.mdpi.com/2227-7080/14/3/182</link>
	<description>Artificial intelligence (AI) has reshaped various aspects of human lives, particularly through its capabilities to address complex sustainability challenges. Despite the rapid expansion of AI applications, their contribution to cleaner production and sustainable development remains underexplored, especially in developing nations. In Southeast Asia (SEA), where AI adoption has grown substantially across environmental, economic, and social dimensions, research that examines its role in cleaner production outcomes remains fragmented. In view of this gap, this study conducts a systematic literature review (SLR) of AI applications related to cleaner production and sustainable development by examining relevant themes, application areas, and sustainability dimensions addressed by AI, while evaluating the maturity of AI methodologies, alignment with cleaner production outcomes, and integration with circular economy and resource efficiency goals. Moreover, it investigates the barriers and challenges that constrain AI application and offers future research directions to advance AI deployment for cleaner production and sustainable development across SEA countries.</description>
	<pubDate>2026-03-17</pubDate>

	<content:encoded><![CDATA[
	<p><b>Technologies, Vol. 14, Pages 182: Artificial Intelligence Applications for Cleaner Production and Sustainable Development in Southeast Asia: A Systematic Review and Future Research Directions</b></p>
	<p>Technologies <a href="https://www.mdpi.com/2227-7080/14/3/182">doi: 10.3390/technologies14030182</a></p>
	<p>Authors:
		Victor James C. Escolano
		Yann-Mey Yee
		Alexander A. Hernandez
		Charmine Sheena R. Saflor
		Do Van Nang
		Ace C. Lagman
		</p>
	<p>Artificial intelligence (AI) has reshaped various aspects of human lives, particularly through its capabilities to address complex sustainability challenges. Despite the rapid expansion of AI applications, their contribution to cleaner production and sustainable development remains underexplored, especially in developing nations. In Southeast Asia (SEA), where AI adoption has grown substantially across environmental, economic, and social dimensions, research that examines its role in cleaner production outcomes remains fragmented. In view of this gap, this study conducts a systematic literature review (SLR) of AI applications related to cleaner production and sustainable development by examining relevant themes, application areas, and sustainability dimensions addressed by AI, while evaluating the maturity of AI methodologies, alignment with cleaner production outcomes, and integration with circular economy and resource efficiency goals. Moreover, it investigates the barriers and challenges that constrain AI application and offers future research directions to advance AI deployment for cleaner production and sustainable development across SEA countries.</p>
	]]></content:encoded>

	<dc:title>Artificial Intelligence Applications for Cleaner Production and Sustainable Development in Southeast Asia: A Systematic Review and Future Research Directions</dc:title>
			<dc:creator>Victor James C. Escolano</dc:creator>
			<dc:creator>Yann-Mey Yee</dc:creator>
			<dc:creator>Alexander A. Hernandez</dc:creator>
			<dc:creator>Charmine Sheena R. Saflor</dc:creator>
			<dc:creator>Do Van Nang</dc:creator>
			<dc:creator>Ace C. Lagman</dc:creator>
		<dc:identifier>doi: 10.3390/technologies14030182</dc:identifier>
	<dc:source>Technologies</dc:source>
	<dc:date>2026-03-17</dc:date>

	<prism:publicationName>Technologies</prism:publicationName>
	<prism:publicationDate>2026-03-17</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Systematic Review</prism:section>
	<prism:startingPage>182</prism:startingPage>
		<prism:doi>10.3390/technologies14030182</prism:doi>
	<prism:url>https://www.mdpi.com/2227-7080/14/3/182</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
    
<cc:License rdf:about="https://creativecommons.org/licenses/by/4.0/">
	<cc:permits rdf:resource="https://creativecommons.org/ns#Reproduction" />
	<cc:permits rdf:resource="https://creativecommons.org/ns#Distribution" />
	<cc:permits rdf:resource="https://creativecommons.org/ns#DerivativeWorks" />
</cc:License>

</rdf:RDF>
