<?xml version="1.0" encoding="UTF-8"?>
<rdf:RDF xmlns="http://purl.org/rss/1.0/"
 xmlns:dc="http://purl.org/dc/elements/1.1/"
 xmlns:dcterms="http://purl.org/dc/terms/"
 xmlns:cc="http://web.resource.org/cc/"
 xmlns:prism="http://prismstandard.org/namespaces/basic/2.0/"
 xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
 xmlns:admin="http://webns.net/mvcb/"
 xmlns:content="http://purl.org/rss/1.0/modules/content/">
    <channel rdf:about="https://www.mdpi.com/rss/journal/computation">
		<title>Computation</title>
		<description>Latest open access articles published in Computation at https://www.mdpi.com/journal/computation</description>
		<link>https://www.mdpi.com/journal/computation</link>
		<admin:generatorAgent rdf:resource="https://www.mdpi.com/journal/computation"/>
		<admin:errorReportsTo rdf:resource="mailto:support@mdpi.com"/>
		<dc:publisher>MDPI</dc:publisher>
		<dc:language>en</dc:language>
		<dc:rights>Creative Commons Attribution (CC-BY)</dc:rights>
						<prism:copyright>MDPI</prism:copyright>
		<prism:rightsAgent>support@mdpi.com</prism:rightsAgent>
		<image rdf:resource="https://pub.mdpi-res.com/img/design/mdpi-pub-logo.png?13cf3b5bd783e021&amp;1777290039"/>
				<items>
			<rdf:Seq>
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/5/101" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/5/100" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/5/99" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/5/98" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/5/97" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/5/96" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/4/95" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/4/94" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/4/93" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/4/92" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/4/91" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/4/90" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/4/89" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/4/88" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/4/87" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/4/86" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/4/85" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/4/84" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/4/83" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/4/82" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/4/81" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/4/80" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/4/79" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/4/78" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/4/77" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/4/76" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/4/75" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/3/74" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/3/73" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/3/72" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/3/71" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/3/70" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/3/69" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/3/68" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/3/67" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/3/66" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/3/65" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/3/64" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/3/63" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/3/62" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/3/61" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/3/57" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/3/60" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/3/59" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/3/58" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/3/56" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/2/55" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/2/54" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/2/53" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/2/52" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/2/51" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/2/50" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/2/49" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/2/48" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/2/47" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/2/46" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/2/45" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/2/44" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/2/43" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/2/42" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/2/41" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/2/40" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/2/37" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/2/39" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/2/38" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/2/35" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/2/36" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/2/34" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/2/33" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/2/32" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/2/31" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/2/30" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/2/29" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/2/28" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/1/27" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/1/26" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/1/25" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/1/24" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/1/23" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/1/22" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/1/21" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/1/20" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/1/19" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/1/18" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/1/17" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/1/16" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/1/15" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/1/14" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/1/13" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/1/12" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/1/11" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/1/10" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/1/9" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/1/8" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/1/7" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/1/6" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/1/5" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/1/4" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/1/3" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2079-3197/14/1/2" />
                    	</rdf:Seq>
		</items>
				<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/" />
	</channel>

        <item rdf:about="https://www.mdpi.com/2079-3197/14/5/101">

	<title>Computation, Vol. 14, Pages 101: What Is an Oval, Officially and Overall? Old and New Mathematical Descriptions</title>
	<link>https://www.mdpi.com/2079-3197/14/5/101</link>
	<description>Deriving from the Latin &amp;ldquo;ovum&amp;rdquo; (egg), the oval is a commonly used term, but does not have the status of a standard geometric figure like a circle or ellipse. Consequently, the oval lacks both a mathematical descriptive basis to attribute a set of key geometric parameters and an elegant formula to describe its contours. Herein, we consider the basis for deriving the formula of an oval for typical egg profiles. Specifically, these are round, ellipsoid, classic oval, pyriform (conical) and biconical shapes. To do this, we adhered to four basic postulates: (i) the ability to describe all possible egg shapes; (ii) a minimum set of measurable geometric parameters; (iii) the application of some universal indices (ratios of key geometric dimensions) to describe mathematical models; (iv) conformity with the &amp;ldquo;Main Axiom of the Mathematical Formula of the Bird&amp;rsquo;s Egg.&amp;rdquo; Additionally, we sought to comply with the principles of mathematical elegance. Following these theoretical assumptions and practical verification, we obtained a mathematically supported, elegant formula for this well-known but non-standardized geometric figure. The derived oval geometry equation will find use in applied problems of biology, construction, engineering and school curricula, alongside the classical figures of the circle and ellipse.</description>
	<pubDate>2026-04-27</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 101: What Is an Oval, Officially and Overall? Old and New Mathematical Descriptions</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/5/101">doi: 10.3390/computation14050101</a></p>
	<p>Authors:
		Valeriy G. Narushin
		Stefan T. Orszulik
		Michael N. Romanov
		Darren K. Griffin
		</p>
	<p>Deriving from the Latin &ldquo;ovum&rdquo; (egg), the oval is a commonly used term, but does not have the status of a standard geometric figure like a circle or ellipse. Consequently, the oval lacks both a mathematical descriptive basis to attribute a set of key geometric parameters and an elegant formula to describe its contours. Herein, we consider the basis for deriving the formula of an oval for typical egg profiles. Specifically, these are round, ellipsoid, classic oval, pyriform (conical) and biconical shapes. To do this, we adhered to four basic postulates: (i) the ability to describe all possible egg shapes; (ii) a minimum set of measurable geometric parameters; (iii) the application of some universal indices (ratios of key geometric dimensions) to describe mathematical models; (iv) conformity with the &ldquo;Main Axiom of the Mathematical Formula of the Bird&rsquo;s Egg.&rdquo; Additionally, we sought to comply with the principles of mathematical elegance. Following these theoretical assumptions and practical verification, we obtained a mathematically supported, elegant formula for this well-known but non-standardized geometric figure. The derived oval geometry equation will find use in applied problems of biology, construction, engineering and school curricula, alongside the classical figures of the circle and ellipse.</p>
	]]></content:encoded>

	<dc:title>What Is an Oval, Officially and Overall? Old and New Mathematical Descriptions</dc:title>
			<dc:creator>Valeriy G. Narushin</dc:creator>
			<dc:creator>Stefan T. Orszulik</dc:creator>
			<dc:creator>Michael N. Romanov</dc:creator>
			<dc:creator>Darren K. Griffin</dc:creator>
		<dc:identifier>doi: 10.3390/computation14050101</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-04-27</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-04-27</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>101</prism:startingPage>
		<prism:doi>10.3390/computation14050101</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/5/101</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/5/100">

	<title>Computation, Vol. 14, Pages 100: Micro-Macro Modeling of Inherent Cognitive Biases in 5-Point Likert Scales: Uncovering the Non-Linearity of Critical Sample Sizes for Capturing Identical Statistical Populations</title>
	<link>https://www.mdpi.com/2079-3197/14/5/100</link>
	<description>As social infrastructure intensively developed during the high economic growth period of the 1970s faces simultaneous aging, there is an urgent need to transition from conventional reactive maintenance to preventive maintenance utilizing various data (data-driven asset management). However, the greatest barrier in practice is that inspection data is unevenly distributed in analog formats such as paper and unstructured files, and heavily relies on the subjective visual evaluation of expert engineers (e.g., discrete graded evaluations from A to D). The intervention of this &amp;ldquo;Assessor Bias&amp;rdquo; makes it difficult to ensure the robustness required for direct statistical analysis. This paper serves as a bridge between this analog expert knowledge and quantitative data science. It formulates human cognitive conflicts (true state, peer pressure, avoidance of cognitive load) using the distance-decay model of the Analytic Hierarchy Process (AHP) and the Softmax function, constructing a micro-macro link model accompanied by stochastic variations. Through large-scale multi-agent simulations (N=107) validating the model&amp;rsquo;s convergence, it was demonstrated that in long-tail distributions formed under peer pressure, macroscopic statistical distance metrics such as the Kullback-Leibler (KL) divergence ignore the fact that a small number of true signals are non-linearly suppressed, causing a statistical misinterpretation that &amp;ldquo;the error is within an acceptable range&amp;rdquo;. This implies that as long as macroscopic statistical indicators are over-trusted, signs of critical deterioration (minorities) will be structurally marginalized. 
Returning to the debate on &amp;ldquo;Homogeneity (Homogenit&amp;auml;t)&amp;rdquo; in German social statistics, this paper advocates that in order to realize objective &amp;ldquo;Micro-segmentation of Homogeneous Statistical Populations,&amp;rdquo; a paradigm shift from qualitative methods relying on human intuition to quantitative methods incorporating multi-criteria decision making is essential, rather than simply expanding the sample size.</description>
	<pubDate>2026-04-27</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 100: Micro-Macro Modeling of Inherent Cognitive Biases in 5-Point Likert Scales: Uncovering the Non-Linearity of Critical Sample Sizes for Capturing Identical Statistical Populations</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/5/100">doi: 10.3390/computation14050100</a></p>
	<p>Authors:
		Yasuko Kawahata
		</p>
	<p>As social infrastructure intensively developed during the high economic growth period of the 1970s faces simultaneous aging, there is an urgent need to transition from conventional reactive maintenance to preventive maintenance utilizing various data (data-driven asset management). However, the greatest barrier in practice is that inspection data is unevenly distributed in analog formats such as paper and unstructured files, and heavily relies on the subjective visual evaluation of expert engineers (e.g., discrete graded evaluations from A to D). The intervention of this &ldquo;Assessor Bias&rdquo; makes it difficult to ensure the robustness required for direct statistical analysis. This paper serves as a bridge between this analog expert knowledge and quantitative data science. It formulates human cognitive conflicts (true state, peer pressure, avoidance of cognitive load) using the distance-decay model of the Analytic Hierarchy Process (AHP) and the Softmax function, constructing a micro-macro link model accompanied by stochastic variations. Through large-scale multi-agent simulations (N=107) validating the model&rsquo;s convergence, it was demonstrated that in long-tail distributions formed under peer pressure, macroscopic statistical distance metrics such as the Kullback-Leibler (KL) divergence ignore the fact that a small number of true signals are non-linearly suppressed, causing a statistical misinterpretation that &ldquo;the error is within an acceptable range&rdquo;. This implies that as long as macroscopic statistical indicators are over-trusted, signs of critical deterioration (minorities) will be structurally marginalized. 
Returning to the debate on &ldquo;Homogeneity (Homogenit&auml;t)&rdquo; in German social statistics, this paper advocates that in order to realize objective &ldquo;Micro-segmentation of Homogeneous Statistical Populations,&rdquo; a paradigm shift from qualitative methods relying on human intuition to quantitative methods incorporating multi-criteria decision making is essential, rather than simply expanding the sample size.</p>
	]]></content:encoded>

	<dc:title>Micro-Macro Modeling of Inherent Cognitive Biases in 5-Point Likert Scales: Uncovering the Non-Linearity of Critical Sample Sizes for Capturing Identical Statistical Populations</dc:title>
			<dc:creator>Yasuko Kawahata</dc:creator>
		<dc:identifier>doi: 10.3390/computation14050100</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-04-27</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-04-27</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>100</prism:startingPage>
		<prism:doi>10.3390/computation14050100</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/5/100</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/5/99">

	<title>Computation, Vol. 14, Pages 99: A Spectrum-Driven Hierarchical Learning Network for Aero-Engine Defect Segmentation</title>
	<link>https://www.mdpi.com/2079-3197/14/5/99</link>
	<description>Aero-engine defects often exhibit micro-scale and high-frequency characteristics under complex metallic textures, which makes precise segmentation difficult. Most existing pixel-level methods rely on spatial-domain modeling and lack frequency-domain decoupling. As a result, high-frequency details are easily hidden by low-frequency background information. In addition, repeated downsampling weakens the representation of fine-grained structures, leading to inaccurate boundary localization and limited robustness. To address these issues, a spectrum-driven hierarchical learning network is proposed for aero-engine defect segmentation. First, a dual-band spectral module is constructed using the discrete cosine transform to separate high-frequency and low-frequency components, providing stable and physically meaningful frequency-domain priors for the network. Second, a detail-guided module is designed where high-frequency features adaptively guide skip connections, compensating information loss during encoding and improving boundary recovery. Furthermore, a low-frequency-driven region-aware modeling module is developed. The internal defect regions, boundary areas, and background regions are modeled hierarchically. A dynamic hyper-kernel generation mechanism performs region-sensitive convolutional modeling, improving adaptation to complex structural variations. Extensive experiments on the Turbo19 and NEU-Seg datasets demonstrate that the proposed method produces accurate defect boundaries and achieves mIoU scores of 89.82% and 91.44%, improving over the second-best method by 5.22% and 4.42%, respectively.</description>
	<pubDate>2026-04-25</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 99: A Spectrum-Driven Hierarchical Learning Network for Aero-Engine Defect Segmentation</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/5/99">doi: 10.3390/computation14050099</a></p>
	<p>Authors:
		Yining Xie
		Aoqi Shen
		Haochen Qi
		Jing Zhao
		Jianpeng Li
		Xichun Pan
		Anlong Zhang
		</p>
	<p>Aero-engine defects often exhibit micro-scale and high-frequency characteristics under complex metallic textures, which makes precise segmentation difficult. Most existing pixel-level methods rely on spatial-domain modeling and lack frequency-domain decoupling. As a result, high-frequency details are easily hidden by low-frequency background information. In addition, repeated downsampling weakens the representation of fine-grained structures, leading to inaccurate boundary localization and limited robustness. To address these issues, a spectrum-driven hierarchical learning network is proposed for aero-engine defect segmentation. First, a dual-band spectral module is constructed using the discrete cosine transform to separate high-frequency and low-frequency components, providing stable and physically meaningful frequency-domain priors for the network. Second, a detail-guided module is designed where high-frequency features adaptively guide skip connections, compensating information loss during encoding and improving boundary recovery. Furthermore, a low-frequency-driven region-aware modeling module is developed. The internal defect regions, boundary areas, and background regions are modeled hierarchically. A dynamic hyper-kernel generation mechanism performs region-sensitive convolutional modeling, improving adaptation to complex structural variations. Extensive experiments on the Turbo19 and NEU-Seg datasets demonstrate that the proposed method produces accurate defect boundaries and achieves mIoU scores of 89.82% and 91.44%, improving over the second-best method by 5.22% and 4.42%, respectively.</p>
	]]></content:encoded>

	<dc:title>A Spectrum-Driven Hierarchical Learning Network for Aero-Engine Defect Segmentation</dc:title>
			<dc:creator>Yining Xie</dc:creator>
			<dc:creator>Aoqi Shen</dc:creator>
			<dc:creator>Haochen Qi</dc:creator>
			<dc:creator>Jing Zhao</dc:creator>
			<dc:creator>Jianpeng Li</dc:creator>
			<dc:creator>Xichun Pan</dc:creator>
			<dc:creator>Anlong Zhang</dc:creator>
		<dc:identifier>doi: 10.3390/computation14050099</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-04-25</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-04-25</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>99</prism:startingPage>
		<prism:doi>10.3390/computation14050099</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/5/99</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/5/98">

	<title>Computation, Vol. 14, Pages 98: Securing Tool-Using AI Agents Against Injection and Authority Misuse</title>
	<link>https://www.mdpi.com/2079-3197/14/5/98</link>
	<description>Tool-using AI agents couple a language model with controller logic, memory, and external tools such as browsers, email, calendars, file systems, and transaction APIs. This architecture expands capability, but it also enlarges the security boundary: agents routinely ingest untrusted content while holding privileges that can reveal private data and trigger external side effects. The resulting failures are not limited to poor text generation; they include prompt injection, indirect injection through tool outputs, confused-deputy behavior, unauthorized actions, and misleading claims about the tool state. Because large-scale testing on deployed products is difficult, vendor-specific, and ethically sensitive, we present a transparent, theoretical simulation-based framework for evaluating user-facing risk in tool-using agents. The methodological contribution is a formal threat model that separates compromise, harm, and severity, and a Monte Carlo evaluation pipeline that maps architectural choices (permissions, retrieval, memory exposure, and approvals) and defensive controls to comparable outcome metrics. We instantiate the framework for six representative threat scenarios and nine defense configurations, reporting attack success rate (ASR), benign task success, latency overhead, and severity-weighted harm. Across scenarios, the least-privilege tool design is the strongest single broad control, human-in-the-loop approvals sharply reduce high-impact actions and exports but degrade under user error and habituation, retrieval allowlisting nearly eliminates indirect injection while leaving other channels largely unaffected, and rate limiting reduces tail severity more than ASR. 
These results position agent safety as an architectural and operational problem and because they arise from an assumption-explicit simulator rather than field measurements, should be read as comparative design guidance rather than incident-rate estimates for any deployed product.</description>
	<pubDate>2026-04-25</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 98: Securing Tool-Using AI Agents Against Injection and Authority Misuse</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/5/98">doi: 10.3390/computation14050098</a></p>
	<p>Authors:
		Hasan Kanaker
		Hussam Fakhouri
		Nader Abdel Karim
		Maher Abuhamdeh
		Nurul Halimatul Asmak Ismail
		Sandi Fakhouri
		</p>
	<p>Tool-using AI agents couple a language model with controller logic, memory, and external tools such as browsers, email, calendars, file systems, and transaction APIs. This architecture expands capability, but it also enlarges the security boundary: agents routinely ingest untrusted content while holding privileges that can reveal private data and trigger external side effects. The resulting failures are not limited to poor text generation; they include prompt injection, indirect injection through tool outputs, confused-deputy behavior, unauthorized actions, and misleading claims about the tool state. Because large-scale testing on deployed products is difficult, vendor-specific, and ethically sensitive, we present a transparent, theoretical simulation-based framework for evaluating user-facing risk in tool-using agents. The methodological contribution is a formal threat model that separates compromise, harm, and severity, and a Monte Carlo evaluation pipeline that maps architectural choices (permissions, retrieval, memory exposure, and approvals) and defensive controls to comparable outcome metrics. We instantiate the framework for six representative threat scenarios and nine defense configurations, reporting attack success rate (ASR), benign task success, latency overhead, and severity-weighted harm. Across scenarios, the least-privilege tool design is the strongest single broad control, human-in-the-loop approvals sharply reduce high-impact actions and exports but degrade under user error and habituation, retrieval allowlisting nearly eliminates indirect injection while leaving other channels largely unaffected, and rate limiting reduces tail severity more than ASR. These results position agent safety as an architectural and operational problem and because they arise from an assumption-explicit simulator rather than field measurements, should be read as comparative design guidance rather than incident-rate estimates for any deployed product.</p>
	]]></content:encoded>

	<dc:title>Securing Tool-Using AI Agents Against Injection and Authority Misuse</dc:title>
			<dc:creator>Hasan Kanaker</dc:creator>
			<dc:creator>Hussam Fakhouri</dc:creator>
			<dc:creator>Nader Abdel Karim</dc:creator>
			<dc:creator>Maher Abuhamdeh</dc:creator>
			<dc:creator>Nurul Halimatul Asmak Ismail</dc:creator>
			<dc:creator>Sandi Fakhouri</dc:creator>
		<dc:identifier>doi: 10.3390/computation14050098</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-04-25</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-04-25</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>98</prism:startingPage>
		<prism:doi>10.3390/computation14050098</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/5/98</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/5/97">

	<title>Computation, Vol. 14, Pages 97: AI-Enabled Governance: Board Gender Diversity and Corporate Tax Avoidance</title>
	<link>https://www.mdpi.com/2079-3197/14/5/97</link>
	<description>Corporate tax avoidance has become a major governance and fiscal sustainability concern, particularly in developing economies where corporate tax revenues constitute a critical source of public financing. While prior research suggests that board gender diversity (BGD) enhances ethical oversight and monitoring, its effectiveness in constraining aggressive tax planning may depend on firms&amp;rsquo; informational and technological environments. This study examines whether artificial intelligence (AI) capability strengthens the governance role of BGD in reducing corporate tax avoidance. Using a balanced panel of 1586 non-financial firms from developing economies over the period 2009&amp;ndash;2023, the analysis employs firm FE models and dynamic two-step System GMM estimations to address unobserved heterogeneity, endogeneity, and the persistence of corporate tax behavior. The results indicate that BGD is positively associated with effective tax rates, implying lower levels of corporate tax avoidance. Furthermore, AI capability&amp;mdash;measured using a lagged specification&amp;mdash;significantly strengthens this relationship, suggesting that firms with higher AI adoption exhibit a stronger governance effect of gender-diverse boards on tax compliance. Additional robustness tests&amp;mdash;including alternative tax avoidance measures, alternative BGD specifications, heterogeneity analysis, and selection-bias corrections using Heckman, propensity score matching (PSM), and instrumental variable (2SLS) approaches&amp;mdash;confirm the stability of the findings. Overall, the results highlight the complementary role of technological capability and board diversity in strengthening corporate governance (CG) and fiscal discipline in developing economies.</description>
	<pubDate>2026-04-23</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 97: AI-Enabled Governance: Board Gender Diversity and Corporate Tax Avoidance</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/5/97">doi: 10.3390/computation14050097</a></p>
	<p>Authors:
		Marwan Mansour
		Mo’taz Al Zobi
		Ahmad Marei
		Luay Daoud
		Nour Ibrahim Kurdi
		</p>
	<p>Corporate tax avoidance has become a major governance and fiscal sustainability concern, particularly in developing economies where corporate tax revenues constitute a critical source of public financing. While prior research suggests that board gender diversity (BGD) enhances ethical oversight and monitoring, its effectiveness in constraining aggressive tax planning may depend on firms&rsquo; informational and technological environments. This study examines whether artificial intelligence (AI) capability strengthens the governance role of BGD in reducing corporate tax avoidance. Using a balanced panel of 1586 non-financial firms from developing economies over the period 2009&ndash;2023, the analysis employs firm FE models and dynamic two-step System GMM estimations to address unobserved heterogeneity, endogeneity, and the persistence of corporate tax behavior. The results indicate that BGD is positively associated with effective tax rates, implying lower levels of corporate tax avoidance. Furthermore, AI capability&mdash;measured using a lagged specification&mdash;significantly strengthens this relationship, suggesting that firms with higher AI adoption exhibit a stronger governance effect of gender-diverse boards on tax compliance. Additional robustness tests&mdash;including alternative tax avoidance measures, alternative BGD specifications, heterogeneity analysis, and selection-bias corrections using Heckman, propensity score matching (PSM), and instrumental variable (2SLS) approaches&mdash;confirm the stability of the findings. Overall, the results highlight the complementary role of technological capability and board diversity in strengthening corporate governance (CG) and fiscal discipline in developing economies.</p>
	]]></content:encoded>

	<dc:title>AI-Enabled Governance: Board Gender Diversity and Corporate Tax Avoidance</dc:title>
			<dc:creator>Marwan Mansour</dc:creator>
			<dc:creator>Mo’taz Al Zobi</dc:creator>
			<dc:creator>Ahmad Marei</dc:creator>
			<dc:creator>Luay Daoud</dc:creator>
			<dc:creator>Nour Ibrahim Kurdi</dc:creator>
		<dc:identifier>doi: 10.3390/computation14050097</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-04-23</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-04-23</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>97</prism:startingPage>
		<prism:doi>10.3390/computation14050097</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/5/97</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/5/96">

	<title>Computation, Vol. 14, Pages 96: Object Re-Identification Method for Air-to-Ground Targets Based on Neighborhood Feature Centralization Attention</title>
	<link>https://www.mdpi.com/2079-3197/14/5/96</link>
	<description>To address the core challenges in air-to-ground target re-identification (ReID), including network focus on invalid background information, poor adaptability to nonlinear feature distribution, and insufficient cross-domain generalization, this paper proposes a novel air-to-ground ReID framework based on Neighborhood Feature Centralization Attention (NFCA). On the basis of Coordinate Attention, the framework introduces a parameter-free Neighborhood Feature Centralization mechanism to build a lightweight attention module, which enhances cross-feature semantic interaction and suppresses background noise while retaining precise position encoding. It achieves end-to-end direct optimization of sample pair similarity through binary cross-entropy loss, eliminating the proxy task bias of traditional classification loss and adapting to the nonlinear structure of feature space. A multi-source data-driven training strategy is constructed by fusing ReID datasets and general classification datasets, which expands the coverage of feature space and narrows the distribution gap between training data and real air-to-ground scenarios without additional manual annotation. Experiments show that the proposed method achieves leading mAP values on the self-developed UAV air-to-ground dataset JC-1, the public person ReID dataset Market-1501, and the public vehicle ReID dataset VehicleID. Sufficient statistical validation, ablation experiments and cross-domain tests verify the advancement, reliability and generalization of the proposed method in complex air-to-ground scenarios.</description>
	<pubDate>2026-04-22</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 96: Object Re-Identification Method for Air-to-Ground Targets Based on Neighborhood Feature Centralization Attention</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/5/96">doi: 10.3390/computation14050096</a></p>
	<p>Authors:
		Tian Yao
		Yong Xu
		Yue Ma
		Hongtao Yan
		Haihang Xu
		An Wang
		</p>
	<p>To address the core challenges in air-to-ground target re-identification (ReID), including network focus on invalid background information, poor adaptability to nonlinear feature distribution, and insufficient cross-domain generalization, this paper proposes a novel air-to-ground ReID framework based on Neighborhood Feature Centralization Attention (NFCA). On the basis of Coordinate Attention, the framework introduces a parameter-free Neighborhood Feature Centralization mechanism to build a lightweight attention module, which enhances cross-feature semantic interaction and suppresses background noise while retaining precise position encoding. It achieves end-to-end direct optimization of sample pair similarity through binary cross-entropy loss, eliminating the proxy task bias of traditional classification loss and adapting to the nonlinear structure of feature space. A multi-source data-driven training strategy is constructed by fusing ReID datasets and general classification datasets, which expands the coverage of feature space and narrows the distribution gap between training data and real air-to-ground scenarios without additional manual annotation. Experiments show that the proposed method achieves leading mAP values on the self-developed UAV air-to-ground dataset JC-1, the public person ReID dataset Market-1501, and the public vehicle ReID dataset VehicleID. Sufficient statistical validation, ablation experiments and cross-domain tests verify the advancement, reliability and generalization of the proposed method in complex air-to-ground scenarios.</p>
	]]></content:encoded>

	<dc:title>Object Re-Identification Method for Air-to-Ground Targets Based on Neighborhood Feature Centralization Attention</dc:title>
			<dc:creator>Tian Yao</dc:creator>
			<dc:creator>Yong Xu</dc:creator>
			<dc:creator>Yue Ma</dc:creator>
			<dc:creator>Hongtao Yan</dc:creator>
			<dc:creator>Haihang Xu</dc:creator>
			<dc:creator>An Wang</dc:creator>
		<dc:identifier>doi: 10.3390/computation14050096</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-04-22</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-04-22</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>96</prism:startingPage>
		<prism:doi>10.3390/computation14050096</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/5/96</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/4/95">

	<title>Computation, Vol. 14, Pages 95: SOC-Dependent Soft Current Limiting for Second-Life Lithium-Ion Batteries in Off-Grid Photovoltaic Battery Energy Storage Systems</title>
	<link>https://www.mdpi.com/2079-3197/14/4/95</link>
	<description>The increasing deployment of off-grid photovoltaic&amp;ndash;battery energy storage systems (PV&amp;ndash;BESSs) has intensified operational demands on battery energy storage, particularly when second-life lithium-ion batteries are employed. Due to aging-induced increases in internal resistance and reduced thermal margins, second-life batteries are more vulnerable to high-current operation at a low state-of-charge (SOC), which aggravates heat generation and accelerates degradation. In this study, an SOC-dependent soft current limiting strategy is proposed that reshapes the discharge current reference under low-SOC conditions while maintaining fixed SOC limits, thereby targeting current-domain protection rather than SOC-boundary adaptation for reliable off-grid operation. The proposed method introduces two SOC thresholds to gradually derate the allowable discharge current, preventing abrupt current changes near the lower SOC bound. A unified MATLAB/Simulink-based framework is developed for a 24 h representative off-grid PV&amp;ndash;BESS scenario using a second-order equivalent circuit model coupled with a lumped thermal model. Simulation results show that the proposed current shaping reduces low-SOC current stress and associated Joule heating, leading to moderated temperature rise, while only slightly affecting the unmet load under the tested conditions. These findings indicate that SOC-dependent current shaping can provide a control-oriented means to reduce low-SOC electro-thermal stress in second-life batteries within the studied off-grid PV&amp;ndash;BESS framework.</description>
	<pubDate>2026-04-19</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 95: SOC-Dependent Soft Current Limiting for Second-Life Lithium-Ion Batteries in Off-Grid Photovoltaic Battery Energy Storage Systems</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/4/95">doi: 10.3390/computation14040095</a></p>
	<p>Authors:
		Hongyan Wang
		Pathomthat Chiradeja
		Atthapol Ngaopitakkul
		Suntiti Yoomak
		</p>
	<p>The increasing deployment of off-grid photovoltaic&ndash;battery energy storage systems (PV&ndash;BESSs) has intensified operational demands on battery energy storage, particularly when second-life lithium-ion batteries are employed. Due to aging-induced increases in internal resistance and reduced thermal margins, second-life batteries are more vulnerable to high-current operation at a low state-of-charge (SOC), which aggravates heat generation and accelerates degradation. In this study, an SOC-dependent soft current limiting strategy is proposed that reshapes the discharge current reference under low-SOC conditions while maintaining fixed SOC limits, thereby targeting current-domain protection rather than SOC-boundary adaptation for reliable off-grid operation. The proposed method introduces two SOC thresholds to gradually derate the allowable discharge current, preventing abrupt current changes near the lower SOC bound. A unified MATLAB/Simulink-based framework is developed for a 24 h representative off-grid PV&ndash;BESS scenario using a second-order equivalent circuit model coupled with a lumped thermal model. Simulation results show that the proposed current shaping reduces low-SOC current stress and associated Joule heating, leading to moderated temperature rise, while only slightly affecting the unmet load under the tested conditions. These findings indicate that SOC-dependent current shaping can provide a control-oriented means to reduce low-SOC electro-thermal stress in second-life batteries within the studied off-grid PV&ndash;BESS framework.</p>
	]]></content:encoded>

	<dc:title>SOC-Dependent Soft Current Limiting for Second-Life Lithium-Ion Batteries in Off-Grid Photovoltaic Battery Energy Storage Systems</dc:title>
			<dc:creator>Hongyan Wang</dc:creator>
			<dc:creator>Pathomthat Chiradeja</dc:creator>
			<dc:creator>Atthapol Ngaopitakkul</dc:creator>
			<dc:creator>Suntiti Yoomak</dc:creator>
		<dc:identifier>doi: 10.3390/computation14040095</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-04-19</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-04-19</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>95</prism:startingPage>
		<prism:doi>10.3390/computation14040095</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/4/95</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/4/94">

	<title>Computation, Vol. 14, Pages 94: Sequential H2 Adsorption on the Aromatic Li6 Superatom: Field-Activated Physisorption and Thermodynamic Limits</title>
	<link>https://www.mdpi.com/2079-3197/14/4/94</link>
	<description>Understanding the intrinsic Li&amp;ndash;H2 interaction, decoupled from substrate effects, is essential to rationalize the performance of lithium-decorated hydrogen storage materials. To address the current lack of a clean theoretical baseline, we characterized the sequential H2 adsorption on the gas-phase Li6 superatomic cluster using high-level density functional theory (DFT), complemented by Energy Decomposition Analysis (EDA), QTAIM, and NICS(0) calculations. Li6 acts as a structurally rigid platform (RMSD &amp;lt; 0.032 &amp;Aring;) where ligand-induced polarization progressively strengthens its &amp;sigma;-aromaticity (NICS(0) from &amp;minus;2.917 to &amp;minus;13.98 ppm) and increases the HOMO&amp;ndash;LUMO gap up to 5.05 eV. EDA identifies the binding as field-activated physisorption, electrostatically dominated (65&amp;ndash;67%) and mechanistically distinct from Kubas coordination, as confirmed by QTAIM closed-shell interaction parameters. Negative cooperativity governs an effective loading capacity of n = 2 molecules under cryogenic conditions (Teq = 143.76 and 114.64 K), while an entropic bottleneck renders higher loading non-spontaneous at all temperatures. These results establish Li6(H2)n as a foundational gas-phase reference, providing a systematic, contamination-free descriptor set for the intrinsic Li&amp;ndash;H2 interaction. This framework is essential for isolating the electronic role of the lithium superatom and unambiguously identifying substrate-induced modulations in supported hydrogen storage materials.</description>
	<pubDate>2026-04-17</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 94: Sequential H2 Adsorption on the Aromatic Li6 Superatom: Field-Activated Physisorption and Thermodynamic Limits</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/4/94">doi: 10.3390/computation14040094</a></p>
	<p>Authors:
		Karen Ochoa Lara
		Jancarlo Gomez-Vega
		Rafael Pacheco-Contreras
		Octavio Juárez-Sánchez
		</p>
	<p>Understanding the intrinsic Li&ndash;H2 interaction, decoupled from substrate effects, is essential to rationalize the performance of lithium-decorated hydrogen storage materials. To address the current lack of a clean theoretical baseline, we characterized the sequential H2 adsorption on the gas-phase Li6 superatomic cluster using high-level density functional theory (DFT), complemented by Energy Decomposition Analysis (EDA), QTAIM, and NICS(0) calculations. Li6 acts as a structurally rigid platform (RMSD &lt; 0.032 &Aring;) where ligand-induced polarization progressively strengthens its &sigma;-aromaticity (NICS(0) from &minus;2.917 to &minus;13.98 ppm) and increases the HOMO&ndash;LUMO gap up to 5.05 eV. EDA identifies the binding as field-activated physisorption, electrostatically dominated (65&ndash;67%) and mechanistically distinct from Kubas coordination, as confirmed by QTAIM closed-shell interaction parameters. Negative cooperativity governs an effective loading capacity of n = 2 molecules under cryogenic conditions (Teq = 143.76 and 114.64 K), while an entropic bottleneck renders higher loading non-spontaneous at all temperatures. These results establish Li6(H2)n as a foundational gas-phase reference, providing a systematic, contamination-free descriptor set for the intrinsic Li&ndash;H2 interaction. This framework is essential for isolating the electronic role of the lithium superatom and unambiguously identifying substrate-induced modulations in supported hydrogen storage materials.</p>
	]]></content:encoded>

	<dc:title>Sequential H2 Adsorption on the Aromatic Li6 Superatom: Field-Activated Physisorption and Thermodynamic Limits</dc:title>
			<dc:creator>Karen Ochoa Lara</dc:creator>
			<dc:creator>Jancarlo Gomez-Vega</dc:creator>
			<dc:creator>Rafael Pacheco-Contreras</dc:creator>
			<dc:creator>Octavio Juárez-Sánchez</dc:creator>
		<dc:identifier>doi: 10.3390/computation14040094</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-04-17</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-04-17</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>94</prism:startingPage>
		<prism:doi>10.3390/computation14040094</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/4/94</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/4/93">

	<title>Computation, Vol. 14, Pages 93: Attention-Based Transformer Framework with Predictive Uncertainty Quantification for Multi-Crop Yield Forecasting</title>
	<link>https://www.mdpi.com/2079-3197/14/4/93</link>
	<description>Accurate crop yield forecasting is essential for ensuring food security, optimizing agricultural resource allocation, and supporting climate-resilient farming systems. Recent advances in deep learning have improved yield prediction accuracy; however, most existing models provide deterministic estimates without quantifying predictive uncertainty. This limitation restricts their reliability under climatic variability, missing data, and real-world decision-making scenarios where risk awareness is critical. This study utilizes two publicly available multi-crop datasets comprising historical yield records integrated with weather and soil attributes across multiple growing seasons. An attention-based Transformer framework is proposed, augmented with uncertainty quantification through Monte Carlo Dropout, Quantile Regression, and Bayesian Attention mechanisms. The proposed approach represents an integrated uncertainty-aware Transformer framework that combines temporal self-attention with complementary uncertainty estimation strategies. The contribution of this work lies in the systematic integration and comparative evaluation of multiple uncertainty quantification mechanisms within a unified deep learning framework for multi-crop yield forecasting. Experimental results demonstrate improved predictive accuracy and calibration compared to deterministic baselines. However, these findings are bounded by the scope of the datasets, which consist of coarse tabular climatic and soil variables, and should be interpreted accordingly.</description>
	<pubDate>2026-04-15</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 93: Attention-Based Transformer Framework with Predictive Uncertainty Quantification for Multi-Crop Yield Forecasting</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/4/93">doi: 10.3390/computation14040093</a></p>
	<p>Authors:
		Bharat Lal
		Abhinav Shukla
		Ayush Kumar Agrawal
		R Kanesaraj Ramasamy
		Parul Dubey
		</p>
	<p>Accurate crop yield forecasting is essential for ensuring food security, optimizing agricultural resource allocation, and supporting climate-resilient farming systems. Recent advances in deep learning have improved yield prediction accuracy; however, most existing models provide deterministic estimates without quantifying predictive uncertainty. This limitation restricts their reliability under climatic variability, missing data, and real-world decision-making scenarios where risk awareness is critical. This study utilizes two publicly available multi-crop datasets comprising historical yield records integrated with weather and soil attributes across multiple growing seasons. An attention-based Transformer framework is proposed, augmented with uncertainty quantification through Monte Carlo Dropout, Quantile Regression, and Bayesian Attention mechanisms. The proposed approach represents an integrated uncertainty-aware Transformer framework that combines temporal self-attention with complementary uncertainty estimation strategies. The contribution of this work lies in the systematic integration and comparative evaluation of multiple uncertainty quantification mechanisms within a unified deep learning framework for multi-crop yield forecasting. Experimental results demonstrate improved predictive accuracy and calibration compared to deterministic baselines. However, these findings are bounded by the scope of the datasets, which consist of coarse tabular climatic and soil variables, and should be interpreted accordingly.</p>
	]]></content:encoded>

	<dc:title>Attention-Based Transformer Framework with Predictive Uncertainty Quantification for Multi-Crop Yield Forecasting</dc:title>
			<dc:creator>Bharat Lal</dc:creator>
			<dc:creator>Abhinav Shukla</dc:creator>
			<dc:creator>Ayush Kumar Agrawal</dc:creator>
			<dc:creator>R Kanesaraj Ramasamy</dc:creator>
			<dc:creator>Parul Dubey</dc:creator>
		<dc:identifier>doi: 10.3390/computation14040093</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-04-15</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-04-15</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>93</prism:startingPage>
		<prism:doi>10.3390/computation14040093</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/4/93</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/4/92">

	<title>Computation, Vol. 14, Pages 92: Comparative Analysis of Supervised and Unsupervised Learning for Intrusion Detection in Network Logs</title>
	<link>https://www.mdpi.com/2079-3197/14/4/92</link>
	<description>The escalating complexity of network infrastructures and the increasing sophistication of cyber threats require increasingly robust and automated Intrusion Detection Systems (IDS). This article presents a comparative investigation of the effectiveness of various Machine Learning and Deep Learning architectures in detecting network anomalies in network logs. The methodology encompassed classic supervised and ensemble algorithms, such as Random Forest and XGBoost, to sequential Deep Learning approaches (LSTM, GRU) and unsupervised models based on latent reconstruction (VAE, DeepLog). The results demonstrate that supervised approaches significantly outperformed unsupervised methods in the analyzed context. The optimized XGBoost model established a performance benchmark, achieving a Recall of 0.96 and a Precision of 0.85, thereby offering an optimal balance between detecting rare threats and minimizing false alarms. In contrast, unsupervised models revealed critical limitations, suggesting that statistical mimicry between normal and anomalous traffic hinders detection based solely on reconstruction error. Additionally, the study documents the technical interoperability challenges when attempting to integrate state-of-the-art language models, such as BERT. In conclusion, this work validates the effectiveness of Gradient Boosting algorithms and recurrent networks as viable and scalable solutions for critical network security, providing guidelines for model selection in real monitoring environments.</description>
	<pubDate>2026-04-15</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 92: Comparative Analysis of Supervised and Unsupervised Learning for Intrusion Detection in Network Logs</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/4/92">doi: 10.3390/computation14040092</a></p>
	<p>Authors:
		Paulo Castro
		Fernando Santos
		Pedro Lopes
		</p>
	<p>The escalating complexity of network infrastructures and the increasing sophistication of cyber threats require increasingly robust and automated Intrusion Detection Systems (IDS). This article presents a comparative investigation of the effectiveness of various Machine Learning and Deep Learning architectures in detecting network anomalies in network logs. The methodology encompassed classic supervised and ensemble algorithms, such as Random Forest and XGBoost, to sequential Deep Learning approaches (LSTM, GRU) and unsupervised models based on latent reconstruction (VAE, DeepLog). The results demonstrate that supervised approaches significantly outperformed unsupervised methods in the analyzed context. The optimized XGBoost model established a performance benchmark, achieving a Recall of 0.96 and a Precision of 0.85, thereby offering an optimal balance between detecting rare threats and minimizing false alarms. In contrast, unsupervised models revealed critical limitations, suggesting that statistical mimicry between normal and anomalous traffic hinders detection based solely on reconstruction error. Additionally, the study documents the technical interoperability challenges when attempting to integrate state-of-the-art language models, such as BERT. In conclusion, this work validates the effectiveness of Gradient Boosting algorithms and recurrent networks as viable and scalable solutions for critical network security, providing guidelines for model selection in real monitoring environments.</p>
	]]></content:encoded>

	<dc:title>Comparative Analysis of Supervised and Unsupervised Learning for Intrusion Detection in Network Logs</dc:title>
			<dc:creator>Paulo Castro</dc:creator>
			<dc:creator>Fernando Santos</dc:creator>
			<dc:creator>Pedro Lopes</dc:creator>
		<dc:identifier>doi: 10.3390/computation14040092</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-04-15</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-04-15</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>92</prism:startingPage>
		<prism:doi>10.3390/computation14040092</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/4/92</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/4/91">

	<title>Computation, Vol. 14, Pages 91: Reinforcement Learning-Based Inverse Design of Multilayer Particles</title>
	<link>https://www.mdpi.com/2079-3197/14/4/91</link>
	<description>Multilayered particles possess exceptional optical properties and hold significant potential for applications in chemical analysis, life sciences, optical sensing, and photonic integration. In practical applications, however, it is often necessary to perform inverse design of multilayered particles with given optical characteristics to meet specific requirements, a process that remains time-consuming. To overcome this challenge, we propose a reinforcement learning-based method for the automated design of multilayered particles. Leveraging the self-learning capacity of reinforcement learning models in combination with an optical characteristics calculation model, the method iteratively determines particle parameters that fulfill the desired optical responses. This method effectively addresses the many-to-one parameter mapping problem in inverse design, eliminates the need for extensive pre-computations, and provides an innovative approach to the automated design of complex nanostructures.</description>
	<pubDate>2026-04-10</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 91: Reinforcement Learning-Based Inverse Design of Multilayer Particles</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/4/91">doi: 10.3390/computation14040091</a></p>
	<p>Authors:
		Zhaohui Li
		Fang Gao
		Delian Liu
		</p>
	<p>Multilayered particles possess exceptional optical properties and hold significant potential for applications in chemical analysis, life sciences, optical sensing, and photonic integration. In practical applications, however, it is often necessary to perform inverse design of multilayered particles with given optical characteristics to meet specific requirements, a process that remains time-consuming. To overcome this challenge, we propose a reinforcement learning-based method for the automated design of multilayered particles. Leveraging the self-learning capacity of reinforcement learning models in combination with an optical characteristics calculation model, the method iteratively determines particle parameters that fulfill the desired optical responses. This method effectively addresses the many-to-one parameter mapping problem in inverse design, eliminates the need for extensive pre-computations, and provides an innovative approach to the automated design of complex nanostructures.</p>
	]]></content:encoded>

	<dc:title>Reinforcement Learning-Based Inverse Design of Multilayer Particles</dc:title>
			<dc:creator>Zhaohui Li</dc:creator>
			<dc:creator>Fang Gao</dc:creator>
			<dc:creator>Delian Liu</dc:creator>
		<dc:identifier>doi: 10.3390/computation14040091</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-04-10</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-04-10</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>91</prism:startingPage>
		<prism:doi>10.3390/computation14040091</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/4/91</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/4/90">

	<title>Computation, Vol. 14, Pages 90: Two-Dimensional Anomalous Solute Transport in a Two-Zone Fractal Porous Medium</title>
	<link>https://www.mdpi.com/2079-3197/14/4/90</link>
	<description>This study addresses a two-dimensional anomalous solute transport process within a two-zone fractal porous medium. A mathematical formulation is developed to characterise transport phenomena in a non-homogeneous porous domain. The medium consists of two interacting regions: one containing mobile fluid and the other containing immobile fluid, between which mass transfer occurs. In the mobile-fluid region, solute transport is governed by the convection&amp;ndash;diffusion equation. In contrast, the immobile-fluid region is described using a first-order kinetic model. The problem of solute injection through a designated boundary point is formulated and numerically implemented. The effects of anomalous transport behaviour on solute migration and filtration characteristics are examined. The study further evaluates the pressure field, filtration velocity distribution, and solute concentration in both zones.</description>
	<pubDate>2026-04-09</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 90: Two-Dimensional Anomalous Solute Transport in a Two-Zone Fractal Porous Medium</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/4/90">doi: 10.3390/computation14040090</a></p>
	<p>Authors:
		B. Kh. Khuzhayorov
		F. B. Kholliev
		A. I. Usmonov
		B. Rushi Kumar
		K. K. Viswanathan
		</p>
	<p>This study addresses a two-dimensional anomalous solute transport process within a two-zone fractal porous medium. A mathematical formulation is developed to characterise transport phenomena in a non-homogeneous porous domain. The medium consists of two interacting regions: one containing mobile fluid and the other containing immobile fluid, between which mass transfer occurs. In the mobile-fluid region, solute transport is governed by the convection–diffusion equation. In contrast, the immobile-fluid region is described using a first-order kinetic model. The problem of solute injection through a designated boundary point is formulated and numerically implemented. The effects of anomalous transport behaviour on solute migration and filtration characteristics are examined. The study further evaluates the pressure field, filtration velocity distribution, and solute concentration in both zones.</p>
	]]></content:encoded>

	<dc:title>Two-Dimensional Anomalous Solute Transport in a Two-Zone Fractal Porous Medium</dc:title>
			<dc:creator>B. Kh. Khuzhayorov</dc:creator>
			<dc:creator>F. B. Kholliev</dc:creator>
			<dc:creator>A. I. Usmonov</dc:creator>
			<dc:creator>B. Rushi Kumar</dc:creator>
			<dc:creator>K. K. Viswanathan</dc:creator>
		<dc:identifier>doi: 10.3390/computation14040090</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-04-09</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-04-09</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>90</prism:startingPage>
		<prism:doi>10.3390/computation14040090</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/4/90</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/4/89">

	<title>Computation, Vol. 14, Pages 89: Feature-Based Population Initialization for Evolutionary Optimization of Machine Learning Models in Short-Term Solar Power Forecasting</title>
	<link>https://www.mdpi.com/2079-3197/14/4/89</link>
	<description>Nowadays, solar energy is becoming one of the most popular sources of renewable energy worldwide. Traditional fossil fuels cause pollution and climate change, while solar power offers a clean and sustainable alternative. However, effective planning requires accurate prediction of the amount of solar energy that can be produced. Prediction accuracy directly depends on two factors: the model’s hyperparameters and the feature set. In this study, we use boosting models, such as LightGBM, XGBoost, and CatBoost, to forecast solar power production. The prediction horizon is 60 min, which corresponds to short-term forecasting. Model tuning is performed using the NSGA-II multi-objective optimization algorithm. In this study, NSGA-II simultaneously tunes hyperparameters and a feature set of boosting models. We aim to enhance the performance of the NSGA-II algorithm in the early stages using the proposed method to generate the initial population. The initialization is based on an ensemble of filtering methods. The proposed approach promotes faster convergence in the early stages of the algorithm compared to the traditional initialization method. The results of numerical experiments are proven by the Wilcoxon test.</description>
	<pubDate>2026-04-08</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 89: Feature-Based Population Initialization for Evolutionary Optimization of Machine Learning Models in Short-Term Solar Power Forecasting</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/4/89">doi: 10.3390/computation14040089</a></p>
	<p>Authors:
		Aleksei Vakhnin
		Harri Niska
		Anders V. Lindfors
		Mikko Kolehmainen
		</p>
	<p>Nowadays, solar energy is becoming one of the most popular sources of renewable energy worldwide. Traditional fossil fuels cause pollution and climate change, while solar power offers a clean and sustainable alternative. However, effective planning requires accurate prediction of the amount of solar energy that can be produced. Prediction accuracy directly depends on two factors: the model’s hyperparameters and the feature set. In this study, we use boosting models, such as LightGBM, XGBoost, and CatBoost, to forecast solar power production. The prediction horizon is 60 min, which corresponds to short-term forecasting. Model tuning is performed using the NSGA-II multi-objective optimization algorithm. In this study, NSGA-II simultaneously tunes hyperparameters and a feature set of boosting models. We aim to enhance the performance of the NSGA-II algorithm in the early stages using the proposed method to generate the initial population. The initialization is based on an ensemble of filtering methods. The proposed approach promotes faster convergence in the early stages of the algorithm compared to the traditional initialization method. The results of numerical experiments are proven by the Wilcoxon test.</p>
	]]></content:encoded>

	<dc:title>Feature-Based Population Initialization for Evolutionary Optimization of Machine Learning Models in Short-Term Solar Power Forecasting</dc:title>
			<dc:creator>Aleksei Vakhnin</dc:creator>
			<dc:creator>Harri Niska</dc:creator>
			<dc:creator>Anders V. Lindfors</dc:creator>
			<dc:creator>Mikko Kolehmainen</dc:creator>
		<dc:identifier>doi: 10.3390/computation14040089</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-04-08</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-04-08</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>89</prism:startingPage>
		<prism:doi>10.3390/computation14040089</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/4/89</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/4/88">

	<title>Computation, Vol. 14, Pages 88: A Comparative Study of Imbalance-Handling Methods in Multiclass Predictive Maintenance</title>
	<link>https://www.mdpi.com/2079-3197/14/4/88</link>
	<description>Predictive maintenance plays a key role in digitalization initiatives; however, in real settings, issues related to failure prediction occur when failure instances are rare compared to normal instances, leading to class imbalance. In this study, we systematically compare five machine learning (ML) models—random forest, XGBoost, support vector machine, k-nearest neighbors, and multinomial logistic regression (MLR)—to detect multiclass rare failures using four imbalance-handling approaches (i.e., no handling, manual oversampling, selective manual oversampling, and class weighting), forming 20 configurations. Using the AI4I 2020 predictive maintenance dataset, which contains five failure types, we determined that XGBoost with no handling achieved the highest macro-averaged F1 (macro-F1) score (0.842) but obtained 0% recall for tool wear failure (TWF). MLR with selective manual oversampling achieved approximately 50% TWF recall with lower overall performance (0.636 macro-F1) than top-performing models such as XGBoost. We also found that very rare classes remain difficult to detect. Even high-performing models fail to consistently detect all five failure types. Overall, no single strategy can achieve a high detection rate across all performance measures.</description>
	<pubDate>2026-04-07</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 88: A Comparative Study of Imbalance-Handling Methods in Multiclass Predictive Maintenance</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/4/88">doi: 10.3390/computation14040088</a></p>
	<p>Authors:
		Mohammed Alnahhal
		Mosab I. Tabash
		Samir K. Safi
		Mujeeb Saif Mohsen Al-Absy
		Zokir Mamadiyarov
		</p>
	<p>Predictive maintenance plays a key role in digitalization initiatives; however, in real settings, issues related to failure prediction occur when failure instances are rare compared to normal instances, leading to class imbalance. In this study, we systematically compare five machine learning (ML) models—random forest, XGBoost, support vector machine, k-nearest neighbors, and multinomial logistic regression (MLR)—to detect multiclass rare failures using four imbalance-handling approaches (i.e., no handling, manual oversampling, selective manual oversampling, and class weighting), forming 20 configurations. Using the AI4I 2020 predictive maintenance dataset, which contains five failure types, we determined that XGBoost with no handling achieved the highest macro-averaged F1 (macro-F1) score (0.842) but obtained 0% recall for tool wear failure (TWF). MLR with selective manual oversampling achieved approximately 50% TWF recall with lower overall performance (0.636 macro-F1) than top-performing models such as XGBoost. We also found that very rare classes remain difficult to detect. Even high-performing models fail to consistently detect all five failure types. Overall, no single strategy can achieve a high detection rate across all performance measures.</p>
	]]></content:encoded>

	<dc:title>A Comparative Study of Imbalance-Handling Methods in Multiclass Predictive Maintenance</dc:title>
			<dc:creator>Mohammed Alnahhal</dc:creator>
			<dc:creator>Mosab I. Tabash</dc:creator>
			<dc:creator>Samir K. Safi</dc:creator>
			<dc:creator>Mujeeb Saif Mohsen Al-Absy</dc:creator>
			<dc:creator>Zokir Mamadiyarov</dc:creator>
		<dc:identifier>doi: 10.3390/computation14040088</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-04-07</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-04-07</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>88</prism:startingPage>
		<prism:doi>10.3390/computation14040088</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/4/88</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/4/87">

	<title>Computation, Vol. 14, Pages 87: Spatiotemporal Modelling of CAR-T Cell Therapy in Solid Tumours: Mechanisms of Antigen Escape and Immunosuppression</title>
	<link>https://www.mdpi.com/2079-3197/14/4/87</link>
	<description>CAR-T cell therapy has shown substantial efficacy in haematological malignancies, but its application to solid tumours remains limited by poor effector-cell infiltration, functional exhaustion, antigenic heterogeneity, and an immunosuppressive microenvironment. In this study, we develop a new spatiotemporal mathematical model of CAR-T therapy for solid tumours that integrates these resistance mechanisms within a single reaction–diffusion framework. The model is formulated as a system of partial differential equations describing functional and exhausted CAR-T cells, antigen-positive and antigen-low tumour subpopulations, and chemokine, immunosuppressive, and hypoxic fields. Steady-state analysis and finite-difference simulations showed that therapeutic outcome is governed by the interplay between CAR-T cell infiltration, exhaustion, and antigen escape. The model reproduces partial tumour regression followed by residual tumour persistence, therapy-driven enrichment of antigen-low cells, and reduced efficacy under stronger immunosuppressive and hypoxic conditions. In the combination therapy scenario considered here, repeated simulated CAR-T cell administration together with attenuation of the suppressive microenvironment improves tumour control. The proposed model provides a mechanistic basis for analysing resistance and for future optimisation studies of CAR-T therapy in solid tumours.</description>
	<pubDate>2026-04-07</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 87: Spatiotemporal Modelling of CAR-T Cell Therapy in Solid Tumours: Mechanisms of Antigen Escape and Immunosuppression</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/4/87">doi: 10.3390/computation14040087</a></p>
	<p>Authors:
		Maxim Polyakov
		</p>
	<p>CAR-T cell therapy has shown substantial efficacy in haematological malignancies, but its application to solid tumours remains limited by poor effector-cell infiltration, functional exhaustion, antigenic heterogeneity, and an immunosuppressive microenvironment. In this study, we develop a new spatiotemporal mathematical model of CAR-T therapy for solid tumours that integrates these resistance mechanisms within a single reaction–diffusion framework. The model is formulated as a system of partial differential equations describing functional and exhausted CAR-T cells, antigen-positive and antigen-low tumour subpopulations, and chemokine, immunosuppressive, and hypoxic fields. Steady-state analysis and finite-difference simulations showed that therapeutic outcome is governed by the interplay between CAR-T cell infiltration, exhaustion, and antigen escape. The model reproduces partial tumour regression followed by residual tumour persistence, therapy-driven enrichment of antigen-low cells, and reduced efficacy under stronger immunosuppressive and hypoxic conditions. In the combination therapy scenario considered here, repeated simulated CAR-T cell administration together with attenuation of the suppressive microenvironment improves tumour control. The proposed model provides a mechanistic basis for analysing resistance and for future optimisation studies of CAR-T therapy in solid tumours.</p>
	]]></content:encoded>

	<dc:title>Spatiotemporal Modelling of CAR-T Cell Therapy in Solid Tumours: Mechanisms of Antigen Escape and Immunosuppression</dc:title>
			<dc:creator>Maxim Polyakov</dc:creator>
		<dc:identifier>doi: 10.3390/computation14040087</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-04-07</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-04-07</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>87</prism:startingPage>
		<prism:doi>10.3390/computation14040087</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/4/87</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/4/86">

	<title>Computation, Vol. 14, Pages 86: Python-Assisted Development of High-Performance Fortran Codes: A Hybrid Methodology Integrating Symbolic Mathematics and Large Language Models</title>
	<link>https://www.mdpi.com/2079-3197/14/4/86</link>
	<description>The development of high-performance Fortran code for large-scale scientific simulations is inherently challenging: direct Fortran implementation demands substantial expertise in numerical methods, optimization and system architecture. Manual derivation of numerical schemes is error-prone and time-consuming. This paper advocates a four-stage development methodology involving Python prototyping and symbolic derivation. Systematic validation at each step of incremental transition from symbolic specification to Fortran code produces numerically correct maintainable code faster than by direct manual implementation without sacrificing the resultant performance or code quality. Large Language Models effectively accelerate Python prototyping and boilerplate generation but require rigorous verification of the generated Fortran code. We suggest practical implementation guidelines including validation strategies. Python prototyping and symbolic code generation provide effective instruments for developing efficient production-ready Fortran implementations.</description>
	<pubDate>2026-04-06</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 86: Python-Assisted Development of High-Performance Fortran Codes: A Hybrid Methodology Integrating Symbolic Mathematics and Large Language Models</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/4/86">doi: 10.3390/computation14040086</a></p>
	<p>Authors:
		Daniil Tolmachev
		Roman Chertovskih
		</p>
	<p>The development of high-performance Fortran code for large-scale scientific simulations is inherently challenging: direct Fortran implementation demands substantial expertise in numerical methods, optimization and system architecture. Manual derivation of numerical schemes is error-prone and time-consuming. This paper advocates a four-stage development methodology involving Python prototyping and symbolic derivation. Systematic validation at each step of incremental transition from symbolic specification to Fortran code produces numerically correct maintainable code faster than by direct manual implementation without sacrificing the resultant performance or code quality. Large Language Models effectively accelerate Python prototyping and boilerplate generation but require rigorous verification of the generated Fortran code. We suggest practical implementation guidelines including validation strategies. Python prototyping and symbolic code generation provide effective instruments for developing efficient production-ready Fortran implementations.</p>
	]]></content:encoded>

	<dc:title>Python-Assisted Development of High-Performance Fortran Codes: A Hybrid Methodology Integrating Symbolic Mathematics and Large Language Models</dc:title>
			<dc:creator>Daniil Tolmachev</dc:creator>
			<dc:creator>Roman Chertovskih</dc:creator>
		<dc:identifier>doi: 10.3390/computation14040086</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-04-06</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-04-06</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>86</prism:startingPage>
		<prism:doi>10.3390/computation14040086</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/4/86</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/4/85">

	<title>Computation, Vol. 14, Pages 85: Computational Assessment of Shear Stress-Driven Flow Alterations at the Renal Artery Origin Under Varying Pressure Conditions</title>
	<link>https://www.mdpi.com/2079-3197/14/4/85</link>
	<description>The use of computational fluid dynamics (CFD) to study hemodynamics in arteries offers significant potential for addressing complex flow problems. Due to its enhanced performance hardware and software, CFD has become an important approach for studying hemodynamics in human arteries. This approach is utilized to investigate hemodynamics and forecast risk factors for atherosclerotic lesion development and progression, including circulatory flow, and to analyze local flow fields and flow profiles resulting from geometric changes. This foundational study will aid in analyzing blood flow behavior through the abdominal aorta and the origin and courses of renal arteries, as well as investigating the causes of disorders such as atherosclerosis and hypertension. The current study investigates three idealized abdominal aorta–renal artery junction models under varying blood pressure settings. Materialise software V19 was used to extract the geometry data to create idealized 3D abdominal aorta–renal branching models. Unsteady flow simulations were performed in ANSYS Fluent, utilizing rigid walls and Newtonian and Carreau–Yasuda viscosity conditions. Oscillatory shear index (OSI) and Time-averaged wall shear stress (TAWSS) were measured to enhance understanding of atherosclerotic plaque formation and progression. Also, the effect of geometric change at the bifurcation area was explored, and it was discovered that this location causes considerable vortex forming zones. The evident velocity reduction and backflow development were seen, reducing shear stress. The findings indicate that low TAWSS &lt; 0.4 Pa and OSI &gt; 0.15 areas within the bifurcation region are more susceptible to atherosclerosis development.</description>
	<pubDate>2026-04-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 85: Computational Assessment of Shear Stress-Driven Flow Alterations at the Renal Artery Origin Under Varying Pressure Conditions</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/4/85">doi: 10.3390/computation14040085</a></p>
	<p>Authors:
		Gowrava Shenoy Beloor
		Raghuvir Pai Ballambat
		Kevin Amith Mathias
		Mohammad Zuber
		Manjunath Mallashetty Shivamallaiah
		Ravindra Prabhu Attur
		Dharshan Rangaswamy
		Prakashini Koteshwar
		Masaaki Tamagawa
		Shah Mohammed Abdul Khader
		</p>
	<p>The use of computational fluid dynamics (CFD) to study hemodynamics in arteries offers significant potential for addressing complex flow problems. Due to its enhanced performance hardware and software, CFD has become an important approach for studying hemodynamics in human arteries. This approach is utilized to investigate hemodynamics and forecast risk factors for atherosclerotic lesion development and progression, including circulatory flow, and to analyze local flow fields and flow profiles resulting from geometric changes. This foundational study will aid in analyzing blood flow behavior through the abdominal aorta and the origin and courses of renal arteries, as well as investigating the causes of disorders such as atherosclerosis and hypertension. The current study investigates three idealized abdominal aorta–renal artery junction models under varying blood pressure settings. Materialise software V19 was used to extract the geometry data to create idealized 3D abdominal aorta–renal branching models. Unsteady flow simulations were performed in ANSYS Fluent, utilizing rigid walls and Newtonian and Carreau–Yasuda viscosity conditions. Oscillatory shear index (OSI) and Time-averaged wall shear stress (TAWSS) were measured to enhance understanding of atherosclerotic plaque formation and progression. Also, the effect of geometric change at the bifurcation area was explored, and it was discovered that this location causes considerable vortex forming zones. The evident velocity reduction and backflow development were seen, reducing shear stress. The findings indicate that low TAWSS &lt; 0.4 Pa and OSI &gt; 0.15 areas within the bifurcation region are more susceptible to atherosclerosis development.</p>
	]]></content:encoded>

	<dc:title>Computational Assessment of Shear Stress-Driven Flow Alterations at the Renal Artery Origin Under Varying Pressure Conditions</dc:title>
			<dc:creator>Gowrava Shenoy Beloor</dc:creator>
			<dc:creator>Raghuvir Pai Ballambat</dc:creator>
			<dc:creator>Kevin Amith Mathias</dc:creator>
			<dc:creator>Mohammad Zuber</dc:creator>
			<dc:creator>Manjunath Mallashetty Shivamallaiah</dc:creator>
			<dc:creator>Ravindra Prabhu Attur</dc:creator>
			<dc:creator>Dharshan Rangaswamy</dc:creator>
			<dc:creator>Prakashini Koteshwar</dc:creator>
			<dc:creator>Masaaki Tamagawa</dc:creator>
			<dc:creator>Shah Mohammed Abdul Khader</dc:creator>
		<dc:identifier>doi: 10.3390/computation14040085</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-04-03</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-04-03</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>85</prism:startingPage>
		<prism:doi>10.3390/computation14040085</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/4/85</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/4/84">

	<title>Computation, Vol. 14, Pages 84: EdgeRescue: Lightweight AI-Based Self-Healing for Energy-Constrained IoT Meshes</title>
	<link>https://www.mdpi.com/2079-3197/14/4/84</link>
	<description>As the scale and complexity of Internet of Things (IoT) deployments increase, maintaining resilience in resource-constrained mesh networks becomes a significant challenge. Frequent node failures due to battery depletion, environmental interference, or hardware degradation can disrupt data flows and lead to operational downtime. To address this, we propose EdgeRescue, a novel lightweight AI-driven framework for self-healing in energy-constrained IoT mesh environments. EdgeRescue enables each node to perform local anomaly detection using compact 1D Convolutional Neural Networks (1D-CNNs) and initiates distributed, energy-aware routing reconfiguration when faults are detected. Unlike cloud-dependent methods, EdgeRescue operates entirely at the edge, requiring minimal computation, memory, and communication overhead. Extensive simulations on a 100-node testbed demonstrate that EdgeRescue improves packet delivery by 13.2%, reduces recovery latency by 57%, and lowers average node energy consumption by 18.8% compared to state-of-the-art baselines. These results establish EdgeRescue as a scalable and practical solution for achieving real-time resilience in next-generation IoT mesh networks.</description>
	<pubDate>2026-04-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 84: EdgeRescue: Lightweight AI-Based Self-Healing for Energy-Constrained IoT Meshes</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/4/84">doi: 10.3390/computation14040084</a></p>
	<p>Authors:
		Haifa A. Alanazi
		Abdulaziz G. Alanazi
		Nasser S. Albalawi
		</p>
	<p>As the scale and complexity of Internet of Things (IoT) deployments increase, maintaining resilience in resource-constrained mesh networks becomes a significant challenge. Frequent node failures due to battery depletion, environmental interference, or hardware degradation can disrupt data flows and lead to operational downtime. To address this, we propose EdgeRescue, a novel lightweight AI-driven framework for self-healing in energy-constrained IoT mesh environments. EdgeRescue enables each node to perform local anomaly detection using compact 1D Convolutional Neural Networks (1D-CNNs) and initiates distributed, energy-aware routing reconfiguration when faults are detected. Unlike cloud-dependent methods, EdgeRescue operates entirely at the edge, requiring minimal computation, memory, and communication overhead. Extensive simulations on a 100-node testbed demonstrate that EdgeRescue improves packet delivery by 13.2%, reduces recovery latency by 57%, and lowers average node energy consumption by 18.8% compared to state-of-the-art baselines. These results establish EdgeRescue as a scalable and practical solution for achieving real-time resilience in next-generation IoT mesh networks.</p>
	]]></content:encoded>

	<dc:title>EdgeRescue: Lightweight AI-Based Self-Healing for Energy-Constrained IoT Meshes</dc:title>
			<dc:creator>Haifa A. Alanazi</dc:creator>
			<dc:creator>Abdulaziz G. Alanazi</dc:creator>
			<dc:creator>Nasser S. Albalawi</dc:creator>
		<dc:identifier>doi: 10.3390/computation14040084</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-04-03</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-04-03</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>84</prism:startingPage>
		<prism:doi>10.3390/computation14040084</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/4/84</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/4/83">

	<title>Computation, Vol. 14, Pages 83: Advanced Computational Investigation of Brush Seal Thermo-Fluid–Mechanical Performance Through Novel Porous Media Coefficient Derivation</title>
	<link>https://www.mdpi.com/2079-3197/14/4/83</link>
	<description>Brush seals represent the most effective sealing technology, offering 5 to 10 times lower leakage flow rates, resulting in an 80% to 90% increase in sealing efficiency. However, key challenges remain in optimizing brush seal performance, including managing high frictional heat, maintaining consistent leakage flow, and preventing mechanical deformation failures within the bristle pack. This study uses a fluid–mechanical coupling method to establish and refine numerical investigation procedures. Using porous media and local thermal non-equilibrium (LTNE) approaches, the effects of the pressure ratio on seal performance are analyzed. The results reveal that the difference between the maximum directional and total deformations is 0.9108 mm, with the total deformation being approximately 79,666% larger than the directional deformation. These findings highlight that the bristle pack must be designed with primary consideration of total deformation to enhance performance and efficiency. The proposed methodologies enable more robust comparative evaluations of alternative brush seal configurations, including two-stage bristle packs and inline structural models. This facilitates the identification of optimized structures that minimize leakage, enhance energy dissipation, and improve the overall seal performance, thereby advancing the porous media model from a general approximation to a design-optimized tool.</description>
	<pubDate>2026-04-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 83: Advanced Computational Investigation of Brush Seal Thermo-Fluid–Mechanical Performance Through Novel Porous Media Coefficient Derivation</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/4/83">doi: 10.3390/computation14040083</a></p>
	<p>Authors:
		Altyib Abdallah Mahmoud Ahmed
		Juan Wang
		Meihong Liu
		Aboubaker I. B. Idriss
		Abdelgalal O. I. Abaker
		</p>
	<p>Brush seals represent the most effective sealing technology, offering 5 to 10 times lower leakage flow rates, resulting in an 80% to 90% increase in sealing efficiency. However, key challenges remain in optimizing brush seal performance, including managing high frictional heat, maintaining consistent leakage flow, and preventing mechanical deformation failures within the bristle pack. This study uses a fluid–mechanical coupling method to establish and refine numerical investigation procedures. Using porous media and local thermal non-equilibrium (LTNE) approaches, the effects of the pressure ratio on seal performance are analyzed. The results reveal that the difference between the maximum directional and total deformations is 0.9108 mm, with the total deformation being approximately 79,666% larger than the directional deformation. These findings highlight that the bristle pack must be designed with primary consideration of total deformation to enhance performance and efficiency. The proposed methodologies enable more robust comparative evaluations of alternative brush seal configurations, including two-stage bristle packs and inline structural models. This facilitates the identification of optimized structures that minimize leakage, enhance energy dissipation, and improve the overall seal performance, thereby advancing the porous media model from a general approximation to a design-optimized tool.</p>
	]]></content:encoded>

	<dc:title>Advanced Computational Investigation of Brush Seal Thermo-Fluid–Mechanical Performance Through Novel Porous Media Coefficient Derivation</dc:title>
			<dc:creator>Altyib Abdallah Mahmoud Ahmed</dc:creator>
			<dc:creator>Juan Wang</dc:creator>
			<dc:creator>Meihong Liu</dc:creator>
			<dc:creator>Aboubaker I. B. Idriss</dc:creator>
			<dc:creator>Abdelgalal O. I. Abaker</dc:creator>
		<dc:identifier>doi: 10.3390/computation14040083</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-04-01</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-04-01</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>83</prism:startingPage>
		<prism:doi>10.3390/computation14040083</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/4/83</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/4/82">

	<title>Computation, Vol. 14, Pages 82: Heterogeneous Layout-Aware Cross-Modal Knowledge Point Classification for Exam Questions</title>
	<link>https://www.mdpi.com/2079-3197/14/4/82</link>
	<description>With the continuous emergence of exam question types, accurate classification of knowledge points is crucial for intelligent exam analysis. Existing methods focus on text or text–image fusion but largely ignore spatial layout. To address this limitation, we propose a heterogeneous layout-aware cross-modal framework for knowledge point classification. The architecture begins with an encoding module where independent text and layout encoders extract semantic content and spatial configurations, respectively. We then design a layout-aware enhancing module consisting of two parallel cross-modal blocks, namely a Layout-Aware Text-Enhancing block and a Context-Aware Layout-Enhancing block. This module supports the bidirectional fusion of text and layout features and generates a comprehensive representation that integrates both semantic and spatial information. Furthermore, a dynamic router with top-k expert selection is introduced to dynamically adapt to question-specific knowledge distributions and focus on core knowledge points for precise classification. Experimental results demonstrate that our method effectively integrates text and layout information, significantly enhancing performance on the proposed QType-EDU dataset. The approach achieves 91.56% accuracy for coarse-grained classification and 80.58% for fine-grained classification, with an overall F1-score of 91.39%, surpassing all baseline models.</description>
	<pubDate>2026-04-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 82: Heterogeneous Layout-Aware Cross-Modal Knowledge Point Classification for Exam Questions</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/4/82">doi: 10.3390/computation14040082</a></p>
	<p>Authors:
		Zhushun Su
		Bi Zeng
		Pengfei Wei
		Keyun Wang
		Zhentao Lin
		</p>
	<p>With the continuous emergence of exam question types, accurate classification of knowledge points is crucial for intelligent exam analysis. Existing methods focus on text or text–image fusion but largely ignore spatial layout. To address this limitation, we propose a heterogeneous layout-aware cross-modal framework for knowledge point classification. The architecture begins with an encoding module where independent text and layout encoders extract semantic content and spatial configurations, respectively. We then design a layout-aware enhancing module consisting of two parallel cross-modal blocks, namely a Layout-Aware Text-Enhancing block and a Context-Aware Layout-Enhancing block. This module supports the bidirectional fusion of text and layout features and generates a comprehensive representation that integrates both semantic and spatial information. Furthermore, a dynamic router with top-k expert selection is introduced to dynamically adapt to question-specific knowledge distributions and focus on core knowledge points for precise classification. Experimental results demonstrate that our method effectively integrates text and layout information, significantly enhancing performance on the proposed QType-EDU dataset. The approach achieves 91.56% accuracy for coarse-grained classification and 80.58% for fine-grained classification, with an overall F1-score of 91.39%, surpassing all baseline models.</p>
	]]></content:encoded>

	<dc:title>Heterogeneous Layout-Aware Cross-Modal Knowledge Point Classification for Exam Questions</dc:title>
			<dc:creator>Zhushun Su</dc:creator>
			<dc:creator>Bi Zeng</dc:creator>
			<dc:creator>Pengfei Wei</dc:creator>
			<dc:creator>Keyun Wang</dc:creator>
			<dc:creator>Zhentao Lin</dc:creator>
		<dc:identifier>doi: 10.3390/computation14040082</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-04-01</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-04-01</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>82</prism:startingPage>
		<prism:doi>10.3390/computation14040082</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/4/82</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/4/81">

	<title>Computation, Vol. 14, Pages 81: XGBoost vs. LightGBM: An XAI Approach to National Vehicle Fleet Analysis</title>
	<link>https://www.mdpi.com/2079-3197/14/4/81</link>
	<description>This study analyzes the factors associated with vehicle technology classification in Ecuador, using fuel category (electric, hybrid, and internal combustion) as the dependent variable under an Explainable Artificial Intelligence (XAI) approach. Following the CRISP-DM methodology, we compared the performance of XGBoost and LightGBM algorithms using a dataset of 482,754 administrative records from the Internal Revenue Service (SRI). Both models achieved outstanding predictive performance with a Macro F1-score of 0.987, demonstrating robustness despite the severe class imbalance (electric vehicles represent only 1.3% of the total). The integration of SHAP (SHapley Additive exPlanations) values identified tax appraisal and engine displacement as the most influential features in the model predictions in the adoption of electric vehicles. In contrast, territorial factors exert a more significant influence on the acquisition of hybrid vehicles. Finally, the findings demonstrate that boosting models, combined with XAI techniques, provide transparent analytical tools that can support evidence-based transport decarbonization strategies in emerging economies.</description>
	<pubDate>2026-04-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 81: XGBoost vs. LightGBM: An XAI Approach to National Vehicle Fleet Analysis</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/4/81">doi: 10.3390/computation14040081</a></p>
	<p>Authors:
		Wilson Gustavo Chango-Sailema
		Homero Velasteguí-Izurieta
		William Paul Pazuña-Naranjo
		Joffre Stalin Monar
		Rebeca Mariana Moposita-Lasso
		Santiago Israel Logroño-Naranjo
		Carlos Roberto López-Paredes
		Jacqueline Elizabeth Ponce
		Geovanny Euclides Silva-Peñafiel
		Angel Patricio Flores-Orozco
		Cindy Johanna Choez-Calderón
		Marcelo Vladimir Garcia
		</p>
	<p>This study analyzes the factors associated with vehicle technology classification in Ecuador, using fuel category (electric, hybrid, and internal combustion) as the dependent variable under an Explainable Artificial Intelligence (XAI) approach. Following the CRISP-DM methodology, we compared the performance of XGBoost and LightGBM algorithms using a dataset of 482,754 administrative records from the Internal Revenue Service (SRI). Both models achieved outstanding predictive performance with a Macro F1-score of 0.987, demonstrating robustness despite the severe class imbalance (electric vehicles represent only 1.3% of the total). The integration of SHAP (SHapley Additive exPlanations) values identified tax appraisal and engine displacement as the most influential features in the model predictions in the adoption of electric vehicles. In contrast, territorial factors exert a more significant influence on the acquisition of hybrid vehicles. Finally, the findings demonstrate that boosting models, combined with XAI techniques, provide transparent analytical tools that can support evidence-based transport decarbonization strategies in emerging economies.</p>
	]]></content:encoded>

	<dc:title>XGBoost vs. LightGBM: An XAI Approach to National Vehicle Fleet Analysis</dc:title>
			<dc:creator>Wilson Gustavo Chango-Sailema</dc:creator>
			<dc:creator>Homero Velasteguí-Izurieta</dc:creator>
			<dc:creator>William Paul Pazuña-Naranjo</dc:creator>
			<dc:creator>Joffre Stalin Monar</dc:creator>
			<dc:creator>Rebeca Mariana Moposita-Lasso</dc:creator>
			<dc:creator>Santiago Israel Logroño-Naranjo</dc:creator>
			<dc:creator>Carlos Roberto López-Paredes</dc:creator>
			<dc:creator>Jacqueline Elizabeth Ponce</dc:creator>
			<dc:creator>Geovanny Euclides Silva-Peñafiel</dc:creator>
			<dc:creator>Angel Patricio Flores-Orozco</dc:creator>
			<dc:creator>Cindy Johanna Choez-Calderón</dc:creator>
			<dc:creator>Marcelo Vladimir Garcia</dc:creator>
		<dc:identifier>doi: 10.3390/computation14040081</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-04-01</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-04-01</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>81</prism:startingPage>
		<prism:doi>10.3390/computation14040081</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/4/81</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/4/80">

	<title>Computation, Vol. 14, Pages 80: Evaluating Psychometric Clustering Methods: A Machine-Learning Comparison of EFA and NCD</title>
	<link>https://www.mdpi.com/2079-3197/14/4/80</link>
	<description>Classification methods such as exploratory factor analysis (EFA) and network community detection (NCD) are widely used to identify latent item groupings in multidimensional psychological assessments. However, direct comparisons between these approaches remain limited. In addition, evaluations of clustering methods often rely on overall classification metrics, which may obscure systematic differences in how well distinct types of items are recovered. Item characteristics—such as core–peripheral positions and loading patterns—may influence classification outcomes, yet few studies have examined how these item types interact with clustering methods. The present study addresses these gaps by comparing EFA and NCD within a unified machine-learning evaluation framework that varies sample size, latent structure, preprocessing strategy, and machine-learning classifier choice (Random Forests vs. Support Vector Machines). Results show that the performance of both EFA and NCD is influenced by sample size, item type, latent structure, and classifier choice. Moreover, the downstream classifier moderates how sensitive each method is to differences among item types. These findings highlight the importance of considering item-type heterogeneity when evaluating clustering methods and demonstrate the value of machine-learning-based frameworks for advancing psychometric classification approaches.</description>
	<pubDate>2026-03-31</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 80: Evaluating Psychometric Clustering Methods: A Machine-Learning Comparison of EFA and NCD</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/4/80">doi: 10.3390/computation14040080</a></p>
	<p>Authors:
		Jingyang Li
		Zhenqiu (Laura) Lu
		</p>
	<p>Classification methods such as exploratory factor analysis (EFA) and network community detection (NCD) are widely used to identify latent item groupings in multidimensional psychological assessments. However, direct comparisons between these approaches remain limited. In addition, evaluations of clustering methods often rely on overall classification metrics, which may obscure systematic differences in how well distinct types of items are recovered. Item characteristics—such as core–peripheral positions and loading patterns—may influence classification outcomes, yet few studies have examined how these item types interact with clustering methods. The present study addresses these gaps by comparing EFA and NCD within a unified machine-learning evaluation framework that varies sample size, latent structure, preprocessing strategy, and machine-learning classifier choice (Random Forests vs. Support Vector Machines). Results show that the performance of both EFA and NCD is influenced by sample size, item type, latent structure, and classifier choice. Moreover, the downstream classifier moderates how sensitive each method is to differences among item types. These findings highlight the importance of considering item-type heterogeneity when evaluating clustering methods and demonstrate the value of machine-learning-based frameworks for advancing psychometric classification approaches.</p>
	]]></content:encoded>

	<dc:title>Evaluating Psychometric Clustering Methods: A Machine-Learning Comparison of EFA and NCD</dc:title>
			<dc:creator>Jingyang Li</dc:creator>
			<dc:creator>Zhenqiu (Laura) Lu</dc:creator>
		<dc:identifier>doi: 10.3390/computation14040080</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-03-31</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-03-31</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>80</prism:startingPage>
		<prism:doi>10.3390/computation14040080</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/4/80</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/4/79">

	<title>Computation, Vol. 14, Pages 79: Heat Transfer Mixing in Closed Domain with Circular and Elliptical Cross-Sections</title>
	<link>https://www.mdpi.com/2079-3197/14/4/79</link>
	<description>Rayleigh–Bénard convection (RBC) provides a benchmark for studying buoyancy-driven instabilities and heat transport in confined fluids. Heat transfer scaling in cylindrical geometries is well established, whereas the role of the anisotropy induced by the domain geometry, such as elliptical shapes, has not been fully explored. This study presents direct numerical simulations of RBC in two domains of equal height, H=0.0124 m, and different cross-sections: a circular cylinder with radius R=3.11×10−3 m and an elliptical cylinder with semi-axes equal to Rmax=3.11×10−3 m, Rmin=1.55×10−3 m, respectively. The simulations, performed at Rayleigh number Ra=2×106 and Prandtl number Pr=1.68 (for water) under the Boussinesq approximation, reveal that (i) the average Nusselt number is comparable in both cases (⟨Nu⟩≈38.23 for the circular case and ⟨Nu⟩≈39.22 for the elliptical one) and (ii) the different domain geometries influence the thermal transport mechanism and flow organization. Specifically, in the cylindrical cell, heat transfer is regulated by a large-scale circulation roll, whereas in the case of the elliptical shape, the domain is populated by thermal plumes driving the convective dynamics. The latter phenomenon is evidenced by larger Nusselt number fluctuations at the lower and upper plates, with a standard deviation increasing from σ≈2.21 in the circular cylinder to σ≈4.57 in the elliptical domain. These results highlight that the geometric anisotropy modifies the coupling between boundary layers and the core flow dynamics, leading to enhanced intermittency without affecting the magnitude of the heat flux. 
Therefore, the elliptical domain is suitable for applications characterized by enhanced mixing.</description>
	<pubDate>2026-03-31</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 79: Heat Transfer Mixing in Closed Domain with Circular and Elliptical Cross-Sections</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/4/79">doi: 10.3390/computation14040079</a></p>
	<p>Authors:
		Myriam E. Bruno
		Alessandro Nobile
		Paolo Oresta
		</p>
	<p>Rayleigh–Bénard convection (RBC) provides a benchmark for studying buoyancy-driven instabilities and heat transport in confined fluids. Heat transfer scaling in cylindrical geometries is well established, whereas the role of the anisotropy induced by the domain geometry, such as elliptical shapes, has not been fully explored. This study presents direct numerical simulations of RBC in two domains of equal height, H=0.0124 m, and different cross-sections: a circular cylinder with radius R=3.11×10−3 m and an elliptical cylinder with semi-axes equal to Rmax=3.11×10−3 m, Rmin=1.55×10−3 m, respectively. The simulations, performed at Rayleigh number Ra=2×106 and Prandtl number Pr=1.68 (for water) under the Boussinesq approximation, reveal that (i) the average Nusselt number is comparable in both cases (⟨Nu⟩≈38.23 for the circular case and ⟨Nu⟩≈39.22 for the elliptical one) and (ii) the different domain geometries influence the thermal transport mechanism and flow organization. Specifically, in the cylindrical cell, heat transfer is regulated by a large-scale circulation roll, whereas in the case of the elliptical shape, the domain is populated by thermal plumes driving the convective dynamics. The latter phenomenon is evidenced by larger Nusselt number fluctuations at the lower and upper plates, with a standard deviation increasing from σ≈2.21 in the circular cylinder to σ≈4.57 in the elliptical domain. These results highlight that the geometric anisotropy modifies the coupling between boundary layers and the core flow dynamics, leading to enhanced intermittency without affecting the magnitude of the heat flux. 
Therefore, the elliptical domain is suitable for applications characterized by enhanced mixing.</p>
	]]></content:encoded>

	<dc:title>Heat Transfer Mixing in Closed Domain with Circular and Elliptical Cross-Sections</dc:title>
			<dc:creator>Myriam E. Bruno</dc:creator>
			<dc:creator>Alessandro Nobile</dc:creator>
			<dc:creator>Paolo Oresta</dc:creator>
		<dc:identifier>doi: 10.3390/computation14040079</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-03-31</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-03-31</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>79</prism:startingPage>
		<prism:doi>10.3390/computation14040079</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/4/79</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/4/78">

	<title>Computation, Vol. 14, Pages 78: Multiregional Forecasting of Traffic Accidents Using Prophet Models with Statistical Residual Validation</title>
	<link>https://www.mdpi.com/2079-3197/14/4/78</link>
	<description>This study develops a multiregional forecasting framework for road traffic accidents in Ecuador, addressing a critical limitation in existing predictive approaches that rely predominantly on point error metrics without validating the statistical assumptions underlying forecast uncertainty. Although the analysis is conducted at the provincial level, the spatial dimension is used primarily for cross-regional comparison and risk classification rather than for explicit spatial interaction modeling. Using a dataset of 27,648 monthly observations covering all 24 provinces from 2014 to 2025, the study applies the Prophet model within a Design Science Research paradigm and a CRISP-DM implementation cycle. Separate provincial models are estimated with a 24-month forecasting horizon, and methodological rigor is ensured through systematic residual diagnostics using the Shapiro–Wilk test for normality and the Ljung–Box test for temporal independence. Empirical results indicate that the Prophet-based artifact outperforms a naïve seasonal benchmark in 70.8% of the provinces, demonstrating excellent predictive accuracy in structurally stable regions such as Tungurahua (MAPE = 10.9%). At the same time, the framework enables the identification of critical emerging risks in provinces such as Santo Domingo and Cotopaxi, where projected increases exceed 49% despite acceptable point forecasts. The findings confirm that point accuracy alone does not guarantee the validity of confidence intervals and that residual validation is essential for trustworthy uncertainty quantification. Overall, the proposed approach provides a robust foundation for a predictive surveillance system capable of supporting differentiated, evidence-based road safety policies in territorially heterogeneous contexts.</description>
	<pubDate>2026-03-26</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 78: Multiregional Forecasting of Traffic Accidents Using Prophet Models with Statistical Residual Validation</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/4/78">doi: 10.3390/computation14040078</a></p>
	<p>Authors:
		Jaime Sayago-Heredia
		Tatiana Elizabeth Landivar
		Roberto Vásconez
		Wilson Chango-Sailema
		</p>
	<p>This study develops a multiregional forecasting framework for road traffic accidents in Ecuador, addressing a critical limitation in existing predictive approaches that rely predominantly on point error metrics without validating the statistical assumptions underlying forecast uncertainty. Although the analysis is conducted at the provincial level, the spatial dimension is used primarily for cross-regional comparison and risk classification rather than for explicit spatial interaction modeling. Using a dataset of 27,648 monthly observations covering all 24 provinces from 2014 to 2025, the study applies the Prophet model within a Design Science Research paradigm and a CRISP-DM implementation cycle. Separate provincial models are estimated with a 24-month forecasting horizon, and methodological rigor is ensured through systematic residual diagnostics using the Shapiro–Wilk test for normality and the Ljung–Box test for temporal independence. Empirical results indicate that the Prophet-based artifact outperforms a naïve seasonal benchmark in 70.8% of the provinces, demonstrating excellent predictive accuracy in structurally stable regions such as Tungurahua (MAPE = 10.9%). At the same time, the framework enables the identification of critical emerging risks in provinces such as Santo Domingo and Cotopaxi, where projected increases exceed 49% despite acceptable point forecasts. The findings confirm that point accuracy alone does not guarantee the validity of confidence intervals and that residual validation is essential for trustworthy uncertainty quantification. Overall, the proposed approach provides a robust foundation for a predictive surveillance system capable of supporting differentiated, evidence-based road safety policies in territorially heterogeneous contexts.</p>
	]]></content:encoded>

	<dc:title>Multiregional Forecasting of Traffic Accidents Using Prophet Models with Statistical Residual Validation</dc:title>
			<dc:creator>Jaime Sayago-Heredia</dc:creator>
			<dc:creator>Tatiana Elizabeth Landivar</dc:creator>
			<dc:creator>Roberto Vásconez</dc:creator>
			<dc:creator>Wilson Chango-Sailema</dc:creator>
		<dc:identifier>doi: 10.3390/computation14040078</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-03-26</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-03-26</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>78</prism:startingPage>
		<prism:doi>10.3390/computation14040078</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/4/78</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/4/77">

	<title>Computation, Vol. 14, Pages 77: Patient-Specific CFD Analysis of Carotid Artery Haemodynamics: Impact of Anatomical Variations on Atherosclerotic Risk</title>
	<link>https://www.mdpi.com/2079-3197/14/4/77</link>
	<description>Understanding the hemodynamics of the carotid artery is essential for assessing atherosclerotic disease progression and identifying regions vulnerable to plaque formation. Background: Disturbed flow patterns and abnormal shear stresses, particularly near the carotid bifurcation, are known to influence endothelial dysfunction; therefore, this study aims to quantify the impact of patient-specific carotid artery geometry on key hemodynamic parameters associated with atherosclerotic risk. Methods: Four patient-specific carotid artery geometries were reconstructed from medical imaging data, processed using MIMICS, and analyzed using computational fluid dynamics in ANSYS Fluent, with blood modeled as an incompressible non-Newtonian fluid using the Carreau–Yasuda viscosity model under pulsatile flow conditions; velocity streamlines, pressure distribution, time-averaged wall shear stress (TAWSS), and oscillatory shear index (OSI) were evaluated at early systole, peak systole, and peak diastole. Results: The simulations revealed complex flow behaviour, including flow reversal, pressure build-up, and low-shear regions concentrated near the carotid bulb and bifurcation, with TAWSS consistently identifying low-shear zones (&lt;1 Pa) across all geometries and OSI exhibiting pronounced directional oscillations in models with increased curvature and wider bifurcation angles. Conclusions: These findings demonstrate that geometric characteristics such as bifurcation angle, vessel tortuosity, and asymmetry play a critical role in shaping local haemodynamics, underscoring the utility of patient-specific CFD analysis as a diagnostic and predictive tool for atherosclerotic risk assessment and supporting more informed, personalized clinical decision-making.</description>
	<pubDate>2026-03-26</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 77: Patient-Specific CFD Analysis of Carotid Artery Haemodynamics: Impact of Anatomical Variations on Atherosclerotic Risk</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/4/77">doi: 10.3390/computation14040077</a></p>
	<p>Authors:
		Abhilash Hebbandi Ningappa
		S. M. Abdul Khader
		Harishkumar Kamat
		Masaaki Tamagawa
		Ganesh Kamath
		Raghuvir Pai B.
		Prakashini Koteswar
		Irfan Anjum Badruddin
		Mohammad Zuber
		Kevin Amith Mathias
		Gowrava Shenoy Baloor
		</p>
	<p>Understanding the hemodynamics of the carotid artery is essential for assessing atherosclerotic disease progression and identifying regions vulnerable to plaque formation. Background: Disturbed flow patterns and abnormal shear stresses, particularly near the carotid bifurcation, are known to influence endothelial dysfunction; therefore, this study aims to quantify the impact of patient-specific carotid artery geometry on key hemodynamic parameters associated with atherosclerotic risk. Methods: Four patient-specific carotid artery geometries were reconstructed from medical imaging data, processed using MIMICS, and analyzed using computational fluid dynamics in ANSYS Fluent, with blood modeled as an incompressible non-Newtonian fluid using the Carreau–Yasuda viscosity model under pulsatile flow conditions; velocity streamlines, pressure distribution, time-averaged wall shear stress (TAWSS), and oscillatory shear index (OSI) were evaluated at early systole, peak systole, and peak diastole. Results: The simulations revealed complex flow behaviour, including flow reversal, pressure build-up, and low-shear regions concentrated near the carotid bulb and bifurcation, with TAWSS consistently identifying low-shear zones (&lt;1 Pa) across all geometries and OSI exhibiting pronounced directional oscillations in models with increased curvature and wider bifurcation angles. Conclusions: These findings demonstrate that geometric characteristics such as bifurcation angle, vessel tortuosity, and asymmetry play a critical role in shaping local haemodynamics, underscoring the utility of patient-specific CFD analysis as a diagnostic and predictive tool for atherosclerotic risk assessment and supporting more informed, personalized clinical decision-making.</p>
	]]></content:encoded>

	<dc:title>Patient-Specific CFD Analysis of Carotid Artery Haemodynamics: Impact of Anatomical Variations on Atherosclerotic Risk</dc:title>
			<dc:creator>Abhilash Hebbandi Ningappa</dc:creator>
			<dc:creator>S. M. Abdul Khader</dc:creator>
			<dc:creator>Harishkumar Kamat</dc:creator>
			<dc:creator>Masaaki Tamagawa</dc:creator>
			<dc:creator>Ganesh Kamath</dc:creator>
			<dc:creator>Raghuvir Pai B.</dc:creator>
			<dc:creator>Prakashini Koteswar</dc:creator>
			<dc:creator>Irfan Anjum Badruddin</dc:creator>
			<dc:creator>Mohammad Zuber</dc:creator>
			<dc:creator>Kevin Amith Mathias</dc:creator>
			<dc:creator>Gowrava Shenoy Baloor</dc:creator>
		<dc:identifier>doi: 10.3390/computation14040077</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-03-26</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-03-26</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>77</prism:startingPage>
		<prism:doi>10.3390/computation14040077</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/4/77</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/4/76">

	<title>Computation, Vol. 14, Pages 76: Computational Economics of Circular Construction: Machine Learning and Digital Twins for Optimizing Demolition Waste Recovery and Business Value</title>
	<link>https://www.mdpi.com/2079-3197/14/4/76</link>
	<description>Construction and demolition waste (CDW) represents a critical environmental challenge in the building sector, with global generation exceeding 3.57 billion tonnes annually. The circular economy (CE) framework offers a transformative pathway through selective deconstruction and material recovery, yet implementation faces significant barriers including information asymmetry, supply chain fragmentation, and regulatory uncertainty. This study conducts a systematic literature review using the Context&amp;amp;ndash;Mechanism&amp;amp;ndash;Outcome (CMO) framework to analyze how computational methods, specifically Digital Twins (DT), Building Information Modeling (BIM), Internet of Things (IoT), blockchain, artificial intelligence, and robotics, act as enablers for resilience in CDW management. Following PRISMA 2020 guidelines and realist synthesis principles, we analyzed 42 high-quality empirical studies from Web of Science and Scopus (2015&amp;amp;ndash;2025). Our analysis identifies seven primary mechanisms: traceability (M1), simulation (M2), classification (M3), tracking (M4), collaboration (M5), analytics (M6) and robotics (M7). These mechanisms interact with four critical contexts (information asymmetry, supply chain fragmentation, economic uncertainty, operational risks) to generate outcomes at two levels: resilience capabilities (visibility, monitoring, collaboration, flexibility, anticipation) and performance indicators (recovery rates, cost reduction, CO2 emissions mitigation, occupational safety). Key findings from the CMO analysis reveal that blockchain-enabled traceability increases material recovery rates by 15&amp;amp;ndash;25%, DT simulation reduces deconstruction costs by 20&amp;amp;ndash;30%, and computer vision automation improves sorting accuracy to 85&amp;amp;ndash;95%. 
The study contributes middle-range theories explaining how digital technologies enable circular transitions under specific contextual conditions, offering actionable strategic implications for researchers, project managers, technology developers, and policymakers committed to advancing computational economics in sustainable construction.</description>
	<pubDate>2026-03-25</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 76: Computational Economics of Circular Construction: Machine Learning and Digital Twins for Optimizing Demolition Waste Recovery and Business Value</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/4/76">doi: 10.3390/computation14040076</a></p>
	<p>Authors:
		Marta Torres-Polo
		Eduardo Guzmán Ortíz
		</p>
	<p>Construction and demolition waste (CDW) represents a critical environmental challenge in the building sector, with global generation exceeding 3.57 billion tonnes annually. The circular economy (CE) framework offers a transformative pathway through selective deconstruction and material recovery, yet implementation faces significant barriers including information asymmetry, supply chain fragmentation, and regulatory uncertainty. This study conducts a systematic literature review using the Context&amp;amp;ndash;Mechanism&amp;amp;ndash;Outcome (CMO) framework to analyze how computational methods, specifically Digital Twins (DT), Building Information Modeling (BIM), Internet of Things (IoT), blockchain, artificial intelligence, and robotics, act as enablers for resilience in CDW management. Following PRISMA 2020 guidelines and realist synthesis principles, we analyzed 42 high-quality empirical studies from Web of Science and Scopus (2015&amp;amp;ndash;2025). Our analysis identifies seven primary mechanisms: traceability (M1), simulation (M2), classification (M3), tracking (M4), collaboration (M5), analytics (M6) and robotics (M7). These mechanisms interact with four critical contexts (information asymmetry, supply chain fragmentation, economic uncertainty, operational risks) to generate outcomes at two levels: resilience capabilities (visibility, monitoring, collaboration, flexibility, anticipation) and performance indicators (recovery rates, cost reduction, CO2 emissions mitigation, occupational safety). Key findings from the CMO analysis reveal that blockchain-enabled traceability increases material recovery rates by 15&amp;amp;ndash;25%, DT simulation reduces deconstruction costs by 20&amp;amp;ndash;30%, and computer vision automation improves sorting accuracy to 85&amp;amp;ndash;95%. 
The study contributes middle-range theories explaining how digital technologies enable circular transitions under specific contextual conditions, offering actionable strategic implications for researchers, project managers, technology developers, and policymakers committed to advancing computational economics in sustainable construction.</p>
	]]></content:encoded>

	<dc:title>Computational Economics of Circular Construction: Machine Learning and Digital Twins for Optimizing Demolition Waste Recovery and Business Value</dc:title>
			<dc:creator>Marta Torres-Polo</dc:creator>
			<dc:creator>Eduardo Guzmán Ortíz</dc:creator>
		<dc:identifier>doi: 10.3390/computation14040076</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-03-25</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-03-25</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>76</prism:startingPage>
		<prism:doi>10.3390/computation14040076</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/4/76</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/4/75">

	<title>Computation, Vol. 14, Pages 75: Reinforcement-Learning-Based Optimization of Convective Fluxes for High-CFL Finite-Volume Schemes</title>
	<link>https://www.mdpi.com/2079-3197/14/4/75</link>
	<description>In this article, we explore the possibility of using reinforcement learning to create convective flow approximation schemes that maintain accuracy and stability at high Courant-Friedrichs-Lewy (CFL) numbers in the finite-volume discretization of advection equations. Unlike most existing data-driven discretization methods, which primarily concentrate on spatial grid refinement, this work emphasizes increasing the allowable time step without compromising solution accuracy. This approach reduces the total number of time integration steps, thereby enabling faster computation. A neural network is used as a surrogate model for reconstructing the convective flow, which takes as input local information about the flow, scalars, and geometry and predicts scalar values at node points. Reinforcement learning is used for training and is formulated as a policy optimization problem, where the long-term reward is defined as the difference between the numerical and reference solutions over the entire simulation period. Both the genetic algorithm and the Deep Deterministic Policy Gradient (DDPG) method are investigated. The effectiveness of the approach is evaluated using a one-dimensional nonlinear advection problem with a constant velocity field. Despite the simplicity of the test case, the results demonstrate that the trained convective flux approximation scheme achieves accuracy comparable to or better than the classical second-order linear upwind (LUD) scheme, while operating at CFL numbers 2&amp;amp;ndash;50 times higher than the optimal CFL for LUD, thereby reducing the simulation time by the same factor. This allows for a wider range of stability and accuracy in the finite-volume method and the use of larger time steps without compromising the quality of the solution. The study is intentionally limited to a single spatial dimension and serves as a basic analysis of the method&amp;amp;rsquo;s applicability. 
The results demonstrate that reinforcement learning can successfully find more convective flow approximation schemes that improve efficiency at high CFL numbers than conventional explicit second-order schemes, establishing a framework that is subsequently extended in our follow-up work to improve training methods and three-dimensional complex transport problems. The proposed method improves the spatial discretization of convective fluxes, which is independent of the choice of time integration scheme. Therefore, the neural reconstruction can in principle be used in both explicit and implicit finite-volume solvers.</description>
	<pubDate>2026-03-24</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 75: Reinforcement-Learning-Based Optimization of Convective Fluxes for High-CFL Finite-Volume Schemes</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/4/75">doi: 10.3390/computation14040075</a></p>
	<p>Authors:
		Andrey Rozhkov
		Andrey Kozelkov
		Vadim Kurulin
		Maxim Shishlenin
		</p>
	<p>In this article, we explore the possibility of using reinforcement learning to create convective flow approximation schemes that maintain accuracy and stability at high Courant-Friedrichs-Lewy (CFL) numbers in the finite-volume discretization of advection equations. Unlike most existing data-driven discretization methods, which primarily concentrate on spatial grid refinement, this work emphasizes increasing the allowable time step without compromising solution accuracy. This approach reduces the total number of time integration steps, thereby enabling faster computation. A neural network is used as a surrogate model for reconstructing the convective flow, which takes as input local information about the flow, scalars, and geometry and predicts scalar values at node points. Reinforcement learning is used for training and is formulated as a policy optimization problem, where the long-term reward is defined as the difference between the numerical and reference solutions over the entire simulation period. Both the genetic algorithm and the Deep Deterministic Policy Gradient (DDPG) method are investigated. The effectiveness of the approach is evaluated using a one-dimensional nonlinear advection problem with a constant velocity field. Despite the simplicity of the test case, the results demonstrate that the trained convective flux approximation scheme achieves accuracy comparable to or better than the classical second-order linear upwind (LUD) scheme, while operating at CFL numbers 2&amp;amp;ndash;50 times higher than the optimal CFL for LUD, thereby reducing the simulation time by the same factor. This allows for a wider range of stability and accuracy in the finite-volume method and the use of larger time steps without compromising the quality of the solution. The study is intentionally limited to a single spatial dimension and serves as a basic analysis of the method&amp;amp;rsquo;s applicability. 
The results demonstrate that reinforcement learning can successfully find more convective flow approximation schemes that improve efficiency at high CFL numbers than conventional explicit second-order schemes, establishing a framework that is subsequently extended in our follow-up work to improve training methods and three-dimensional complex transport problems. The proposed method improves the spatial discretization of convective fluxes, which is independent of the choice of time integration scheme. Therefore, the neural reconstruction can in principle be used in both explicit and implicit finite-volume solvers.</p>
	]]></content:encoded>

	<dc:title>Reinforcement-Learning-Based Optimization of Convective Fluxes for High-CFL Finite-Volume Schemes</dc:title>
			<dc:creator>Andrey Rozhkov</dc:creator>
			<dc:creator>Andrey Kozelkov</dc:creator>
			<dc:creator>Vadim Kurulin</dc:creator>
			<dc:creator>Maxim Shishlenin</dc:creator>
		<dc:identifier>doi: 10.3390/computation14040075</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-03-24</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-03-24</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>75</prism:startingPage>
		<prism:doi>10.3390/computation14040075</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/4/75</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/3/74">

	<title>Computation, Vol. 14, Pages 74: Heat Transfer Coefficient Between Spherical Particles in Low-Conducting Fluid</title>
	<link>https://www.mdpi.com/2079-3197/14/3/74</link>
	<description>Calculation of heat transfer in granular materials is an important task for many applications, from thermal management in electronics to exploring celestial soils. Usually, an effective thermal-conductivity model is employed to predict heat flux in unstructured granular media, such as a packed bed. However, a more advanced approach, the discrete element method (DEM), can capture the complex effects of mechanical loading and material mixtures on thermal transport coefficients, which traditional models struggle with. Pivotal for this approach is knowing the heat transfer coefficient between two adjacent particles. Currently, in most DEM-capable software, only particles in direct surface contact are considered to have non-zero heat conduction. We propose considering particles that are close to each other but don&amp;amp;rsquo;t have a contact area with a non-zero surface area. We perform numerical modeling of the conductive heat transfer coefficient between equal spherical particles separated by media, assuming the fluid&amp;amp;rsquo;s thermal conductivity is at least an order of magnitude lower. We use numerical solutions of differential equations to account for both thermal resistance within particles and through the gap between them. We found a simple generalized correlation for the heat transfer coefficient between particles and a general formula for the angular distribution of heat flux density across the particle surface. By employing a non-dimensional approach, the obtained formulas are constructed using non-dimensional parameters: the ratio of the particle&amp;amp;rsquo;s thermal conductivity to that of the medium, and the ratio of the gap width between particles to their radius. The resulting formula is simple and convenient for DEM heat transfer calculations in packed and fluidized beds.</description>
	<pubDate>2026-03-20</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 74: Heat Transfer Coefficient Between Spherical Particles in Low-Conducting Fluid</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/3/74">doi: 10.3390/computation14030074</a></p>
	<p>Authors:
		Andrei I. Malinouski
		Oscar S. Rabinovich
		Heorhi U. Barakhouski
		</p>
	<p>Calculation of heat transfer in granular materials is an important task for many applications, from thermal management in electronics to exploring celestial soils. Usually, an effective thermal-conductivity model is employed to predict heat flux in unstructured granular media, such as a packed bed. However, a more advanced approach, the discrete element method (DEM), can capture the complex effects of mechanical loading and material mixtures on thermal transport coefficients, which traditional models struggle with. Pivotal for this approach is knowing the heat transfer coefficient between two adjacent particles. Currently, in most DEM-capable software, only particles in direct surface contact are considered to have non-zero heat conduction. We propose considering particles that are close to each other but don&amp;amp;rsquo;t have a contact area with a non-zero surface area. We perform numerical modeling of the conductive heat transfer coefficient between equal spherical particles separated by media, assuming the fluid&amp;amp;rsquo;s thermal conductivity is at least an order of magnitude lower. We use numerical solutions of differential equations to account for both thermal resistance within particles and through the gap between them. We found a simple generalized correlation for the heat transfer coefficient between particles and a general formula for the angular distribution of heat flux density across the particle surface. By employing a non-dimensional approach, the obtained formulas are constructed using non-dimensional parameters: the ratio of the particle&amp;amp;rsquo;s thermal conductivity to that of the medium, and the ratio of the gap width between particles to their radius. The resulting formula is simple and convenient for DEM heat transfer calculations in packed and fluidized beds.</p>
	]]></content:encoded>

	<dc:title>Heat Transfer Coefficient Between Spherical Particles in Low-Conducting Fluid</dc:title>
			<dc:creator>Andrei I. Malinouski</dc:creator>
			<dc:creator>Oscar S. Rabinovich</dc:creator>
			<dc:creator>Heorhi U. Barakhouski</dc:creator>
		<dc:identifier>doi: 10.3390/computation14030074</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-03-20</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-03-20</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>74</prism:startingPage>
		<prism:doi>10.3390/computation14030074</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/3/74</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/3/73">

	<title>Computation, Vol. 14, Pages 73: Online Point-of-Interest Recommendations in Data Streams</title>
	<link>https://www.mdpi.com/2079-3197/14/3/73</link>
	<description>In recent years, social networks have shown a great influx of new users and traffic. As their popularity grows, so does the interest in researching ways to process the information available, in order to produce useful knowledge. One direction is making personalized recommendations based on users&amp;amp;rsquo; preferences and on their social behavior and related characteristics in general. Static recommendations, however, are proven to be highly inaccurate, since as time progresses, people tend to change their preferences, making different decisions than the ones predicted previously. This calls for an adaptive algorithm that shifts according to the changes in preferences and habits of the users. Handling the stream of information is challenging, as the new data can severely change the recommendations to many users. In this work, we propose a novel streaming Point-of-Interest recommendation algorithm that explicitly incorporates location-aware features into its dynamic update mechanism, enabling continuous adaptation to newly arriving data. The proposed approach is experimentally evaluated based on real-life data sets containing the network structure as well as check-in information. The results demonstrate high accuracy, achieving at the same time significant performance gains with respect to runtime costs compared to conventional approaches.</description>
	<pubDate>2026-03-20</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 73: Online Point-of-Interest Recommendations in Data Streams</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/3/73">doi: 10.3390/computation14030073</a></p>
	<p>Authors:
		Giannis Christoforidis
		Apostolos N. Papadopoulos
		</p>
	<p>In recent years, social networks have shown a great influx of new users and traffic. As their popularity grows, so does the interest in researching ways to process the information available, in order to produce useful knowledge. One direction is making personalized recommendations based on users&amp;amp;rsquo; preferences and on their social behavior and related characteristics in general. Static recommendations, however, are proven to be highly inaccurate, since as time progresses, people tend to change their preferences, making different decisions than the ones predicted previously. This calls for an adaptive algorithm that shifts according to the changes in preferences and habits of the users. Handling the stream of information is challenging, as the new data can severely change the recommendations to many users. In this work, we propose a novel streaming Point-of-Interest recommendation algorithm that explicitly incorporates location-aware features into its dynamic update mechanism, enabling continuous adaptation to newly arriving data. The proposed approach is experimentally evaluated based on real-life data sets containing the network structure as well as check-in information. The results demonstrate high accuracy, achieving at the same time significant performance gains with respect to runtime costs compared to conventional approaches.</p>
	]]></content:encoded>

	<dc:title>Online Point-of-Interest Recommendations in Data Streams</dc:title>
			<dc:creator>Giannis Christoforidis</dc:creator>
			<dc:creator>Apostolos N. Papadopoulos</dc:creator>
		<dc:identifier>doi: 10.3390/computation14030073</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-03-20</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-03-20</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>73</prism:startingPage>
		<prism:doi>10.3390/computation14030073</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/3/73</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/3/72">

	<title>Computation, Vol. 14, Pages 72: Comparative Analysis of Machine Learning Algorithms to Predict Municipal Solid Waste</title>
	<link>https://www.mdpi.com/2079-3197/14/3/72</link>
	<description>The management of municipal solid waste in intermediate cities exhibits high daily variability and source heterogeneity, which hinders operational sizing and material recovery. Reliable predictions are required from heterogeneous and often-scarce data. However, studies that compare multiple machine learning algorithms with temporal validation on short time series in intermediate cities are still limited. This study compares fourteen machine learning algorithms to predict the daily generation of organic and inorganic waste in La Joya de los Sachas, Ecuador, formulating the problem as a multi-output regression problem. An adapted CRISP-DM design was employed, using primary data from a waste characterization campaign, temporal feature engineering, variable encoding, and an expanding-window backtesting protocol against lag-7 persistence and ARIMA. Tree-based ensembles achieved the best performance. AdaBoost provided the best organic forecasts (R2=0.985, RMSE&amp;amp;nbsp;=0.081, MAE=0.061 in rate space), while Random Forest was best for inorganic (R2=0.965, RMSE&amp;amp;nbsp;=0.049, MAE=0.040). Linear models were stable but slightly inferior, and other approaches (SVR, KNN, MLP, Lasso, ElasticNet) showed lower generalization capacity. The study provides a multi-output regression protocol with temporal validation for municipal contexts with short time series, comparative evidence across fourteen algorithms, and a conversion from rates to kilograms for operational use.</description>
	<pubDate>2026-03-19</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 72: Comparative Analysis of Machine Learning Algorithms to Predict Municipal Solid Waste</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/3/72">doi: 10.3390/computation14030072</a></p>
	<p>Authors:
		Pedro Aguilar-Encarnacion
		Pedro Peñafiel-Arcos
		Marcos Barahona Morales
		Wilson Chango
		</p>
	<p>The management of municipal solid waste in intermediate cities exhibits high daily variability and source heterogeneity, which hinders operational sizing and material recovery. Reliable predictions are required from heterogeneous and often-scarce data. However, studies that compare multiple machine learning algorithms with temporal validation on short time series in intermediate cities are still limited. This study compares fourteen machine learning algorithms to predict the daily generation of organic and inorganic waste in La Joya de los Sachas, Ecuador, formulating the problem as a multi-output regression problem. An adapted CRISP-DM design was employed, using primary data from a waste characterization campaign, temporal feature engineering, variable encoding, and an expanding-window backtesting protocol against lag-7 persistence and ARIMA. Tree-based ensembles achieved the best performance. AdaBoost provided the best organic forecasts (R2=0.985, RMSE&amp;amp;nbsp;=0.081, MAE=0.061 in rate space), while Random Forest was best for inorganic (R2=0.965, RMSE&amp;amp;nbsp;=0.049, MAE=0.040). Linear models were stable but slightly inferior, and other approaches (SVR, KNN, MLP, Lasso, ElasticNet) showed lower generalization capacity. The study provides a multi-output regression protocol with temporal validation for municipal contexts with short time series, comparative evidence across fourteen algorithms, and a conversion from rates to kilograms for operational use.</p>
	]]></content:encoded>

	<dc:title>Comparative Analysis of Machine Learning Algorithms to Predict Municipal Solid Waste</dc:title>
			<dc:creator>Pedro Aguilar-Encarnacion</dc:creator>
			<dc:creator>Pedro Peñafiel-Arcos</dc:creator>
			<dc:creator>Marcos Barahona Morales</dc:creator>
			<dc:creator>Wilson Chango</dc:creator>
		<dc:identifier>doi: 10.3390/computation14030072</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-03-19</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-03-19</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>72</prism:startingPage>
		<prism:doi>10.3390/computation14030072</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/3/72</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/3/71">

	<title>Computation, Vol. 14, Pages 71: Sensitivity Analysis of CO2 Emitted in Clinker and Cement Production</title>
	<link>https://www.mdpi.com/2079-3197/14/3/71</link>
	<description>This study performs a sensitivity analysis of CO2 emissions from clinker and cement production using life cycle assessment (LCA). Both local and global sensitivity analyses (LSA and GSA) are conducted. LSA uses outputs from the GCCA EPD tool&amp;amp;mdash;developed by the Global Cement and Concrete Association to facilitate Environmental Product Declarations&amp;amp;mdash;and examines correlations between perturbed input variables and the resulting output changes. For GSA, we present an analytical derivation of Sobol&amp;amp;rsquo; indices. We derive quantitative relationships between alternative materials and fuels and key technical indices, while preserving clinker and cement quality throughout the sensitivity analysis. Increasing the share of the alternative fuels (AFs) categories and of recycled concrete produces a negative percentage change in CO2 emitted from the clinker (CO2/CL). The largest CO2/CL reductions arise from high-biomass fuels, followed by alternative solid fuels and refuse-derived fuels, shredded tires, and, lastly, recycled concrete. The clinker-to-cement ratio (CL/CEM) dominates the CO2 emitted in cement production (1% change &amp;amp;rarr; 0.926&amp;amp;ndash;0.956% change), while clinker-level CO2 reductions transmit to cement with only minor variation, confirmed by Sobol&amp;amp;rsquo; indices. Aside from reducing CO2/CL by increasing alternative materials and fuels, the two principal approaches to lowering CO2/CEM are: (i) minimizing clinker content in cement where permitted by applicable standards while maintaining the same performance, and (ii) designing new cement types that deliver equivalent performance with lower clinker content.</description>
	<pubDate>2026-03-18</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 71: Sensitivity Analysis of CO2 Emitted in Clinker and Cement Production</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/3/71">doi: 10.3390/computation14030071</a></p>
	<p>Authors:
		Dimitris Tsamatsoulis
		</p>
	<p>This study performs a sensitivity analysis of CO2 emissions from clinker and cement production using life cycle assessment (LCA). Both local and global sensitivity analyses (LSA and GSA) are conducted. LSA uses outputs from the GCCA EPD tool&amp;amp;mdash;developed by the Global Cement and Concrete Association to facilitate Environmental Product Declarations&amp;amp;mdash;and examines correlations between perturbed input variables and the resulting output changes. For GSA, we present an analytical derivation of Sobol&amp;amp;rsquo; indices. We derive quantitative relationships between alternative materials and fuels and key technical indices, while preserving clinker and cement quality throughout the sensitivity analysis. Increasing the share of the alternative fuels (AFs) categories and of recycled concrete produces a negative percentage change in CO2 emitted from the clinker (CO2/CL). The largest CO2/CL reductions arise from high-biomass fuels, followed by alternative solid fuels and refuse-derived fuels, shredded tires, and, lastly, recycled concrete. The clinker-to-cement ratio (CL/CEM) dominates the CO2 emitted in cement production (1% change &amp;amp;rarr; 0.926&amp;amp;ndash;0.956% change), while clinker-level CO2 reductions transmit to cement with only minor variation, confirmed by Sobol&amp;amp;rsquo; indices. Aside from reducing CO2/CL by increasing alternative materials and fuels, the two principal approaches to lowering CO2/CEM are: (i) minimizing clinker content in cement where permitted by applicable standards while maintaining the same performance, and (ii) designing new cement types that deliver equivalent performance with lower clinker content.</p>
	]]></content:encoded>

	<dc:title>Sensitivity Analysis of CO2 Emitted in Clinker and Cement Production</dc:title>
			<dc:creator>Dimitris Tsamatsoulis</dc:creator>
		<dc:identifier>doi: 10.3390/computation14030071</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-03-18</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-03-18</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>71</prism:startingPage>
		<prism:doi>10.3390/computation14030071</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/3/71</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/3/70">

	<title>Computation, Vol. 14, Pages 70: Optimization-Driven Multimodal Brain Tumor Segmentation Using &amp;alpha;-Expansion Graph Cuts</title>
	<link>https://www.mdpi.com/2079-3197/14/3/70</link>
	<description>Precise segmentation of brain tumors from multimodal MRI scans is essential for accurate neuro-oncological diagnosis and treatment planning. To address this challenge, we propose a label-free optimization-driven segmentation framework based on the &amp;amp;alpha;-expansion graph cut algorithm, offering improved computational efficiency and interpretability compared to deep learning alternatives. The method relies on structured optimization and handcrafted features, including local intensity patches, entropy-based texture descriptors, and statistical moments, to compute voxel-wise unary potentials via gradient-boosted decision trees (XGBoost). These are integrated with spatially adaptive pairwise terms within a graph model optimized through &amp;amp;alpha;-expansion. Evaluation on 146 BraTS validation volumes demonstrates reliable whole-tumor overlap, with a mean Dice score of 0.855 &amp;amp;plusmn; 0.184 and a 95% Hausdorff distance of 18.66 mm. Bootstrap analysis confirms the statistical stability of these results. The low computational overhead and modular design make the method particularly suitable for transparent and resource-constrained clinical deployment scenarios.</description>
	<pubDate>2026-03-15</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 70: Optimization-Driven Multimodal Brain Tumor Segmentation Using &amp;alpha;-Expansion Graph Cuts</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/3/70">doi: 10.3390/computation14030070</a></p>
	<p>Authors:
		Roaa Soloh
		Bilal Nakhal
		Abdallah El Chakik
		</p>
	<p>Precise segmentation of brain tumors from multimodal MRI scans is essential for accurate neuro-oncological diagnosis and treatment planning. To address this challenge, we propose a label-free optimization-driven segmentation framework based on the &amp;amp;alpha;-expansion graph cut algorithm, offering improved computational efficiency and interpretability compared to deep learning alternatives. The method relies on structured optimization and handcrafted features, including local intensity patches, entropy-based texture descriptors, and statistical moments, to compute voxel-wise unary potentials via gradient-boosted decision trees (XGBoost). These are integrated with spatially adaptive pairwise terms within a graph model optimized through &amp;amp;alpha;-expansion. Evaluation on 146 BraTS validation volumes demonstrates reliable whole-tumor overlap, with a mean Dice score of 0.855 &amp;amp;plusmn; 0.184 and a 95% Hausdorff distance of 18.66 mm. Bootstrap analysis confirms the statistical stability of these results. The low computational overhead and modular design make the method particularly suitable for transparent and resource-constrained clinical deployment scenarios.</p>
	]]></content:encoded>

	<dc:title>Optimization-Driven Multimodal Brain Tumor Segmentation Using &amp;alpha;-Expansion Graph Cuts</dc:title>
			<dc:creator>Roaa Soloh</dc:creator>
			<dc:creator>Bilal Nakhal</dc:creator>
			<dc:creator>Abdallah El Chakik</dc:creator>
		<dc:identifier>doi: 10.3390/computation14030070</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-03-15</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-03-15</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>70</prism:startingPage>
		<prism:doi>10.3390/computation14030070</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/3/70</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/3/69">

	<title>Computation, Vol. 14, Pages 69: A Hybrid Model Reduction Method for Dual-Continuum Model with Random Inputs</title>
	<link>https://www.mdpi.com/2079-3197/14/3/69</link>
	<description>In this paper, a hybrid model reduction method for solving flows in fractured media is proposed. The approach integrates the Generalized Multiscale Finite Element Method (GMsFEM) with a novel variable-separation (VS) technique. Compared with many widely used variable-separation methods, the proposed model reduction method shares their merits but has lower computation complexity and higher efficiency. Within this framework, we can get the low-rank variable-separation expansion of dual-continuum model solutions in a systematic enrichment manner. No iteration is performed at each enrichment step. The expansion is constructed using two sets of basis functions: stochastic basis functions and deterministic physical basis functions, both derived from offline, model-oriented computations. To efficiently construct the stochastic basis functions, the original model is used to learn stochastic information. Meanwhile, the deterministic physical basis functions are trained using solutions obtained by applying an uncoupled GMsFEM to the dual-continuum system at a select number of optimal samples. Once these bases are established, the online evaluation for each new random sample becomes highly efficient, allowing for the computation of a large number of stochastic realizations at minimal cost. To demonstrate the performance of the proposed method, two numerical examples for dual-continuum models with random inputs are presented. The results confirm that the hybrid model reduction method is both efficient and achieves high approximation accuracy.</description>
	<pubDate>2026-03-13</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 69: A Hybrid Model Reduction Method for Dual-Continuum Model with Random Inputs</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/3/69">doi: 10.3390/computation14030069</a></p>
	<p>Authors:
		Lingling Ma
		</p>
	<p>In this paper, a hybrid model reduction method for solving flows in fractured media is proposed. The approach integrates the Generalized Multiscale Finite Element Method (GMsFEM) with a novel variable-separation (VS) technique. Compared with many widely used variable-separation methods, the proposed model reduction method shares their merits but has lower computation complexity and higher efficiency. Within this framework, we can get the low-rank variable-separation expansion of dual-continuum model solutions in a systematic enrichment manner. No iteration is performed at each enrichment step. The expansion is constructed using two sets of basis functions: stochastic basis functions and deterministic physical basis functions, both derived from offline, model-oriented computations. To efficiently construct the stochastic basis functions, the original model is used to learn stochastic information. Meanwhile, the deterministic physical basis functions are trained using solutions obtained by applying an uncoupled GMsFEM to the dual-continuum system at a select number of optimal samples. Once these bases are established, the online evaluation for each new random sample becomes highly efficient, allowing for the computation of a large number of stochastic realizations at minimal cost. To demonstrate the performance of the proposed method, two numerical examples for dual-continuum models with random inputs are presented. The results confirm that the hybrid model reduction method is both efficient and achieves high approximation accuracy.</p>
	]]></content:encoded>

	<dc:title>A Hybrid Model Reduction Method for Dual-Continuum Model with Random Inputs</dc:title>
			<dc:creator>Lingling Ma</dc:creator>
		<dc:identifier>doi: 10.3390/computation14030069</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-03-13</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-03-13</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>69</prism:startingPage>
		<prism:doi>10.3390/computation14030069</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/3/69</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/3/68">

	<title>Computation, Vol. 14, Pages 68: Determining When Gurobi Generates Optimal Solutions for the Partial Coverage Weighted Set Covering Problem</title>
	<link>https://www.mdpi.com/2079-3197/14/3/68</link>
	<description>The partial coverage weighted set covering problem (PCWSCP) allows for less than 100% of the rows to be satisfied in a weighted set covering problem (WSCP). This paper does not claim to contribute to operations research (OR) theory or methodology. Instead, it demonstrates that a large number of PCWSCPs based on WSCPs from the OR literature can be efficiently solved using the software Gurobi 12 with default parameter settings on a standard PC. This is an important practical result because it indicates what types of PCWSCPs can be solved optimally using commercial software without resorting to customized algorithms that do not guarantee optimums or even bounds on their solutions. Specifically, using 105 WSCP instances from the literature, 420 PCWSCP instances are generated with 105 instances at 80%, 85%, 90%, and 95% coverage respectively. It is shown that using Gurobi on a standard PC, optimal solutions could be obtained within 300 s (average of 17 s) for instances with up to 800 rows by 8000 columns by 2% density. This is about 86% of the 420 instances. As expected, in general, the execution time decreases as the row coverage decreases. Furthermore, it is shown that initializing (&amp;ldquo;warm-starting&amp;rdquo;) Gurobi with solutions from either a greedy, carousel greedy, or local branching algorithm results in no statistically significant difference in performance compared to Gurobi&amp;rsquo;s cold start. Hence, there is no advantage to &amp;ldquo;warm-starting&amp;rdquo; Gurobi with one of these common heuristic approaches when solving PCWSCPs. Finally, this is the first time the weighted version of the partial coverage set covering problem is discussed in the literature. All previous discussions dealt only with solution approaches specifically developed for the unit-cost version of the problem.</description>
	<pubDate>2026-03-12</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 68: Determining When Gurobi Generates Optimal Solutions for the Partial Coverage Weighted Set Covering Problem</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/3/68">doi: 10.3390/computation14030068</a></p>
	<p>Authors:
		Myung Soon Song
		Amber Kulp
		Yun Lu
		Francis J. Vasko
		</p>
	<p>The partial coverage weighted set covering problem (PCWSCP) allows for less than 100% of the rows to be satisfied in a weighted set covering problem (WSCP). This paper does not claim to contribute to operations research (OR) theory or methodology. Instead, it demonstrates that a large number of PCWSCPs based on WSCPs from the OR literature can be efficiently solved using the software Gurobi 12 with default parameter settings on a standard PC. This is an important practical result because it indicates what types of PCWSCPs can be solved optimally using commercial software without resorting to customized algorithms that do not guarantee optimums or even bounds on their solutions. Specifically, using 105 WSCP instances from the literature, 420 PCWSCP instances are generated with 105 instances at 80%, 85%, 90%, and 95% coverage respectively. It is shown that using Gurobi on a standard PC, optimal solutions could be obtained within 300 s (average of 17 s) for instances with up to 800 rows by 8000 columns by 2% density. This is about 86% of the 420 instances. As expected, in general, the execution time decreases as the row coverage decreases. Furthermore, it is shown that initializing (&amp;ldquo;warm-starting&amp;rdquo;) Gurobi with solutions from either a greedy, carousel greedy, or local branching algorithm results in no statistically significant difference in performance compared to Gurobi&amp;rsquo;s cold start. Hence, there is no advantage to &amp;ldquo;warm-starting&amp;rdquo; Gurobi with one of these common heuristic approaches when solving PCWSCPs. Finally, this is the first time the weighted version of the partial coverage set covering problem is discussed in the literature. All previous discussions dealt only with solution approaches specifically developed for the unit-cost version of the problem.</p>
	]]></content:encoded>

	<dc:title>Determining When Gurobi Generates Optimal Solutions for the Partial Coverage Weighted Set Covering Problem</dc:title>
			<dc:creator>Myung Soon Song</dc:creator>
			<dc:creator>Amber Kulp</dc:creator>
			<dc:creator>Yun Lu</dc:creator>
			<dc:creator>Francis J. Vasko</dc:creator>
		<dc:identifier>doi: 10.3390/computation14030068</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-03-12</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-03-12</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>68</prism:startingPage>
		<prism:doi>10.3390/computation14030068</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/3/68</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/3/67">

	<title>Computation, Vol. 14, Pages 67: Performance Analysis of the YOLO Object Detection Algorithm in Embedded Systems: Generated Code vs. Native Implementation</title>
	<link>https://www.mdpi.com/2079-3197/14/3/67</link>
	<description>This paper evaluates the current maturity of automatic code-generation workflows for deploying modern CNN-based object detectors on embedded GPU platforms. We compare a native pipeline against a code generation pipeline through a Model-Based Engineering (MBE) approach, using YOLOv8/YOLOv9 inference on NVIDIA Jetson Orin Nano and Jetson AGX Orin as representative edge-GPU workloads. We report detection-quality metrics (mAP, PR curves) and system-level metrics (latency distribution and initialization overhead) under a controlled single-class scenario based on a CARLA-generated sequence with frame-level annotations. Absolute accuracy and latency values are scenario-dependent and may vary under different camera optics, illumination, motion blur, sensor noise, occlusion patterns, and multi-class scene. Results quantify the performance gap between code generation and native pipelines and show that, for the evaluated workloads, the automated pipeline remains less competitive in both latency and accuracy. We discuss the implications of this gap for deployment workflows in safety-oriented domains, and we outline bottlenecks that should be addressed. The study is intended as a controlled traffic-light detection micro-benchmark and does not aim to validate full ADAS perception stacks.</description>
	<pubDate>2026-03-12</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 67: Performance Analysis of the YOLO Object Detection Algorithm in Embedded Systems: Generated Code vs. Native Implementation</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/3/67">doi: 10.3390/computation14030067</a></p>
	<p>Authors:
		Pablo Martínez Otero
		Alberto Tellaeche
		Mar Hernández Melero
		</p>
	<p>This paper evaluates the current maturity of automatic code-generation workflows for deploying modern CNN-based object detectors on embedded GPU platforms. We compare a native pipeline against a code generation pipeline through a Model-Based Engineering (MBE) approach, using YOLOv8/YOLOv9 inference on NVIDIA Jetson Orin Nano and Jetson AGX Orin as representative edge-GPU workloads. We report detection-quality metrics (mAP, PR curves) and system-level metrics (latency distribution and initialization overhead) under a controlled single-class scenario based on a CARLA-generated sequence with frame-level annotations. Absolute accuracy and latency values are scenario-dependent and may vary under different camera optics, illumination, motion blur, sensor noise, occlusion patterns, and multi-class scene. Results quantify the performance gap between code generation and native pipelines and show that, for the evaluated workloads, the automated pipeline remains less competitive in both latency and accuracy. We discuss the implications of this gap for deployment workflows in safety-oriented domains, and we outline bottlenecks that should be addressed. The study is intended as a controlled traffic-light detection micro-benchmark and does not aim to validate full ADAS perception stacks.</p>
	]]></content:encoded>

	<dc:title>Performance Analysis of the YOLO Object Detection Algorithm in Embedded Systems: Generated Code vs. Native Implementation</dc:title>
			<dc:creator>Pablo Martínez Otero</dc:creator>
			<dc:creator>Alberto Tellaeche</dc:creator>
			<dc:creator>Mar Hernández Melero</dc:creator>
		<dc:identifier>doi: 10.3390/computation14030067</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-03-12</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-03-12</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>67</prism:startingPage>
		<prism:doi>10.3390/computation14030067</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/3/67</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/3/66">

	<title>Computation, Vol. 14, Pages 66: Advanced Thick FGM Plate&amp;ndash;Cylindrical Shells in Supersonic Air Flow by Navier&amp;ndash;Stokes Equation Analytical&amp;ndash;Numerical Flow Model</title>
	<link>https://www.mdpi.com/2079-3197/14/3/66</link>
	<description>The thermal vibrations of a thick-walled functionally graded material (FGM) plate&amp;ndash;cylindrical shells in unsteady supersonic flow with a Navier&amp;ndash;Stokes equation analytical&amp;ndash;numerical flow model and third-order shear deformation theory (TSDT) displacement models are investigated. The aerodynamic pressure load can be provided by using the Navier&amp;ndash;Stokes equation analytical&amp;ndash;numerical flow model. The data regarding the effect of the aerodynamic pressure load and TSDT model of the motion equation on the thermal stress and displacement of the FGM plate&amp;ndash;cylindrical shells in unsteady supersonic flow are calculated with the generalized differential quadrature (GDQ) method. The Navier&amp;ndash;Stokes equation analytical&amp;ndash;numerical flow model, TSDT model, and advanced shear correction coefficient provide an additional effect on the values of displacement and stress.</description>
	<pubDate>2026-03-06</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 66: Advanced Thick FGM Plate&amp;ndash;Cylindrical Shells in Supersonic Air Flow by Navier&amp;ndash;Stokes Equation Analytical&amp;ndash;Numerical Flow Model</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/3/66">doi: 10.3390/computation14030066</a></p>
	<p>Authors:
		Chih-Chiang Hong
		</p>
	<p>The thermal vibrations of a thick-walled functionally graded material (FGM) plate&amp;ndash;cylindrical shells in unsteady supersonic flow with a Navier&amp;ndash;Stokes equation analytical&amp;ndash;numerical flow model and third-order shear deformation theory (TSDT) displacement models are investigated. The aerodynamic pressure load can be provided by using the Navier&amp;ndash;Stokes equation analytical&amp;ndash;numerical flow model. The data regarding the effect of the aerodynamic pressure load and TSDT model of the motion equation on the thermal stress and displacement of the FGM plate&amp;ndash;cylindrical shells in unsteady supersonic flow are calculated with the generalized differential quadrature (GDQ) method. The Navier&amp;ndash;Stokes equation analytical&amp;ndash;numerical flow model, TSDT model, and advanced shear correction coefficient provide an additional effect on the values of displacement and stress.</p>
	]]></content:encoded>

	<dc:title>Advanced Thick FGM Plate&amp;ndash;Cylindrical Shells in Supersonic Air Flow by Navier&amp;ndash;Stokes Equation Analytical&amp;ndash;Numerical Flow Model</dc:title>
			<dc:creator>Chih-Chiang Hong</dc:creator>
		<dc:identifier>doi: 10.3390/computation14030066</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-03-06</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-03-06</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>66</prism:startingPage>
		<prism:doi>10.3390/computation14030066</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/3/66</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/3/65">

	<title>Computation, Vol. 14, Pages 65: Exploring a Family-Based Approach as a Control Strategy for Gastric Ulcers and Gastric Cancer: A Mathematical Modeling Approach</title>
	<link>https://www.mdpi.com/2079-3197/14/3/65</link>
	<description>This study formulates a deterministic model to assess the effect of a family-based control and management (FBCM) strategy against the transmission of Helicobacter pylori infection and its consequent development of gastric ulcers and gastric cancer. The model includes nine epidemiological compartments to model disease transmission and contact epidemiology between susceptible and infected individuals. In the model analysis, we compute positivity, the invariant region, equilibria, stabilities, and bifurcation analysis. We calculate the control reproduction number R0 and demonstrate that the model has a unique disease-free equilibrium (DFE) and an endemic equilibrium point (EEP) that are locally and globally stable for R0&amp;lt;1 and R0&amp;gt;1, respectively. We perform a thorough mathematical analysis and validate the model by fitting it to real data on gastric cancer cases recorded at Meru Teaching and Referral Hospital, Kenya. The best numerical results are achieved when we combine both preventive measures (sensitization and a family-based approach) and curative measures (prompt treatment and adherence), resulting in the greatest decrease in gastric ulcer and gastric cancer cases compared with a single intervention. This study shows that integrated household-level interventions can reduce transmission and prevent mild-to-severe disease progression through effective sensitization campaigns, high FBCM efficacy, effective gastric ulcer treatment, and adherence to drug protocols. The use of such strategies offers an effective means of reducing Helicobacter pylori-related gastric ulcers and gastric cancer outcomes, with important implications for public health control program design.</description>
	<pubDate>2026-03-05</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 65: Exploring a Family-Based Approach as a Control Strategy for Gastric Ulcers and Gastric Cancer: A Mathematical Modeling Approach</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/3/65">doi: 10.3390/computation14030065</a></p>
	<p>Authors:
		Glory Kawira Mutua
		Musyoka Kinyili
		Dominic Makaa Kitavi
		</p>
	<p>This study formulates a deterministic model to assess the effect of a family-based control and management (FBCM) strategy against the transmission of Helicobacter pylori infection and its consequent development of gastric ulcers and gastric cancer. The model includes nine epidemiological compartments to model disease transmission and contact epidemiology between susceptible and infected individuals. In the model analysis, we compute positivity, the invariant region, equilibria, stabilities, and bifurcation analysis. We calculate the control reproduction number R0 and demonstrate that the model has a unique disease-free equilibrium (DFE) and an endemic equilibrium point (EEP) that are locally and globally stable for R0&amp;lt;1 and R0&amp;gt;1, respectively. We perform a thorough mathematical analysis and validate the model by fitting it to real data on gastric cancer cases recorded at Meru Teaching and Referral Hospital, Kenya. The best numerical results are achieved when we combine both preventive measures (sensitization and a family-based approach) and curative measures (prompt treatment and adherence), resulting in the greatest decrease in gastric ulcer and gastric cancer cases compared with a single intervention. This study shows that integrated household-level interventions can reduce transmission and prevent mild-to-severe disease progression through effective sensitization campaigns, high FBCM efficacy, effective gastric ulcer treatment, and adherence to drug protocols. The use of such strategies offers an effective means of reducing Helicobacter pylori-related gastric ulcers and gastric cancer outcomes, with important implications for public health control program design.</p>
	]]></content:encoded>

	<dc:title>Exploring a Family-Based Approach as a Control Strategy for Gastric Ulcers and Gastric Cancer: A Mathematical Modeling Approach</dc:title>
			<dc:creator>Glory Kawira Mutua</dc:creator>
			<dc:creator>Musyoka Kinyili</dc:creator>
			<dc:creator>Dominic Makaa Kitavi</dc:creator>
		<dc:identifier>doi: 10.3390/computation14030065</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-03-05</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-03-05</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>65</prism:startingPage>
		<prism:doi>10.3390/computation14030065</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/3/65</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/3/64">

	<title>Computation, Vol. 14, Pages 64: Lyapunov-Based Synthesis of Self-Organizing Nonlinear Integrators for Stage Motion Control Under Parametric Uncertainty</title>
	<link>https://www.mdpi.com/2079-3197/14/3/64</link>
	<description>Linear integrators are traditionally used in motion control systems to compensate for static effects and suppress low-frequency disturbances. However, their use is inevitably accompanied by phase delays that limit the performance and robustness of control systems, especially in conditions of parametric uncertainty. In this regard, nonlinear integrators have been considered for several decades as a promising alternative that can weaken phase constraints and improve the quality of transients. In this paper, the concept of nonlinear integrators is reinterpreted in the context of self-organizing motion control of precision stages. In contrast to traditional approaches focused primarily on frequency analysis and the method of describing the function, a method is proposed for the synthesis of a self-organizing control system for nonlinear SISO objects based on catastrophe theory, namely in the class of elliptical dynamics with the property of structural stability. The control action is formed in such a way that transitions between stable modes occur due to bifurcation-conditioned self-organization, without using external switching logic. To ensure strict analytical guarantees of stability, the Lyapunov gradient-velocity vector function method is used, which guarantees aperiodic robust stability, suppression of oscillatory and chaotic modes, as well as monotonic convergence of trajectories under conditions of parameter uncertainty. The parameters of the nonlinear integrator are adapted using Self-Organizing Maps (SOM), while any parameter changes are allowed only within the regions that meet the conditions of Lyapunov stability. This approach ensures the alignment of analytical and data-oriented methods without violating the structural stability of the system. 
The results of numerical experiments demonstrate the superiority of the proposed method in comparison with classical linear and adaptive regulators in problems of controlling the movement of stages, especially near bifurcation boundaries and with significant parametric uncertainty. The results obtained confirm that the integration of nonlinear integrators with catastrophe theory and self-organization mechanisms forms a promising basis for the creation of robust and high-precision motion control systems of a new generation.</description>
	<pubDate>2026-03-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 64: Lyapunov-Based Synthesis of Self-Organizing Nonlinear Integrators for Stage Motion Control Under Parametric Uncertainty</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/3/64">doi: 10.3390/computation14030064</a></p>
	<p>Authors:
		Raigul Tuleuova
		Nurgul Shazhdekeyeva
		Sharbat Nurzhanova
		Aigul Myrzasheva
		Saltanat Sharmukhanbet
		Maxot Rakhmetov
		Makhatova Valentina
		Lyailya Kurmangaziyeva
		</p>
	<p>Linear integrators are traditionally used in motion control systems to compensate for static effects and suppress low-frequency disturbances. However, their use is inevitably accompanied by phase delays that limit the performance and robustness of control systems, especially in conditions of parametric uncertainty. In this regard, nonlinear integrators have been considered for several decades as a promising alternative that can weaken phase constraints and improve the quality of transients. In this paper, the concept of nonlinear integrators is reinterpreted in the context of self-organizing motion control of precision stages. In contrast to traditional approaches focused primarily on frequency analysis and the method of describing the function, a method is proposed for the synthesis of a self-organizing control system for nonlinear SISO objects based on catastrophe theory, namely in the class of elliptical dynamics with the property of structural stability. The control action is formed in such a way that transitions between stable modes occur due to bifurcation-conditioned self-organization, without using external switching logic. To ensure strict analytical guarantees of stability, the Lyapunov gradient-velocity vector function method is used, which guarantees aperiodic robust stability, suppression of oscillatory and chaotic modes, as well as monotonic convergence of trajectories under conditions of parameter uncertainty. The parameters of the nonlinear integrator are adapted using Self-Organizing Maps (SOM), while any parameter changes are allowed only within the regions that meet the conditions of Lyapunov stability. This approach ensures the alignment of analytical and data-oriented methods without violating the structural stability of the system. 
The results of numerical experiments demonstrate the superiority of the proposed method in comparison with classical linear and adaptive regulators in problems of controlling the movement of stages, especially near bifurcation boundaries and with significant parametric uncertainty. The results obtained confirm that the integration of nonlinear integrators with catastrophe theory and self-organization mechanisms forms a promising basis for the creation of robust and high-precision motion control systems of a new generation.</p>
	]]></content:encoded>

	<dc:title>Lyapunov-Based Synthesis of Self-Organizing Nonlinear Integrators for Stage Motion Control Under Parametric Uncertainty</dc:title>
			<dc:creator>Raigul Tuleuova</dc:creator>
			<dc:creator>Nurgul Shazhdekeyeva</dc:creator>
			<dc:creator>Sharbat Nurzhanova</dc:creator>
			<dc:creator>Aigul Myrzasheva</dc:creator>
			<dc:creator>Saltanat Sharmukhanbet</dc:creator>
			<dc:creator>Maxot Rakhmetov</dc:creator>
			<dc:creator>Makhatova Valentina</dc:creator>
			<dc:creator>Lyailya Kurmangaziyeva</dc:creator>
		<dc:identifier>doi: 10.3390/computation14030064</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-03-03</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-03-03</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>64</prism:startingPage>
		<prism:doi>10.3390/computation14030064</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/3/64</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/3/63">

	<title>Computation, Vol. 14, Pages 63: Surrogate-Based Multi-Objective Bayesian Optimization for Automated Parameter Identification in 3D Mesoscale Concrete Fatigue Modeling</title>
	<link>https://www.mdpi.com/2079-3197/14/3/63</link>
	<description>Prediction of fatigue failure in concrete structures remains a major challenge due to progressive material degradation. Reliable prediction, therefore, requires modeling the 3D heterogeneous microstructure of concrete to explain the underlying mechanisms governing fatigue failure. While such mesoscale models can reliably predict the fatigue-induced fracture mechanisms, the identification of the associated material parameters remains a significant challenge due to the high-dimensional parameter space introduced by the model. The key challenge addressed in this study is to capture microcrack initiation and coalescence under fatigue loading, using a model capable of representing fracture process: crack initiation, crack propagation, and final failure. Firstly, concrete domain is discretized into Voronoi cells, enabling explicit representation of aggregates and mortar by randomly assigning cohesive links connecting Voronoi cells as aggregates and mortar. After this, mortar links are modeled as coupled damage&amp;ndash;plasticity 3D Timoshenko beam elements with nonlinear kinematic hardening and isotropic softening introduced using embedded discontinuity formulation, enabling fracture Modes I&amp;ndash;III, whereas aggregate links are modeled as elastic 3D Timoshenko beam elements. The model efficiency is additionally reinforced by using surrogate model approach, with corresponding material parameter identification carried out by multi-objective Bayesian optimization framework to reproduce experimental results. The performance of the proposed model is illustrated by reproducing experimental results obtained from concrete cube compression test and three-point bending test under low-cycle fatigue loading, where the errors between experimental and numerical results are reduced by 82% (stress) and 88% (energy) for the cube test and by 86% (force) and 93% (energy) for the bending test, relative to the initial dataset error.</description>
	<pubDate>2026-03-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 63: Surrogate-Based Multi-Objective Bayesian Optimization for Automated Parameter Identification in 3D Mesoscale Concrete Fatigue Modeling</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/3/63">doi: 10.3390/computation14030063</a></p>
	<p>Authors:
		Himanshu Rana
		Adnan Ibrahimbegovic
		</p>
	<p>Prediction of fatigue failure in concrete structures remains a major challenge due to progressive material degradation. Reliable prediction, therefore, requires modeling the 3D heterogeneous microstructure of concrete to explain the underlying mechanisms governing fatigue failure. While such mesoscale models can reliably predict the fatigue-induced fracture mechanisms, the identification of the associated material parameters remains a significant challenge due to the high-dimensional parameter space introduced by the model. The key challenge addressed in this study is to capture microcrack initiation and coalescence under fatigue loading, using a model capable of representing fracture process: crack initiation, crack propagation, and final failure. Firstly, concrete domain is discretized into Voronoi cells, enabling explicit representation of aggregates and mortar by randomly assigning cohesive links connecting Voronoi cells as aggregates and mortar. After this, mortar links are modeled as coupled damage&amp;amp;ndash;plasticity 3D Timoshenko beam elements with nonlinear kinematic hardening and isotropic softening introduced using embedded discontinuity formulation, enabling fracture Modes I&amp;amp;ndash;III, whereas aggregate links are modeled as elastic 3D Timoshenko beam elements. The model efficiency is additionally reinforced by using surrogate model approach, with corresponding material parameter identification carried out by multi-objective Bayesian optimization framework to reproduce experimental results. The performance of the proposed model is illustrated by reproducing experimental results obtained from concrete cube compression test and three-point bending test under low-cycle fatigue loading, where the errors between experimental and numerical results are reduced by 82% (stress) and 88% (energy) for the cube test and by 86% (force) and 93% (energy) for the bending test, relative to the initial dataset error.</p>
	]]></content:encoded>

	<dc:title>Surrogate-Based Multi-Objective Bayesian Optimization for Automated Parameter Identification in 3D Mesoscale Concrete Fatigue Modeling</dc:title>
			<dc:creator>Himanshu Rana</dc:creator>
			<dc:creator>Adnan Ibrahimbegovic</dc:creator>
		<dc:identifier>doi: 10.3390/computation14030063</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-03-02</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-03-02</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>63</prism:startingPage>
		<prism:doi>10.3390/computation14030063</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/3/63</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/3/62">

	<title>Computation, Vol. 14, Pages 62: MSB-UNet: A Multi-Scale Bifurcation U-Net Architecture for Precise Segmentation of Breast Cancer in Histopathology Images</title>
	<link>https://www.mdpi.com/2079-3197/14/3/62</link>
	<description>Accurate segmentation of breast cancer regions in histopathological images is critical for advancing computer-aided diagnostic systems, yet challenges persist due to heterogeneous tissue structures, staining variations, and the need to capture features across multiple scales. This study introduces MSB-UNet, a novel Multi-Scale Bifurcated U-Net architecture designed to address these challenges through a dual-pathway encoder&amp;amp;ndash;decoder framework that processes images at multiple resolutions simultaneously. By integrating a bifurcated encoder with a Feature Fusion Module, MSB-UNet effectively captures fine-grained cellular details and broader tissue-level patterns. MSB-UNet is formulated as a binary segmentation framework (tumor vs. outside region of interest), producing a two-channel probability map via a channel-wise Softmax output. Evaluated on a publicly available breast cancer histopathology dataset, MSB-UNet achieves a Dice Similarity Coefficient (DSC) of 91.3% and a mean Intersection over Union (mIoU) of 84.4%, outperforming state-of-the-art segmentation models. The architecture demonstrates better results compared to other baseline methods and has the potential to enhance automated diagnostic tools for breast cancer histopathology.</description>
	<pubDate>2026-03-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 62: MSB-UNet: A Multi-Scale Bifurcation U-Net Architecture for Precise Segmentation of Breast Cancer in Histopathology Images</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/3/62">doi: 10.3390/computation14030062</a></p>
	<p>Authors:
		Arda Yunianta
		</p>
	<p>Accurate segmentation of breast cancer regions in histopathological images is critical for advancing computer-aided diagnostic systems, yet challenges persist due to heterogeneous tissue structures, staining variations, and the need to capture features across multiple scales. This study introduces MSB-UNet, a novel Multi-Scale Bifurcated U-Net architecture designed to address these challenges through a dual-pathway encoder&amp;amp;ndash;decoder framework that processes images at multiple resolutions simultaneously. By integrating a bifurcated encoder with a Feature Fusion Module, MSB-UNet effectively captures fine-grained cellular details and broader tissue-level patterns. MSB-UNet is formulated as a binary segmentation framework (tumor vs. outside region of interest), producing a two-channel probability map via a channel-wise Softmax output. Evaluated on a publicly available breast cancer histopathology dataset, MSB-UNet achieves a Dice Similarity Coefficient (DSC) of 91.3% and a mean Intersection over Union (mIoU) of 84.4%, outperforming state-of-the-art segmentation models. The architecture demonstrates better results compared to other baseline methods and has the potential to enhance automated diagnostic tools for breast cancer histopathology.</p>
	]]></content:encoded>

	<dc:title>MSB-UNet: A Multi-Scale Bifurcation U-Net Architecture for Precise Segmentation of Breast Cancer in Histopathology Images</dc:title>
			<dc:creator>Arda Yunianta</dc:creator>
		<dc:identifier>doi: 10.3390/computation14030062</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-03-02</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-03-02</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>62</prism:startingPage>
		<prism:doi>10.3390/computation14030062</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/3/62</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/3/61">

	<title>Computation, Vol. 14, Pages 61: Modeling a High-Efficiency BMS for Light Electromobility and Energy Storage in Critical Environments</title>
	<link>https://www.mdpi.com/2079-3197/14/3/61</link>
	<description>Recent advances in energy storage systems and in increasingly efficient, safe, and energy-dense cell chemistries have driven the need for commercial Battery Management System (BMS) architectures with greater control, data acquisition, and communication capabilities, primarily oriented towards customization. This demand introduces a significant change in how electrical systems are modeled and simulated when they integrate active electrochemical elements such as lithium-ion cells. This work presents the development and modeling of a BMS for critical and high-efficiency applications, based on active balancing techniques and incorporating an additional safety stage to respond to failures when charging LiFePO4 cells. The electrochemical model was built using an equivalent RLC circuit and RC pairs to represent the Thevenin response of the cell. For the simulation of active balancers, LTspice was employed, while charging and discharging processes and their effects on state of charge (SOC) and state of health (SOH) were complemented through analysis in MATLAB R2024a. The proposed approach offers an efficient tool for evaluating cell dynamics and validating battery management strategies in demanding scenarios. While the current approach prioritizes the individual modeling of electrical conversion systems, our framework presents an innovative multisystem macromodel, where not only is the electrical behavior simulated but also the control, efficiency, and safety of the system are determined, prioritizing reproducibility through SPICE tools.</description>
	<pubDate>2026-03-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 61: Modeling a High-Efficiency BMS for Light Electromobility and Energy Storage in Critical Environments</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/3/61">doi: 10.3390/computation14030061</a></p>
	<p>Authors:
		Manuel J. Pasion-Fuentes
		Mauricio P. Galvez-Legua
		Diego E. Galvez-Aranda
		</p>
	<p>Recent advances in energy storage systems and in increasingly efficient, safe, and energy-dense cell chemistries have driven the need for commercial Battery Management System (BMS) architectures with greater control, data acquisition, and communication capabilities, primarily oriented towards customization. This demand introduces a significant change in how electrical systems are modeled and simulated when they integrate active electrochemical elements such as lithium-ion cells. This work presents the development and modeling of a BMS for critical and high-efficiency applications, based on active balancing techniques and incorporating an additional safety stage to respond to failures when charging LiFePO4 cells. The electrochemical model was built using an equivalent RLC circuit and RC pairs to represent the Thevenin response of the cell. For the simulation of active balancers, LTspice was employed, while charging and discharging processes and their effects on state of charge (SOC) and state of health (SOH) were complemented through analysis in MATLAB R2024a. The proposed approach offers an efficient tool for evaluating cell dynamics and validating battery management strategies in demanding scenarios. While the current approach prioritizes the individual modeling of electrical conversion systems, our framework presents an innovative multisystem macromodel, where not only is the electrical behavior simulated but also the control, efficiency, and safety of the system are determined, prioritizing reproducibility through SPICE tools.</p>
	]]></content:encoded>

	<dc:title>Modeling a High-Efficiency BMS for Light Electromobility and Energy Storage in Critical Environments</dc:title>
			<dc:creator>Manuel J. Pasion-Fuentes</dc:creator>
			<dc:creator>Mauricio P. Galvez-Legua</dc:creator>
			<dc:creator>Diego E. Galvez-Aranda</dc:creator>
		<dc:identifier>doi: 10.3390/computation14030061</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-03-02</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-03-02</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>61</prism:startingPage>
		<prism:doi>10.3390/computation14030061</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/3/61</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/3/57">

	<title>Computation, Vol. 14, Pages 57: Hybrid Wasserstein Distance: An Approximation for Optimal Transport Distances</title>
	<link>https://www.mdpi.com/2079-3197/14/3/57</link>
	<description>Projection-based variants of optimal transport, such as the Sliced Wasserstein (SW) and its extensions, have become popular alternatives to classical Wasserstein distances due to their scalability and analytical tractability. However, most of these methods rely on independently sampled random projections, which often fail to capture semantically meaningful directions, leading to inefficiencies and limited expressiveness, especially in high-dimensional settings. In this work, we propose the Hybrid Merging Projection Wasserstein (HW) distance, a novel and efficient alternative that addresses these limitations by combining data-driven and random projections in a principled way. At the core of HW is the Linear Merging Projection (LMP), a new projection technique designed to minimize between-class variance, thereby promoting smooth alignment between distributions. HW incorporates random directions as well to achieve a balance between structural awareness and projection diversity. We evaluate HW across a range of synthetic and real-world benchmarks, including color transfer and distribution alignment tasks, to demonstrate the favorable performance of the proposed HW.</description>
	<pubDate>2026-03-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 57: Hybrid Wasserstein Distance: An Approximation for Optimal Transport Distances</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/3/57">doi: 10.3390/computation14030057</a></p>
	<p>Authors:
		Sara Nassar
		Rachid Hedjam
		Samir Brahim Belhaouari
		</p>
	<p>Projection-based variants of optimal transport, such as the Sliced Wasserstein (SW) and its extensions, have become popular alternatives to classical Wasserstein distances due to their scalability and analytical tractability. However, most of these methods rely on independently sampled random projections, which often fail to capture semantically meaningful directions, leading to inefficiencies and limited expressiveness, especially in high-dimensional settings. In this work, we propose the Hybrid Merging Projection Wasserstein (HW) distance, a novel and efficient alternative that addresses these limitations by combining data-driven and random projections in a principled way. At the core of HW is the Linear Merging Projection (LMP), a new projection technique designed to minimize between-class variance, thereby promoting smooth alignment between distributions. HW incorporates random directions as well to achieve a balance between structural awareness and projection diversity. We evaluate HW across a range of synthetic and real-world benchmarks, including color transfer and distribution alignment tasks, to demonstrate the favorable performance of the proposed HW.</p>
	]]></content:encoded>

	<dc:title>Hybrid Wasserstein Distance: An Approximation for Optimal Transport Distances</dc:title>
			<dc:creator>Sara Nassar</dc:creator>
			<dc:creator>Rachid Hedjam</dc:creator>
			<dc:creator>Samir Brahim Belhaouari</dc:creator>
		<dc:identifier>doi: 10.3390/computation14030057</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-03-02</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-03-02</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>57</prism:startingPage>
		<prism:doi>10.3390/computation14030057</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/3/57</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/3/60">

	<title>Computation, Vol. 14, Pages 60: A Novel Approach to Mitigate Blade-to-Blade Interactions in Vertical-Axis Wind Turbines Suitable for Urban Areas</title>
	<link>https://www.mdpi.com/2079-3197/14/3/60</link>
	<description>With the growth of urban zones and the increasing need for energy, the use of renewable energy solutions in the built environment becomes a must. Due to their small size and the ability to capture wind from any direction, vertical-axis wind turbines are an alternative to conventional wind energy generators. However, the use of these turbines in the built environment faces difficulties due to performance inefficiencies, particularly because of the intricate aerodynamic characteristics of the blades. This work investigates a method for increasing the efficiency of VAWTs by addressing blade-to-blade interactions using Computational Fluid Dynamics simulations. The research aims to improve turbine design for urban locations, which motivates the application context of the study. The present numerical model employs a uniform inflow to isolate blade&amp;amp;ndash;blade interaction mechanisms under controlled conditions. The paper presents a design that minimizes aerodynamic losses, decreases turbulence-induced drag, and increases overall energy capture efficiency by modeling different blade configurations and their interactions. The performance of four asymmetric configurations of blade chord and radius was numerically studied and compared to a symmetric configuration.</description>
	<pubDate>2026-03-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 60: A Novel Approach to Mitigate Blade-to-Blade Interactions in Vertical-Axis Wind Turbines Suitable for Urban Areas</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/3/60">doi: 10.3390/computation14030060</a></p>
	<p>Authors:
		Ion Mălăel
		</p>
	<p>With the growth of urban zones and the increasing need for energy, the use of renewable energy solutions in the built environment becomes a must. Due to their small size and the ability to capture wind from any direction, vertical-axis wind turbines are an alternative to conventional wind energy generators. However, the use of these turbines in the built environment faces difficulties due to performance inefficiencies, particularly because of the intricate aerodynamic characteristics of the blades. This work investigates a method for increasing the efficiency of VAWTs by addressing blade-to-blade interactions using Computational Fluid Dynamics simulations. The research aims to improve turbine design for urban locations, which motivates the application context of the study. The present numerical model employs a uniform inflow to isolate blade&amp;amp;ndash;blade interaction mechanisms under controlled conditions. The paper presents a design that minimizes aerodynamic losses, decreases turbulence-induced drag, and increases overall energy capture efficiency by modeling different blade configurations and their interactions. The performance of four asymmetric configurations of blade chord and radius was numerically studied and compared to a symmetric configuration.</p>
	]]></content:encoded>

	<dc:title>A Novel Approach to Mitigate Blade-to-Blade Interactions in Vertical-Axis Wind Turbines Suitable for Urban Areas</dc:title>
			<dc:creator>Ion Mălăel</dc:creator>
		<dc:identifier>doi: 10.3390/computation14030060</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-03-02</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-03-02</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>60</prism:startingPage>
		<prism:doi>10.3390/computation14030060</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/3/60</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/3/59">

	<title>Computation, Vol. 14, Pages 59: Incremental Recall: An Efficient Method for Estimating Egocentric Network Density</title>
	<link>https://www.mdpi.com/2079-3197/14/3/59</link>
	<description>Accurate estimation of network density is central to egocentric social network analysis, yet existing survey-based methods require researchers to balance accuracy against participant burden and systematic recall bias. Traditional approaches, such as fixed-list name generators, tend to overrepresent salient ties. Although the more recent random sampling method yields better accuracy, it relies on exhaustive free recall, which can be cognitively demanding and impractical for researchers. In this study, we introduce and evaluate an alternative approach&amp;amp;mdash;incremental recall&amp;amp;mdash;that structures alter nomination across relationship categories to improve coverage of differing tie strengths while reducing respondent burden. Using a large-scale Monte Carlo simulation encompassing over 9 million egocentric networks, we compare incremental recall against traditional fixed-list recall and random sampling across a wide range of network sizes, compositions, and recall bias assumptions. Results show that the incremental recall method consistently outperforms traditional fixed-list recall and performs comparably to or better than random sampling under unbiased and moderately biased recall conditions. Performance advantages persist even when respondents are unable to provide the full number of alters specified by design. We further validate these findings using empirical egocentric network data from 103 participants. Treating observed networks as proxy ground truths, empirical results closely mirror the simulation patterns, confirming the robustness of incremental recall under real-world reporting conditions. These findings demonstrate that incremental recall addresses a central practical challenge in egocentric social network research: balancing feasibility and accuracy in density estimation. The proposed method maintains strong performance while substantially reducing respondent burden and simplifying administration for applied studies. 
For researchers conducting large-scale surveys where network density is one of several measures, incremental recall provides a practical and validated alternative to exhaustive recall that maintains robustness to realistic reporting biases.</description>
	<pubDate>2026-03-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 59: Incremental Recall: An Efficient Method for Estimating Egocentric Network Density</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/3/59">doi: 10.3390/computation14030059</a></p>
	<p>Authors:
		Chad A. Davis
		Caimiao Liu
		</p>
	<p>Accurate estimation of network density is central to egocentric social network analysis, yet existing survey-based methods require researchers to balance accuracy against participant burden and systematic recall bias. Traditional approaches, such as fixed-list name generators, tend to overrepresent salient ties. Although the more recent random sampling method yields better accuracy, it relies on exhaustive free recall, which can be cognitively demanding and impractical for researchers. In this study, we introduce and evaluate an alternative approach&amp;amp;mdash;incremental recall&amp;amp;mdash;that structures alter nomination across relationship categories to improve coverage of differing tie strengths while reducing respondent burden. Using a large-scale Monte Carlo simulation encompassing over 9 million egocentric networks, we compare incremental recall against traditional fixed-list recall and random sampling across a wide range of network sizes, compositions, and recall bias assumptions. Results show that the incremental recall method consistently outperforms traditional fixed-list recall and performs comparably to or better than random sampling under unbiased and moderately biased recall conditions. Performance advantages persist even when respondents are unable to provide the full number of alters specified by design. We further validate these findings using empirical egocentric network data from 103 participants. Treating observed networks as proxy ground truths, empirical results closely mirror the simulation patterns, confirming the robustness of incremental recall under real-world reporting conditions. These findings demonstrate that incremental recall addresses a central practical challenge in egocentric social network research: balancing feasibility and accuracy in density estimation. The proposed method maintains strong performance while substantially reducing respondent burden and simplifying administration for applied studies. 
For researchers conducting large-scale surveys where network density is one of several measures, incremental recall provides a practical and validated alternative to exhaustive recall that maintains robustness to realistic reporting biases.</p>
	]]></content:encoded>

	<dc:title>Incremental Recall: An Efficient Method for Estimating Egocentric Network Density</dc:title>
			<dc:creator>Chad A. Davis</dc:creator>
			<dc:creator>Caimiao Liu</dc:creator>
		<dc:identifier>doi: 10.3390/computation14030059</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-03-02</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-03-02</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>59</prism:startingPage>
		<prism:doi>10.3390/computation14030059</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/3/59</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/3/58">

	<title>Computation, Vol. 14, Pages 58: Application of the Curvilinear Coordinate Method for the Numerical Solution of the Navier&amp;ndash;Stokes Equations in Domains with Complex Boundaries</title>
	<link>https://www.mdpi.com/2079-3197/14/3/58</link>
	<description>In this paper, the coordinate transformation method is applied to the Navier&amp;amp;ndash;Stokes equations expressed in terms of the stream function and vorticity formulation. An elliptical grid generator is used to construct an orthogonal curvilinear grid within an irregular domain of complex geometry, mapping the physical region onto a computational square domain. The developed algorithm is capable of generating both orthogonal and general curvilinear grids. The finite-difference scheme of the Navier&amp;amp;ndash;Stokes system in arbitrary orthogonal curvilinear coordinates is then solved numerically on this grid using the alternating direction method. Numerical simulations of the Roach problem are conducted at low Reynolds numbers and on grids of varying resolutions. The obtained results are compared with the reference studies of Napolitano and Orlandi, showing satisfactory agreement with the data reported by 16 other research groups. Overall, the proposed method enables efficient numerical simulation of laminar flows in domains with complex geometry. The developed approach provides high accuracy and stability and can be effectively used for the numerical analysis of applied fluid dynamics problems. Furthermore, the methodology described in this work may serve as a foundation for future studies focused on improving computational efficiency and expanding the applicability of curvilinear grid techniques in modern fluid dynamics.</description>
	<pubDate>2026-03-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 58: Application of the Curvilinear Coordinate Method for the Numerical Solution of the Navier&amp;ndash;Stokes Equations in Domains with Complex Boundaries</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/3/58">doi: 10.3390/computation14030058</a></p>
	<p>Authors:
		Nurlan Temirbekov
		Gayaz Khakimzyanov
		Ainur Kerimakyn
		</p>
	<p>In this paper, the coordinate transformation method is applied to the Navier&amp;amp;ndash;Stokes equations expressed in terms of the stream function and vorticity formulation. An elliptical grid generator is used to construct an orthogonal curvilinear grid within an irregular domain of complex geometry, mapping the physical region onto a computational square domain. The developed algorithm is capable of generating both orthogonal and general curvilinear grids. The finite-difference scheme of the Navier&amp;amp;ndash;Stokes system in arbitrary orthogonal curvilinear coordinates is then solved numerically on this grid using the alternating direction method. Numerical simulations of the Roach problem are conducted at low Reynolds numbers and on grids of varying resolutions. The obtained results are compared with the reference studies of Napolitano and Orlandi, showing satisfactory agreement with the data reported by 16 other research groups. Overall, the proposed method enables efficient numerical simulation of laminar flows in domains with complex geometry. The developed approach provides high accuracy and stability and can be effectively used for the numerical analysis of applied fluid dynamics problems. Furthermore, the methodology described in this work may serve as a foundation for future studies focused on improving computational efficiency and expanding the applicability of curvilinear grid techniques in modern fluid dynamics.</p>
	]]></content:encoded>

	<dc:title>Application of the Curvilinear Coordinate Method for the Numerical Solution of the Navier&amp;ndash;Stokes Equations in Domains with Complex Boundaries</dc:title>
			<dc:creator>Nurlan Temirbekov</dc:creator>
			<dc:creator>Gayaz Khakimzyanov</dc:creator>
			<dc:creator>Ainur Kerimakyn</dc:creator>
		<dc:identifier>doi: 10.3390/computation14030058</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-03-02</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-03-02</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>58</prism:startingPage>
		<prism:doi>10.3390/computation14030058</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/3/58</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/3/56">

	<title>Computation, Vol. 14, Pages 56: Enhancing Short-Term Wind Energy Forecasting with XGBoost and Conformal Prediction for Robust Uncertainty Quantification</title>
	<link>https://www.mdpi.com/2079-3197/14/3/56</link>
	<description>This paper presents probabilistic wind energy forecasting using quantile regression averaging combined with a conformal prediction modelling framework. The study uses data from Eskom, South Africa&amp;amp;rsquo;s power utility company. The data is from April 2019 to November 2023. A partial linear additive quantile regression (PLAQR) averaging method is used to combine forecasts from two competing forecasting models: eXtreme Gradient Boosting (XGBoost) and Principal Component Regression (PCR). To compare the predictive abilities of the models, two data splits are used: 80%, 10% and 10% for the first set, and 85%, 10% and 5% for the second set, for training, validation and testing, respectively. Empirical results suggest that the combined predictions from PLAQR perform better than the individual models, significantly improving calibration and accuracy. The proposed combination has the smallest root mean square error (RMSE) and the highest probability of change in direction (POCID). The combination captures nonlinearities and produces well-calibrated probabilistic results. Probability integral transform histograms validate this. This performance gain reflected the importance of data volume. This is reinforced by the fact that the PLAQR model, which combines the benefits of tree-based approaches and linear models, is a robust modelling approach for reliable renewable energy forecasting. Future research directions should consider more varied ensembles.</description>
	<pubDate>2026-03-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 56: Enhancing Short-Term Wind Energy Forecasting with XGBoost and Conformal Prediction for Robust Uncertainty Quantification</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/3/56">doi: 10.3390/computation14030056</a></p>
	<p>Authors:
		Rabelani Innocent Nthangeni
		Caston Sigauke
		Thakhani Ravele
		Thinawanga Hangwani Tshisikhawe
		</p>
	<p>This paper presents probabilistic wind energy forecasting using quantile regression averaging combined with a conformal prediction modelling framework. The study uses data from Eskom, South Africa&rsquo;s power utility company. The data is from April 2019 to November 2023. A partial linear additive quantile regression (PLAQR) averaging method is used to combine forecasts from two competing forecasting models: eXtreme Gradient Boosting (XGBoost) and Principal Component Regression (PCR). To compare the predictive abilities of the models, two data splits are used: 80%, 10% and 10% for the first set, and 85%, 10% and 5% for the second set, for training, validation and testing, respectively. Empirical results suggest that the combined predictions from PLAQR perform better than the individual models, significantly improving calibration and accuracy. The proposed combination has the smallest root mean square error (RMSE) and the highest probability of change in direction (POCID). The combination captures nonlinearities and produces well-calibrated probabilistic results. Probability integral transform histograms validate this. This performance gain reflected the importance of data volume. This is reinforced by the fact that the PLAQR model, which combines the benefits of tree-based approaches and linear models, is a robust modelling approach for reliable renewable energy forecasting. Future research directions should consider more varied ensembles.</p>
	]]></content:encoded>

	<dc:title>Enhancing Short-Term Wind Energy Forecasting with XGBoost and Conformal Prediction for Robust Uncertainty Quantification</dc:title>
			<dc:creator>Rabelani Innocent Nthangeni</dc:creator>
			<dc:creator>Caston Sigauke</dc:creator>
			<dc:creator>Thakhani Ravele</dc:creator>
			<dc:creator>Thinawanga Hangwani Tshisikhawe</dc:creator>
		<dc:identifier>doi: 10.3390/computation14030056</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-03-01</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-03-01</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>56</prism:startingPage>
		<prism:doi>10.3390/computation14030056</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/3/56</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/2/55">

	<title>Computation, Vol. 14, Pages 55: Remaining Useful Life Prediction of Fracturing Truck Valve Bodies Based on the CB2-RUL Algorithm</title>
	<link>https://www.mdpi.com/2079-3197/14/2/55</link>
	<description>The triplex reciprocating drilling pump is a critical piece of equipment in drilling platforms, and the operational condition of its core component&amp;mdash;the valve body&amp;mdash;directly affects the pump&amp;rsquo;s performance and the stability of the entire system. Therefore, accurate prediction of the valve body&amp;rsquo;s Remaining Useful Life (RUL) is of great significance for ensuring the safe operation of drilling pumps and enabling predictive maintenance. However, achieving this goal involves two major challenges: (1) The complex degradation process of the valve body, which involves strong impact loads, nonlinear wear, and coupling effects between fluid and mechanical systems, makes it difficult to establish a stable degradation model and achieve accurate RUL prediction. (2) There is a lack of publicly available real-world datasets for research purposes. To address these challenges, we propose CEEMDAN-BWO-optimized Bidirectional LSTM for Remaining Useful Life prediction (CB2-RUL). The method first applies Complete Ensemble Empirical Mode Decomposition with Adaptive Noise (CEEMDAN) to the raw vibration signals for decomposition and denoising, thereby improving signal stationarity and enhancing feature representation. Next, the Black Widow Optimization (BWO) algorithm is employed to automatically tune key hyperparameters of a Bidirectional Long Short-Term Memory (BiLSTM) network. Finally, the optimized BiLSTM captures the temporal evolution patterns of valve-body degradation and produces high-accuracy RUL estimates. Finally, to verify the effectiveness of the proposed approach, we constructed a real-world dataset named VB-Lifecycle, which comprises ten valve bodies from different positions within the equipment and spans the complete lifecycle from pristine condition to failure. Extensive experiments conducted on the VB-Lifecycle dataset demonstrate that the proposed method provides accurate RUL prediction for valve bodies.</description>
	<pubDate>2026-02-23</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 55: Remaining Useful Life Prediction of Fracturing Truck Valve Bodies Based on the CB2-RUL Algorithm</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/2/55">doi: 10.3390/computation14020055</a></p>
	<p>Authors:
		Xinyue Chen
		Jishun Ren
		Yang Wang
		Jiquan He
		Xuyou Guo
		Gantailai Ye
		</p>
	<p>The triplex reciprocating drilling pump is a critical piece of equipment in drilling platforms, and the operational condition of its core component&mdash;the valve body&mdash;directly affects the pump&rsquo;s performance and the stability of the entire system. Therefore, accurate prediction of the valve body&rsquo;s Remaining Useful Life (RUL) is of great significance for ensuring the safe operation of drilling pumps and enabling predictive maintenance. However, achieving this goal involves two major challenges: (1) The complex degradation process of the valve body, which involves strong impact loads, nonlinear wear, and coupling effects between fluid and mechanical systems, makes it difficult to establish a stable degradation model and achieve accurate RUL prediction. (2) There is a lack of publicly available real-world datasets for research purposes. To address these challenges, we propose CEEMDAN-BWO-optimized Bidirectional LSTM for Remaining Useful Life prediction (CB2-RUL). The method first applies Complete Ensemble Empirical Mode Decomposition with Adaptive Noise (CEEMDAN) to the raw vibration signals for decomposition and denoising, thereby improving signal stationarity and enhancing feature representation. Next, the Black Widow Optimization (BWO) algorithm is employed to automatically tune key hyperparameters of a Bidirectional Long Short-Term Memory (BiLSTM) network. Finally, the optimized BiLSTM captures the temporal evolution patterns of valve-body degradation and produces high-accuracy RUL estimates. Finally, to verify the effectiveness of the proposed approach, we constructed a real-world dataset named VB-Lifecycle, which comprises ten valve bodies from different positions within the equipment and spans the complete lifecycle from pristine condition to failure. Extensive experiments conducted on the VB-Lifecycle dataset demonstrate that the proposed method provides accurate RUL prediction for valve bodies.</p>
	]]></content:encoded>

	<dc:title>Remaining Useful Life Prediction of Fracturing Truck Valve Bodies Based on the CB2-RUL Algorithm</dc:title>
			<dc:creator>Xinyue Chen</dc:creator>
			<dc:creator>Jishun Ren</dc:creator>
			<dc:creator>Yang Wang</dc:creator>
			<dc:creator>Jiquan He</dc:creator>
			<dc:creator>Xuyou Guo</dc:creator>
			<dc:creator>Gantailai Ye</dc:creator>
		<dc:identifier>doi: 10.3390/computation14020055</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-02-23</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-02-23</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>55</prism:startingPage>
		<prism:doi>10.3390/computation14020055</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/2/55</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/2/54">

	<title>Computation, Vol. 14, Pages 54: Improving the Accuracy of Infectious Disease Forecasts Based on Comparing Neural Network Architectures</title>
	<link>https://www.mdpi.com/2079-3197/14/2/54</link>
	<description>This paper aims to improve the accuracy of infectious disease forecasting using machine learning methods. The main results of this work are an analysis of infectious diseases spread in Ukraine during the time span from December 2016 to January 2024 and a performance comparison of different neural network architectures in the scope of time series forecasting. The following steps were taken: analysis of current forecasting methods, selection of neural network architectures, dataset preprocessing, and model testing. The developed system can be an effective tool for rational management decisions to ensure the epidemiological well-being and biosecurity of the population.</description>
	<pubDate>2026-02-21</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 54: Improving the Accuracy of Infectious Disease Forecasts Based on Comparing Neural Network Architectures</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/2/54">doi: 10.3390/computation14020054</a></p>
	<p>Authors:
		Oleksandr Kovaliv
		Yuriy Kondratenko
		Ievgen Sidenko
		Galyna Kondratenko
		Dmytro Chumachenko
		</p>
	<p>This paper aims to improve the accuracy of infectious disease forecasting using machine learning methods. The main results of this work are an analysis of infectious diseases spread in Ukraine during the time span from December 2016 to January 2024 and a performance comparison of different neural network architectures in the scope of time series forecasting. The following steps were taken: analysis of current forecasting methods, selection of neural network architectures, dataset preprocessing, and model testing. The developed system can be an effective tool for rational management decisions to ensure the epidemiological well-being and biosecurity of the population.</p>
	]]></content:encoded>

	<dc:title>Improving the Accuracy of Infectious Disease Forecasts Based on Comparing Neural Network Architectures</dc:title>
			<dc:creator>Oleksandr Kovaliv</dc:creator>
			<dc:creator>Yuriy Kondratenko</dc:creator>
			<dc:creator>Ievgen Sidenko</dc:creator>
			<dc:creator>Galyna Kondratenko</dc:creator>
			<dc:creator>Dmytro Chumachenko</dc:creator>
		<dc:identifier>doi: 10.3390/computation14020054</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-02-21</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-02-21</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>54</prism:startingPage>
		<prism:doi>10.3390/computation14020054</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/2/54</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/2/53">

	<title>Computation, Vol. 14, Pages 53: Analysis of Internal Mechanical Friction Losses Influence on the Francis-99 Runner Using the Friction Torque Approach</title>
	<link>https://www.mdpi.com/2079-3197/14/2/53</link>
	<description>Francis turbines are renowned for their high efficiency and adaptability across a wide range of head and discharge conditions. However, internal mechanical friction losses (IMFLs), resulting from rotational frictional resistance between the rotating runner and the surrounding fluid, remain a significant obstacle to further performance optimisation. This study introduced a CFD-derived integral friction torque framework, validated through theoretical analysis, that enables the spatially resolved quantification of IMFLs in Francis turbine runners. Building on this framework, a comprehensive computational approach was established to quantify IMFLs in a Francis turbine runner using a CFD-derived integral torque method combined with a theoretical verification model. Three runner configurations were analysed: the original runner model (ORM), a modified runner (RM1) with selective exit height reduction, and a modified runner (RM2) with uniform exit height reduction. Transient simulations were conducted at the best efficiency point (BEP) using the shear stress transport (SST) k&amp;ndash;&amp;omega; turbulence model and a sliding mesh approach. The numerical results were verified using the theoretical model and systematically evaluated to assess IMFL mechanisms and runner performance. The findings demonstrate that variations in runner geometry significantly influence internal frictional resistance and turbine efficiency. Compared with ORM, both RM1 and RM2 reduced the rotational friction torque, with RM2 exhibiting the greatest improvement: a 2.83% reduction in total friction resistance torque, a 14.74% reduction in total power losses, and a 1% absolute increase in efficiency. These improvements are primarily attributed to reduced wall shear stress and a more uniform pressure distribution across the runner surface.</description>
	<pubDate>2026-02-19</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 53: Analysis of Internal Mechanical Friction Losses Influence on the Francis-99 Runner Using the Friction Torque Approach</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/2/53">doi: 10.3390/computation14020053</a></p>
	<p>Authors:
		Otibh M. M. Abubkry
		Yun Zeng
		Juan Duan
		Altyib Abdallah Mahmoud Ahmed
		Hassan Babeker
		Altyeb Ali Abaker Omer
		</p>
	<p>Francis turbines are renowned for their high efficiency and adaptability across a wide range of head and discharge conditions. However, internal mechanical friction losses (IMFLs), resulting from rotational frictional resistance between the rotating runner and the surrounding fluid, remain a significant obstacle to further performance optimisation. This study introduced a CFD-derived integral friction torque framework, validated through theoretical analysis, that enables the spatially resolved quantification of IMFLs in Francis turbine runners. Building on this framework, a comprehensive computational approach was established to quantify IMFLs in a Francis turbine runner using a CFD-derived integral torque method combined with a theoretical verification model. Three runner configurations were analysed: the original runner model (ORM), a modified runner (RM1) with selective exit height reduction, and a modified runner (RM2) with uniform exit height reduction. Transient simulations were conducted at the best efficiency point (BEP) using the shear stress transport (SST) k&ndash;&omega; turbulence model and a sliding mesh approach. The numerical results were verified using the theoretical model and systematically evaluated to assess IMFL mechanisms and runner performance. The findings demonstrate that variations in runner geometry significantly influence internal frictional resistance and turbine efficiency. Compared with ORM, both RM1 and RM2 reduced the rotational friction torque, with RM2 exhibiting the greatest improvement: a 2.83% reduction in total friction resistance torque, a 14.74% reduction in total power losses, and a 1% absolute increase in efficiency. These improvements are primarily attributed to reduced wall shear stress and a more uniform pressure distribution across the runner surface.</p>
	]]></content:encoded>

	<dc:title>Analysis of Internal Mechanical Friction Losses Influence on the Francis-99 Runner Using the Friction Torque Approach</dc:title>
			<dc:creator>Otibh M. M. Abubkry</dc:creator>
			<dc:creator>Yun Zeng</dc:creator>
			<dc:creator>Juan Duan</dc:creator>
			<dc:creator>Altyib Abdallah Mahmoud Ahmed</dc:creator>
			<dc:creator>Hassan Babeker</dc:creator>
			<dc:creator>Altyeb Ali Abaker Omer</dc:creator>
		<dc:identifier>doi: 10.3390/computation14020053</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-02-19</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-02-19</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>53</prism:startingPage>
		<prism:doi>10.3390/computation14020053</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/2/53</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/2/52">

	<title>Computation, Vol. 14, Pages 52: TOTEMS: Histogram of Evolutionarily Conserved Amino Acids</title>
	<link>https://www.mdpi.com/2079-3197/14/2/52</link>
	<description>We have developed a tool that allows us to easily visualize evolutionary variation via complementary multiple sequence alignments and frequency-based stacked Sequence Logos. This tool, TOTEMS (hisTogram of evOluTionarily consErved aMino acidS), visualizes conserved regions in a multiple sequence alignment within regions of a three-dimensional structure that share similar degrees of evolutionary conservation as revealed in ConSurf output data. Unlike Sequence Logos that illustrate the relative frequency of individual amino acid residues (as in MSAViewer), or moving window averages that focus on properties such as hydrophobicity or electrical charge (as in CATH), TOTEMS can help users discriminate degrees of evolutionary conservation in adjacent positions within a three-dimensional structure. Thus, we offer a tool that serves to complement pre-existing visualization applications such as ConSurf, MSAViewer, and CATH. TOTEMS and its source code are freely available.</description>
	<pubDate>2026-02-18</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 52: TOTEMS: Histogram of Evolutionarily Conserved Amino Acids</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/2/52">doi: 10.3390/computation14020052</a></p>
	<p>Authors:
		Michael J. Fajardo
		Adam G. Marsh
		John R. Jungck
		</p>
	<p>We have developed a tool that allows us to easily visualize evolutionary variation via complementary multiple sequence alignments and frequency-based stacked Sequence Logos. This tool, TOTEMS (hisTogram of evOluTionarily consErved aMino acidS), visualizes conserved regions in a multiple sequence alignment within regions of a three-dimensional structure that share similar degrees of evolutionary conservation as revealed in ConSurf output data. Unlike Sequence Logos that illustrate the relative frequency of individual amino acid residues (as in MSAViewer), or moving window averages that focus on properties such as hydrophobicity or electrical charge (as in CATH), TOTEMS can help users discriminate degrees of evolutionary conservation in adjacent positions within a three-dimensional structure. Thus, we offer a tool that serves to complement pre-existing visualization applications such as ConSurf, MSAViewer, and CATH. TOTEMS and its source code are freely available.</p>
	]]></content:encoded>

	<dc:title>TOTEMS: Histogram of Evolutionarily Conserved Amino Acids</dc:title>
			<dc:creator>Michael J. Fajardo</dc:creator>
			<dc:creator>Adam G. Marsh</dc:creator>
			<dc:creator>John R. Jungck</dc:creator>
		<dc:identifier>doi: 10.3390/computation14020052</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-02-18</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-02-18</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Communication</prism:section>
	<prism:startingPage>52</prism:startingPage>
		<prism:doi>10.3390/computation14020052</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/2/52</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/2/51">

	<title>Computation, Vol. 14, Pages 51: Robust Backstepping-Sliding Control of a Quadrotor UAV with Disturbance Compensation</title>
	<link>https://www.mdpi.com/2079-3197/14/2/51</link>
	<description>Quadrotor unmanned aerial vehicles (QUAVs) are widely used in civil and defense applications, yet reliable trajectory tracking remains challenging under external disturbances and limited sensing. Conventional backstepping&amp;ndash;sliding mode controllers ensure robustness only by selecting discontinuous gains larger than the disturbance bound, which increases chattering and limits the use of smooth switching functions. This paper addresses these limitations by integrating explicit disturbance compensation into a backstepping&amp;ndash;sliding framework through a super-twisting observer (STO). The STO reconstructs matched disturbances acting on the translational and rotational dynamics in real time, and the estimated signals are directly injected into the control law. This approach enables effective disturbance rejection beyond the nominal sliding gain while preserving robustness under smooth control actions. Simulation results under single- and multi-frequency perturbations demonstrate accurate disturbance reconstruction (FIT indices above 95%), improved tracking performance, and a significant reduction in chattering. The proposed strategy provides a robust control solution for QUAVs operating in uncertain environments.</description>
	<pubDate>2026-02-14</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 51: Robust Backstepping-Sliding Control of a Quadrotor UAV with Disturbance Compensation</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/2/51">doi: 10.3390/computation14020051</a></p>
	<p>Authors:
		Vicente Borja-Jaimes
		Jorge Salvador Valdez-Martínez
		Miguel Beltrán-Escobar
		Guillermo Ramírez-Zúñiga
		Adriana Reyes-Mayer
		Manuela Calixto-Rodríguez
		</p>
	<p>Quadrotor unmanned aerial vehicles (QUAVs) are widely used in civil and defense applications, yet reliable trajectory tracking remains challenging under external disturbances and limited sensing. Conventional backstepping&ndash;sliding mode controllers ensure robustness only by selecting discontinuous gains larger than the disturbance bound, which increases chattering and limits the use of smooth switching functions. This paper addresses these limitations by integrating explicit disturbance compensation into a backstepping&ndash;sliding framework through a super-twisting observer (STO). The STO reconstructs matched disturbances acting on the translational and rotational dynamics in real time, and the estimated signals are directly injected into the control law. This approach enables effective disturbance rejection beyond the nominal sliding gain while preserving robustness under smooth control actions. Simulation results under single- and multi-frequency perturbations demonstrate accurate disturbance reconstruction (FIT indices above 95%), improved tracking performance, and a significant reduction in chattering. The proposed strategy provides a robust control solution for QUAVs operating in uncertain environments.</p>
	]]></content:encoded>

	<dc:title>Robust Backstepping-Sliding Control of a Quadrotor UAV with Disturbance Compensation</dc:title>
			<dc:creator>Vicente Borja-Jaimes</dc:creator>
			<dc:creator>Jorge Salvador Valdez-Martínez</dc:creator>
			<dc:creator>Miguel Beltrán-Escobar</dc:creator>
			<dc:creator>Guillermo Ramírez-Zúñiga</dc:creator>
			<dc:creator>Adriana Reyes-Mayer</dc:creator>
			<dc:creator>Manuela Calixto-Rodríguez</dc:creator>
		<dc:identifier>doi: 10.3390/computation14020051</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-02-14</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-02-14</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>51</prism:startingPage>
		<prism:doi>10.3390/computation14020051</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/2/51</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/2/50">

	<title>Computation, Vol. 14, Pages 50: Extending Q-Learning for Economic Modelling: A Design Framework with Equilibrium Benchmarks</title>
	<link>https://www.mdpi.com/2079-3197/14/2/50</link>
	<description>This paper proposes a methodological architecture to integrate Q-learning into economic modelling systematically. It addresses a common gap: the lack of a shared framework linking economic foundations to Reinforcement Learning components. Rather than introducing a new algorithm, it specifies and reports how preferences, frictions, information structures, and time horizons map to the reward function, discount factor, and learning environment design. Equilibrium outcomes serve as benchmarks for comparing learned policies, not as imposed axioms. This approach interprets learning dynamics through standard economic categories and enables comparability across studies. The architecture organizes models along explicit dimensions: behavioural preferences, institutional frictions, economic environment class, information structure, learning and exploration mechanisms, and evaluation metrics. A simulation illustrates how variations in frictions, risk attitudes, and intertemporal preferences affect learned policies, their stability, and their relationship to static benchmarks. The paper aims to promote the cumulative use of Reinforcement Learning in applied economics by providing a general specification that improves interpretability, comparability, and reproducibility, turning deviations from theoretical equilibria into measurable diagnostics that refine economic fundamentals.</description>
	<pubDate>2026-02-14</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 50: Extending Q-Learning for Economic Modelling: A Design Framework with Equilibrium Benchmarks</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/2/50">doi: 10.3390/computation14020050</a></p>
	<p>Authors:
		Jorge Moya Velasco
		Jorge Soria Ruiz-Ogarrio
		Pedro Caja Meri
		Silvia Álvarez-Santás
		</p>
	<p>This paper proposes a methodological architecture to integrate Q-learning into economic modelling systematically. It addresses a common gap: the lack of a shared framework linking economic foundations to Reinforcement Learning components. Rather than introducing a new algorithm, it specifies and reports how preferences, frictions, information structures, and time horizons map to the reward function, discount factor, and learning environment design. Equilibrium outcomes serve as benchmarks for comparing learned policies, not as imposed axioms. This approach interprets learning dynamics through standard economic categories and enables comparability across studies. The architecture organizes models along explicit dimensions: behavioural preferences, institutional frictions, economic environment class, information structure, learning and exploration mechanisms, and evaluation metrics. A simulation illustrates how variations in frictions, risk attitudes, and intertemporal preferences affect learned policies, their stability, and their relationship to static benchmarks. The paper aims to promote the cumulative use of Reinforcement Learning in applied economics by providing a general specification that improves interpretability, comparability, and reproducibility, turning deviations from theoretical equilibria into measurable diagnostics that refine economic fundamentals.</p>
	]]></content:encoded>

	<dc:title>Extending Q-Learning for Economic Modelling: A Design Framework with Equilibrium Benchmarks</dc:title>
			<dc:creator>Jorge Moya Velasco</dc:creator>
			<dc:creator>Jorge Soria Ruiz-Ogarrio</dc:creator>
			<dc:creator>Pedro Caja Meri</dc:creator>
			<dc:creator>Silvia Álvarez-Santás</dc:creator>
		<dc:identifier>doi: 10.3390/computation14020050</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-02-14</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-02-14</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>50</prism:startingPage>
		<prism:doi>10.3390/computation14020050</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/2/50</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/2/49">

	<title>Computation, Vol. 14, Pages 49: ECG Heartbeat Classification Using Echo State Networks with Noisy Reservoirs and Variable Activation Function</title>
	<link>https://www.mdpi.com/2079-3197/14/2/49</link>
	<description>In this work, we use an Echo State Network (ESN) model, which is essentially a recurrent neural network (RNN) operating according to the reservoir computing (RC) paradigm, to classify individual ECG heartbeats using the MIT-BIH arrhythmia database. The aim is to evaluate the performance of ESN in a challenging task that involves classification of complex, unprocessed one-dimensional signals, distributed into five classes. Moreover, we investigate the performance of the ESN in the presence of (i) noise in the dynamics of the internal variables of the hidden (reservoir) layer and (ii) random variability in the activation functions of the hidden layer cells (neurons). The overall accuracy of the best-performing ESN, without noise and variability, exceeded 96% with per-class accuracies ranging from 90.2% to 99.1%, which is higher than previous studies using CNNs and more complex machine learning approaches. The top-performing ESN required only 40 min of training on a CPU (Intel i5-1235U@1.3 GHz) HP laptop. Notably, an alternative ESN configuration that matched the accuracy of a prior CNN-based study (93.4%) required only 6 min of training, whereas a CNN would typically require an estimated training time of 2&amp;ndash;3 days. Surprisingly, ESN performance proved to be very robust when Gaussian noise was added to the dynamics of the reservoir hidden variables, even for high noise amplitudes. Moreover, the success rates remained essentially the same when random variability was imposed in the activation functions of the hidden layer cells. The stability of ESN performance under noisy conditions and random variability in the hidden layer (reservoir) cells demonstrates the potential of analog hardware implementations of ESNs to be robust in time-series classification tasks.</description>
	<pubDate>2026-02-13</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 49: ECG Heartbeat Classification Using Echo State Networks with Noisy Reservoirs and Variable Activation Function</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/2/49">doi: 10.3390/computation14020049</a></p>
	<p>Authors:
		Ioannis P. Antoniades
		Anastasios N. Tsiftsis
		Christos K. Volos
		Andreas D. Tsigopoulos
		Konstantia G. Kyritsi
		Hector E. Nistazakis
		</p>
	<p>In this work, we use an Echo State Network (ESN) model, which is essentially a recurrent neural network (RNN) operating according to the reservoir computing (RC) paradigm, to classify individual ECG heartbeats using the MIT-BIH arrhythmia database. The aim is to evaluate the performance of ESN in a challenging task that involves classification of complex, unprocessed one-dimensional signals, distributed into five classes. Moreover, we investigate the performance of the ESN in the presence of (i) noise in the dynamics of the internal variables of the hidden (reservoir) layer and (ii) random variability in the activation functions of the hidden layer cells (neurons). The overall accuracy of the best-performing ESN, without noise and variability, exceeded 96% with per-class accuracies ranging from 90.2% to 99.1%, which is higher than previous studies using CNNs and more complex machine learning approaches. The top-performing ESN required only 40 min of training on a CPU (Intel i5-1235U@1.3 GHz) HP laptop. Notably, an alternative ESN configuration that matched the accuracy of a prior CNN-based study (93.4%) required only 6 min of training, whereas a CNN would typically require an estimated training time of 2&ndash;3 days. Surprisingly, ESN performance proved to be very robust when Gaussian noise was added to the dynamics of the reservoir hidden variables, even for high noise amplitudes. Moreover, the success rates remained essentially the same when random variability was imposed in the activation functions of the hidden layer cells. The stability of ESN performance under noisy conditions and random variability in the hidden layer (reservoir) cells demonstrates the potential of analog hardware implementations of ESNs to be robust in time-series classification tasks.</p>
	]]></content:encoded>

	<dc:title>ECG Heartbeat Classification Using Echo State Networks with Noisy Reservoirs and Variable Activation Function</dc:title>
			<dc:creator>Ioannis P. Antoniades</dc:creator>
			<dc:creator>Anastasios N. Tsiftsis</dc:creator>
			<dc:creator>Christos K. Volos</dc:creator>
			<dc:creator>Andreas D. Tsigopoulos</dc:creator>
			<dc:creator>Konstantia G. Kyritsi</dc:creator>
			<dc:creator>Hector E. Nistazakis</dc:creator>
		<dc:identifier>doi: 10.3390/computation14020049</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-02-13</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-02-13</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>49</prism:startingPage>
		<prism:doi>10.3390/computation14020049</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/2/49</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/2/48">

	<title>Computation, Vol. 14, Pages 48: Multi-Level Parallel CPU Execution Method for Accelerated Portion-Based Variant Call Format Data Processing</title>
	<link>https://www.mdpi.com/2079-3197/14/2/48</link>
	<description>This paper proposes and experimentally evaluates a multi-level CPU-oriented execution method for high-throughput portion-based processing of file-backed Variant Call Format (VCF) data and automated mutation classification. The approach is based on a formally defined local processing scheme and integrates three coordinated levels of parallelism: block-based partitioning of file-backed VCF portions read sequentially into localized fragments with data-level parallel processing; task-level decomposition of feature construction into independent transformations; and execution-level specialization via JIT compilation of numerical kernels. To prevent performance degradation caused by nested parallelism, a resource-control mechanism is introduced as an execution rule that bounds effective parallelism and mitigates oversubscription, improving throughput stability on a single multi-core CPU node. Experiments on a public chromosome-17 VCF dataset for BRCA1-region pathogenicity classification demonstrate that the proposed multi-level local CPU execution (parsing/filtering, feature construction, and JIT-specialized numeric kernels) reduces runtime from 291.25 s (sequential) to 73.82 s, yielding a 3.95&amp;amp;times; speedup. When combined with resource-coordinated parallel model training, the end-to-end runtime further decreases to 51.18 s, corresponding to a 5.69&amp;amp;times; speedup, while preserving classification quality (accuracy 0.8483, precision 0.8758, recall 0.8261, F1 0.8502). A stage-wise ablation analysis quantifies the contribution of each execution level and confirms consistent scaling under resource-bounded execution.</description>
	<pubDate>2026-02-08</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 48: Multi-Level Parallel CPU Execution Method for Accelerated Portion-Based Variant Call Format Data Processing</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/2/48">doi: 10.3390/computation14020048</a></p>
	<p>Authors:
		Lesia Mochurad
		Ivan Tsmots
		Vita Mostova
		Karina Kystsiv
		</p>
	<p>This paper proposes and experimentally evaluates a multi-level CPU-oriented execution method for high-throughput portion-based processing of file-backed Variant Call Format (VCF) data and automated mutation classification. The approach is based on a formally defined local processing scheme and integrates three coordinated levels of parallelism: block-based partitioning of file-backed VCF portions read sequentially into localized fragments with data-level parallel processing; task-level decomposition of feature construction into independent transformations; and execution-level specialization via JIT compilation of numerical kernels. To prevent performance degradation caused by nested parallelism, a resource-control mechanism is introduced as an execution rule that bounds effective parallelism and mitigates oversubscription, improving throughput stability on a single multi-core CPU node. Experiments on a public chromosome-17 VCF dataset for BRCA1-region pathogenicity classification demonstrate that the proposed multi-level local CPU execution (parsing/filtering, feature construction, and JIT-specialized numeric kernels) reduces runtime from 291.25 s (sequential) to 73.82 s, yielding a 3.95&amp;amp;times; speedup. When combined with resource-coordinated parallel model training, the end-to-end runtime further decreases to 51.18 s, corresponding to a 5.69&amp;amp;times; speedup, while preserving classification quality (accuracy 0.8483, precision 0.8758, recall 0.8261, F1 0.8502). A stage-wise ablation analysis quantifies the contribution of each execution level and confirms consistent scaling under resource-bounded execution.</p>
	]]></content:encoded>

	<dc:title>Multi-Level Parallel CPU Execution Method for Accelerated Portion-Based Variant Call Format Data Processing</dc:title>
			<dc:creator>Lesia Mochurad</dc:creator>
			<dc:creator>Ivan Tsmots</dc:creator>
			<dc:creator>Vita Mostova</dc:creator>
			<dc:creator>Karina Kystsiv</dc:creator>
		<dc:identifier>doi: 10.3390/computation14020048</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-02-08</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-02-08</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>48</prism:startingPage>
		<prism:doi>10.3390/computation14020048</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/2/48</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/2/47">

	<title>Computation, Vol. 14, Pages 47: SOH- and Temperature-Aware Adaptive SOC Boundaries for Second-Life Li-Ion Batteries in Off-Grid PV&amp;ndash;BESSs</title>
	<link>https://www.mdpi.com/2079-3197/14/2/47</link>
	<description>In this study, an adaptive state-of-charge (SOC) boundary strategy (ASBS) is proposed that dynamically adjusts the admissible upper and lower SOC limits of second-life lithium-ion batteries in off-grid photovoltaic battery energy storage systems (PV-BESSs) based on real-time state of health (SOH) and temperature feedback. The strategy is formulated using a unified electrical&amp;amp;ndash;thermal&amp;amp;ndash;aging model with an online state estimator and ensures both electrical safety and power feasibility while remaining fully compatible with standard energy management functions. Two representative simulations&amp;amp;mdash;a single-day operating profile and a continuous thirty-day sequence&amp;amp;mdash;demonstrate the effectiveness of the ASBS. In the twenty-four-hour case, the duration spent in high state-of-charge conditions is reduced by approximately 0.30&amp;amp;ndash;0.50 h, the abrupt end-of-charging transition is eliminated, and the temperature rise is slightly moderated, all without any loss of energy supply. Over thirty days, the difference between the ASBS and a fixed state-of-charge window remains effectively zero for almost all hours, with only a brief midday deviation of &amp;amp;minus;4 to &amp;amp;minus;5 percentage points and no cumulative drift. Indicators of electrical and thermal stress improve substantially, including an approximate 70% reduction in the root mean square charging current. These results confirm that the ASBS provides a practical and non-intrusive means of mitigating stress on second-life lithium-ion batteries while preserving full energy autonomy in off-grid photovoltaic systems.</description>
	<pubDate>2026-02-07</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 47: SOH- and Temperature-Aware Adaptive SOC Boundaries for Second-Life Li-Ion Batteries in Off-Grid PV&ndash;BESSs</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/2/47">doi: 10.3390/computation14020047</a></p>
	<p>Authors:
		Hongyan Wang
		Atthapol Ngaopitakkul
		Suntiti Yoomak
		</p>
	<p>In this study, an adaptive state-of-charge (SOC) boundary strategy (ASBS) is proposed that dynamically adjusts the admissible upper and lower SOC limits of second-life lithium-ion batteries in off-grid photovoltaic battery energy storage systems (PV-BESSs) based on real-time state of health (SOH) and temperature feedback. The strategy is formulated using a unified electrical&amp;amp;ndash;thermal&amp;amp;ndash;aging model with an online state estimator and ensures both electrical safety and power feasibility while remaining fully compatible with standard energy management functions. Two representative simulations&amp;amp;mdash;a single-day operating profile and a continuous thirty-day sequence&amp;amp;mdash;demonstrate the effectiveness of the ASBS. In the twenty-four-hour case, the duration spent in high state-of-charge conditions is reduced by approximately 0.30&amp;amp;ndash;0.50 h, the abrupt end-of-charging transition is eliminated, and the temperature rise is slightly moderated, all without any loss of energy supply. Over thirty days, the difference between the ASBS and a fixed state-of-charge window remains effectively zero for almost all hours, with only a brief midday deviation of &amp;amp;minus;4 to &amp;amp;minus;5 percentage points and no cumulative drift. Indicators of electrical and thermal stress improve substantially, including an approximate 70% reduction in the root mean square charging current. These results confirm that the ASBS provides a practical and non-intrusive means of mitigating stress on second-life lithium-ion batteries while preserving full energy autonomy in off-grid photovoltaic systems.</p>
	]]></content:encoded>

	<dc:title>SOH- and Temperature-Aware Adaptive SOC Boundaries for Second-Life Li-Ion Batteries in Off-Grid PV&amp;ndash;BESSs</dc:title>
			<dc:creator>Hongyan Wang</dc:creator>
			<dc:creator>Atthapol Ngaopitakkul</dc:creator>
			<dc:creator>Suntiti Yoomak</dc:creator>
		<dc:identifier>doi: 10.3390/computation14020047</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-02-07</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-02-07</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>47</prism:startingPage>
		<prism:doi>10.3390/computation14020047</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/2/47</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/2/46">

	<title>Computation, Vol. 14, Pages 46: Phishing Email Detection Using BERT and RoBERTa</title>
	<link>https://www.mdpi.com/2079-3197/14/2/46</link>
	<description>One of the most harmful and deceptive forms of cybercrime is phishing, which targets users with malicious emails and websites. In this paper, we focus on the use of natural language processing (NLP) techniques and transformer models for phishing email detection. The Nazario Phishing Corpus is preprocessed and blended with real emails from the Enron dataset to create a robustly balanced dataset. Urgency, deceptive phrasing, and structural anomalies were some of the neglected features and sociolinguistic traits of the text, which underwent tokenization, lemmatization, and noise filtration. We fine-tuned two transformer models, Bidirectional Encoder Representations from Transformers (BERT) and the Robustly Optimized BERT Pretraining Approach (RoBERTa), for binary classification. The models were evaluated on the standard metrics of accuracy, precision, recall, and F1-score. Given the context of phishing, emphasis was placed on recall to reduce the number of phishing attacks that went unnoticed. The results show that RoBERTa has more general performance and fewer false negatives than BERT and is therefore a better candidate for deployment on security-critical tasks.</description>
	<pubDate>2026-02-07</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 46: Phishing Email Detection Using BERT and RoBERTa</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/2/46">doi: 10.3390/computation14020046</a></p>
	<p>Authors:
		Mariam Ibrahim
		Ruba Elhafiz
		</p>
	<p>One of the most harmful and deceptive forms of cybercrime is phishing, which targets users with malicious emails and websites. In this paper, we focus on the use of natural language processing (NLP) techniques and transformer models for phishing email detection. The Nazario Phishing Corpus is preprocessed and blended with real emails from the Enron dataset to create a robustly balanced dataset. Urgency, deceptive phrasing, and structural anomalies were some of the neglected features and sociolinguistic traits of the text, which underwent tokenization, lemmatization, and noise filtration. We fine-tuned two transformer models, Bidirectional Encoder Representations from Transformers (BERT) and the Robustly Optimized BERT Pretraining Approach (RoBERTa), for binary classification. The models were evaluated on the standard metrics of accuracy, precision, recall, and F1-score. Given the context of phishing, emphasis was placed on recall to reduce the number of phishing attacks that went unnoticed. The results show that RoBERTa has more general performance and fewer false negatives than BERT and is therefore a better candidate for deployment on security-critical tasks.</p>
	]]></content:encoded>

	<dc:title>Phishing Email Detection Using BERT and RoBERTa</dc:title>
			<dc:creator>Mariam Ibrahim</dc:creator>
			<dc:creator>Ruba Elhafiz</dc:creator>
		<dc:identifier>doi: 10.3390/computation14020046</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-02-07</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-02-07</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>46</prism:startingPage>
		<prism:doi>10.3390/computation14020046</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/2/46</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/2/45">

	<title>Computation, Vol. 14, Pages 45: An Enhanced Projection-Iterative-Methods-Based Optimizer for Complex Constrained Engineering Design Problems</title>
	<link>https://www.mdpi.com/2079-3197/14/2/45</link>
	<description>This paper proposes an Enhanced Projection-Iterative-Methods-based Optimizer (EPIMO) to overcome the limitations of its predecessor, the Projection-Iterative-Methods-based Optimizer (PIMO), including deterministic parameter decay, insufficient diversity maintenance, and static exploration&amp;amp;ndash;exploitation balance. The enhancements incorporate three core strategies: (1) an adaptive decay strategy that introduces stochastic perturbations into the step-size evolution; (2) a mirror opposition-based learning strategy to actively inject structured population diversity; and (3) an adaptive adjustment mechanism for the L&amp;amp;eacute;vy flight parameter &amp;amp;beta; to enable phase-sensitive optimization behavior. The effectiveness of EPIMO is validated through a multi-stage experimental framework. Systematic evaluations on the CEC 2017 and CEC 2022 benchmark suites, alongside four classical engineering optimization problems (Himmelblau function, step-cone pulley design, hydrostatic thrust bearing design, and three-bar truss design), demonstrate its comprehensive superiority. The Wilcoxon rank-sum test confirms statistically significant performance improvements over its predecessor (PIMO) and a range of state-of-the-art and classical algorithms. EPIMO exhibits exceptional performance in convergence accuracy, stability, robustness, and constraint-handling capability, establishing it as a highly reliable and efficient metaheuristic optimizer. This research contributes a systematic, adaptive enhancement framework for projection-based metaheuristics, which can be generalized to improve other swarm intelligence systems when facing complex, constrained, and high-dimensional engineering optimization tasks.</description>
	<pubDate>2026-02-06</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 45: An Enhanced Projection-Iterative-Methods-Based Optimizer for Complex Constrained Engineering Design Problems</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/2/45">doi: 10.3390/computation14020045</a></p>
	<p>Authors:
		Xuemei Zhu
		Han Peng
		Haoyu Cai
		Yu Liu
		Shirong Li
		Wei Peng
		</p>
	<p>This paper proposes an Enhanced Projection-Iterative-Methods-based Optimizer (EPIMO) to overcome the limitations of its predecessor, the Projection-Iterative-Methods-based Optimizer (PIMO), including deterministic parameter decay, insufficient diversity maintenance, and static exploration&amp;amp;ndash;exploitation balance. The enhancements incorporate three core strategies: (1) an adaptive decay strategy that introduces stochastic perturbations into the step-size evolution; (2) a mirror opposition-based learning strategy to actively inject structured population diversity; and (3) an adaptive adjustment mechanism for the L&amp;amp;eacute;vy flight parameter &amp;amp;beta; to enable phase-sensitive optimization behavior. The effectiveness of EPIMO is validated through a multi-stage experimental framework. Systematic evaluations on the CEC 2017 and CEC 2022 benchmark suites, alongside four classical engineering optimization problems (Himmelblau function, step-cone pulley design, hydrostatic thrust bearing design, and three-bar truss design), demonstrate its comprehensive superiority. The Wilcoxon rank-sum test confirms statistically significant performance improvements over its predecessor (PIMO) and a range of state-of-the-art and classical algorithms. EPIMO exhibits exceptional performance in convergence accuracy, stability, robustness, and constraint-handling capability, establishing it as a highly reliable and efficient metaheuristic optimizer. This research contributes a systematic, adaptive enhancement framework for projection-based metaheuristics, which can be generalized to improve other swarm intelligence systems when facing complex, constrained, and high-dimensional engineering optimization tasks.</p>
	]]></content:encoded>

	<dc:title>An Enhanced Projection-Iterative-Methods-Based Optimizer for Complex Constrained Engineering Design Problems</dc:title>
			<dc:creator>Xuemei Zhu</dc:creator>
			<dc:creator>Han Peng</dc:creator>
			<dc:creator>Haoyu Cai</dc:creator>
			<dc:creator>Yu Liu</dc:creator>
			<dc:creator>Shirong Li</dc:creator>
			<dc:creator>Wei Peng</dc:creator>
		<dc:identifier>doi: 10.3390/computation14020045</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-02-06</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-02-06</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>45</prism:startingPage>
		<prism:doi>10.3390/computation14020045</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/2/45</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/2/44">

	<title>Computation, Vol. 14, Pages 44: Nonlinear System Modelling and Control: Trends, Challenges, and Future Perspectives</title>
	<link>https://www.mdpi.com/2079-3197/14/2/44</link>
	<description>Nonlinear systems engineering has undergone a profound transformation with the rapid development of computational tools and advanced analytical methods [...]</description>
	<pubDate>2026-02-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 44: Nonlinear System Modelling and Control: Trends, Challenges, and Future Perspectives</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/2/44">doi: 10.3390/computation14020044</a></p>
	<p>Authors:
		Chathura Wanigasekara
		</p>
	<p>Nonlinear systems engineering has undergone a profound transformation with the rapid development of computational tools and advanced analytical methods [...]</p>
	]]></content:encoded>

	<dc:title>Nonlinear System Modelling and Control: Trends, Challenges, and Future Perspectives</dc:title>
			<dc:creator>Chathura Wanigasekara</dc:creator>
		<dc:identifier>doi: 10.3390/computation14020044</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-02-03</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-02-03</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Editorial</prism:section>
	<prism:startingPage>44</prism:startingPage>
		<prism:doi>10.3390/computation14020044</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/2/44</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/2/43">

	<title>Computation, Vol. 14, Pages 43: Methodology for Predicting Geochemical Anomalies Using Preprocessing of Input Geological Data and Dual Application of a Multilayer Perceptron</title>
	<link>https://www.mdpi.com/2079-3197/14/2/43</link>
	<description>The increasing need for accurate prediction of geochemical anomalies requires methods capable of capturing complex spatial patterns that traditional approaches often fail to represent adequately. For N datasets of the form (Xi,Yi) representing the geographic coordinates of sampling points and Ci denoting the geochemical measurement, training multilayer perceptrons (MLPs) presents a challenge. The low informativeness of the input features and their weak correlation with the target variable result in excessively simplified predictions. Analysis of a baseline model trained only on geographic coordinates showed that, while the loss function converges rapidly, the resulting values become overly &amp;amp;ldquo;compressed&amp;amp;rdquo; and fail to reflect the actual concentration range. To address this, a preprocessing method based on anisotropy was developed to enhance the correlation between input and output variables. This approach constructs, for each prediction point, a structured informational model that incorporates the direction and magnitude of spatial variability through sectoral and radial partitioning of the nearest sampling data. The transformed features are then used in a dual-MLP architecture, where the first network produces sectoral estimates, and the second aggregates them into the final prediction. The results show that anisotropic feature transformation significantly improves neural network prediction capabilities in geochemical analysis.</description>
	<pubDate>2026-02-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 43: Methodology for Predicting Geochemical Anomalies Using Preprocessing of Input Geological Data and Dual Application of a Multilayer Perceptron</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/2/43">doi: 10.3390/computation14020043</a></p>
	<p>Authors:
		Daulet Akhmedov
		Baurzhan Bekmukhamedov
		Moldir Tanashova
		Zulfiya Seitmuratova
		</p>
	<p>The increasing need for accurate prediction of geochemical anomalies requires methods capable of capturing complex spatial patterns that traditional approaches often fail to represent adequately. For N datasets of the form (Xi,Yi) representing the geographic coordinates of sampling points and Ci denoting the geochemical measurement, training multilayer perceptrons (MLPs) presents a challenge. The low informativeness of the input features and their weak correlation with the target variable result in excessively simplified predictions. Analysis of a baseline model trained only on geographic coordinates showed that, while the loss function converges rapidly, the resulting values become overly &amp;amp;ldquo;compressed&amp;amp;rdquo; and fail to reflect the actual concentration range. To address this, a preprocessing method based on anisotropy was developed to enhance the correlation between input and output variables. This approach constructs, for each prediction point, a structured informational model that incorporates the direction and magnitude of spatial variability through sectoral and radial partitioning of the nearest sampling data. The transformed features are then used in a dual-MLP architecture, where the first network produces sectoral estimates, and the second aggregates them into the final prediction. The results show that anisotropic feature transformation significantly improves neural network prediction capabilities in geochemical analysis.</p>
	]]></content:encoded>

	<dc:title>Methodology for Predicting Geochemical Anomalies Using Preprocessing of Input Geological Data and Dual Application of a Multilayer Perceptron</dc:title>
			<dc:creator>Daulet Akhmedov</dc:creator>
			<dc:creator>Baurzhan Bekmukhamedov</dc:creator>
			<dc:creator>Moldir Tanashova</dc:creator>
			<dc:creator>Zulfiya Seitmuratova</dc:creator>
		<dc:identifier>doi: 10.3390/computation14020043</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-02-03</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-02-03</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>43</prism:startingPage>
		<prism:doi>10.3390/computation14020043</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/2/43</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/2/42">

	<title>Computation, Vol. 14, Pages 42: Information Inequalities for Five Random Variables</title>
	<link>https://www.mdpi.com/2079-3197/14/2/42</link>
	<description>The entropic region is formed by the collection of the Shannon entropies of all subvectors of finitely many jointly distributed discrete random variables. For four or more variables, the structure of the entropic region is mostly unknown. We utilize a variant of the Maximum Entropy Method to obtain five-variable non-Shannon entropy inequalities, which delimit the five-variable entropy region. This method adds copies of some of the random variables in generations. A significant reduction in computational complexity, achieved through theoretical considerations and by harnessing the inherent symmetries, allowed us to calculate all five-variable non-Shannon inequalities provided by the first nine generations. Based on the results, we define two infinite collections of such inequalities and prove them to be entropy inequalities. We investigate downward-closed subsets of non-negative lattice points that parameterize these collections, and based on this, we develop an algorithm to enumerate all extremal inequalities. The discovered set of entropy inequalities is conjectured to characterize the applied method completely.</description>
	<pubDate>2026-02-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 42: Information Inequalities for Five Random Variables</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/2/42">doi: 10.3390/computation14020042</a></p>
	<p>Authors:
		Laszlo Csirmaz
		Elod P. Csirmaz
		</p>
	<p>The entropic region is formed by the collection of the Shannon entropies of all subvectors of finitely many jointly distributed discrete random variables. For four or more variables, the structure of the entropic region is mostly unknown. We utilize a variant of the Maximum Entropy Method to obtain five-variable non-Shannon entropy inequalities, which delimit the five-variable entropy region. This method adds copies of some of the random variables in generations. A significant reduction in computational complexity, achieved through theoretical considerations and by harnessing the inherent symmetries, allowed us to calculate all five-variable non-Shannon inequalities provided by the first nine generations. Based on the results, we define two infinite collections of such inequalities and prove them to be entropy inequalities. We investigate downward-closed subsets of non-negative lattice points that parameterize these collections, and based on this, we develop an algorithm to enumerate all extremal inequalities. The discovered set of entropy inequalities is conjectured to characterize the applied method completely.</p>
	]]></content:encoded>

	<dc:title>Information Inequalities for Five Random Variables</dc:title>
			<dc:creator>Laszlo Csirmaz</dc:creator>
			<dc:creator>Elod P. Csirmaz</dc:creator>
		<dc:identifier>doi: 10.3390/computation14020042</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-02-02</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-02-02</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>42</prism:startingPage>
		<prism:doi>10.3390/computation14020042</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/2/42</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/2/41">

	<title>Computation, Vol. 14, Pages 41: Modelling of Batch Fermentation Processes of Ethanol Production by Kluyveromyces marxianus</title>
	<link>https://www.mdpi.com/2079-3197/14/2/41</link>
	<description>A representative cluster-based model of the batch process of ethanol production by Kluyveromyces sp. is proposed. Experimental data from fermentation processes of 17 different strains of K. marxianus are used; each of them potentially exhibits different metabolic and kinetic behavior. Three algorithms for clustering are applied. Two modifications of Principal Component Analysis (PCA)&amp;amp;mdash;hierarchical clustering and k-means clustering; and InterCriteria Analysis (ICrA) are used to simplify a large dataset into a smaller set while preserving as much information as possible. The experimental data are organized into two main clusters. As a result, the most representative fermentation processes are identified. For each of the fermentation processes in the clusters, structural and parameter identification are performed. Four different structures describing the specific substrate (glucose) consumption rate are applied. The best structure is used to derive the representative model using the data from the first cluster. Verification of the derived model is performed using experimental data of the second cluster. Model parameter identification is performed by applying an evolutionary optimization algorithm.</description>
	<pubDate>2026-02-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 41: Modelling of Batch Fermentation Processes of Ethanol Production by Kluyveromyces marxianus</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/2/41">doi: 10.3390/computation14020041</a></p>
	<p>Authors:
		Olympia Roeva
		Anastasiya Zlatkova
		Velislava Lyubenova
		Maya Ignatova
		Denitsa Kristeva
		Gergana Roeva
		Dafina Zoteva
		</p>
	<p>A representative cluster-based model of the batch process of ethanol production by Kluyveromyces sp. is proposed. Experimental data from fermentation processes of 17 different strains of K. marxianus are used; each of them potentially exhibits different metabolic and kinetic behavior. Three algorithms for clustering are applied. Two modifications of Principal Component Analysis (PCA)&amp;amp;mdash;hierarchical clustering and k-means clustering; and InterCriteria Analysis (ICrA) are used to simplify a large dataset into a smaller set while preserving as much information as possible. The experimental data are organized into two main clusters. As a result, the most representative fermentation processes are identified. For each of the fermentation processes in the clusters, structural and parameter identification are performed. Four different structures describing the specific substrate (glucose) consumption rate are applied. The best structure is used to derive the representative model using the data from the first cluster. Verification of the derived model is performed using experimental data of the second cluster. Model parameter identification is performed by applying an evolutionary optimization algorithm.</p>
	]]></content:encoded>

	<dc:title>Modelling of Batch Fermentation Processes of Ethanol Production by Kluyveromyces marxianus</dc:title>
			<dc:creator>Olympia Roeva</dc:creator>
			<dc:creator>Anastasiya Zlatkova</dc:creator>
			<dc:creator>Velislava Lyubenova</dc:creator>
			<dc:creator>Maya Ignatova</dc:creator>
			<dc:creator>Denitsa Kristeva</dc:creator>
			<dc:creator>Gergana Roeva</dc:creator>
			<dc:creator>Dafina Zoteva</dc:creator>
		<dc:identifier>doi: 10.3390/computation14020041</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-02-02</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-02-02</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>41</prism:startingPage>
		<prism:doi>10.3390/computation14020041</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/2/41</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/2/40">

	<title>Computation, Vol. 14, Pages 40: Can Generative AI Co-Evolve with Human Guidance and Display Non-Utilitarian Moral Behavior?</title>
	<link>https://www.mdpi.com/2079-3197/14/2/40</link>
	<description>The growing presence of autonomous AI systems, such as self-driving cars and humanoid robots, raises critical ethical questions about how these technologies should make moral decisions. Most existing moral machine (MM) models rely on secular, utilitarian principles, which prioritize the greatest good for the greatest number but often overlook the religious and cultural values that shape moral reasoning across different traditions. This paper explores how theological perspectives, particularly those from Christian, Islamic, and East Asian ethical frameworks, can inform and enrich algorithmic ethics in autonomous systems. By integrating these religious values, the study proposes a more inclusive approach to AI decision making that respects diverse beliefs. A key innovation of this research is the use of large language models (LLMs), such as ChatGPT (GPT-5.2), to design with human guidance MM architectures that incorporate these ethical systems. Through Python 3 scripts, the paper demonstrates how autonomous machines, e.g., vehicles and humanoid robots, can make ethically informed decisions based on different religious principles. The aim is to contribute to the development of AI systems that are not only technologically advanced but also culturally sensitive and ethically responsible, ensuring that they align with a wide range of theological values in morally complex situations.</description>
	<pubDate>2026-02-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 40: Can Generative AI Co-Evolve with Human Guidance and Display Non-Utilitarian Moral Behavior?</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/2/40">doi: 10.3390/computation14020040</a></p>
	<p>Authors:
		Rafael Lahoz-Beltra
		</p>
	<p>The growing presence of autonomous AI systems, such as self-driving cars and humanoid robots, raises critical ethical questions about how these technologies should make moral decisions. Most existing moral machine (MM) models rely on secular, utilitarian principles, which prioritize the greatest good for the greatest number but often overlook the religious and cultural values that shape moral reasoning across different traditions. This paper explores how theological perspectives, particularly those from Christian, Islamic, and East Asian ethical frameworks, can inform and enrich algorithmic ethics in autonomous systems. By integrating these religious values, the study proposes a more inclusive approach to AI decision making that respects diverse beliefs. A key innovation of this research is the use of large language models (LLMs), such as ChatGPT (GPT-5.2), to design with human guidance MM architectures that incorporate these ethical systems. Through Python 3 scripts, the paper demonstrates how autonomous machines, e.g., vehicles and humanoid robots, can make ethically informed decisions based on different religious principles. The aim is to contribute to the development of AI systems that are not only technologically advanced but also culturally sensitive and ethically responsible, ensuring that they align with a wide range of theological values in morally complex situations.</p>
	]]></content:encoded>

	<dc:title>Can Generative AI Co-Evolve with Human Guidance and Display Non-Utilitarian Moral Behavior?</dc:title>
			<dc:creator>Rafael Lahoz-Beltra</dc:creator>
		<dc:identifier>doi: 10.3390/computation14020040</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-02-02</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-02-02</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>40</prism:startingPage>
		<prism:doi>10.3390/computation14020040</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/2/40</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/2/37">

	<title>Computation, Vol. 14, Pages 37: LocRes&amp;ndash;PINN: A Physics&amp;ndash;Informed Neural Network with Local Awareness and Residual Learning</title>
	<link>https://www.mdpi.com/2079-3197/14/2/37</link>
	<description>Physics&amp;ndash;Informed Neural Networks (PINNs) have demonstrated efficacy in solving both forward and inverse problems for nonlinear partial differential equations (PDEs). However, they frequently struggle to accurately capture multiscale physical features, particularly in regions exhibiting sharp local variations such as shock waves and discontinuities, and often suffer from optimization difficulties in complex loss landscapes. To address these issues, we propose LocRes&amp;ndash;PINN, a physics&amp;ndash;informed neural network framework that integrates local awareness mechanisms with residual learning. This framework integrates a radial basis function (RBF) encoder to enhance the perception of local variations and embeds it within a residual backbone to facilitate stable gradient propagation. Furthermore, we incorporate a residual&amp;ndash;based adaptive refinement strategy and an adaptive weighted loss scheme to dynamically focus training on high&amp;ndash;error regions and balance multi&amp;ndash;objective constraints. Numerical experiments on the Extended Korteweg&amp;ndash;de Vries, Navier&amp;ndash;Stokes, and Burgers equations demonstrate that LocRes&amp;ndash;PINN reduces relative prediction errors by approximately 12% to 67% compared to standard benchmarks. The results also verify the model&amp;rsquo;s robustness in parameter identification and noise resilience.</description>
	<pubDate>2026-02-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 37: LocRes&ndash;PINN: A Physics&ndash;Informed Neural Network with Local Awareness and Residual Learning</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/2/37">doi: 10.3390/computation14020037</a></p>
	<p>Authors:
		Tangying Lv
		Wenming Yin
		Hengkai Yao
		Qingliang Liu
		Yitong Sun
		Kuan Zhao
		Shanliang Zhu
		</p>
	<p>Physics&ndash;Informed Neural Networks (PINNs) have demonstrated efficacy in solving both forward and inverse problems for nonlinear partial differential equations (PDEs). However, they frequently struggle to accurately capture multiscale physical features, particularly in regions exhibiting sharp local variations such as shock waves and discontinuities, and often suffer from optimization difficulties in complex loss landscapes. To address these issues, we propose LocRes&ndash;PINN, a physics&ndash;informed neural network framework that integrates local awareness mechanisms with residual learning. This framework integrates a radial basis function (RBF) encoder to enhance the perception of local variations and embeds it within a residual backbone to facilitate stable gradient propagation. Furthermore, we incorporate a residual&ndash;based adaptive refinement strategy and an adaptive weighted loss scheme to dynamically focus training on high&ndash;error regions and balance multi&ndash;objective constraints. Numerical experiments on the Extended Korteweg&ndash;de Vries, Navier&ndash;Stokes, and Burgers equations demonstrate that LocRes&ndash;PINN reduces relative prediction errors by approximately 12% to 67% compared to standard benchmarks. The results also verify the model&rsquo;s robustness in parameter identification and noise resilience.</p>
	]]></content:encoded>

	<dc:title>LocRes&amp;ndash;PINN: A Physics&amp;ndash;Informed Neural Network with Local Awareness and Residual Learning</dc:title>
			<dc:creator>Tangying Lv</dc:creator>
			<dc:creator>Wenming Yin</dc:creator>
			<dc:creator>Hengkai Yao</dc:creator>
			<dc:creator>Qingliang Liu</dc:creator>
			<dc:creator>Yitong Sun</dc:creator>
			<dc:creator>Kuan Zhao</dc:creator>
			<dc:creator>Shanliang Zhu</dc:creator>
		<dc:identifier>doi: 10.3390/computation14020037</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-02-02</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-02-02</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>37</prism:startingPage>
		<prism:doi>10.3390/computation14020037</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/2/37</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/2/39">

	<title>Computation, Vol. 14, Pages 39: Semi-Empirical Estimation of Aerosol Particle Influence at the Performance of Terrestrial FSO Links over the Sea</title>
	<link>https://www.mdpi.com/2079-3197/14/2/39</link>
	<description>Free-space optical (FSO) communication enables high-bandwidth license-free data transmission and is particularly attractive for maritime point-to-point links. However, FSO performance is strongly affected by atmospheric conditions. This work presents a semi-empirical model quantifying the impact of fine particulate matter (PM2.5) on received optical power in a maritime FSO link. The model is derived from long-term experimental measurements collected over a 2.96 km horizontal optical path above the sea surface, combining received signal strength indicator (RSSI) data with co-located PM2.5 observations. Statistical analysis reveals a strong negative correlation between PM2.5 concentration and received optical power (Pearson coefficient &amp;minus;0.748). Using a logarithmic attenuation formulation, the PM2.5-induced attenuation is estimated to increase by approximately 0.0026 dB/km per &amp;micro;g/m3 of PM2.5 concentration. A second-order semi-empirical model captures the observed nonlinear attenuation behavior with a coefficient of determination of R2 = 0.57. The proposed model provides a practical tool for link budgeting, performance forecasting, and adaptive design of maritime FSO systems operating in aerosol-rich environments.</description>
	<pubDate>2026-02-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 39: Semi-Empirical Estimation of Aerosol Particle Influence at the Performance of Terrestrial FSO Links over the Sea</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/2/39">doi: 10.3390/computation14020039</a></p>
	<p>Authors:
		Argyris N. Stassinakis
		Efstratios V. Chatzikontis
		Kyle R. Drexler
		Andreas D. Tsigopoulos
		Gratchia Mkrttchian
		Hector E. Nistazakis
		</p>
	<p>Free-space optical (FSO) communication enables high-bandwidth license-free data transmission and is particularly attractive for maritime point-to-point links. However, FSO performance is strongly affected by atmospheric conditions. This work presents a semi-empirical model quantifying the impact of fine particulate matter (PM2.5) on received optical power in a maritime FSO link. The model is derived from long-term experimental measurements collected over a 2.96 km horizontal optical path above the sea surface, combining received signal strength indicator (RSSI) data with co-located PM2.5 observations. Statistical analysis reveals a strong negative correlation between PM2.5 concentration and received optical power (Pearson coefficient &minus;0.748). Using a logarithmic attenuation formulation, the PM2.5-induced attenuation is estimated to increase by approximately 0.0026 dB/km per &micro;g/m3 of PM2.5 concentration. A second-order semi-empirical model captures the observed nonlinear attenuation behavior with a coefficient of determination of R2 = 0.57. The proposed model provides a practical tool for link budgeting, performance forecasting, and adaptive design of maritime FSO systems operating in aerosol-rich environments.</p>
	]]></content:encoded>

	<dc:title>Semi-Empirical Estimation of Aerosol Particle Influence at the Performance of Terrestrial FSO Links over the Sea</dc:title>
			<dc:creator>Argyris N. Stassinakis</dc:creator>
			<dc:creator>Efstratios V. Chatzikontis</dc:creator>
			<dc:creator>Kyle R. Drexler</dc:creator>
			<dc:creator>Andreas D. Tsigopoulos</dc:creator>
			<dc:creator>Gratchia Mkrttchian</dc:creator>
			<dc:creator>Hector E. Nistazakis</dc:creator>
		<dc:identifier>doi: 10.3390/computation14020039</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-02-02</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-02-02</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>39</prism:startingPage>
		<prism:doi>10.3390/computation14020039</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/2/39</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/2/38">

	<title>Computation, Vol. 14, Pages 38: Development of a Dashboard for Simulation Workflow Visualization and Optimization of an Ammonia Synthesis Reactor in the HySTrAm Project (Horizon EU)</title>
	<link>https://www.mdpi.com/2079-3197/14/2/38</link>
	<description>Although hydrogen plays a crucial role in the EU&amp;rsquo;s strategy to reduce greenhouse gas emissions, its storage and transport are technically challenging. If ammonia is produced efficiently, it can be a promising hydrogen carrier, especially in decentralized and flexible conditions. The Horizon EU HySTrAm project addresses this problem by developing a small-scale, containerized demonstration plant consisting of (1) a short-term hydrogen storage container using novel ultraporous materials optimized through machine learning, and (2) an ammonia synthesis reactor based on an improved low-pressure Haber&amp;ndash;Bosch process. This paper presents an initial version of a Python (v3.9)-based dashboard designed to visualize and optimize the simulation workflow of the ammonia synthesis process. Designed as a baseline for a future online, automated tool, the dashboard allows the comparison of three reactor configurations already defined through simulations and aligned with the upcoming experimental campaign: single tube, two reactors in parallel swing mode and two reactors in series. Pressures at the inlet/outlet, temperatures across the reactor, operation recipe and ammonia production over time are displayed dynamically to evaluate the performance of the reactor. Future versions will include optimization features, such as the identification of optimal operating modes, the reduction of production time, an increase of productivity, and catalyst degradation estimation.</description>
	<pubDate>2026-02-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 38: Development of a Dashboard for Simulation Workflow Visualization and Optimization of an Ammonia Synthesis Reactor in the HySTrAm Project (Horizon EU)</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/2/38">doi: 10.3390/computation14020038</a></p>
	<p>Authors:
		Eleni Douvi
		Dimitra Douvi
		Jason Tsahalis
		Haralabos-Theodoros Tsahalis
		</p>
	<p>Although hydrogen plays a crucial role in the EU&rsquo;s strategy to reduce greenhouse gas emissions, its storage and transport are technically challenging. If ammonia is produced efficiently, it can be a promising hydrogen carrier, especially in decentralized and flexible conditions. The Horizon EU HySTrAm project addresses this problem by developing a small-scale, containerized demonstration plant consisting of (1) a short-term hydrogen storage container using novel ultraporous materials optimized through machine learning, and (2) an ammonia synthesis reactor based on an improved low-pressure Haber&ndash;Bosch process. This paper presents an initial version of a Python (v3.9)-based dashboard designed to visualize and optimize the simulation workflow of the ammonia synthesis process. Designed as a baseline for a future online, automated tool, the dashboard allows the comparison of three reactor configurations already defined through simulations and aligned with the upcoming experimental campaign: single tube, two reactors in parallel swing mode and two reactors in series. Pressures at the inlet/outlet, temperatures across the reactor, operation recipe and ammonia production over time are displayed dynamically to evaluate the performance of the reactor. Future versions will include optimization features, such as the identification of optimal operating modes, the reduction of production time, an increase of productivity, and catalyst degradation estimation.</p>
	]]></content:encoded>

	<dc:title>Development of a Dashboard for Simulation Workflow Visualization and Optimization of an Ammonia Synthesis Reactor in the HySTrAm Project (Horizon EU)</dc:title>
			<dc:creator>Eleni Douvi</dc:creator>
			<dc:creator>Dimitra Douvi</dc:creator>
			<dc:creator>Jason Tsahalis</dc:creator>
			<dc:creator>Haralabos-Theodoros Tsahalis</dc:creator>
		<dc:identifier>doi: 10.3390/computation14020038</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-02-02</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-02-02</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>38</prism:startingPage>
		<prism:doi>10.3390/computation14020038</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/2/38</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/2/35">

	<title>Computation, Vol. 14, Pages 35: Comparison of Lagrangian and Isogeometric Boundary Element Formulations for Orthotropic Heat Conduction Problems</title>
	<link>https://www.mdpi.com/2079-3197/14/2/35</link>
	<description>Orthotropic materials are increasingly employed in advanced thermal systems due to their direction-dependent heat transfer characteristics. Accurate numerical modeling of heat conduction in such media remains challenging, particularly for 3D geometries with nonlinear boundary conditions and internal heat generation. In this study, conventional boundary element method (BEM) and isogeometric boundary element method (IGABEM) formulations are developed and compared for steady-state orthotropic heat conduction problems. A coordinate transformation is adopted to map the anisotropic governing equation onto an equivalent isotropic form, enabling the use of classical Laplace fundamental solutions. Volumetric heat generation is incorporated via the radial integration method (RIM), preserving the boundary-only discretization, while nonlinear Robin boundary conditions are treated using variable condensation and a Newton&amp;ndash;Raphson iterative scheme. The performance of both methods is evaluated using a hollow ellipsoidal benchmark problem with available analytical solutions. The results demonstrate that IGABEM provides higher accuracy and smoother convergence than conventional BEM, particularly for higher-order discretizations, which is owing to its exact geometric representation and higher continuity. Although IGABEM involves additional computational overhead due to NURBS evaluations, both methods exhibit similar quadratic scaling with respect to the degrees of freedom.</description>
	<pubDate>2026-02-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 35: Comparison of Lagrangian and Isogeometric Boundary Element Formulations for Orthotropic Heat Conduction Problems</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/2/35">doi: 10.3390/computation14020035</a></p>
	<p>Authors:
		Ege Erdoğan
		Barbaros Çetin
		</p>
	<p>Orthotropic materials are increasingly employed in advanced thermal systems due to their direction-dependent heat transfer characteristics. Accurate numerical modeling of heat conduction in such media remains challenging, particularly for 3D geometries with nonlinear boundary conditions and internal heat generation. In this study, conventional boundary element method (BEM) and isogeometric boundary element method (IGABEM) formulations are developed and compared for steady-state orthotropic heat conduction problems. A coordinate transformation is adopted to map the anisotropic governing equation onto an equivalent isotropic form, enabling the use of classical Laplace fundamental solutions. Volumetric heat generation is incorporated via the radial integration method (RIM), preserving the boundary-only discretization, while nonlinear Robin boundary conditions are treated using variable condensation and a Newton&ndash;Raphson iterative scheme. The performance of both methods is evaluated using a hollow ellipsoidal benchmark problem with available analytical solutions. The results demonstrate that IGABEM provides higher accuracy and smoother convergence than conventional BEM, particularly for higher-order discretizations, which is owing to its exact geometric representation and higher continuity. Although IGABEM involves additional computational overhead due to NURBS evaluations, both methods exhibit similar quadratic scaling with respect to the degrees of freedom.</p>
	]]></content:encoded>

	<dc:title>Comparison of Lagrangian and Isogeometric Boundary Element Formulations for Orthotropic Heat Conduction Problems</dc:title>
			<dc:creator>Ege Erdoğan</dc:creator>
			<dc:creator>Barbaros Çetin</dc:creator>
		<dc:identifier>doi: 10.3390/computation14020035</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-02-02</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-02-02</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>35</prism:startingPage>
		<prism:doi>10.3390/computation14020035</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/2/35</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/2/36">

	<title>Computation, Vol. 14, Pages 36: A Method for Road Spectrum Identification in Real-Vehicle Tests by Fusing Time-Frequency Domain Features</title>
	<link>https://www.mdpi.com/2079-3197/14/2/36</link>
	<description>Most unpaved roads are subjectively classified as Class D roads. However, significant variations exist across different sites and environments (e.g., mining areas). A major challenge in the engineering field is how to quickly correct the Power Spectral Density (PSD) of the unpaved road in question using existing equipment and limited sensors. To address this issue, this study combines real-vehicle test data with a suspension dynamics simulation model. It employs time-domain reconstruction via Inverse Fast Fourier Transform (IFFT) and wavelet processing methods to construct an optimized model that fuses time-frequency domain features. With the help of a surrogate optimization method, the model achieves the best approximation of the actual road surface, corrects the PSD parameters of the unpaved road, and provides a reliable input basis for vehicle dynamics simulation, fatigue life prediction, and performance evaluation.</description>
	<pubDate>2026-02-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 36: A Method for Road Spectrum Identification in Real-Vehicle Tests by Fusing Time-Frequency Domain Features</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/2/36">doi: 10.3390/computation14020036</a></p>
	<p>Authors:
		Biao Qiu
		Chaiyan Jettanasen
		</p>
	<p>Most unpaved roads are subjectively classified as Class D roads. However, significant variations exist across different sites and environments (e.g., mining areas). A major challenge in the engineering field is how to quickly correct the Power Spectral Density (PSD) of the unpaved road in question using existing equipment and limited sensors. To address this issue, this study combines real-vehicle test data with a suspension dynamics simulation model. It employs time-domain reconstruction via Inverse Fast Fourier Transform (IFFT) and wavelet processing methods to construct an optimized model that fuses time-frequency domain features. With the help of a surrogate optimization method, the model achieves the best approximation of the actual road surface, corrects the PSD parameters of the unpaved road, and provides a reliable input basis for vehicle dynamics simulation, fatigue life prediction, and performance evaluation.</p>
	]]></content:encoded>

	<dc:title>A Method for Road Spectrum Identification in Real-Vehicle Tests by Fusing Time-Frequency Domain Features</dc:title>
			<dc:creator>Biao Qiu</dc:creator>
			<dc:creator>Chaiyan Jettanasen</dc:creator>
		<dc:identifier>doi: 10.3390/computation14020036</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-02-02</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-02-02</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>36</prism:startingPage>
		<prism:doi>10.3390/computation14020036</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/2/36</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/2/34">

	<title>Computation, Vol. 14, Pages 34: Application of the Dynamic Latent Space Model to Social Networks with Time-Varying Covariates</title>
	<link>https://www.mdpi.com/2079-3197/14/2/34</link>
	<description>With the growing accessibility of tools such as online surveys and web scraping, longitudinal social network data are more commonly collected in social science research along with non-network survey data. Such data play a critical role in helping social scientists understand how relationships develop and evolve over time. Existing dynamic network models such as the Stochastic Actor-Oriented Model and the Temporal Exponential Random Graph Model provide frameworks to analyze traits of both the networks and the external non-network covariates. However, research on the dynamic latent space model (DLSM) has focused mainly on factors intrinsic to the networks themselves. Despite some discussion, the role of non-network data such as contextual or behavioral covariates remain a topic to be further explored in the context of DLSMs. In this study, one application of the DLSM to incorporate dynamic non-network covariates collected alongside friendship networks using autoregressive processes is presented. By analyzing two friendship network datasets with different time points and psychological covariates, it is shown how external factors can contribute to a deeper understanding of social interaction dynamics over time.</description>
	<pubDate>2026-02-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 34: Application of the Dynamic Latent Space Model to Social Networks with Time-Varying Covariates</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/2/34">doi: 10.3390/computation14020034</a></p>
	<p>Authors:
		Ziqian Xu
		Zhiyong Zhang
		</p>
	<p>With the growing accessibility of tools such as online surveys and web scraping, longitudinal social network data are more commonly collected in social science research along with non-network survey data. Such data play a critical role in helping social scientists understand how relationships develop and evolve over time. Existing dynamic network models such as the Stochastic Actor-Oriented Model and the Temporal Exponential Random Graph Model provide frameworks to analyze traits of both the networks and the external non-network covariates. However, research on the dynamic latent space model (DLSM) has focused mainly on factors intrinsic to the networks themselves. Despite some discussion, the role of non-network data such as contextual or behavioral covariates remain a topic to be further explored in the context of DLSMs. In this study, one application of the DLSM to incorporate dynamic non-network covariates collected alongside friendship networks using autoregressive processes is presented. By analyzing two friendship network datasets with different time points and psychological covariates, it is shown how external factors can contribute to a deeper understanding of social interaction dynamics over time.</p>
	]]></content:encoded>

	<dc:title>Application of the Dynamic Latent Space Model to Social Networks with Time-Varying Covariates</dc:title>
			<dc:creator>Ziqian Xu</dc:creator>
			<dc:creator>Zhiyong Zhang</dc:creator>
		<dc:identifier>doi: 10.3390/computation14020034</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-02-01</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-02-01</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>34</prism:startingPage>
		<prism:doi>10.3390/computation14020034</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/2/34</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/2/33">

	<title>Computation, Vol. 14, Pages 33: Integrative Nutritional Assessment of Avocado Leaves Using Entropy-Weighted Spectral Indices and Fusion Learning</title>
	<link>https://www.mdpi.com/2079-3197/14/2/33</link>
	<description>Accurate and non-destructive assessment of plant nutritional status remains a key challenge in precision agriculture, particularly under dynamic physiological conditions such as dehydration. Therefore, this study focused on developing an integrated nutritional assessment framework for avocado (Persea americana Mill.) leaves across progressive dehydration stages using spectral analysis. A novel nutritional function index (NFI) was innovatively constructed using an entropy-weighted multi-criteria decision-making approach. This unified assessment metric integrated critical physiological indicators, such as moisture content, nitrogen content, and chlorophyll content estimated from soil and plant analyzer development (SPAD) readings. To enhance the prediction accuracy and interpretability of NFI, innovative vegetation indices (VIs) specifically tailored to NFI were systematically constructed using exhaustive wavelength-combination screening. Optimal wavelengths identified from short-wave infrared regions (1446, 1455, 1465, 1865, and 1937 nm) were employed to build physiologically meaningful VIs, which were highly sensitive to moisture and biochemical constituents. Feature wavelengths selected via the successive projections algorithm and competitive adaptive reweighted sampling further reduced spectral redundancy and improved modeling efficiency. Both feature-level and algorithm-level data fusion methods effectively combined VIs and selected feature wavelengths, significantly enhancing prediction performance. The stacking algorithm demonstrated robust performance, achieving the highest predictive accuracy (R2V = 0.986, RMSEV = 0.032) for NFI estimation. This fusion-based modeling approach outperformed conventional single-model schemes in terms of accuracy and robustness. Unlike previous studies that focused on isolated spectral predictors, this work introduces an integrative framework combining entropy-weighted feature synthesis and multiscale fusion learning. The developed strategy offers a powerful tool for real-time plant health monitoring and supports precision agricultural decision-making.</description>
	<pubDate>2026-02-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 33: Integrative Nutritional Assessment of Avocado Leaves Using Entropy-Weighted Spectral Indices and Fusion Learning</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/2/33">doi: 10.3390/computation14020033</a></p>
	<p>Authors:
		Zhen Guo
		Juan Sebastian Estrada
		Xingfeng Guo
		Redmond R. Shamshiri
		Marcelo Pereyra
		Fernando Auat Cheein
		</p>
	<p>Accurate and non-destructive assessment of plant nutritional status remains a key challenge in precision agriculture, particularly under dynamic physiological conditions such as dehydration. Therefore, this study focused on developing an integrated nutritional assessment framework for avocado (Persea americana Mill.) leaves across progressive dehydration stages using spectral analysis. A novel nutritional function index (NFI) was innovatively constructed using an entropy-weighted multi-criteria decision-making approach. This unified assessment metric integrated critical physiological indicators, such as moisture content, nitrogen content, and chlorophyll content estimated from soil and plant analyzer development (SPAD) readings. To enhance the prediction accuracy and interpretability of NFI, innovative vegetation indices (VIs) specifically tailored to NFI were systematically constructed using exhaustive wavelength-combination screening. Optimal wavelengths identified from short-wave infrared regions (1446, 1455, 1465, 1865, and 1937 nm) were employed to build physiologically meaningful VIs, which were highly sensitive to moisture and biochemical constituents. Feature wavelengths selected via the successive projections algorithm and competitive adaptive reweighted sampling further reduced spectral redundancy and improved modeling efficiency. Both feature-level and algorithm-level data fusion methods effectively combined VIs and selected feature wavelengths, significantly enhancing prediction performance. The stacking algorithm demonstrated robust performance, achieving the highest predictive accuracy (R2V = 0.986, RMSEV = 0.032) for NFI estimation. This fusion-based modeling approach outperformed conventional single-model schemes in terms of accuracy and robustness. Unlike previous studies that focused on isolated spectral predictors, this work introduces an integrative framework combining entropy-weighted feature synthesis and multiscale fusion learning. 
The developed strategy offers a powerful tool for real-time plant health monitoring and supports precision agricultural decision-making.</p>
	]]></content:encoded>

	<dc:title>Integrative Nutritional Assessment of Avocado Leaves Using Entropy-Weighted Spectral Indices and Fusion Learning</dc:title>
			<dc:creator>Zhen Guo</dc:creator>
			<dc:creator>Juan Sebastian Estrada</dc:creator>
			<dc:creator>Xingfeng Guo</dc:creator>
			<dc:creator>Redmond R. Shamshiri</dc:creator>
			<dc:creator>Marcelo Pereyra</dc:creator>
			<dc:creator>Fernando Auat Cheein</dc:creator>
		<dc:identifier>doi: 10.3390/computation14020033</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-02-01</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-02-01</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>33</prism:startingPage>
		<prism:doi>10.3390/computation14020033</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/2/33</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/2/32">

	<title>Computation, Vol. 14, Pages 32: A Study of the Efficiency of Parallel Computing for Constructing Bifurcation Diagrams of the Fractional Selkov Oscillator with Variable Coefficients and Memory</title>
	<link>https://www.mdpi.com/2079-3197/14/2/32</link>
	<description>This paper presents a comprehensive performance analysis and practical implementation of a parallel algorithm for constructing bifurcation diagrams of the fractional Selkov oscillator with variable coefficients and memory (SFO). The primary contribution lies in the systematic benchmarking and validation of a coarse-grained parallelization strategy (MapReduce) applied to a computationally intensive class of problems&amp;mdash;fractional-order systems with hereditary effects. We investigate the efficiency of a parallel algorithm that leverages central processing unit (CPU) capabilities to compute bifurcation diagrams of the Selkov fractional oscillator as a function of the characteristic time scale. The parallel algorithm is implemented in the ABMSelkovFracSim 2.0 software package using Python 3.13. This package also incorporates the Adams&amp;ndash;Bashforth&amp;ndash;Moulton numerical algorithm for obtaining numerical solutions to the Selkov fractional oscillator, thereby accounting for heredity (memory) effects. The Selkov fractional oscillator is a system of nonlinear ordinary differential equations with Gerasimov&amp;ndash;Caputo derivatives of fractional order variables and non-constant coefficients, which include a characteristic time scale parameter to ensure dimensional consistency in the model equations. This paper evaluates the efficiency, speedup, and cost of the parallel algorithm, and determines its optimal configuration based on the number of worker processes. The optimal number of processes required to achieve maximum efficiency for the algorithm is determined. We apply the TAECO approach to evaluate the efficiency of the parallel algorithm: T (execution time), A (acceleration), E (efficiency), C (cost), O (cost optimality index). Graphs illustrating the efficiency characteristics of the parallel algorithm as functions of the number of CPU processes are provided.</description>
	<pubDate>2026-02-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 32: A Study of the Efficiency of Parallel Computing for Constructing Bifurcation Diagrams of the Fractional Selkov Oscillator with Variable Coefficients and Memory</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/2/32">doi: 10.3390/computation14020032</a></p>
	<p>Authors:
		Dmitriy Tverdyi
		Roman Parovik
		</p>
	<p>This paper presents a comprehensive performance analysis and practical implementation of a parallel algorithm for constructing bifurcation diagrams of the fractional Selkov oscillator with variable coefficients and memory (SFO). The primary contribution lies in the systematic benchmarking and validation of a coarse-grained parallelization strategy (MapReduce) applied to a computationally intensive class of problems&mdash;fractional-order systems with hereditary effects. We investigate the efficiency of a parallel algorithm that leverages central processing unit (CPU) capabilities to compute bifurcation diagrams of the Selkov fractional oscillator as a function of the characteristic time scale. The parallel algorithm is implemented in the ABMSelkovFracSim 2.0 software package using Python 3.13. This package also incorporates the Adams&ndash;Bashforth&ndash;Moulton numerical algorithm for obtaining numerical solutions to the Selkov fractional oscillator, thereby accounting for heredity (memory) effects. The Selkov fractional oscillator is a system of nonlinear ordinary differential equations with Gerasimov&ndash;Caputo derivatives of fractional order variables and non-constant coefficients, which include a characteristic time scale parameter to ensure dimensional consistency in the model equations. This paper evaluates the efficiency, speedup, and cost of the parallel algorithm, and determines its optimal configuration based on the number of worker processes. The optimal number of processes required to achieve maximum efficiency for the algorithm is determined. We apply the TAECO approach to evaluate the efficiency of the parallel algorithm: T (execution time), A (acceleration), E (efficiency), C (cost), O (cost optimality index). Graphs illustrating the efficiency characteristics of the parallel algorithm as functions of the number of CPU processes are provided.</p>
	]]></content:encoded>

	<dc:title>A Study of the Efficiency of Parallel Computing for Constructing Bifurcation Diagrams of the Fractional Selkov Oscillator with Variable Coefficients and Memory</dc:title>
			<dc:creator>Dmitriy Tverdyi</dc:creator>
			<dc:creator>Roman Parovik</dc:creator>
		<dc:identifier>doi: 10.3390/computation14020032</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-02-01</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-02-01</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>32</prism:startingPage>
		<prism:doi>10.3390/computation14020032</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/2/32</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/2/31">

	<title>Computation, Vol. 14, Pages 31: A Replication Study for Consumer Digital Twins: Pilot Sites Analysis and Experience from the SENDER Project (Horizon 2020)</title>
	<link>https://www.mdpi.com/2079-3197/14/2/31</link>
	<description>The SENDER (Sustainable Consumer Engagement and Demand Response) project aims to develop an innovative interface that engages energy consumers in Demand Response (DR) programs by developing new technologies to predict energy consumption, enhance market flexibility, and manage the exploitation of Renewable Energy Sources (RES). The current paper presents a replication study for consumer Digital Twins (DTs) that simulate energy consumption patterns and occupancy behaviors in various households across three pilot sites (Austria, Spain, Finland) based on six-month historical and real-time data related to loads, sensors, and relevant details for every household. Due to data limitations and inhomogeneity, we conducted a replication analysis focusing only on Austria and Spain, where available data regarding power and motion alarm sensors were sufficient, leading to a replication scenario by gradually increasing the number of households. In addition to limited data and short time of measurements, other challenges faced included inconsistencies in sensor installations and limited information on occupancy. In order to ensure reliable results, data was filtered, and households with common characteristics were grouped together to improve accuracy and consistency in DT modeling. Finally, it was concluded that a successful replication procedure requires sufficient continuous, frequent, and homogeneous data, along with its validation.</description>
	<pubDate>2026-02-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 31: A Replication Study for Consumer Digital Twins: Pilot Sites Analysis and Experience from the SENDER Project (Horizon 2020)</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/2/31">doi: 10.3390/computation14020031</a></p>
	<p>Authors:
		Eleni Douvi
		Dimitra Douvi
		Jason Tsahalis
		Haralabos-Theodoros Tsahalis
		</p>
	<p>The SENDER (Sustainable Consumer Engagement and Demand Response) project aims to develop an innovative interface that engages energy consumers in Demand Response (DR) programs by developing new technologies to predict energy consumption, enhance market flexibility, and manage the exploitation of Renewable Energy Sources (RES). The current paper presents a replication study for consumer Digital Twins (DTs) that simulate energy consumption patterns and occupancy behaviors in various households across three pilot sites (Austria, Spain, Finland) based on six-month historical and real-time data related to loads, sensors, and relevant details for every household. Due to data limitations and inhomogeneity, we conducted a replication analysis focusing only on Austria and Spain, where available data regarding power and motion alarm sensors were sufficient, leading to a replication scenario by gradually increasing the number of households. In addition to limited data and short time of measurements, other challenges faced included inconsistencies in sensor installations and limited information on occupancy. In order to ensure reliable results, data was filtered, and households with common characteristics were grouped together to improve accuracy and consistency in DT modeling. Finally, it was concluded that a successful replication procedure requires sufficient continuous, frequent, and homogeneous data, along with its validation.</p>
	]]></content:encoded>

	<dc:title>A Replication Study for Consumer Digital Twins: Pilot Sites Analysis and Experience from the SENDER Project (Horizon 2020)</dc:title>
			<dc:creator>Eleni Douvi</dc:creator>
			<dc:creator>Dimitra Douvi</dc:creator>
			<dc:creator>Jason Tsahalis</dc:creator>
			<dc:creator>Haralabos-Theodoros Tsahalis</dc:creator>
		<dc:identifier>doi: 10.3390/computation14020031</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-02-01</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-02-01</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>31</prism:startingPage>
		<prism:doi>10.3390/computation14020031</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/2/31</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/2/30">

	<title>Computation, Vol. 14, Pages 30: Analyzing the Impact of Vandalism, Hoarding, and Strikes on Fuel Distribution in Nigeria</title>
	<link>https://www.mdpi.com/2079-3197/14/2/30</link>
	<description>Fuel scarcity remains a recurrent challenge in Nigeria, with significant socioeconomic consequences despite the country&amp;rsquo;s status as a major crude oil producer. This study develops a novel deterministic mathematical model to examine the dynamics of petroleum product distribution in Nigeria&amp;rsquo;s downstream sector, with particular emphasis on Premium Motor Spirit (PMS). The model explicitly incorporates key disruption and behavioral mechanisms: pipeline vandalism, industrial actions, product diversion, and hoarding that collectively drive persistent fuel shortages. The model&amp;rsquo;s feasibility, positivity of solutions, and existence and uniqueness were established, ensuring consistency with real-world operational conditions. Five equilibrium points were identified, reflecting distinct operational regimes within the distribution network. A critical distribution threshold was analytically derived and numerically validated, revealing that a minimum supply of approximately 42 million liters of PMS per day is required to satisfy demand and eliminate fuel queues. Local and global stability analyses, conducted using Lyapunov functions and the Routh&amp;ndash;Hurwitz criteria, demonstrate that stable fuel distribution is achievable under effective policy coordination and stakeholder compliance. Numerical simulations show that hoarding by private retail marketers substantially intensifies scarcity, while industrial actions by transporters exert a more severe disruption than pipeline vandalism. The results further highlight the stabilizing role of alternative transportation routes, such as rail systems, in mitigating infrastructure failures and road-based logistics risks. Although refinery sources are aggregated and rail transport is idealized, the proposed framework offers a robust and adaptable tool for policy analysis, with relevance to both oil-producing and fuel-import-dependent economies.</description>
	<pubDate>2026-02-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 30: Analyzing the Impact of Vandalism, Hoarding, and Strikes on Fuel Distribution in Nigeria</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/2/30">doi: 10.3390/computation14020030</a></p>
	<p>Authors:
		Adam Ajimoti Ishaq
		Kazeem Babatunde Akande
		Samuel T. Akinyemi
		Adejimi A. Adeniji
		Kekana C. Malesela
		Kayode Oshinubi
		</p>
	<p>Fuel scarcity remains a recurrent challenge in Nigeria, with significant socioeconomic consequences despite the country&rsquo;s status as a major crude oil producer. This study develops a novel deterministic mathematical model to examine the dynamics of petroleum product distribution in Nigeria&rsquo;s downstream sector, with particular emphasis on Premium Motor Spirit (PMS). The model explicitly incorporates key disruption and behavioral mechanisms: pipeline vandalism, industrial actions, product diversion, and hoarding that collectively drive persistent fuel shortages. The model&rsquo;s feasibility, positivity of solutions, and existence and uniqueness were established, ensuring consistency with real-world operational conditions. Five equilibrium points were identified, reflecting distinct operational regimes within the distribution network. A critical distribution threshold was analytically derived and numerically validated, revealing that a minimum supply of approximately 42 million liters of PMS per day is required to satisfy demand and eliminate fuel queues. Local and global stability analyses, conducted using Lyapunov functions and the Routh&ndash;Hurwitz criteria, demonstrate that stable fuel distribution is achievable under effective policy coordination and stakeholder compliance. Numerical simulations show that hoarding by private retail marketers substantially intensifies scarcity, while industrial actions by transporters exert a more severe disruption than pipeline vandalism. The results further highlight the stabilizing role of alternative transportation routes, such as rail systems, in mitigating infrastructure failures and road-based logistics risks. Although refinery sources are aggregated and rail transport is idealized, the proposed framework offers a robust and adaptable tool for policy analysis, with relevance to both oil-producing and fuel-import-dependent economies.</p>
	]]></content:encoded>

	<dc:title>Analyzing the Impact of Vandalism, Hoarding, and Strikes on Fuel Distribution in Nigeria</dc:title>
			<dc:creator>Adam Ajimoti Ishaq</dc:creator>
			<dc:creator>Kazeem Babatunde Akande</dc:creator>
			<dc:creator>Samuel T. Akinyemi</dc:creator>
			<dc:creator>Adejimi A. Adeniji</dc:creator>
			<dc:creator>Kekana C. Malesela</dc:creator>
			<dc:creator>Kayode Oshinubi</dc:creator>
		<dc:identifier>doi: 10.3390/computation14020030</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-02-01</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-02-01</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>30</prism:startingPage>
		<prism:doi>10.3390/computation14020030</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/2/30</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/2/29">

	<title>Computation, Vol. 14, Pages 29: Advanced Topology Optimization: Methods and Applications</title>
	<link>https://www.mdpi.com/2079-3197/14/2/29</link>
	<description>Structural topology optimization is a powerful computational design paradigm that seeks the most efficient material distribution within a prescribed design domain to satisfy given performance requirements [...]</description>
	<pubDate>2026-01-29</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 29: Advanced Topology Optimization: Methods and Applications</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/2/29">doi: 10.3390/computation14020029</a></p>
	<p>Authors:
		Yun-Fei Fu
		</p>
	<p>Structural topology optimization is a powerful computational design paradigm that seeks the most efficient material distribution within a prescribed design domain to satisfy given performance requirements [...]</p>
	]]></content:encoded>

	<dc:title>Advanced Topology Optimization: Methods and Applications</dc:title>
			<dc:creator>Yun-Fei Fu</dc:creator>
		<dc:identifier>doi: 10.3390/computation14020029</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-01-29</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-01-29</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Editorial</prism:section>
	<prism:startingPage>29</prism:startingPage>
		<prism:doi>10.3390/computation14020029</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/2/29</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/2/28">

	<title>Computation, Vol. 14, Pages 28: Method for Simulating Solar Panel Oscillations Considering Thermal Shock</title>
	<link>https://www.mdpi.com/2079-3197/14/2/28</link>
	<description>The purpose of this work is to develop an approximate method for simulating the oscillations of a solar panel with consideration of thermal shock, based on a simulated spacecraft system model. The influence of thermal shock is reduced to an additional rotation of the spacecraft. The mechanical system itself (the spacecraft model) consists of a main body (a rigid body) and a flexible solar panel. The solar panel performs natural oscillations. An analysis of the influence of thermal shock on the parameters of natural oscillations was conducted. Results of computer simulation for a spacecraft configuration with a single solar panel are presented.</description>
	<pubDate>2026-01-24</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 28: Method for Simulating Solar Panel Oscillations Considering Thermal Shock</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/2/28">doi: 10.3390/computation14020028</a></p>
	<p>Authors:
		Andrey V. Sedelnikov
		Alexandra S. Marshalkina
		</p>
	<p>The purpose of this work is to develop an approximate method for simulating the oscillations of a solar panel with consideration of thermal shock, based on a simulated spacecraft system model. The influence of thermal shock is reduced to an additional rotation of the spacecraft. The mechanical system itself (the spacecraft model) consists of a main body (a rigid body) and a flexible solar panel. The solar panel performs natural oscillations. An analysis of the influence of thermal shock on the parameters of natural oscillations was conducted. Results of computer simulation for a spacecraft configuration with a single solar panel are presented.</p>
	]]></content:encoded>

	<dc:title>Method for Simulating Solar Panel Oscillations Considering Thermal Shock</dc:title>
			<dc:creator>Andrey V. Sedelnikov</dc:creator>
			<dc:creator>Alexandra S. Marshalkina</dc:creator>
		<dc:identifier>doi: 10.3390/computation14020028</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-01-24</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-01-24</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>28</prism:startingPage>
		<prism:doi>10.3390/computation14020028</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/2/28</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/1/27">

	<title>Computation, Vol. 14, Pages 27: State-of-the-Art Overview of Smooth-Edged Material Distribution for Optimizing Topology (SEMDOT) Algorithm</title>
	<link>https://www.mdpi.com/2079-3197/14/1/27</link>
	<description>Topology optimization is a powerful and efficient design tool, but the structures obtained by element-based topology optimization methods are often limited by fuzzy or jagged boundaries. The smooth-edged material distribution for optimizing topology algorithm (SEMDOT) can effectively deal with this problem and promote the practical application of topology optimization structures. This review outlines the theoretical evolution of SEMDOT, including both penalty-based and non-penalty-based formulations, while also providing access to open access codes. SEMDOT&amp;rsquo;s applications cover diverse areas, including self-supporting structures, energy-efficient manufacturing, bone tissue scaffolds, heat transfer systems, and building parts, demonstrating the versatility of SEMDOT. While SEMDOT addresses boundary issues in topology optimization structures, further theoretical refinement is needed to develop it into a comprehensive platform. This work consolidates the advances in SEMDOT, highlights its interdisciplinary impact, and identifies future research and implementation directions.</description>
	<pubDate>2026-01-21</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 27: State-of-the-Art Overview of Smooth-Edged Material Distribution for Optimizing Topology (SEMDOT) Algorithm</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/1/27">doi: 10.3390/computation14010027</a></p>
	<p>Authors:
		Minyan Liu
		Wanghua Hu
		Xuhui Gong
		Hao Zhou
		Baolin Zhao
		</p>
	<p>Topology optimization is a powerful and efficient design tool, but the structures obtained by element-based topology optimization methods are often limited by fuzzy or jagged boundaries. The smooth-edged material distribution for optimizing topology algorithm (SEMDOT) can effectively deal with this problem and promote the practical application of topology optimization structures. This review outlines the theoretical evolution of SEMDOT, including both penalty-based and non-penalty-based formulations, while also providing access to open access codes. SEMDOT&rsquo;s applications cover diverse areas, including self-supporting structures, energy-efficient manufacturing, bone tissue scaffolds, heat transfer systems, and building parts, demonstrating the versatility of SEMDOT. While SEMDOT addresses boundary issues in topology optimization structures, further theoretical refinement is needed to develop it into a comprehensive platform. This work consolidates the advances in SEMDOT, highlights its interdisciplinary impact, and identifies future research and implementation directions.</p>
	]]></content:encoded>

	<dc:title>State-of-the-Art Overview of Smooth-Edged Material Distribution for Optimizing Topology (SEMDOT) Algorithm</dc:title>
			<dc:creator>Minyan Liu</dc:creator>
			<dc:creator>Wanghua Hu</dc:creator>
			<dc:creator>Xuhui Gong</dc:creator>
			<dc:creator>Hao Zhou</dc:creator>
			<dc:creator>Baolin Zhao</dc:creator>
		<dc:identifier>doi: 10.3390/computation14010027</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-01-21</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-01-21</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>27</prism:startingPage>
		<prism:doi>10.3390/computation14010027</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/1/27</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/1/26">

	<title>Computation, Vol. 14, Pages 26: Regression Extensions of the New Polynomial Exponential Distribution: NPED-GLM and Poisson&amp;ndash;NPED Count Models with Applications in Engineering and Insurance</title>
	<link>https://www.mdpi.com/2079-3197/14/1/26</link>
	<description>The New Polynomial Exponential Distribution (NPED), introduced by Beghriche et al. (2022), provides a flexible one-parameter family capable of representing diverse hazard shapes and heavy-tailed behavior. Regression frameworks based on the NPED, however, have not yet been established. This paper introduces two methodological extensions: (i) a generalized linear model (NPED-GLM) in which the distribution parameter depends on covariates, and (ii) a Poisson&amp;ndash;NPED count regression model suitable for overdispersed and heavy-tailed count data. Likelihood-based inference, asymptotic properties, and simulation studies are developed to investigate the performance of the estimators. Applications to engineering failure-count data and insurance claim frequencies illustrate the advantages of the proposed models relative to classical Poisson, negative binomial, and Poisson&amp;ndash;Lindley regressions. These developments substantially broaden the applicability of the NPED in actuarial science, reliability engineering, and applied statistics.</description>
	<pubDate>2026-01-21</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 26: Regression Extensions of the New Polynomial Exponential Distribution: NPED-GLM and Poisson&amp;ndash;NPED Count Models with Applications in Engineering and Insurance</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/1/26">doi: 10.3390/computation14010026</a></p>
	<p>Authors:
		Halim Zeghdoudi
		Sandra S. Ferreira
		Vinoth Raman
		Dário Ferreira
		</p>
	<p>The New Polynomial Exponential Distribution (NPED), introduced by Beghriche et al. (2022), provides a flexible one-parameter family capable of representing diverse hazard shapes and heavy-tailed behavior. Regression frameworks based on the NPED, however, have not yet been established. This paper introduces two methodological extensions: (i) a generalized linear model (NPED-GLM) in which the distribution parameter depends on covariates, and (ii) a Poisson&ndash;NPED count regression model suitable for overdispersed and heavy-tailed count data. Likelihood-based inference, asymptotic properties, and simulation studies are developed to investigate the performance of the estimators. Applications to engineering failure-count data and insurance claim frequencies illustrate the advantages of the proposed models relative to classical Poisson, negative binomial, and Poisson&ndash;Lindley regressions. These developments substantially broaden the applicability of the NPED in actuarial science, reliability engineering, and applied statistics.</p>
	]]></content:encoded>

	<dc:title>Regression Extensions of the New Polynomial Exponential Distribution: NPED-GLM and Poisson&amp;ndash;NPED Count Models with Applications in Engineering and Insurance</dc:title>
			<dc:creator>Halim Zeghdoudi</dc:creator>
			<dc:creator>Sandra S. Ferreira</dc:creator>
			<dc:creator>Vinoth Raman</dc:creator>
			<dc:creator>Dário Ferreira</dc:creator>
		<dc:identifier>doi: 10.3390/computation14010026</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-01-21</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-01-21</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>26</prism:startingPage>
		<prism:doi>10.3390/computation14010026</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/1/26</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/1/25">

	<title>Computation, Vol. 14, Pages 25: Embedding-Based Alignments Capture Structural and Sequence Domains of Distantly Related Multifunctional Human Proteins</title>
	<link>https://www.mdpi.com/2079-3197/14/1/25</link>
	<description>Protein embedding is a protein representation that carries along the information derived from filtering large volumes of sequences stored in large archives. Routinely, the protein is represented by a matrix in which each residue is a context-specific vector whose dimensions reflect the size of the large architectures of neural networks (transformers) trained with deep learning algorithms on large volumes of sequences. A recently introduced method (Embedding-Based Alignment, EBA) is particularly suited for pairwise embedding comparisons and, as we report here, allows for remote homolog detection under specific constraints, including protein sequence length similarity. Multifunctional proteins are present in different species. However, particularly in humans, the problem of their structural and functional annotation is urgent since, according to recent statistics, they comprise up to 50% of the human reference proteome. In this paper we show that when EBA is applied to a set of randomly selected multifunctional human proteins, it retrieves, after a clustering procedure and rigorous validation on the reference Swiss-Prot database, proteins that are remote homologs to each other and carry similar structural and functional features as the query protein.</description>
	<pubDate>2026-01-20</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 25: Embedding-Based Alignments Capture Structural and Sequence Domains of Distantly Related Multifunctional Human Proteins</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/1/25">doi: 10.3390/computation14010025</a></p>
	<p>Authors:
		Gabriele Vazzana
		Matteo Manfredi
		Castrense Savojardo
		Pier Luigi Martelli
		Rita Casadio
		</p>
	<p>Protein embedding is a protein representation that carries along the information derived from filtering large volumes of sequences stored in large archives. Routinely, the protein is represented by a matrix in which each residue is a context-specific vector whose dimensions reflect the size of the large architectures of neural networks (transformers) trained with deep learning algorithms on large volumes of sequences. A recently introduced method (Embedding-Based Alignment, EBA) is particularly suited for pairwise embedding comparisons and, as we report here, allows for remote homolog detection under specific constraints, including protein sequence length similarity. Multifunctional proteins are present in different species. However, particularly in humans, the problem of their structural and functional annotation is urgent since, according to recent statistics, they comprise up to 50% of the human reference proteome. In this paper we show that when EBA is applied to a set of randomly selected multifunctional human proteins, it retrieves, after a clustering procedure and rigorous validation on the reference Swiss-Prot database, proteins that are remote homologs to each other and carry similar structural and functional features as the query protein.</p>
	]]></content:encoded>

	<dc:title>Embedding-Based Alignments Capture Structural and Sequence Domains of Distantly Related Multifunctional Human Proteins</dc:title>
			<dc:creator>Gabriele Vazzana</dc:creator>
			<dc:creator>Matteo Manfredi</dc:creator>
			<dc:creator>Castrense Savojardo</dc:creator>
			<dc:creator>Pier Luigi Martelli</dc:creator>
			<dc:creator>Rita Casadio</dc:creator>
		<dc:identifier>doi: 10.3390/computation14010025</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-01-20</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-01-20</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>25</prism:startingPage>
		<prism:doi>10.3390/computation14010025</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/1/25</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/1/24">

	<title>Computation, Vol. 14, Pages 24: Development and Assessment of Simplified Conductance Models for the Particle Exhaust in Wendelstein 7-X</title>
	<link>https://www.mdpi.com/2079-3197/14/1/24</link>
	<description>The particle exhaust system plays a pivotal role in fusion reactors and is essential for ensuring both the feasibility and sustained operation of the fusion reaction. For the successful development of such a system, density control is of great importance and some key design parameters include the neutral gas pressure and the resulting particle fluxes. This study presents a simplified conductance-based model for estimating neutral gas pressure distributions in the particle exhaust system of fusion reactors, focusing specifically on the sub-divertor region. In the proposed model, the pumping region is represented as an interconnected set of reservoirs and channels. Mass conservation and conductance relations, appropriate for all flow regimes, are applied. The model was benchmarked against complex 3D DIVGAS simulations across representative operating scenarios of the Wendelstein 7-X (W7-X) stellarator. Despite geometric simplifications, the model is capable of predicting pressure values at several key locations inside the particle exhaust area of W7-X, as well as various types of particle fluxes. The developed model is computationally efficient for large-scale parametric studies, exhibiting an average deviation of approximately 20%, which indicates reasonable predictive accuracy considering the model simplifications and the flow problem complexity. Its application may assist early-stage engineering design, pumping performance improvement, and operational planning for W7-X and other future fusion reactors.</description>
	<pubDate>2026-01-19</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 24: Development and Assessment of Simplified Conductance Models for the Particle Exhaust in Wendelstein 7-X</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/1/24">doi: 10.3390/computation14010024</a></p>
	<p>Authors:
		Foteini Litovoli
		Christos Tantos
		Volker Hauer
		Victoria Haak
		Dirk Naujoks
		Chandra-Prakash Dhard
		W7-X Team W7-X Team
		</p>
	<p>The particle exhaust system plays a pivotal role in fusion reactors and is essential for ensuring both the feasibility and sustained operation of the fusion reaction. For the successful development of such a system, density control is of great importance and some key design parameters include the neutral gas pressure and the resulting particle fluxes. This study presents a simplified conductance-based model for estimating neutral gas pressure distributions in the particle exhaust system of fusion reactors, focusing specifically on the sub-divertor region. In the proposed model, the pumping region is represented as an interconnected set of reservoirs and channels. Mass conservation and conductance relations, appropriate for all flow regimes, are applied. The model was benchmarked against complex 3D DIVGAS simulations across representative operating scenarios of the Wendelstein 7-X (W7-X) stellarator. Despite geometric simplifications, the model is capable of predicting pressure values at several key locations inside the particle exhaust area of W7-X, as well as various types of particle fluxes. The developed model is computationally efficient for large-scale parametric studies, exhibiting an average deviation of approximately 20%, which indicates reasonable predictive accuracy considering the model simplifications and the flow problem complexity. Its application may assist early-stage engineering design, pumping performance improvement, and operational planning for W7-X and other future fusion reactors.</p>
	]]></content:encoded>

	<dc:title>Development and Assessment of Simplified Conductance Models for the Particle Exhaust in Wendelstein 7-X</dc:title>
			<dc:creator>Foteini Litovoli</dc:creator>
			<dc:creator>Christos Tantos</dc:creator>
			<dc:creator>Volker Hauer</dc:creator>
			<dc:creator>Victoria Haak</dc:creator>
			<dc:creator>Dirk Naujoks</dc:creator>
			<dc:creator>Chandra-Prakash Dhard</dc:creator>
			<dc:creator>W7-X Team W7-X Team</dc:creator>
		<dc:identifier>doi: 10.3390/computation14010024</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-01-19</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-01-19</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>24</prism:startingPage>
		<prism:doi>10.3390/computation14010024</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/1/24</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/1/23">

	<title>Computation, Vol. 14, Pages 23: Topology Optimisation of Heat Sinks Embedded with Phase-Change Material for Minimising Temperature Oscillations</title>
	<link>https://www.mdpi.com/2079-3197/14/1/23</link>
	<description>This study presents a gradient-based topology optimisation framework for heat sinks embedded with phase-change material (PCM) that targets the mitigation of temperature oscillations under cyclic thermal loads. The approach couples transient thermal diffusion modelling in FEniCS with automatic adjoint sensitivities and GCMMA, and uses a simple analytical homogenisation to parametrise a composite of PCM and conductive material. With latent-heat buffering using PCM, the optimised layouts reduce the temperature variance by 41% when the full time history is used and by 32% when only the quasi-steady-state cycle is used. To improve physical manufacturability, explicit penalisation yields near-discrete designs with only &amp;amp;sim;10% performance loss, preserving most oscillation reduction benefits. The results demonstrate that adjoint-driven PCM topology optimisation can systematically suppress thermal oscillations.</description>
	<pubDate>2026-01-16</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 23: Topology Optimisation of Heat Sinks Embedded with Phase-Change Material for Minimising Temperature Oscillations</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/1/23">doi: 10.3390/computation14010023</a></p>
	<p>Authors:
		Mark Bjerre Müller Christensen
		Joe Alexandersen
		</p>
	<p>This study presents a gradient-based topology optimisation framework for heat sinks embedded with phase-change material (PCM) that targets the mitigation of temperature oscillations under cyclic thermal loads. The approach couples transient thermal diffusion modelling in FEniCS with automatic adjoint sensitivities and GCMMA, and uses a simple analytical homogenisation to parametrise a composite of PCM and conductive material. With latent-heat buffering using PCM, the optimised layouts reduce the temperature variance by 41% when the full time history is used and by 32% when only the quasi-steady-state cycle is used. To improve physical manufacturability, explicit penalisation yields near-discrete designs with only &amp;amp;sim;10% performance loss, preserving most oscillation reduction benefits. The results demonstrate that adjoint-driven PCM topology optimisation can systematically suppress thermal oscillations.</p>
	]]></content:encoded>

	<dc:title>Topology Optimisation of Heat Sinks Embedded with Phase-Change Material for Minimising Temperature Oscillations</dc:title>
			<dc:creator>Mark Bjerre Müller Christensen</dc:creator>
			<dc:creator>Joe Alexandersen</dc:creator>
		<dc:identifier>doi: 10.3390/computation14010023</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-01-16</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-01-16</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>23</prism:startingPage>
		<prism:doi>10.3390/computation14010023</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/1/23</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/1/22">

	<title>Computation, Vol. 14, Pages 22: The Health-Wealth Gradient in Labor Markets: Integrating Health, Insurance, and Social Metrics to Predict Employment Density</title>
	<link>https://www.mdpi.com/2079-3197/14/1/22</link>
	<description>Labor market forecasting relies heavily on economic time-series data, often overlooking the &amp;amp;ldquo;health&amp;amp;ndash;wealth&amp;amp;rdquo; gradient that links population health to workforce participation. This study develops a machine learning framework integrating non-traditional health and social metrics to predict state-level employment density. Methods: We constructed a multi-source longitudinal dataset (2014&amp;amp;ndash;2024) by aggregating county-level Quarterly Census of Employment and Wages (QCEW) data with County Health Rankings to the state level. Using a time-aware split to evaluate performance across the COVID-19 structural break, we compared LASSO, Random Forest, and regularized XGBoost models, employing SHAP values for interpretability. Results: The tuned, regularized XGBoost model achieved strong out-of-sample performance (Test R2 = 0.800). A leakage-safe stacked Ridge ensemble yielded comparable performance (Test R2 = 0.827), while preserving the interpretability of the underlying tree model used for SHAP analysis.</description>
	<pubDate>2026-01-15</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 22: The Health-Wealth Gradient in Labor Markets: Integrating Health, Insurance, and Social Metrics to Predict Employment Density</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/1/22">doi: 10.3390/computation14010022</a></p>
	<p>Authors:
		Dingyuan Liu
		Qiannan Shen
		Jiaci Liu
		</p>
	<p>Labor market forecasting relies heavily on economic time-series data, often overlooking the &amp;amp;ldquo;health&amp;amp;ndash;wealth&amp;amp;rdquo; gradient that links population health to workforce participation. This study develops a machine learning framework integrating non-traditional health and social metrics to predict state-level employment density. Methods: We constructed a multi-source longitudinal dataset (2014&amp;amp;ndash;2024) by aggregating county-level Quarterly Census of Employment and Wages (QCEW) data with County Health Rankings to the state level. Using a time-aware split to evaluate performance across the COVID-19 structural break, we compared LASSO, Random Forest, and regularized XGBoost models, employing SHAP values for interpretability. Results: The tuned, regularized XGBoost model achieved strong out-of-sample performance (Test R2 = 0.800). A leakage-safe stacked Ridge ensemble yielded comparable performance (Test R2 = 0.827), while preserving the interpretability of the underlying tree model used for SHAP analysis.</p>
	]]></content:encoded>

	<dc:title>The Health-Wealth Gradient in Labor Markets: Integrating Health, Insurance, and Social Metrics to Predict Employment Density</dc:title>
			<dc:creator>Dingyuan Liu</dc:creator>
			<dc:creator>Qiannan Shen</dc:creator>
			<dc:creator>Jiaci Liu</dc:creator>
		<dc:identifier>doi: 10.3390/computation14010022</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-01-15</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-01-15</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>22</prism:startingPage>
		<prism:doi>10.3390/computation14010022</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/1/22</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/1/21">

	<title>Computation, Vol. 14, Pages 21: Solving the Synthesis Problem Self-Organizing Control System in the Class of Elliptical Accidents Optics for Objects with One Input and One Output</title>
	<link>https://www.mdpi.com/2079-3197/14/1/21</link>
	<description>Nonlinear single-input single-output (SISO) systems operating under parametric uncertainty often exhibit bifurcations, multistability, and deterministic chaos, which significantly limit the effectiveness of classical linear, adaptive, and switching control methods. This paper proposes a novel synthesis framework for self-organizing control systems based on catastrophe theory, specifically within the class of elliptic catastrophes. Unlike conventional approaches that stabilize a predefined system structure, the proposed method embeds the control law directly into a structurally stable catastrophe model, enabling autonomous bifurcation-driven transitions between stable equilibria. The synthesis procedure is formulated using a Lyapunov vector-function gradient&amp;amp;ndash;velocity method, which guarantees aperiodic robust stability under parametric uncertainty. The definiteness of the Lyapunov functions is established using Morse&amp;amp;rsquo;s lemma, providing a rigorous stability foundation. To support practical implementation, a data-driven parameter tuning mechanism based on self-organizing maps (SOM) is integrated, allowing adaptive adjustment of controller coefficients while preserving Lyapunov stability conditions. Simulation results demonstrate suppression of chaotic regimes, smooth bifurcation-induced transitions between stable operating modes, and improved transient performance compared to benchmark adaptive control schemes. The proposed framework provides a structurally robust alternative for controlling nonlinear systems in uncertain and dynamically changing environments.</description>
	<pubDate>2026-01-14</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 21: Solving the Synthesis Problem Self-Organizing Control System in the Class of Elliptical Accidents Optics for Objects with One Input and One Output</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/1/21">doi: 10.3390/computation14010021</a></p>
	<p>Authors:
		Maxot Rakhmetov
		Ainagul Adiyeva
		Balaussa Orazbayeva
		Shynar Yelezhanova
		Raigul Tuleuova
		Raushan Moldasheva
		</p>
	<p>Nonlinear single-input single-output (SISO) systems operating under parametric uncertainty often exhibit bifurcations, multistability, and deterministic chaos, which significantly limit the effectiveness of classical linear, adaptive, and switching control methods. This paper proposes a novel synthesis framework for self-organizing control systems based on catastrophe theory, specifically within the class of elliptic catastrophes. Unlike conventional approaches that stabilize a predefined system structure, the proposed method embeds the control law directly into a structurally stable catastrophe model, enabling autonomous bifurcation-driven transitions between stable equilibria. The synthesis procedure is formulated using a Lyapunov vector-function gradient&amp;amp;ndash;velocity method, which guarantees aperiodic robust stability under parametric uncertainty. The definiteness of the Lyapunov functions is established using Morse&amp;amp;rsquo;s lemma, providing a rigorous stability foundation. To support practical implementation, a data-driven parameter tuning mechanism based on self-organizing maps (SOM) is integrated, allowing adaptive adjustment of controller coefficients while preserving Lyapunov stability conditions. Simulation results demonstrate suppression of chaotic regimes, smooth bifurcation-induced transitions between stable operating modes, and improved transient performance compared to benchmark adaptive control schemes. The proposed framework provides a structurally robust alternative for controlling nonlinear systems in uncertain and dynamically changing environments.</p>
	]]></content:encoded>

	<dc:title>Solving the Synthesis Problem Self-Organizing Control System in the Class of Elliptical Accidents Optics for Objects with One Input and One Output</dc:title>
			<dc:creator>Maxot Rakhmetov</dc:creator>
			<dc:creator>Ainagul Adiyeva</dc:creator>
			<dc:creator>Balaussa Orazbayeva</dc:creator>
			<dc:creator>Shynar Yelezhanova</dc:creator>
			<dc:creator>Raigul Tuleuova</dc:creator>
			<dc:creator>Raushan Moldasheva</dc:creator>
		<dc:identifier>doi: 10.3390/computation14010021</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-01-14</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-01-14</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>21</prism:startingPage>
		<prism:doi>10.3390/computation14010021</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/1/21</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/1/20">

	<title>Computation, Vol. 14, Pages 20: AFAD-MSA: Dataset and Models for Arabic Fake Audio Detection</title>
	<link>https://www.mdpi.com/2079-3197/14/1/20</link>
	<description>As generative speech synthesis produces near-human synthetic voices and reliance on online media grows, robust audio-deepfake detection is essential to fight misuse and misinformation. In this study, we introduce the Arabic Fake Audio Dataset for Modern Standard Arabic (AFAD-MSA), a curated corpus of authentic and synthetic Arabic speech designed to advance research on Arabic deepfake and spoofed-speech detection. The synthetic subset is generated with four state-of-the-art proprietary text-to-speech and voice-conversion models. Rich metadata&amp;amp;mdash;covering speaker attributes and generation information&amp;amp;mdash;is provided to support reproducibility and benchmarking. To establish reference performance, we trained three AASIST models and compared their performance to two baseline transformer detectors (Wav2Vec 2.0 and Whisper). On the AFAD-MSA test split, AASIST-2 achieved perfect accuracy, surpassing the baseline models. However, its performance declined under cross-dataset evaluation. These results underscore the importance of data construction. Detectors generalize best when exposed to diverse attack types. In addition, continual or contrastive training that interleaves bona fide speech with large, heterogeneous spoofed corpora will further improve detectors&amp;amp;rsquo; robustness.</description>
	<pubDate>2026-01-14</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 20: AFAD-MSA: Dataset and Models for Arabic Fake Audio Detection</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/1/20">doi: 10.3390/computation14010020</a></p>
	<p>Authors:
		Elsayed Issa
		</p>
	<p>As generative speech synthesis produces near-human synthetic voices and reliance on online media grows, robust audio-deepfake detection is essential to fight misuse and misinformation. In this study, we introduce the Arabic Fake Audio Dataset for Modern Standard Arabic (AFAD-MSA), a curated corpus of authentic and synthetic Arabic speech designed to advance research on Arabic deepfake and spoofed-speech detection. The synthetic subset is generated with four state-of-the-art proprietary text-to-speech and voice-conversion models. Rich metadata&amp;amp;mdash;covering speaker attributes and generation information&amp;amp;mdash;is provided to support reproducibility and benchmarking. To establish reference performance, we trained three AASIST models and compared their performance to two baseline transformer detectors (Wav2Vec 2.0 and Whisper). On the AFAD-MSA test split, AASIST-2 achieved perfect accuracy, surpassing the baseline models. However, its performance declined under cross-dataset evaluation. These results underscore the importance of data construction. Detectors generalize best when exposed to diverse attack types. In addition, continual or contrastive training that interleaves bona fide speech with large, heterogeneous spoofed corpora will further improve detectors&amp;amp;rsquo; robustness.</p>
	]]></content:encoded>

	<dc:title>AFAD-MSA: Dataset and Models for Arabic Fake Audio Detection</dc:title>
			<dc:creator>Elsayed Issa</dc:creator>
		<dc:identifier>doi: 10.3390/computation14010020</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-01-14</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-01-14</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>20</prism:startingPage>
		<prism:doi>10.3390/computation14010020</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/1/20</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/1/19">

	<title>Computation, Vol. 14, Pages 19: Multifidelity Topology Design for Thermal&amp;ndash;Fluid Devices via SEMDOT Algorithm</title>
	<link>https://www.mdpi.com/2079-3197/14/1/19</link>
	<description>Designing thermal&amp;amp;ndash;fluid devices that reduce peak temperature while limiting pressure loss is challenging because high-fidelity (HF) Navier&amp;amp;ndash;Stokes&amp;amp;ndash;convection simulations make direct HF-driven topology optimization computationally expensive. This study presents a two-dimensional, steady, laminar multifidelity topology design framework for thermal&amp;amp;ndash;fluid devices operating in a low-to-moderate Reynolds number regime. A computationally efficient low-fidelity (LF) Darcy&amp;amp;ndash;convection model is used for topology optimization, where SEMDOT decouples geometric smoothness from the analysis field to produce CAD-ready boundaries. The LF optimization minimizes a P-norm aggregated temperature subject to a prescribed volume fraction constraint; the inlet&amp;amp;ndash;outlet pressure difference and the P-norm parameter are varied to generate a diverse candidate set. All candidates are then evaluated using a steady incompressible HF Navier&amp;amp;ndash;Stokes&amp;amp;ndash;convection model in COMSOL 6.3 under a consistent operating condition (fixed flow; pressure drop reported as an output). In representative single- and multi-channel case studies, SEMDOT designs reduce the HF peak temperature (e.g., ~337 K to ~323 K) while also reducing the pressure drop (e.g., ~18.7 Pa to ~12.6 Pa) relative to conventional straight-channel layouts under the same operating point. Compared with a conventional RAMP-based pipeline under the tested settings, the proposed approach yields a more favorable Pareto distribution (normalized hypervolume 1.000 vs. 0.923).</description>
	<pubDate>2026-01-12</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 19: Multifidelity Topology Design for Thermal&amp;ndash;Fluid Devices via SEMDOT Algorithm</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/1/19">doi: 10.3390/computation14010019</a></p>
	<p>Authors:
		Yiding Sun
		Yun-Fei Fu
		Shuzhi Xu
		Yifan Guo
		</p>
	<p>Designing thermal&amp;amp;ndash;fluid devices that reduce peak temperature while limiting pressure loss is challenging because high-fidelity (HF) Navier&amp;amp;ndash;Stokes&amp;amp;ndash;convection simulations make direct HF-driven topology optimization computationally expensive. This study presents a two-dimensional, steady, laminar multifidelity topology design framework for thermal&amp;amp;ndash;fluid devices operating in a low-to-moderate Reynolds number regime. A computationally efficient low-fidelity (LF) Darcy&amp;amp;ndash;convection model is used for topology optimization, where SEMDOT decouples geometric smoothness from the analysis field to produce CAD-ready boundaries. The LF optimization minimizes a P-norm aggregated temperature subject to a prescribed volume fraction constraint; the inlet&amp;amp;ndash;outlet pressure difference and the P-norm parameter are varied to generate a diverse candidate set. All candidates are then evaluated using a steady incompressible HF Navier&amp;amp;ndash;Stokes&amp;amp;ndash;convection model in COMSOL 6.3 under a consistent operating condition (fixed flow; pressure drop reported as an output). In representative single- and multi-channel case studies, SEMDOT designs reduce the HF peak temperature (e.g., ~337 K to ~323 K) while also reducing the pressure drop (e.g., ~18.7 Pa to ~12.6 Pa) relative to conventional straight-channel layouts under the same operating point. Compared with a conventional RAMP-based pipeline under the tested settings, the proposed approach yields a more favorable Pareto distribution (normalized hypervolume 1.000 vs. 0.923).</p>
	]]></content:encoded>

	<dc:title>Multifidelity Topology Design for Thermal&amp;ndash;Fluid Devices via SEMDOT Algorithm</dc:title>
			<dc:creator>Yiding Sun</dc:creator>
			<dc:creator>Yun-Fei Fu</dc:creator>
			<dc:creator>Shuzhi Xu</dc:creator>
			<dc:creator>Yifan Guo</dc:creator>
		<dc:identifier>doi: 10.3390/computation14010019</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-01-12</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-01-12</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>19</prism:startingPage>
		<prism:doi>10.3390/computation14010019</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/1/19</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/1/18">

	<title>Computation, Vol. 14, Pages 18: Finite Element Analysis of Stress and Displacement in the Distal Femur: A Comparative Study of Normal and Osteoarthritic Bone Under Knee Flexion</title>
	<link>https://www.mdpi.com/2079-3197/14/1/18</link>
	<description>Osteoarthritis (OA) is a progressive degenerative joint disease that fundamentally alters the mechanical environment of the knee. This study utilizes a finite element framework to evaluate the biomechanical response of the distal femur in healthy and osteoarthritic conditions across critical functional postures. To isolate the bone&amp;amp;rsquo;s inherent structural stiffness and avoid numerical artifacts, a free-body computational approach was implemented, omitting external surface fixations. The distal femur was modeled as a linearly elastic domain with material properties representing healthy tissue and OA-induced degradation. Simulations were performed under passive gravitational loading at knee flexion angles of 0&amp;amp;#8728;,60&amp;amp;#8728;, and 90&amp;amp;#8728;. The results demonstrate that the mechanical response is highly sensitive to postural orientation, with peak von Mises stress consistently occurring at 60&amp;amp;#8728; of flexion for both models. Quantitative analysis revealed that the stiffer Normal bone attracted significantly higher internal stress, with a reduction of over 30% in peak stress magnitude observed in the OA model at the most critical flexion angle. Total displacement magnitudes remained relatively stable across conditions, suggesting that OA-induced material softening primarily influences internal stress redistribution rather than global structural sag under passive loads. These findings provide a quantitative index of skeletal vulnerability, supporting the development of patient-specific orthopedic treatments and rehabilitation strategies.</description>
	<pubDate>2026-01-12</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 18: Finite Element Analysis of Stress and Displacement in the Distal Femur: A Comparative Study of Normal and Osteoarthritic Bone Under Knee Flexion</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/1/18">doi: 10.3390/computation14010018</a></p>
	<p>Authors:
		Kamonchat Trachoo
		Inthira Chaiya
		Din Prathumwan
		</p>
	<p>Osteoarthritis (OA) is a progressive degenerative joint disease that fundamentally alters the mechanical environment of the knee. This study utilizes a finite element framework to evaluate the biomechanical response of the distal femur in healthy and osteoarthritic conditions across critical functional postures. To isolate the bone&rsquo;s inherent structural stiffness and avoid numerical artifacts, a free-body computational approach was implemented, omitting external surface fixations. The distal femur was modeled as a linearly elastic domain with material properties representing healthy tissue and OA-induced degradation. Simulations were performed under passive gravitational loading at knee flexion angles of 0&#8728;, 60&#8728;, and 90&#8728;. The results demonstrate that the mechanical response is highly sensitive to postural orientation, with peak von Mises stress consistently occurring at 60&#8728; of flexion for both models. Quantitative analysis revealed that the stiffer Normal bone attracted significantly higher internal stress, with a reduction of over 30% in peak stress magnitude observed in the OA model at the most critical flexion angle. Total displacement magnitudes remained relatively stable across conditions, suggesting that OA-induced material softening primarily influences internal stress redistribution rather than global structural sag under passive loads. These findings provide a quantitative index of skeletal vulnerability, supporting the development of patient-specific orthopedic treatments and rehabilitation strategies.</p>
	]]></content:encoded>

	<dc:title>Finite Element Analysis of Stress and Displacement in the Distal Femur: A Comparative Study of Normal and Osteoarthritic Bone Under Knee Flexion</dc:title>
			<dc:creator>Kamonchat Trachoo</dc:creator>
			<dc:creator>Inthira Chaiya</dc:creator>
			<dc:creator>Din Prathumwan</dc:creator>
		<dc:identifier>doi: 10.3390/computation14010018</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-01-12</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-01-12</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>18</prism:startingPage>
		<prism:doi>10.3390/computation14010018</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/1/18</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/1/17">

	<title>Computation, Vol. 14, Pages 17: Approximate Analytical Solutions of Nonlinear Jerk Equations Using the Parameter Expansion Method</title>
	<link>https://www.mdpi.com/2079-3197/14/1/17</link>
	<description>The Parameter Expansion Method (PEM) is employed to study nonlinear Jerk equations, which are often difficult to solve because of their strong nonlinearity. This method provides higher accuracy and broader applicability, enabling analytical insights and closed-form approximations. This study explores the use of Prof. He&amp;rsquo;s PEM to derive approximate analytical solutions of the nonlinear third-order Jerk equation, this model is commonly encountered in the analysis of complex dynamical systems across physics and engineering. Owing to the strong nonlinearity inherent in Jerk equations, exact solutions are often unattainable. The PEM provides a simple, effective framework by expanding the solution with respect to an embedding parameter, allowing accurate approximations without the need of small parameters or linearization. The method&amp;rsquo;s reliability and precision are validated through comparisons with numerical simulations, demonstrating its practicality and robustness in tackling nonlinear problems. The results indicate that PEM provides highly accurate approximations of nonlinear Jerk equation, showcasing greater simplicity and efficiency relative to other analytical methods, along with excellent concordance with numerical simulations. Additionally, the nonlinear Jerk equation demonstrates exact approximate solutions via PEM, closely mirroring numerical results and surpassing several contemporary analytical techniques in efficiency and usability. Furthermore, the study indicates that PEM is a straightforward and effective approach in solving nonlinear Jerk equation. It generates accurate estimates that nearly align with numerical simulations and surpass numerous other analytical methods.</description>
	<pubDate>2026-01-12</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 17: Approximate Analytical Solutions of Nonlinear Jerk Equations Using the Parameter Expansion Method</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/1/17">doi: 10.3390/computation14010017</a></p>
	<p>Authors:
		Gamal M. Ismail
		Galal M. Moatimid
		Stylianos V. Kontomaris
		</p>
	<p>The Parameter Expansion Method (PEM) is employed to study nonlinear Jerk equations, which are often difficult to solve because of their strong nonlinearity. This method provides higher accuracy and broader applicability, enabling analytical insights and closed-form approximations. This study explores the use of Prof. He&rsquo;s PEM to derive approximate analytical solutions of the nonlinear third-order Jerk equation, this model is commonly encountered in the analysis of complex dynamical systems across physics and engineering. Owing to the strong nonlinearity inherent in Jerk equations, exact solutions are often unattainable. The PEM provides a simple, effective framework by expanding the solution with respect to an embedding parameter, allowing accurate approximations without the need of small parameters or linearization. The method&rsquo;s reliability and precision are validated through comparisons with numerical simulations, demonstrating its practicality and robustness in tackling nonlinear problems. The results indicate that PEM provides highly accurate approximations of nonlinear Jerk equation, showcasing greater simplicity and efficiency relative to other analytical methods, along with excellent concordance with numerical simulations. Additionally, the nonlinear Jerk equation demonstrates exact approximate solutions via PEM, closely mirroring numerical results and surpassing several contemporary analytical techniques in efficiency and usability. Furthermore, the study indicates that PEM is a straightforward and effective approach in solving nonlinear Jerk equation. It generates accurate estimates that nearly align with numerical simulations and surpass numerous other analytical methods.</p>
	]]></content:encoded>

	<dc:title>Approximate Analytical Solutions of Nonlinear Jerk Equations Using the Parameter Expansion Method</dc:title>
			<dc:creator>Gamal M. Ismail</dc:creator>
			<dc:creator>Galal M. Moatimid</dc:creator>
			<dc:creator>Stylianos V. Kontomaris</dc:creator>
		<dc:identifier>doi: 10.3390/computation14010017</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-01-12</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-01-12</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>17</prism:startingPage>
		<prism:doi>10.3390/computation14010017</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/1/17</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/1/16">

	<title>Computation, Vol. 14, Pages 16: Linguistic Influence on Multidimensional Word Embeddings: Analysis of Ten Languages</title>
	<link>https://www.mdpi.com/2079-3197/14/1/16</link>
	<description>Understanding how linguistic typology shapes multilingual embeddings is important for cross-lingual NLP. We examine static MUSE word embedding for ten diverse languages (English, Russian, Chinese, Arabic, Indonesian, German, Lithuanian, Hindi, Tajik and Persian). Using pairwise cosine distances, Random Forest classification, and UMAP visualization, we find that language identity and script type largely determine embedding clusters, with morphological complexity affecting cluster compactness and lexical overlap connecting clusters. The Random Forest model predicts language labels with high accuracy (&amp;asymp;98%), indicating strong language-specific patterns in embedding space. These results highlight script, morphology, and lexicon as key factors influencing multilingual embedding structures, informing linguistically aware design of cross-lingual models.</description>
	<pubDate>2026-01-09</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 16: Linguistic Influence on Multidimensional Word Embeddings: Analysis of Ten Languages</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/1/16">doi: 10.3390/computation14010016</a></p>
	<p>Authors:
		Anna V. Aleshina
		Andrey L. Bulgakov
		Yanliang Xin
		Larisa S. Skrebkova
		</p>
	<p>Understanding how linguistic typology shapes multilingual embeddings is important for cross-lingual NLP. We examine static MUSE word embedding for ten diverse languages (English, Russian, Chinese, Arabic, Indonesian, German, Lithuanian, Hindi, Tajik and Persian). Using pairwise cosine distances, Random Forest classification, and UMAP visualization, we find that language identity and script type largely determine embedding clusters, with morphological complexity affecting cluster compactness and lexical overlap connecting clusters. The Random Forest model predicts language labels with high accuracy (&asymp;98%), indicating strong language-specific patterns in embedding space. These results highlight script, morphology, and lexicon as key factors influencing multilingual embedding structures, informing linguistically aware design of cross-lingual models.</p>
	]]></content:encoded>

	<dc:title>Linguistic Influence on Multidimensional Word Embeddings: Analysis of Ten Languages</dc:title>
			<dc:creator>Anna V. Aleshina</dc:creator>
			<dc:creator>Andrey L. Bulgakov</dc:creator>
			<dc:creator>Yanliang Xin</dc:creator>
			<dc:creator>Larisa S. Skrebkova</dc:creator>
		<dc:identifier>doi: 10.3390/computation14010016</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-01-09</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-01-09</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Communication</prism:section>
	<prism:startingPage>16</prism:startingPage>
		<prism:doi>10.3390/computation14010016</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/1/16</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/1/15">

	<title>Computation, Vol. 14, Pages 15: Numerical Simulation of Diffusion in Cylindrical Pores: The Influence of Pore Radius on Particle Capture Kinetics</title>
	<link>https://www.mdpi.com/2079-3197/14/1/15</link>
	<description>The diffusion and trapping of particles in complex porous media are fundamental processes in materials science and bioengineering. This study systematically investigates the influence of pore radius on particle capture kinetics within a three-dimensional cylindrical pore containing randomly distributed absorbing traps. Numerical simulations were performed for a wide range of pore radii (from 3a to 81a, a is a minimal length of the problem, arbitrary unit) and trap concentrations M (from 100 to 5090, these numbers are determined by the pore geometry) using a random walk algorithm. The particle lifetime (&amp;tau;), characterizing the capture rate, was calculated and analyzed. Results reveal three distinct capture regimes dependent on trap concentration: a diffusion-limited regime at low concentration M (&amp;lt;1000), a transition regime at medium M (1000 &amp;lt; M &amp;lt; 2000), and a trap-density-dominated saturation regime at high M (&amp;gt;2000). For each regime, optimal approximating functions for &amp;tau;(M) were identified. Furthermore, empirical relationships between the approximating coefficients and the pore radius were derived, which enable the prediction of particle lifetimes. The findings demonstrate that while the pore radius significantly impacts capture kinetics at low trap densities, its influence diminishes as trap concentration increases, converging towards a universal behavior dominated by trap density.</description>
	<pubDate>2026-01-08</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 15: Numerical Simulation of Diffusion in Cylindrical Pores: The Influence of Pore Radius on Particle Capture Kinetics</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/1/15">doi: 10.3390/computation14010015</a></p>
	<p>Authors:
		Valeriy E. Arkhincheev
		Bair V. Khabituev
		Daniil F. Deriugin
		Stanislav P. Maltsev
		</p>
	<p>The diffusion and trapping of particles in complex porous media are fundamental processes in materials science and bioengineering. This study systematically investigates the influence of pore radius on particle capture kinetics within a three-dimensional cylindrical pore containing randomly distributed absorbing traps. Numerical simulations were performed for a wide range of pore radii (from 3a to 81a, a is a minimal length of the problem, arbitrary unit) and trap concentrations M (from 100 to 5090, these numbers are determined by the pore geometry) using a random walk algorithm. The particle lifetime (&tau;), characterizing the capture rate, was calculated and analyzed. Results reveal three distinct capture regimes dependent on trap concentration: a diffusion-limited regime at low concentration M (&lt;1000), a transition regime at medium M (1000 &lt; M &lt; 2000), and a trap-density-dominated saturation regime at high M (&gt;2000). For each regime, optimal approximating functions for &tau;(M) were identified. Furthermore, empirical relationships between the approximating coefficients and the pore radius were derived, which enable the prediction of particle lifetimes. The findings demonstrate that while the pore radius significantly impacts capture kinetics at low trap densities, its influence diminishes as trap concentration increases, converging towards a universal behavior dominated by trap density.</p>
	]]></content:encoded>

	<dc:title>Numerical Simulation of Diffusion in Cylindrical Pores: The Influence of Pore Radius on Particle Capture Kinetics</dc:title>
			<dc:creator>Valeriy E. Arkhincheev</dc:creator>
			<dc:creator>Bair V. Khabituev</dc:creator>
			<dc:creator>Daniil F. Deriugin</dc:creator>
			<dc:creator>Stanislav P. Maltsev</dc:creator>
		<dc:identifier>doi: 10.3390/computation14010015</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-01-08</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-01-08</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>15</prism:startingPage>
		<prism:doi>10.3390/computation14010015</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/1/15</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/1/14">

	<title>Computation, Vol. 14, Pages 14: Ab Initio Computational Investigations of Low-Lying Electronic States of Yttrium Lithide and Scandium Lithide</title>
	<link>https://www.mdpi.com/2079-3197/14/1/14</link>
	<description>Ab initio studies using CASSCF/MRCI calculations have been performed to investigate the spectroscopic properties of YLi and ScLi molecules. Our calculations have computed 25 singlet and triplet states for YLi and 37 electronic states for ScLi. The lowest lying states, including the ground state 1&amp;sum;+ of YLi, have been investigated for the first time. The spin&amp;ndash;orbit coupling in YLi has also been assessed from the splitting between &amp;Omega; components generated from the lowest triplet lying &amp;Lambda;&amp;ndash;S states. Regarding ScLi, the ground state is found to be the (1)3&amp;Delta; state. Spectroscopic constants, energy levels at equilibrium, permanent dipole moments, and transition dipole moments have also been calculated. The potential energy curves for all calculated states have been displayed to large bond internuclear distances. In both ScLi and YLi, the potential energy curves have shown a small dissociation energy for the lowest states (1) 1,3&amp;Delta;, (1) 1,3&amp;Pi; and (1) 1,3&amp;sum;+.</description>
	<pubDate>2026-01-08</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 14: Ab Initio Computational Investigations of Low-Lying Electronic States of Yttrium Lithide and Scandium Lithide</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/1/14">doi: 10.3390/computation14010014</a></p>
	<p>Authors:
		Jean Tabet
		Nancy Zgheib
		Sylvie Magnier
		Fadia Taher
		</p>
	<p>Ab initio studies using CASSCF/MRCI calculations have been performed to investigate the spectroscopic properties of YLi and ScLi molecules. Our calculations have computed 25 singlet and triplet states for YLi and 37 electronic states for ScLi. The lowest lying states, including the ground state 1&sum;+ of YLi, have been investigated for the first time. The spin&ndash;orbit coupling in YLi has also been assessed from the splitting between &Omega; components generated from the lowest triplet lying &Lambda;&ndash;S states. Regarding ScLi, the ground state is found to be the (1)3&Delta; state. Spectroscopic constants, energy levels at equilibrium, permanent dipole moments, and transition dipole moments have also been calculated. The potential energy curves for all calculated states have been displayed to large bond internuclear distances. In both ScLi and YLi, the potential energy curves have shown a small dissociation energy for the lowest states (1) 1,3&Delta;, (1) 1,3&Pi; and (1) 1,3&sum;+.</p>
	]]></content:encoded>

	<dc:title>Ab Initio Computational Investigations of Low-Lying Electronic States of Yttrium Lithide and Scandium Lithide</dc:title>
			<dc:creator>Jean Tabet</dc:creator>
			<dc:creator>Nancy Zgheib</dc:creator>
			<dc:creator>Sylvie Magnier</dc:creator>
			<dc:creator>Fadia Taher</dc:creator>
		<dc:identifier>doi: 10.3390/computation14010014</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-01-08</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-01-08</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>14</prism:startingPage>
		<prism:doi>10.3390/computation14010014</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/1/14</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/1/13">

	<title>Computation, Vol. 14, Pages 13: Numerical Error Analysis of the Poisson Equation Under RHS Inaccuracies in Particle-in-Cell Simulations</title>
	<link>https://www.mdpi.com/2079-3197/14/1/13</link>
	<description>Particle-in-Cell (PIC) simulations require accurate solutions of the electrostatic Poisson equation, yet accuracy often degrades near irregular Dirichlet boundaries on Cartesian meshes. While prior work has focused on left-hand-side (LHS) discretization errors, the impact of right-hand-side (RHS) inaccuracies arising from charge deposition near boundaries remains largely unexplored. This study analyzes numerical errors induced by underestimated RHS values at near-boundary nodes when using embedded finite difference schemes with linear and quadratic boundary treatments. Analytical results in one dimension and truncation error analyses in two dimensions show that RHS inaccuracies affect the two schemes in fundamentally different ways: They reduce boundary-induced errors in the linear scheme but introduce zeroth-order truncation errors in the quadratic scheme, leading to larger global errors. Numerical experiments in one, two, and three dimensions confirm these predictions. In two-dimensional tests, RHS inaccuracies reduce the L&amp;infin; error of the linear scheme by a factor of 2&amp;ndash;3, while increasing the quadratic-scheme error by several times, and in some cases by nearly an order of magnitude, with both schemes retaining second-order global convergence. A simple &amp;delta;&amp;macr;-based RHS calibration is proposed and shown to effectively restore the accuracy of the quadratic scheme.</description>
	<pubDate>2026-01-07</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 13: Numerical Error Analysis of the Poisson Equation Under RHS Inaccuracies in Particle-in-Cell Simulations</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/1/13">doi: 10.3390/computation14010013</a></p>
	<p>Authors:
		Kai Zhang
		Tao Xiao
		Weizong Wang
		Bijiao He
		</p>
	<p>Particle-in-Cell (PIC) simulations require accurate solutions of the electrostatic Poisson equation, yet accuracy often degrades near irregular Dirichlet boundaries on Cartesian meshes. While prior work has focused on left-hand-side (LHS) discretization errors, the impact of right-hand-side (RHS) inaccuracies arising from charge deposition near boundaries remains largely unexplored. This study analyzes numerical errors induced by underestimated RHS values at near-boundary nodes when using embedded finite difference schemes with linear and quadratic boundary treatments. Analytical results in one dimension and truncation error analyses in two dimensions show that RHS inaccuracies affect the two schemes in fundamentally different ways: They reduce boundary-induced errors in the linear scheme but introduce zeroth-order truncation errors in the quadratic scheme, leading to larger global errors. Numerical experiments in one, two, and three dimensions confirm these predictions. In two-dimensional tests, RHS inaccuracies reduce the L&infin; error of the linear scheme by a factor of 2&ndash;3, while increasing the quadratic-scheme error by several times, and in some cases by nearly an order of magnitude, with both schemes retaining second-order global convergence. A simple &delta;&macr;-based RHS calibration is proposed and shown to effectively restore the accuracy of the quadratic scheme.</p>
	]]></content:encoded>

	<dc:title>Numerical Error Analysis of the Poisson Equation Under RHS Inaccuracies in Particle-in-Cell Simulations</dc:title>
			<dc:creator>Kai Zhang</dc:creator>
			<dc:creator>Tao Xiao</dc:creator>
			<dc:creator>Weizong Wang</dc:creator>
			<dc:creator>Bijiao He</dc:creator>
		<dc:identifier>doi: 10.3390/computation14010013</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-01-07</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-01-07</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>13</prism:startingPage>
		<prism:doi>10.3390/computation14010013</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/1/13</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/1/12">

	<title>Computation, Vol. 14, Pages 12: A Physics-Informed Neural Network Aided Venturi&amp;ndash;Microwave Co-Sensing Method for Three-Phase Metering</title>
	<link>https://www.mdpi.com/2079-3197/14/1/12</link>
	<description>Addressing the challenges of online measurement of oil-gas-water three-phase flow under high gas&amp;ndash;liquid ratio (GVF &amp;gt; 90%) conditions (fire-driven mining, gas injection mining, natural gas mining), which rely heavily on radioactive sources, this study proposes an integrated, radiation-source-free three-phase measurement scheme utilizing a &amp;ldquo;Venturi tube-microwave resonator&amp;rdquo;. Additionally, a physics-informed neural network (PINN) is introduced to predict the volumetric flow rate of oil-gas-water three-phase flow. Methodologically, the main features are the Venturi differential pressure signal (&amp;Delta;P) and microwave resonance amplitude (V). A PINN model is constructed by embedding an improved L-M model, a cross-sectional water content model, and physical constraint equations into the loss function, thereby maintaining physical consistency and generalization ability under small sample sizes and across different operating conditions. Through experiments on oil-gas-water three-phase flow, the PINN model is compared with an artificial neural network (ANN) and a support vector machine (SVM). The results showed that under high gas&amp;ndash;liquid ratio conditions (GVF &amp;gt; 90%), the relative errors (REL) of PINN in predicting the volumetric flow rates of oil, gas, and water were 0.1865, 0.0397, and 0.0619, respectively, which were better than ANN and SVM, and the output met physical constraints. The results indicate that under current laboratory conditions and working conditions, the PINN model has good performance in predicting the flow rate of oil-gas-water three-phase flow. However, in order to apply it to the field in the future, experiments with a wider range of working conditions and long-term stability testing should be conducted. 
This study provides a new technological solution for developing three-phase measurement and machine learning models that are radiation-free, real-time, and engineering-feasible.</description>
	<pubDate>2026-01-05</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 12: A Physics-Informed Neural Network Aided Venturi&ndash;Microwave Co-Sensing Method for Three-Phase Metering</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/1/12">doi: 10.3390/computation14010012</a></p>
	<p>Authors:
		Jinhua Tan
		Yuxiao Yuan
		Ying Xu
		Jingya Wang
		Zirui Song
		Rongji Zuo
		Zhengyang Chen
		Chao Yuan
		</p>
	<p>Addressing the challenges of online measurement of oil-gas-water three-phase flow under high gas&ndash;liquid ratio (GVF &gt; 90%) conditions (fire-driven mining, gas injection mining, natural gas mining), which rely heavily on radioactive sources, this study proposes an integrated, radiation-source-free three-phase measurement scheme utilizing a &ldquo;Venturi tube-microwave resonator&rdquo;. Additionally, a physics-informed neural network (PINN) is introduced to predict the volumetric flow rate of oil-gas-water three-phase flow. Methodologically, the main features are the Venturi differential pressure signal (&Delta;P) and microwave resonance amplitude (V). A PINN model is constructed by embedding an improved L-M model, a cross-sectional water content model, and physical constraint equations into the loss function, thereby maintaining physical consistency and generalization ability under small sample sizes and across different operating conditions. Through experiments on oil-gas-water three-phase flow, the PINN model is compared with an artificial neural network (ANN) and a support vector machine (SVM). The results showed that under high gas&ndash;liquid ratio conditions (GVF &gt; 90%), the relative errors (REL) of PINN in predicting the volumetric flow rates of oil, gas, and water were 0.1865, 0.0397, and 0.0619, respectively, which were better than ANN and SVM, and the output met physical constraints. The results indicate that under current laboratory conditions and working conditions, the PINN model has good performance in predicting the flow rate of oil-gas-water three-phase flow. However, in order to apply it to the field in the future, experiments with a wider range of working conditions and long-term stability testing should be conducted. 
This study provides a new technological solution for developing three-phase measurement and machine learning models that are radiation-free, real-time, and engineering-feasible.</p>
	]]></content:encoded>

	<dc:title>A Physics-Informed Neural Network Aided Venturi&amp;ndash;Microwave Co-Sensing Method for Three-Phase Metering</dc:title>
			<dc:creator>Jinhua Tan</dc:creator>
			<dc:creator>Yuxiao Yuan</dc:creator>
			<dc:creator>Ying Xu</dc:creator>
			<dc:creator>Jingya Wang</dc:creator>
			<dc:creator>Zirui Song</dc:creator>
			<dc:creator>Rongji Zuo</dc:creator>
			<dc:creator>Zhengyang Chen</dc:creator>
			<dc:creator>Chao Yuan</dc:creator>
		<dc:identifier>doi: 10.3390/computation14010012</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-01-05</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-01-05</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>12</prism:startingPage>
		<prism:doi>10.3390/computation14010012</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/1/12</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/1/11">

	<title>Computation, Vol. 14, Pages 11: A Hybrid Gradient-Based Optimiser for Solving Complex Engineering Design Problems</title>
	<link>https://www.mdpi.com/2079-3197/14/1/11</link>
	<description>This paper proposes JADEGBO, a hybrid gradient-based metaheuristic for solving complex single- and multi-constraint engineering design problems as well as cost-sensitive security optimisation tasks. The method combines Adaptive Differential Evolution with Optional External Archive (JADE), which provides self-adaptive exploration through p-best mutation, an external archive, and success-based parameter learning, with the Gradient-Based Optimiser (GBO), which contributes Newton-inspired gradient search rules and a local escaping operator. In the proposed scheme, JADE is first employed to discover promising regions of the search space, after which GBO performs an intensified local refinement of the best individuals inherited from JADE. The performance of JADEGBO is assessed on the CEC2017 single-objective benchmark suite and compared against a broad set of classical and recent metaheuristics. Statistical indicators, convergence curves, box plots, histograms, sensitivity analyses, and scatter plots show that the hybrid typically attains the best or near-best mean fitness, exhibits low run-to-run variance, and maintains a favourable balance between exploration and exploitation across rotated, shifted, and composite landscapes. To demonstrate practical relevance, JADEGBO is further applied to the following four well-known constrained engineering design problems: welded beam, pressure vessel, speed reducer, and three-bar truss design. The algorithm consistently produces feasible high-quality designs and closely matches or improves upon the best reported results while keeping computation time competitive.</description>
	<pubDate>2026-01-04</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 11: A Hybrid Gradient-Based Optimiser for Solving Complex Engineering Design Problems</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/1/11">doi: 10.3390/computation14010011</a></p>
	<p>Authors:
		Jamal Zraqou
		Riyad Alrousan
		Zaid Khrisat
		Faten Hamad
		Niveen Halalsheh
		Hussam Fakhouri
		</p>
	<p>This paper proposes JADEGBO, a hybrid gradient-based metaheuristic for solving complex single- and multi-constraint engineering design problems as well as cost-sensitive security optimisation tasks. The method combines Adaptive Differential Evolution with Optional External Archive (JADE), which provides self-adaptive exploration through p-best mutation, an external archive, and success-based parameter learning, with the Gradient-Based Optimiser (GBO), which contributes Newton-inspired gradient search rules and a local escaping operator. In the proposed scheme, JADE is first employed to discover promising regions of the search space, after which GBO performs an intensified local refinement of the best individuals inherited from JADE. The performance of JADEGBO is assessed on the CEC2017 single-objective benchmark suite and compared against a broad set of classical and recent metaheuristics. Statistical indicators, convergence curves, box plots, histograms, sensitivity analyses, and scatter plots show that the hybrid typically attains the best or near-best mean fitness, exhibits low run-to-run variance, and maintains a favourable balance between exploration and exploitation across rotated, shifted, and composite landscapes. To demonstrate practical relevance, JADEGBO is further applied to the following four well-known constrained engineering design problems: welded beam, pressure vessel, speed reducer, and three-bar truss design. The algorithm consistently produces feasible high-quality designs and closely matches or improves upon the best reported results while keeping computation time competitive.</p>
	]]></content:encoded>

	<dc:title>A Hybrid Gradient-Based Optimiser for Solving Complex Engineering Design Problems</dc:title>
			<dc:creator>Jamal Zraqou</dc:creator>
			<dc:creator>Riyad Alrousan</dc:creator>
			<dc:creator>Zaid Khrisat</dc:creator>
			<dc:creator>Faten Hamad</dc:creator>
			<dc:creator>Niveen Halalsheh</dc:creator>
			<dc:creator>Hussam Fakhouri</dc:creator>
		<dc:identifier>doi: 10.3390/computation14010011</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-01-04</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-01-04</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>11</prism:startingPage>
		<prism:doi>10.3390/computation14010011</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/1/11</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/1/10">

	<title>Computation, Vol. 14, Pages 10: Do LLMs Speak BPMN? An Evaluation of Their Process Modeling Capabilities Based on Quality Measures</title>
	<link>https://www.mdpi.com/2079-3197/14/1/10</link>
	<description>Large Language Models (LLMs) are emerging as powerful tools for automating business process modeling, promising to streamline the translation of textual process descriptions into Business Process Model and Notation (BPMN) diagrams. However, the extent to which these AI systems can produce high-quality BPMN models has not yet been rigorously evaluated. This paper presents an early evaluation of five LLM-powered BPMN generation tools that automatically convert textual process descriptions into BPMN models. To assess the external quality of these AI-generated models, we introduce a novel structured evaluation framework that scores each BPMN diagram across three key process model quality dimensions: clarity, correctness, and completeness, covering both accuracy and diagram understandability. Using this framework, we conducted experiments where each tool was tasked with modeling the same set of textual process scenarios, and the resulting diagrams were systematically scored based on the criteria. This approach provides a consistent and repeatable evaluation procedure and offers a new lens for comparing LLM-based modeling capabilities. Given the focused scope of the study, the results should be interpreted as an exploratory benchmark that surfaces initial observations about tool performance rather than definitive conclusions. Our findings reveal that while current LLM-based tools can produce BPMN diagrams that capture the main elements of a process description, they often exhibit errors such as missing steps, inconsistent logic, or modeling rule violations, highlighting limitations in achieving fully correct and complete models. The clarity and readability of the generated diagrams also vary, indicating that these AI models are still maturing in generating easily interpretable process flows. 
We conclude that although LLMs show promise in automating BPMN modeling, significant improvements are needed for them to consistently generate both syntactically and semantically valid process models.</description>
	<pubDate>2026-01-04</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 10: Do LLMs Speak BPMN? An Evaluation of Their Process Modeling Capabilities Based on Quality Measures</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/1/10">doi: 10.3390/computation14010010</a></p>
	<p>Authors:
		Panagiotis Drakopoulos
		Panagiotis Malousoudis
		Nikolaos Nousias
		George Tsakalidis
		Kostas Vergidis
		</p>
	<p>Large Language Models (LLMs) are emerging as powerful tools for automating business process modeling, promising to streamline the translation of textual process descriptions into Business Process Model and Notation (BPMN) diagrams. However, the extent to which these AI systems can produce high-quality BPMN models has not yet been rigorously evaluated. This paper presents an early evaluation of five LLM-powered BPMN generation tools that automatically convert textual process descriptions into BPMN models. To assess the external quality of these AI-generated models, we introduce a novel structured evaluation framework that scores each BPMN diagram across three key process model quality dimensions: clarity, correctness, and completeness, covering both accuracy and diagram understandability. Using this framework, we conducted experiments where each tool was tasked with modeling the same set of textual process scenarios, and the resulting diagrams were systematically scored based on the criteria. This approach provides a consistent and repeatable evaluation procedure and offers a new lens for comparing LLM-based modeling capabilities. Given the focused scope of the study, the results should be interpreted as an exploratory benchmark that surfaces initial observations about tool performance rather than definitive conclusions. Our findings reveal that while current LLM-based tools can produce BPMN diagrams that capture the main elements of a process description, they often exhibit errors such as missing steps, inconsistent logic, or modeling rule violations, highlighting limitations in achieving fully correct and complete models. The clarity and readability of the generated diagrams also vary, indicating that these AI models are still maturing in generating easily interpretable process flows. 
We conclude that although LLMs show promise in automating BPMN modeling, significant improvements are needed for them to consistently generate both syntactically and semantically valid process models.</p>
	]]></content:encoded>

	<dc:title>Do LLMs Speak BPMN? An Evaluation of Their Process Modeling Capabilities Based on Quality Measures</dc:title>
			<dc:creator>Panagiotis Drakopoulos</dc:creator>
			<dc:creator>Panagiotis Malousoudis</dc:creator>
			<dc:creator>Nikolaos Nousias</dc:creator>
			<dc:creator>George Tsakalidis</dc:creator>
			<dc:creator>Kostas Vergidis</dc:creator>
		<dc:identifier>doi: 10.3390/computation14010010</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-01-04</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-01-04</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>10</prism:startingPage>
		<prism:doi>10.3390/computation14010010</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/1/10</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/1/9">

	<title>Computation, Vol. 14, Pages 9: A Consumer Digital Twin for Energy Demand Prediction: Development and Implementation Under the SENDER Project (HORIZON 2020)</title>
	<link>https://www.mdpi.com/2079-3197/14/1/9</link>
	<description>This paper presents the development and implementation of a consumer Digital Twin (DT) for energy demand prediction under the SENDER (Sustainable Consumer Engagement and Demand Response) project, funded by HORIZON 2020. This project aims to engage consumers in the energy sector with innovative energy service applications to achieve proactive Demand Response (DR) and optimized usage of Renewable Energy Sources (RES). The proposed DT model is designed to digitally represent occupant behaviors and energy consumption patterns using Artificial Neural Networks (ANN), which enable continuous learning by processing real-time and historical data in different pilot sites and seasons. The DT development incorporates the International Energy Agency (IEA)—Energy in Buildings and Communities (EBC) Annex 66 and Drivers-Needs-Actions-Systems (DNAS) framework to standardize occupant behavior modeling. The research methodology consists of the following steps: (i) a mock-up simulation environment for three pilot sites was created, (ii) the DT was trained and calibrated using the artificial data from the previous step, and (iii) the DT model was validated with real data from the Alginet pilot site in Spain. Results showed a strong correlation between DT predictions and mock-up data, with a maximum deviation of ±2%. Finally, a set of selected Key Performance Indicators (KPIs) was defined and categorized in order to evaluate the system’s technical effectiveness.</description>
	<pubDate>2026-01-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 9: A Consumer Digital Twin for Energy Demand Prediction: Development and Implementation Under the SENDER Project (HORIZON 2020)</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/1/9">doi: 10.3390/computation14010009</a></p>
	<p>Authors:
		Dimitra Douvi
		Eleni Douvi
		Jason Tsahalis
		Haralabos-Theodoros Tsahalis
		</p>
	<p>This paper presents the development and implementation of a consumer Digital Twin (DT) for energy demand prediction under the SENDER (Sustainable Consumer Engagement and Demand Response) project, funded by HORIZON 2020. This project aims to engage consumers in the energy sector with innovative energy service applications to achieve proactive Demand Response (DR) and optimized usage of Renewable Energy Sources (RES). The proposed DT model is designed to digitally represent occupant behaviors and energy consumption patterns using Artificial Neural Networks (ANN), which enable continuous learning by processing real-time and historical data in different pilot sites and seasons. The DT development incorporates the International Energy Agency (IEA)—Energy in Buildings and Communities (EBC) Annex 66 and Drivers-Needs-Actions-Systems (DNAS) framework to standardize occupant behavior modeling. The research methodology consists of the following steps: (i) a mock-up simulation environment for three pilot sites was created, (ii) the DT was trained and calibrated using the artificial data from the previous step, and (iii) the DT model was validated with real data from the Alginet pilot site in Spain. Results showed a strong correlation between DT predictions and mock-up data, with a maximum deviation of ±2%. Finally, a set of selected Key Performance Indicators (KPIs) was defined and categorized in order to evaluate the system’s technical effectiveness.</p>
	]]></content:encoded>

	<dc:title>A Consumer Digital Twin for Energy Demand Prediction: Development and Implementation Under the SENDER Project (HORIZON 2020)</dc:title>
			<dc:creator>Dimitra Douvi</dc:creator>
			<dc:creator>Eleni Douvi</dc:creator>
			<dc:creator>Jason Tsahalis</dc:creator>
			<dc:creator>Haralabos-Theodoros Tsahalis</dc:creator>
		<dc:identifier>doi: 10.3390/computation14010009</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-01-03</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-01-03</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Communication</prism:section>
	<prism:startingPage>9</prism:startingPage>
		<prism:doi>10.3390/computation14010009</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/1/9</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/1/8">

	<title>Computation, Vol. 14, Pages 8: Attention Bidirectional Recurrent Neural Zero-Shot Semantic Classifier for Emotional Footprint Identification</title>
	<link>https://www.mdpi.com/2079-3197/14/1/8</link>
	<description>Exploring emotions in organization settings, particularly in feedback on organizational welfare programs, is critical for understanding employee experiences and enhancing organizational policies. Recognizing emotions from a conversation (i.e., leaving an emotional footprint) is a predominant task for a machine to comprehend the full context of the conversation. While fine-tuning of pre-trained models has invariably provided state-of-the-art results in emotion footprint recognition tasks, the prospect of a zero-shot learned model in this sphere is, on the whole, unexplored. The objective here remains to identify the emotional footprint of the members participating in the conversation after the conversation is over with improved accuracy, time and minimal error rate. To address these gaps, in this work, a method called Attention Bidirectional Recurrent Neural Zero-Shot Semantic Classifier (ABRN-ZSSC) for emotional footprint identification is proposed. The ABRN-ZSSC for emotional footprint identification is split into two sections. First, the raw data from a Two-Party Conversation with Emotional Footprint and Emotional Intensity are subjected to the Attention Bidirectional Recurrent Neural Network model with the intent of identifying the emotional footprint for each party near the conclusion of the conversation and, second, with the identified emotional footprint in a conversation. The Zero-Shot Learning-based classifier is applied to train and classify emotions both accurately and precisely. We verify the utility of these approaches (i.e., emotional footprint identification and classification) by performing an extensive experimental evaluation on two corpora on four aspects, training time, accuracy, precision, and error rate for varying samples. Experimental results demonstrate that the ABRN-ZSSC method outperforms two existing baseline models in emotion inference tasks across the dataset. 
An outcome of the proposed ABRN-ZSSC method is that it obtains superior performance in terms of 10% precision, 17% accuracy and 8% recall as well as 19% training time and 18% error rate compared to the conventional methods.</description>
	<pubDate>2026-01-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 8: Attention Bidirectional Recurrent Neural Zero-Shot Semantic Classifier for Emotional Footprint Identification</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/1/8">doi: 10.3390/computation14010008</a></p>
	<p>Authors:
		Karthikeyan Jagadeesan
		Annapurani Kumarappan
		</p>
	<p>Exploring emotions in organization settings, particularly in feedback on organizational welfare programs, is critical for understanding employee experiences and enhancing organizational policies. Recognizing emotions from a conversation (i.e., leaving an emotional footprint) is a predominant task for a machine to comprehend the full context of the conversation. While fine-tuning of pre-trained models has invariably provided state-of-the-art results in emotion footprint recognition tasks, the prospect of a zero-shot learned model in this sphere is, on the whole, unexplored. The objective here remains to identify the emotional footprint of the members participating in the conversation after the conversation is over with improved accuracy, time and minimal error rate. To address these gaps, in this work, a method called Attention Bidirectional Recurrent Neural Zero-Shot Semantic Classifier (ABRN-ZSSC) for emotional footprint identification is proposed. The ABRN-ZSSC for emotional footprint identification is split into two sections. First, the raw data from a Two-Party Conversation with Emotional Footprint and Emotional Intensity are subjected to the Attention Bidirectional Recurrent Neural Network model with the intent of identifying the emotional footprint for each party near the conclusion of the conversation and, second, with the identified emotional footprint in a conversation. The Zero-Shot Learning-based classifier is applied to train and classify emotions both accurately and precisely. We verify the utility of these approaches (i.e., emotional footprint identification and classification) by performing an extensive experimental evaluation on two corpora on four aspects, training time, accuracy, precision, and error rate for varying samples. Experimental results demonstrate that the ABRN-ZSSC method outperforms two existing baseline models in emotion inference tasks across the dataset. 
An outcome of the proposed ABRN-ZSSC method is that it obtains superior performance in terms of 10% precision, 17% accuracy and 8% recall as well as 19% training time and 18% error rate compared to the conventional methods.</p>
	]]></content:encoded>

	<dc:title>Attention Bidirectional Recurrent Neural Zero-Shot Semantic Classifier for Emotional Footprint Identification</dc:title>
			<dc:creator>Karthikeyan Jagadeesan</dc:creator>
			<dc:creator>Annapurani Kumarappan</dc:creator>
		<dc:identifier>doi: 10.3390/computation14010008</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-01-02</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-01-02</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>8</prism:startingPage>
		<prism:doi>10.3390/computation14010008</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/1/8</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/1/7">

	<title>Computation, Vol. 14, Pages 7: Multiphysics Modelling and Experimental Validation of Road Tanker Dynamics: Stress Analysis and Material Characterization</title>
	<link>https://www.mdpi.com/2079-3197/14/1/7</link>
	<description>Crossland Tankers is a leading manufacturer of bulk-load road tankers in Northern Ireland. These tankers transport up to forty thousand litres of liquid over long distances across diverse road conditions. Liquid sloshing within the tank has a significant impact on driveability and the tanker’s lifespan. This study introduces a novel Multiphysics model combining Smooth Particle Hydrodynamics (SPH) and Finite Element Analysis (FEA) to simulate fluid–structure interactions in a full-scale road tanker, validated with real-world road test data. The model reveals high-stress zones under braking and turning, with peak stresses at critical chassis locations, offering design insights for weight reduction and enhanced safety. Results demonstrate the approach’s effectiveness in optimising tanker design, reducing prototyping costs, and improving longevity, providing a valuable computational tool for industry applications.</description>
	<pubDate>2026-01-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 7: Multiphysics Modelling and Experimental Validation of Road Tanker Dynamics: Stress Analysis and Material Characterization</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/1/7">doi: 10.3390/computation14010007</a></p>
	<p>Authors:
		Conor Robb
		Gasser Abdelal
		Pearse McKeefry
		Conor Quinn
		</p>
	<p>Crossland Tankers is a leading manufacturer of bulk-load road tankers in Northern Ireland. These tankers transport up to forty thousand litres of liquid over long distances across diverse road conditions. Liquid sloshing within the tank has a significant impact on driveability and the tanker’s lifespan. This study introduces a novel Multiphysics model combining Smooth Particle Hydrodynamics (SPH) and Finite Element Analysis (FEA) to simulate fluid–structure interactions in a full-scale road tanker, validated with real-world road test data. The model reveals high-stress zones under braking and turning, with peak stresses at critical chassis locations, offering design insights for weight reduction and enhanced safety. Results demonstrate the approach’s effectiveness in optimising tanker design, reducing prototyping costs, and improving longevity, providing a valuable computational tool for industry applications.</p>
	]]></content:encoded>

	<dc:title>Multiphysics Modelling and Experimental Validation of Road Tanker Dynamics: Stress Analysis and Material Characterization</dc:title>
			<dc:creator>Conor Robb</dc:creator>
			<dc:creator>Gasser Abdelal</dc:creator>
			<dc:creator>Pearse McKeefry</dc:creator>
			<dc:creator>Conor Quinn</dc:creator>
		<dc:identifier>doi: 10.3390/computation14010007</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-01-02</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-01-02</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>7</prism:startingPage>
		<prism:doi>10.3390/computation14010007</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/1/7</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/1/6">

	<title>Computation, Vol. 14, Pages 6: Advances in Single-Cell Sequencing for Understanding and Treating Kidney Disease</title>
	<link>https://www.mdpi.com/2079-3197/14/1/6</link>
	<description>The fields of medical diagnostics, nephrology, and the sequencing of cellular genetic material are pivotal for precise quantification of kidney diseases. Single-cell sequencing, enhanced by automation and software tools, enables efficient examination of biopsies at the individual cell level. This approach shows the complex cellular mosaic that shapes organ function. By quantifying gene expression following injury, single-cell analysis provides insight into disease progression. In this review, new developments in single-cell analysis methods, spatial integration of single-cell analysis, single-nucleus RNA sequencing, and emerging methods, including expression quantitative trait loci, whole-genome sequencing, and whole-exome sequencing in nephrology, are discussed. These advancements are poised to enhance kidney disease diagnostic processes, therapeutic strategies, and patient prognosis.</description>
	<pubDate>2026-01-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 6: Advances in Single-Cell Sequencing for Understanding and Treating Kidney Disease</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/1/6">doi: 10.3390/computation14010006</a></p>
	<p>Authors:
		Jose L. Agraz
		Amit Verma
		Claudia M. Agraz
		</p>
	<p>The fields of medical diagnostics, nephrology, and the sequencing of cellular genetic material are pivotal for precise quantification of kidney diseases. Single-cell sequencing, enhanced by automation and software tools, enables efficient examination of biopsies at the individual cell level. This approach shows the complex cellular mosaic that shapes organ function. By quantifying gene expression following injury, single-cell analysis provides insight into disease progression. In this review, new developments in single-cell analysis methods, spatial integration of single-cell analysis, single-nucleus RNA sequencing, and emerging methods, including expression quantitative trait loci, whole-genome sequencing, and whole-exome sequencing in nephrology, are discussed. These advancements are poised to enhance kidney disease diagnostic processes, therapeutic strategies, and patient prognosis.</p>
	]]></content:encoded>

	<dc:title>Advances in Single-Cell Sequencing for Understanding and Treating Kidney Disease</dc:title>
			<dc:creator>Jose L. Agraz</dc:creator>
			<dc:creator>Amit Verma</dc:creator>
			<dc:creator>Claudia M. Agraz</dc:creator>
		<dc:identifier>doi: 10.3390/computation14010006</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2026-01-02</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2026-01-02</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>6</prism:startingPage>
		<prism:doi>10.3390/computation14010006</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/1/6</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/1/5">

	<title>Computation, Vol. 14, Pages 5: SARIMA vs. Prophet: Comparative Efficacy in Forecasting Traffic Accidents Across Ecuadorian Provinces</title>
	<link>https://www.mdpi.com/2079-3197/14/1/5</link>
	<description>This study aimed to evaluate the comparative predictive efficacy of the SARIMA statistical model and the Prophet machine learning model for forecasting monthly traffic accidents across the 24 provinces of Ecuador, addressing a critical research gap in model selection for geographically and socioeconomically heterogeneous regions. By integrating classical time series modeling with algorithmic decomposition techniques, the research sought to determine whether a universally superior model exists or if predictive performance is inherently context-dependent. Monthly accident data from January 2013 to June 2025 were analyzed using a rolling-window evaluation framework. Model accuracy was assessed through Mean Absolute Percentage Error (MAPE) and Root Mean Square Error (RMSE) metrics to ensure consistency and comparability across provinces. The results revealed a global tie, with 12 provinces favoring SARIMA and 12 favoring Prophet, indicating the absence of a single dominant model. However, regional patterns of superiority emerged: Prophet achieved exceptional precision in coastal and urban provinces with stationary and high-volume time series—such as Guayas, which recorded the lowest MAPE (4.91%)—while SARIMA outperformed Prophet in the Andean highlands, particularly in non-stationary, medium-to-high-volume provinces such as Tungurahua (MAPE 6.07%) and Pichincha (MAPE 13.38%). Computational instability in MAPE was noted for provinces with extremely low accident counts (e.g., Galápagos, Carchi), though RMSE values remained low, indicating a metric rather than model limitation. Overall, the findings invalidate the notion of a universally optimal model and underscore the necessity of adopting adaptive, region-specific modeling frameworks that account for local geographic, demographic, and structural factors in predictive road safety analytics.</description>
	<pubDate>2025-12-31</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 5: SARIMA vs. Prophet: Comparative Efficacy in Forecasting Traffic Accidents Across Ecuadorian Provinces</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/1/5">doi: 10.3390/computation14010005</a></p>
	<p>Authors:
		Wilson Chango
		Ana Salguero
		Tatiana Landivar
		Roberto Vásconez
		Geovanny Silva
		Pedro Peñafiel-Arcos
		Lucía Núñez
		Homero Velasteguí-Izurieta
		</p>
	<p>This study aimed to evaluate the comparative predictive efficacy of the SARIMA statistical model and the Prophet machine learning model for forecasting monthly traffic accidents across the 24 provinces of Ecuador, addressing a critical research gap in model selection for geographically and socioeconomically heterogeneous regions. By integrating classical time series modeling with algorithmic decomposition techniques, the research sought to determine whether a universally superior model exists or if predictive performance is inherently context-dependent. Monthly accident data from January 2013 to June 2025 were analyzed using a rolling-window evaluation framework. Model accuracy was assessed through Mean Absolute Percentage Error (MAPE) and Root Mean Square Error (RMSE) metrics to ensure consistency and comparability across provinces. The results revealed a global tie, with 12 provinces favoring SARIMA and 12 favoring Prophet, indicating the absence of a single dominant model. However, regional patterns of superiority emerged: Prophet achieved exceptional precision in coastal and urban provinces with stationary and high-volume time series—such as Guayas, which recorded the lowest MAPE (4.91%)—while SARIMA outperformed Prophet in the Andean highlands, particularly in non-stationary, medium-to-high-volume provinces such as Tungurahua (MAPE 6.07%) and Pichincha (MAPE 13.38%). Computational instability in MAPE was noted for provinces with extremely low accident counts (e.g., Galápagos, Carchi), though RMSE values remained low, indicating a metric rather than model limitation. Overall, the findings invalidate the notion of a universally optimal model and underscore the necessity of adopting adaptive, region-specific modeling frameworks that account for local geographic, demographic, and structural factors in predictive road safety analytics.</p>
	]]></content:encoded>

	<dc:title>SARIMA vs. Prophet: Comparative Efficacy in Forecasting Traffic Accidents Across Ecuadorian Provinces</dc:title>
			<dc:creator>Wilson Chango</dc:creator>
			<dc:creator>Ana Salguero</dc:creator>
			<dc:creator>Tatiana Landivar</dc:creator>
			<dc:creator>Roberto Vásconez</dc:creator>
			<dc:creator>Geovanny Silva</dc:creator>
			<dc:creator>Pedro Peñafiel-Arcos</dc:creator>
			<dc:creator>Lucía Núñez</dc:creator>
			<dc:creator>Homero Velasteguí-Izurieta</dc:creator>
		<dc:identifier>doi: 10.3390/computation14010005</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2025-12-31</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2025-12-31</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>5</prism:startingPage>
		<prism:doi>10.3390/computation14010005</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/1/5</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/1/4">

	<title>Computation, Vol. 14, Pages 4: Experimental and Numerical Investigation of Hydrodynamic Characteristics of Aquaculture Nets: The Critical Role of Solidity Ratio in Biofouling Assessment</title>
	<link>https://www.mdpi.com/2079-3197/14/1/4</link>
	<description>Biofouling on aquaculture netting increases hydrodynamic drag and restricts water exchange across net cages. The solidity ratio is introduced as a quantitative parameter to characterize fouling severity. Towing tank experiments and computational fluid dynamics (CFD) simulations were used to assess the hydrodynamic behavior of netting under different fouling conditions. Experimental results indicated a nonlinear increase in drag force with increasing solidity. At a flow velocity of 0.90 m/s, the drag force increased by 112.2%, 195.1%, and 295.7% for netting with solidity ratios of 0.445, 0.733, and 0.787, respectively, compared to clean netting (Sn = 0.211). The drag coefficient remained stable within 1.445–1.573 across Re of 995–2189. Numerical simulations demonstrated the evolution of flow fields around netting, including jet flow formation in mesh openings and reverse flow regions and vortex structures behind knots. Under high solidity (Sn = 0.733–0.787), complex wake patterns such as dual-peak vortex streets appeared. Therefore, this study confirmed that the solidity ratio is an effective comprehensive parameter for evaluating biofouling effects, providing a theoretical basis for antifouling design and cleaning strategy development for aquaculture cages.</description>
	<pubDate>2025-12-30</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 4: Experimental and Numerical Investigation of Hydrodynamic Characteristics of Aquaculture Nets: The Critical Role of Solidity Ratio in Biofouling Assessment</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/1/4">doi: 10.3390/computation14010004</a></p>
	<p>Authors:
		Wei Liu
		Lei Wang
		Yongli Liu
		Yuyan Li
		Guangrui Qi
		Dawen Mao
		</p>
	<p>Biofouling on aquaculture netting increases hydrodynamic drag and restricts water exchange across net cages. The solidity ratio is introduced as a quantitative parameter to characterize fouling severity. Towing tank experiments and computational fluid dynamics (CFD) simulations were used to assess the hydrodynamic behavior of netting under different fouling conditions. Experimental results indicated a nonlinear increase in drag force with increasing solidity. At a flow velocity of 0.90 m/s, the drag force increased by 112.2%, 195.1%, and 295.7% for netting with solidity ratios of 0.445, 0.733, and 0.787, respectively, compared to clean netting (Sn = 0.211). The drag coefficient remained stable within 1.445&ndash;1.573 across Re of 995&ndash;2189. Numerical simulations demonstrated the evolution of flow fields around netting, including jet flow formation in mesh openings and reverse flow regions and vortex structures behind knots. Under high solidity (Sn = 0.733&ndash;0.787), complex wake patterns such as dual-peak vortex streets appeared. Therefore, this study confirmed that the solidity ratio is an effective comprehensive parameter for evaluating biofouling effects, providing a theoretical basis for antifouling design and cleaning strategy development for aquaculture cages.</p>
	]]></content:encoded>

	<dc:title>Experimental and Numerical Investigation of Hydrodynamic Characteristics of Aquaculture Nets: The Critical Role of Solidity Ratio in Biofouling Assessment</dc:title>
			<dc:creator>Wei Liu</dc:creator>
			<dc:creator>Lei Wang</dc:creator>
			<dc:creator>Yongli Liu</dc:creator>
			<dc:creator>Yuyan Li</dc:creator>
			<dc:creator>Guangrui Qi</dc:creator>
			<dc:creator>Dawen Mao</dc:creator>
		<dc:identifier>doi: 10.3390/computation14010004</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2025-12-30</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2025-12-30</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>4</prism:startingPage>
		<prism:doi>10.3390/computation14010004</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/1/4</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/1/3">

	<title>Computation, Vol. 14, Pages 3: Reaction-Diffusion Model of CAR-T Cell Therapy in Solid Tumours with Antigen Escape</title>
	<link>https://www.mdpi.com/2079-3197/14/1/3</link>
	<description>Developing effective CAR-T cell therapy for solid tumours remains challenging because of biological barriers such as antigen escape and an immunosuppressive microenvironment. The aim of this study is to develop a mathematical model of the spatio-temporal dynamics of tumour processes in order to assess key factors that limit treatment efficacy. We propose a reaction–diffusion model described by a system of partial differential equations for the densities of tumour cells and CAR-T cells, the concentration of immune inhibitors, and the degree of antigen escape. The methods of investigation include stability analysis and numerical solution of the model using a finite-difference scheme. The simulations show that antigen escape produces a resistant tumour core and relapse after an initial regression; increasing the escape rate from γ=0.001 to 0.1 increases the final tumour volume at t=100 days from approximately 35.3 a.u. to 36.2 a.u. Parameter mapping further indicates that for γ≤0.01 tumour control can be achieved at moderate killing rates (kCT≈1day−1), whereas for γ≥0.05 comparable control requires kCT≈2–5day−1. Repeated CAR-T administration improves durability: the residual normalised tumour volume at t=100 days decreases from approximately 4.5 after a single infusion to approximately 0.9 (double) and approximately 0.5 (triple), with a saturating benefit for further intensification. We conclude that the proposed model is a valuable tool for analysing and optimising CAR-T therapy protocols, and that our results highlight the need for combined strategies aimed at overcoming antigen escape.</description>
	<pubDate>2025-12-30</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 3: Reaction-Diffusion Model of CAR-T Cell Therapy in Solid Tumours with Antigen Escape</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/1/3">doi: 10.3390/computation14010003</a></p>
	<p>Authors:
		Maxim V. Polyakov
		Elena I. Tuchina
		</p>
	<p>Developing effective CAR-T cell therapy for solid tumours remains challenging because of biological barriers such as antigen escape and an immunosuppressive microenvironment. The aim of this study is to develop a mathematical model of the spatio-temporal dynamics of tumour processes in order to assess key factors that limit treatment efficacy. We propose a reaction&ndash;diffusion model described by a system of partial differential equations for the densities of tumour cells and CAR-T cells, the concentration of immune inhibitors, and the degree of antigen escape. The methods of investigation include stability analysis and numerical solution of the model using a finite-difference scheme. The simulations show that antigen escape produces a resistant tumour core and relapse after an initial regression; increasing the escape rate from &gamma;=0.001 to 0.1 increases the final tumour volume at t=100 days from approximately 35.3 a.u. to 36.2 a.u. Parameter mapping further indicates that for &gamma;&le;0.01 tumour control can be achieved at moderate killing rates (kCT&asymp;1day&minus;1), whereas for &gamma;&ge;0.05 comparable control requires kCT&asymp;2&ndash;5day&minus;1. Repeated CAR-T administration improves durability: the residual normalised tumour volume at t=100 days decreases from approximately 4.5 after a single infusion to approximately 0.9 (double) and approximately 0.5 (triple), with a saturating benefit for further intensification. We conclude that the proposed model is a valuable tool for analysing and optimising CAR-T therapy protocols, and that our results highlight the need for combined strategies aimed at overcoming antigen escape.</p>
	]]></content:encoded>

	<dc:title>Reaction-Diffusion Model of CAR-T Cell Therapy in Solid Tumours with Antigen Escape</dc:title>
			<dc:creator>Maxim V. Polyakov</dc:creator>
			<dc:creator>Elena I. Tuchina</dc:creator>
		<dc:identifier>doi: 10.3390/computation14010003</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2025-12-30</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2025-12-30</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>3</prism:startingPage>
		<prism:doi>10.3390/computation14010003</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/1/3</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2079-3197/14/1/2">

	<title>Computation, Vol. 14, Pages 2: An Interpretable Artificial Intelligence Approach for Reliability and Regulation-Aware Decision Support in Power Systems</title>
	<link>https://www.mdpi.com/2079-3197/14/1/2</link>
	<description>Modern medium-voltage (MV) distribution networks face increasing reliability challenges driven by aging assets, climate variability, and evolving operational demands. In Colombia and across Latin America, reliability metrics, such as the System Average Interruption Frequency Index (SAIFI), standardized under IEEE 1366, serve as key indicators for regulatory compliance and service quality. However, existing analytical approaches struggle to jointly deliver predictive accuracy, interpretability, and traceability required for regulated environments. Here, we introduce CRITAIR (Criticality Analysis through Interpretable Artificial Intelligence-based Recommendations), an integrated framework that combines predictive modeling, explainable analytics, and regulation-aware reasoning to enhance reliability management in MV networks. CRITAIR unifies three components: (i) a TabNet-based predictive module that estimates SAIFI using outage, asset, and meteorological data while producing global and local attributions; (ii) an agentic retrieval-and-reasoning stage that grounds recommendations in regulatory evidence from RETIE and NTC 2050; and (iii) interpretable reasoning graphs that map decision pathways. Evaluations conducted on real operational data demonstrate that CRITAIR achieves competitive predictive performance—comparable to Random Forest and XGBoost—while maintaining transparency through sparse attention and sequential feature explainability. Also, our regulation-aware reasoning module exhibits coherent and verifiable recommendations, achieving high semantic alignment scores (BERTScore) and expert-rated interpretability. Overall, CRITAIR bridges the gap between predictive analytics and regulatory governance, offering a transparent, auditable, and deployment-ready solution for digital transformation in electric distribution systems.</description>
	<pubDate>2025-12-21</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computation, Vol. 14, Pages 2: An Interpretable Artificial Intelligence Approach for Reliability and Regulation-Aware Decision Support in Power Systems</b></p>
	<p>Computation <a href="https://www.mdpi.com/2079-3197/14/1/2">doi: 10.3390/computation14010002</a></p>
	<p>Authors:
		Diego Armando Pérez-Rosero
		Santiago Pineda-Quintero
		Juan Carlos Álvarez-Barreto
		Andrés Marino Álvarez-Meza
		German Castellanos-Dominguez
		</p>
	<p>Modern medium-voltage (MV) distribution networks face increasing reliability challenges driven by aging assets, climate variability, and evolving operational demands. In Colombia and across Latin America, reliability metrics, such as the System Average Interruption Frequency Index (SAIFI), standardized under IEEE 1366, serve as key indicators for regulatory compliance and service quality. However, existing analytical approaches struggle to jointly deliver predictive accuracy, interpretability, and traceability required for regulated environments. Here, we introduce CRITAIR (Criticality Analysis through Interpretable Artificial Intelligence-based Recommendations), an integrated framework that combines predictive modeling, explainable analytics, and regulation-aware reasoning to enhance reliability management in MV networks. CRITAIR unifies three components: (i) a TabNet-based predictive module that estimates SAIFI using outage, asset, and meteorological data while producing global and local attributions; (ii) an agentic retrieval-and-reasoning stage that grounds recommendations in regulatory evidence from RETIE and NTC 2050; and (iii) interpretable reasoning graphs that map decision pathways. Evaluations conducted on real operational data demonstrate that CRITAIR achieves competitive predictive performance&mdash;comparable to Random Forest and XGBoost&mdash;while maintaining transparency through sparse attention and sequential feature explainability. Also, our regulation-aware reasoning module exhibits coherent and verifiable recommendations, achieving high semantic alignment scores (BERTScore) and expert-rated interpretability. Overall, CRITAIR bridges the gap between predictive analytics and regulatory governance, offering a transparent, auditable, and deployment-ready solution for digital transformation in electric distribution systems.</p>
	]]></content:encoded>

	<dc:title>An Interpretable Artificial Intelligence Approach for Reliability and Regulation-Aware Decision Support in Power Systems</dc:title>
			<dc:creator>Diego Armando Pérez-Rosero</dc:creator>
			<dc:creator>Santiago Pineda-Quintero</dc:creator>
			<dc:creator>Juan Carlos Álvarez-Barreto</dc:creator>
			<dc:creator>Andrés Marino Álvarez-Meza</dc:creator>
			<dc:creator>German Castellanos-Dominguez</dc:creator>
		<dc:identifier>doi: 10.3390/computation14010002</dc:identifier>
	<dc:source>Computation</dc:source>
	<dc:date>2025-12-21</dc:date>

	<prism:publicationName>Computation</prism:publicationName>
	<prism:publicationDate>2025-12-21</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2</prism:startingPage>
		<prism:doi>10.3390/computation14010002</prism:doi>
	<prism:url>https://www.mdpi.com/2079-3197/14/1/2</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
    
<cc:License rdf:about="https://creativecommons.org/licenses/by/4.0/">
	<cc:permits rdf:resource="https://creativecommons.org/ns#Reproduction" />
	<cc:permits rdf:resource="https://creativecommons.org/ns#Distribution" />
	<cc:permits rdf:resource="https://creativecommons.org/ns#DerivativeWorks" />
</cc:License>

</rdf:RDF>
