<?xml version="1.0" encoding="UTF-8"?>
<rdf:RDF xmlns="http://purl.org/rss/1.0/"
 xmlns:dc="http://purl.org/dc/elements/1.1/"
 xmlns:dcterms="http://purl.org/dc/terms/"
 xmlns:cc="http://web.resource.org/cc/"
 xmlns:prism="http://prismstandard.org/namespaces/basic/2.0/"
 xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
 xmlns:admin="http://webns.net/mvcb/"
 xmlns:content="http://purl.org/rss/1.0/modules/content/">
    <channel rdf:about="https://www.mdpi.com/rss/journal/network">
		<title>Network</title>
		<description>Latest open access articles published in Network at https://www.mdpi.com/journal/network</description>
		<link>https://www.mdpi.com/journal/network</link>
		<admin:generatorAgent rdf:resource="https://www.mdpi.com/journal/network"/>
		<admin:errorReportsTo rdf:resource="mailto:support@mdpi.com"/>
		<dc:publisher>MDPI</dc:publisher>
		<dc:language>en</dc:language>
		<dc:rights>Creative Commons Attribution (CC-BY)</dc:rights>
						<prism:copyright>MDPI</prism:copyright>
		<prism:rightsAgent>support@mdpi.com</prism:rightsAgent>
		<image rdf:resource="https://pub.mdpi-res.com/img/design/mdpi-pub-logo.png?13cf3b5bd783e021?1777446772"/>
				<items>
			<rdf:Seq>
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/6/2/28" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/6/2/27" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/6/2/26" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/6/2/25" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/6/2/24" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/6/2/23" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/6/2/22" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/6/2/21" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/6/2/20" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/6/2/19" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/6/1/18" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/6/1/17" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/6/1/16" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/6/1/15" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/6/1/14" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/6/1/13" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/6/1/12" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/6/1/11" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/6/1/10" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/6/1/9" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/6/1/8" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/6/1/7" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/6/1/6" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/6/1/5" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/6/1/4" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/6/1/3" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/6/1/2" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/6/1/1" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/5/4/54" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/5/4/53" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/5/4/52" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/5/4/51" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/5/4/50" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/5/4/49" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/5/4/48" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/5/4/47" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/5/4/46" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/5/4/45" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/5/4/44" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/5/4/43" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/5/4/42" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/5/3/41" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/5/3/40" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/5/3/39" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/5/3/38" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/5/3/37" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/5/3/36" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/5/3/35" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/5/3/34" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/5/3/33" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/5/3/32" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/5/3/31" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/5/3/30" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/5/3/29" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/5/3/28" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/5/3/27" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/5/3/26" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/5/3/25" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/5/3/24" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/5/3/23" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/5/3/22" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/5/2/21" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/5/2/20" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/5/2/19" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/5/2/18" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/5/2/17" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/5/2/16" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/5/2/15" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/5/2/14" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/5/2/13" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/5/2/12" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/5/2/11" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/5/2/10" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/5/2/9" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/5/1/8" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/5/1/7" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/5/1/6" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/5/1/5" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/5/1/4" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/5/1/3" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/5/1/2" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/5/1/1" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/4/4/29" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/4/4/28" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/4/4/27" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/4/4/26" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/4/4/25" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/4/4/24" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/4/4/23" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/4/4/22" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/4/4/21" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/4/4/20" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/4/3/19" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/4/3/18" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/4/3/17" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/4/3/16" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/4/3/15" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/4/3/14" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/4/3/13" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2673-8732/4/3/12" />
                    	</rdf:Seq>
		</items>
				<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/" />
	</channel>

        <item rdf:about="https://www.mdpi.com/2673-8732/6/2/28">

	<title>Network, Vol. 6, Pages 28: Fiber-Optic Gyroscopes in Modern Navigation Systems: A Comprehensive Review</title>
	<link>https://www.mdpi.com/2673-8732/6/2/28</link>
	<description>This paper provides a comprehensive overview of the progress in fiber-optic gyroscope technology, covering 260 key studies of the last ten years. A critical comparative analysis of fiber-optic gyroscope with alternative inertial sensors (Micro-Electro-Mechanical Systems, Hemispherical Resonator Gyroscope, Ring Laser Gyroscope) has been carried out, confirming the unique advantages of fiber-optic gyroscope for autonomous navigation. Fundamental limitations of accuracy are considered in detail: temperature drifts, polarization noise, and Rayleigh backscattering. Modern hardware methods for suppressing these errors, including the use of photonic crystal and hollow fibers (Air-Core/Hollow-Core), are also considered in this work. The central place in the review is occupied by the analysis of the technological paradigm shift from bulky discrete circuits to hybrid integrated photonics (Indium Phosphide, Silicon Nitride, Lithium Niobate) and hybrid architectures to reduce weight and size characteristics. The role of artificial intelligence (Deep Learning, Long Short-Term Memory) methods in nonlinear drift compensation and calibration is discussed. The usage of the Brillouin effect and optomechanics promising areas are outlined, necessary to create a new generation of navigation systems operating in the absence of Global Navigation Satellite Systems signals.</description>
	<pubDate>2026-04-29</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 6, Pages 28: Fiber-Optic Gyroscopes in Modern Navigation Systems: A Comprehensive Review</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/6/2/28">doi: 10.3390/network6020028</a></p>
	<p>Authors:
		Nurzhigit Smailov
		Yerlan Tashtay
		Pawel Komada
		Yerzhan Nussupov
		Kanat Zhunussov
		Askhat Batyrgaliyev
		Daulet Naubetov
		Aziskhan Amir
		Beibarys Sekenov
		Darkhan Yerezhep
		</p>
	<p>This paper provides a comprehensive overview of the progress in fiber-optic gyroscope technology, covering 260 key studies of the last ten years. A critical comparative analysis of fiber-optic gyroscope with alternative inertial sensors (Micro-Electro-Mechanical Systems, Hemispherical Resonator Gyroscope, Ring Laser Gyroscope) has been carried out, confirming the unique advantages of fiber-optic gyroscope for autonomous navigation. Fundamental limitations of accuracy are considered in detail: temperature drifts, polarization noise, and Rayleigh backscattering. Modern hardware methods for suppressing these errors, including the use of photonic crystal and hollow fibers (Air-Core/Hollow-Core), are also considered in this work. The central place in the review is occupied by the analysis of the technological paradigm shift from bulky discrete circuits to hybrid integrated photonics (Indium Phosphide, Silicon Nitride, Lithium Niobate) and hybrid architectures to reduce weight and size characteristics. The role of artificial intelligence (Deep Learning, Long Short-Term Memory) methods in nonlinear drift compensation and calibration is discussed. The usage of the Brillouin effect and optomechanics promising areas are outlined, necessary to create a new generation of navigation systems operating in the absence of Global Navigation Satellite Systems signals.</p>
	]]></content:encoded>

	<dc:title>Fiber-Optic Gyroscopes in Modern Navigation Systems: A Comprehensive Review</dc:title>
			<dc:creator>Nurzhigit Smailov</dc:creator>
			<dc:creator>Yerlan Tashtay</dc:creator>
			<dc:creator>Pawel Komada</dc:creator>
			<dc:creator>Yerzhan Nussupov</dc:creator>
			<dc:creator>Kanat Zhunussov</dc:creator>
			<dc:creator>Askhat Batyrgaliyev</dc:creator>
			<dc:creator>Daulet Naubetov</dc:creator>
			<dc:creator>Aziskhan Amir</dc:creator>
			<dc:creator>Beibarys Sekenov</dc:creator>
			<dc:creator>Darkhan Yerezhep</dc:creator>
		<dc:identifier>doi: 10.3390/network6020028</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2026-04-29</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2026-04-29</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>28</prism:startingPage>
		<prism:doi>10.3390/network6020028</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/6/2/28</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/6/2/27">

	<title>Network, Vol. 6, Pages 27: Performance Analysis of Discrete Hartley Transform-Based Orthogonal Frequency Division Multiplexing for Visible Light Communications</title>
	<link>https://www.mdpi.com/2673-8732/6/2/27</link>
	<description>A discrete Hartley transform (DHT)-based orthogonal frequency division multiplexing (OFDM) scheme is investigated for intensity modulation/direct detection (IM/DD) visible light communication (VLC) systems, where transmitted signals are required to be real-valued and non-negative. To address this constraint, a practical unipolar transmission framework with corresponding bipolar reconstruction is developed. By exploiting the real-valued and self-inverse properties of the DHT, the proposed scheme removes the need for Hermitian symmetry and enables full utilization of available subcarriers. Under equal-bandwidth conditions, this results in an approximately 50% reduction in computational complexity compared with conventional DCO-OFDM and ACO-OFDM schemes. Theoretical analysis and numerical results further show that the proposed approach achieves comparable bit error rate (BER) performance while exhibiting improved spectral confinement, as reflected by reduced out-of-band sidelobes under identical filtering conditions. In addition, it maintains spectral efficiency equivalent to DCO-OFDM under the same bandwidth constraint. These advantages are achieved at the cost of restricting subcarrier modulation to real-valued constellations, which may reduce flexibility in frequency-selective channels. Overall, these findings support DHT-OFDM as a low-complexity, spectrally confined multicarrier waveform for IM/DD VLC systems, particularly in scenarios where efficient spectrum utilization and reduced adjacent-channel interference are required.</description>
	<pubDate>2026-04-21</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 6, Pages 27: Performance Analysis of Discrete Hartley Transform-Based Orthogonal Frequency Division Multiplexing for Visible Light Communications</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/6/2/27">doi: 10.3390/network6020027</a></p>
	<p>Authors:
		Ming Che
		</p>
	<p>A discrete Hartley transform (DHT)-based orthogonal frequency division multiplexing (OFDM) scheme is investigated for intensity modulation/direct detection (IM/DD) visible light communication (VLC) systems, where transmitted signals are required to be real-valued and non-negative. To address this constraint, a practical unipolar transmission framework with corresponding bipolar reconstruction is developed. By exploiting the real-valued and self-inverse properties of the DHT, the proposed scheme removes the need for Hermitian symmetry and enables full utilization of available subcarriers. Under equal-bandwidth conditions, this results in an approximately 50% reduction in computational complexity compared with conventional DCO-OFDM and ACO-OFDM schemes. Theoretical analysis and numerical results further show that the proposed approach achieves comparable bit error rate (BER) performance while exhibiting improved spectral confinement, as reflected by reduced out-of-band sidelobes under identical filtering conditions. In addition, it maintains spectral efficiency equivalent to DCO-OFDM under the same bandwidth constraint. These advantages are achieved at the cost of restricting subcarrier modulation to real-valued constellations, which may reduce flexibility in frequency-selective channels. Overall, these findings support DHT-OFDM as a low-complexity, spectrally confined multicarrier waveform for IM/DD VLC systems, particularly in scenarios where efficient spectrum utilization and reduced adjacent-channel interference are required.</p>
	]]></content:encoded>

	<dc:title>Performance Analysis of Discrete Hartley Transform-Based Orthogonal Frequency Division Multiplexing for Visible Light Communications</dc:title>
			<dc:creator>Ming Che</dc:creator>
		<dc:identifier>doi: 10.3390/network6020027</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2026-04-21</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2026-04-21</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>27</prism:startingPage>
		<prism:doi>10.3390/network6020027</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/6/2/27</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/6/2/26">

	<title>Network, Vol. 6, Pages 26: Ray Tracing Simulators for 5G New Radio Systems: Comparative Analysis Through Urban Measurements at 27 GHz</title>
	<link>https://www.mdpi.com/2673-8732/6/2/26</link>
	<description>The use of millimeter-wave spectrum in fifth-generation (5G) systems is increasing the need for accurate prediction of received power and coverage in real deployment scenarios. In this context, ray tracing (RT) is a promising approach for site-specific analysis, although its reliability depends on how accurately different tools reproduce measurements in complex urban environments. This work presents a comparative assessment at 27 GHz of three RT tools: in-house Exact tool based on Vertical Plane Launching (VPL), Matlab 5G and open-source Sionna RT based on Shooting and Bouncing Rays (SBR). The comparison relies on a large outdoor walk-test campaign, including about 14,725 measurement points collected in a real urban area around a 27 GHz mMIMO base station, using real operator-provided antenna radiation patterns. Measured and simulated power levels are compared using statistical metrics, including Mean Absolute Error (MAE), Root Mean Square Error (RMSE), and a planning-oriented coverage-rate metric. The results show a reasonable agreement between simulations and measurements, with RMSE and MAE values around 10–12 dB, highlighting tool-specific behaviors related to boundary effects, interaction modeling, and high-power overestimation. This work confirms that RT is a flexible support for 5G preliminary network design, reducing the need for extensive drive tests.</description>
	<pubDate>2026-04-19</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 6, Pages 26: Ray Tracing Simulators for 5G New Radio Systems: Comparative Analysis Through Urban Measurements at 27 GHz</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/6/2/26">doi: 10.3390/network6020026</a></p>
	<p>Authors:
		Francesca Lodato
		Pierpaolo Salvo
		Marcello Folli
		Simona Valbonesi
		Andrea Garzia
		Giuseppe Ruello
		Riccardo Suman
		Massimo Perobelli
		Rita Massa
		Antonio Iodice
		</p>
	<p>The use of millimeter-wave spectrum in fifth-generation (5G) systems is increasing the need for accurate prediction of received power and coverage in real deployment scenarios. In this context, ray tracing (RT) is a promising approach for site-specific analysis, although its reliability depends on how accurately different tools reproduce measurements in complex urban environments. This work presents a comparative assessment at 27 GHz of three RT tools: in-house Exact tool based on Vertical Plane Launching (VPL), Matlab 5G and open-source Sionna RT based on Shooting and Bouncing Rays (SBR). The comparison relies on a large outdoor walk-test campaign, including about 14,725 measurement points collected in a real urban area around a 27 GHz mMIMO base station, using real operator-provided antenna radiation patterns. Measured and simulated power levels are compared using statistical metrics, including Mean Absolute Error (MAE), Root Mean Square Error (RMSE), and a planning-oriented coverage-rate metric. The results show a reasonable agreement between simulations and measurements, with RMSE and MAE values around 10&ndash;12 dB, highlighting tool-specific behaviors related to boundary effects, interaction modeling, and high-power overestimation. This work confirms that RT is a flexible support for 5G preliminary network design, reducing the need for extensive drive tests.</p>
	]]></content:encoded>

	<dc:title>Ray Tracing Simulators for 5G New Radio Systems: Comparative Analysis Through Urban Measurements at 27 GHz</dc:title>
			<dc:creator>Francesca Lodato</dc:creator>
			<dc:creator>Pierpaolo Salvo</dc:creator>
			<dc:creator>Marcello Folli</dc:creator>
			<dc:creator>Simona Valbonesi</dc:creator>
			<dc:creator>Andrea Garzia</dc:creator>
			<dc:creator>Giuseppe Ruello</dc:creator>
			<dc:creator>Riccardo Suman</dc:creator>
			<dc:creator>Massimo Perobelli</dc:creator>
			<dc:creator>Rita Massa</dc:creator>
			<dc:creator>Antonio Iodice</dc:creator>
		<dc:identifier>doi: 10.3390/network6020026</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2026-04-19</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2026-04-19</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>26</prism:startingPage>
		<prism:doi>10.3390/network6020026</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/6/2/26</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/6/2/25">

	<title>Network, Vol. 6, Pages 25: Enhancing Smart Grid Cyber Resilience Against FDI Attacks Using Multi-Agent Recurrent DDPG</title>
	<link>https://www.mdpi.com/2673-8732/6/2/25</link>
	<description>Digital substations (DSs) play a critical role in modern Energy and Power Electrical Systems (EPESs), enabling intelligent control, monitoring, and automation. With increased reliance on communication and sensing technologies, DSs are vulnerable to cyberattacks such as False Data Injection (FDI). An adversary may falsify transformer temperature readings, misleading protection mechanisms and resulting in incorrect disconnection actions. These false disconnections may disrupt power delivery, cause economic losses, and reduce equipment lifespan. To address these challenges, we propose a reinforcement learning-based approach for cyber protection of smart grids against false temperature data injection attacks. Specifically, this work designs a Long Short-Term Memory Deep Deterministic Policy Gradient (LSTM-DDPG) deep reinforcement learning algorithm that learns to detect normal patterns and responds to suspicious thermal patterns by dynamically adjusting disconnection decisions. The agents process sequential state features to differentiate between legitimate overload conditions and sudden anomalies caused by FDI attacks. We implement the proposed approach on the IEEE 30-bus distribution network using the Pandapower simulator. The experimental results indicate that the LSTM-DDPG controller outperforms conventional DDPG and DQN baselines, achieving a recall of 0.897, F1 of 0.945, precision of 1.00 and accuracy of 0.981 with a confidence interval of 95%. In addition, grid stability reaches up to 0.9815, 1.0, 1.0, 0.9926 with respect to the voltage stability score, transformer stability value, disconnection stability, and stability index, respectively. The proposed method led to fewer false disconnections, providing improved robustness against sensor manipulations.</description>
	<pubDate>2026-04-17</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 6, Pages 25: Enhancing Smart Grid Cyber Resilience Against FDI Attacks Using Multi-Agent Recurrent DDPG</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/6/2/25">doi: 10.3390/network6020025</a></p>
	<p>Authors:
		Tahira Mahboob
		Mingwei Li
		Awais Aziz Shah
		Dimitrios Pezaros
		</p>
	<p>Digital substations (DSs) play a critical role in modern Energy and Power Electrical Systems (EPESs), enabling intelligent control, monitoring, and automation. With increased reliance on communication and sensing technologies, DSs are vulnerable to cyberattacks such as False Data Injection (FDI). An adversary may falsify transformer temperature readings, misleading protection mechanisms and resulting in incorrect disconnection actions. These false disconnections may disrupt power delivery, cause economic losses, and reduce equipment lifespan. To address these challenges, we propose a reinforcement learning-based approach for cyber protection of smart grids against false temperature data injection attacks. Specifically, this work designs a Long Short-Term Memory Deep Deterministic Policy Gradient (LSTM-DDPG) deep reinforcement learning algorithm that learns to detect normal patterns and responds to suspicious thermal patterns by dynamically adjusting disconnection decisions. The agents process sequential state features to differentiate between legitimate overload conditions and sudden anomalies caused by FDI attacks. We implement the proposed approach on the IEEE 30-bus distribution network using the Pandapower simulator. The experimental results indicate that the LSTM-DDPG controller outperforms conventional DDPG and DQN baselines, achieving a recall of 0.897, F1 of 0.945, precision of 1.00 and accuracy of 0.981 with a confidence interval of 95%. In addition, grid stability reaches up to 0.9815, 1.0, 1.0, 0.9926 with respect to the voltage stability score, transformer stability value, disconnection stability, and stability index, respectively. The proposed method led to fewer false disconnections, providing improved robustness against sensor manipulations.</p>
	]]></content:encoded>

	<dc:title>Enhancing Smart Grid Cyber Resilience Against FDI Attacks Using Multi-Agent Recurrent DDPG</dc:title>
			<dc:creator>Tahira Mahboob</dc:creator>
			<dc:creator>Mingwei Li</dc:creator>
			<dc:creator>Awais Aziz Shah</dc:creator>
			<dc:creator>Dimitrios Pezaros</dc:creator>
		<dc:identifier>doi: 10.3390/network6020025</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2026-04-17</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2026-04-17</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>25</prism:startingPage>
		<prism:doi>10.3390/network6020025</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/6/2/25</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/6/2/24">

	<title>Network, Vol. 6, Pages 24: Evaluation of Attack and Recovery in USFC: A Dependability View</title>
	<link>https://www.mdpi.com/2673-8732/6/2/24</link>
	<description>The integration of service function chains (SFCs) and unmanned aerial vehicles (UAVs) lays a crucial technological foundation for achieving efficient, reliable, and adaptive future airborne service networks. Service functions (SFs) in the SFC will be deployed on UAVs; this type of SFC is called unmanned aerial vehicle-based service function chains (USFCs). However, due to the combined effects of open hardware and software architectures, exposed communication links, and complex mission environments, UAVs have become ideal targets for attackers. Once a vulnerability is successfully injected into a UAV, data from the SFs running on it will be stolen, seriously threatening the dependability of the USFC. Therefore, it is necessary to conduct a quantitative evaluation of the USFC dependability to provide insights for further improving its dependability. This paper develops a USFC dependability evaluation model based on a semi-Markov process (SMP) to capture the dynamic interaction between attacker behavior and USFC system recovery behavior. The dependability of the USFC is comprehensively evaluated from two perspectives: availability and security. Extensive numerical analysis experiments are conducted, and the results not only demonstrate the changing trends of various dependability metrics under different parameters but also show parameter combinations for synergistic optimization among metrics.</description>
	<pubDate>2026-04-14</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 6, Pages 24: Evaluation of Attack and Recovery in USFC: A Dependability View</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/6/2/24">doi: 10.3390/network6020024</a></p>
	<p>Authors:
		Jing Bai
		Xiaohan Ge
		Liangbin Yang
		Chunding Wang
		Ziyue Yin
		</p>
	<p>The integration of service function chains (SFCs) and unmanned aerial vehicles (UAVs) lays a crucial technological foundation for achieving efficient, reliable, and adaptive future airborne service networks. Service functions (SFs) in the SFC will be deployed on UAVs; this type of SFC is called unmanned aerial vehicle-based service function chains (USFCs). However, due to the combined effects of open hardware and software architectures, exposed communication links, and complex mission environments, UAVs have become ideal targets for attackers. Once a vulnerability is successfully injected into a UAV, data from the SFs running on it will be stolen, seriously threatening the dependability of the USFC. Therefore, it is necessary to conduct a quantitative evaluation of the USFC dependability to provide insights for further improving its dependability. This paper develops a USFC dependability evaluation model based on a semi-Markov process (SMP) to capture the dynamic interaction between attacker behavior and USFC system recovery behavior. The dependability of the USFC is comprehensively evaluated from two perspectives: availability and security. Extensive numerical analysis experiments are conducted, and the results not only demonstrate the changing trends of various dependability metrics under different parameters but also show parameter combinations for synergistic optimization among metrics.</p>
	]]></content:encoded>

	<dc:title>Evaluation of Attack and Recovery in USFC: A Dependability View</dc:title>
			<dc:creator>Jing Bai</dc:creator>
			<dc:creator>Xiaohan Ge</dc:creator>
			<dc:creator>Liangbin Yang</dc:creator>
			<dc:creator>Chunding Wang</dc:creator>
			<dc:creator>Ziyue Yin</dc:creator>
		<dc:identifier>doi: 10.3390/network6020024</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2026-04-14</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2026-04-14</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>24</prism:startingPage>
		<prism:doi>10.3390/network6020024</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/6/2/24</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/6/2/23">

	<title>Network, Vol. 6, Pages 23: Adaptive Decision-Level Intrusion Detection for Known and Zero-Day Attacks</title>
	<link>https://www.mdpi.com/2673-8732/6/2/23</link>
	<description>Network Intrusion Detection Systems (NIDS) face increasing challenges from sophisticated cyber threats, particularly zero-day attacks that evade signature-based methods. While supervised learning is effective for known attack classification, it struggles with novel threats, whereas anomaly-based approaches suffer from high false positive rates and unstable thresholds. To address these limitations, this paper proposes a decision-level adaptive intrusion-detection framework combining hierarchical CNN-based closed-set classification with autoencoder-based zero-day detection in a cascade architecture. The framework enables deployment-time adaptation by dynamically adjusting class-specific confidence thresholds and fusion parameters without model retraining. Experiments on the CSE-CIC-IDS2018 dataset demonstrate strong closed-set performance, achieving 98.98% accuracy and a macro-F1-score of 0.9342, with improved recall for minority attack classes under adaptive thresholding. Under a zero-day evaluation protocol in which Web_Attacks and Infiltration are excluded from training and validation, the proposed approach achieves an F1-score of 0.9319 while maintaining a low false positive rate of 0.0019. The framework is further evaluated on the Simulated University Network Environment (SUNE) dataset representing campus network traffic, achieving 96.18% closed-set accuracy and 97.54% accuracy in the integrated cascade setting. These results demonstrate that the proposed framework effectively balances minority attack detection, zero-day identification, and false-alarm control in dynamic and resource-constrained network environments.</description>
	<pubDate>2026-04-09</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 6, Pages 23: Adaptive Decision-Level Intrusion Detection for Known and Zero-Day Attacks</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/6/2/23">doi: 10.3390/network6020023</a></p>
	<p>Authors:
		Joseph P. Mchina
		Neema Mduma
		Ramadhani S. Sinde
		</p>
	<p>Network Intrusion Detection Systems (NIDS) face increasing challenges from sophisticated cyber threats, particularly zero-day attacks that evade signature-based methods. While supervised learning is effective for known attack classification, it struggles with novel threats, whereas anomaly-based approaches suffer from high false positive rates and unstable thresholds. To address these limitations, this paper proposes a decision-level adaptive intrusion-detection framework combining hierarchical CNN-based closed-set classification with autoencoder-based zero-day detection in a cascade architecture. The framework enables deployment-time adaptation by dynamically adjusting class-specific confidence thresholds and fusion parameters without model retraining. Experiments on the CSE-CIC-IDS2018 dataset demonstrate strong closed-set performance, achieving 98.98% accuracy and a macro-F1-score of 0.9342, with improved recall for minority attack classes under adaptive thresholding. Under a zero-day evaluation protocol in which Web_Attacks and Infiltration are excluded from training and validation, the proposed approach achieves an F1-score of 0.9319 while maintaining a low false positive rate of 0.0019. The framework is further evaluated on the Simulated University Network Environment (SUNE) dataset representing campus network traffic, achieving 96.18% closed-set accuracy and 97.54% accuracy in the integrated cascade setting. These results demonstrate that the proposed framework effectively balances minority attack detection, zero-day identification, and false-alarm control in dynamic and resource-constrained network environments.</p>
	]]></content:encoded>

	<dc:title>Adaptive Decision-Level Intrusion Detection for Known and Zero-Day Attacks</dc:title>
			<dc:creator>Joseph P. Mchina</dc:creator>
			<dc:creator>Neema Mduma</dc:creator>
			<dc:creator>Ramadhani S. Sinde</dc:creator>
		<dc:identifier>doi: 10.3390/network6020023</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2026-04-09</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2026-04-09</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>23</prism:startingPage>
		<prism:doi>10.3390/network6020023</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/6/2/23</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/6/2/22">

	<title>Network, Vol. 6, Pages 22: Mitigating Metamorphic Malware Through Adversarial Learning Techniques</title>
	<link>https://www.mdpi.com/2673-8732/6/2/22</link>
	<description>Antivirus (AV) solutions remain a core defence mechanism against malicious software. However, many of these engines struggle to detect metamorphic malware, which continually alters its internal form in unpredictable ways. To address this limitation, we present an adversarially oriented approach that automatically generates novel malicious variants of existing malware that evade detection by a substantial proportion of AV systems, thereby providing material for strengthening defensive techniques. In this work, an Evolutionary Algorithm (EA) is used to evolve undetectable variants, guided by three fitness criteria: the evasiveness of the produced samples, and their behavioural and structural similarity to the original malware. The proposed method is assessed across three malware families to evaluate the effectiveness of the EA-generated variants. Results indicate that the EA produces diverse mutant variants capable of evading up to 94% of AV detectors for a given malware family, significantly surpassing the evasion rate of the original malware. Furthermore, we evaluated whether the mutants produced by the EA could enhance the training of machine learning models. In this context, a pretrained Natural Language Processing (NLP) transformer was employed within a transfer learning framework to improve the classification of metamorphic malware. When the evolved variants were incorporated into the training data, the approach achieved classification accuracies of up to 93%. These results highlight the value of using diverse EA-generated samples to strengthen malware classifiers, thereby improving the robustness of security systems against evolving threats.</description>
	<pubDate>2026-04-08</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 6, Pages 22: Mitigating Metamorphic Malware Through Adversarial Learning Techniques</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/6/2/22">doi: 10.3390/network6020022</a></p>
	<p>Authors:
		Kehinde O. Babaagba
		Zhiyuan Tan
		</p>
	<p>Antivirus (AV) solutions remain a core defence mechanism against malicious software. However, many of these engines struggle to detect metamorphic malware, which continually alters its internal form in unpredictable ways. To address this limitation, we present an adversarially oriented approach that automatically generates novel malicious variants of existing malware that evade detection by a substantial proportion of AV systems, thereby providing material for strengthening defensive techniques. In this work, an Evolutionary Algorithm (EA) is used to evolve undetectable variants, guided by three fitness criteria: the evasiveness of the produced samples, and their behavioural and structural similarity to the original malware. The proposed method is assessed across three malware families to evaluate the effectiveness of the EA-generated variants. Results indicate that the EA produces diverse mutant variants capable of evading up to 94% of AV detectors for a given malware family, significantly surpassing the evasion rate of the original malware. Furthermore, we evaluated whether the mutants produced by the EA could enhance the training of machine learning models. In this context, a pretrained Natural Language Processing (NLP) transformer was employed within a transfer learning framework to improve the classification of metamorphic malware. When the evolved variants were incorporated into the training data, the approach achieved classification accuracies of up to 93%. These results highlight the value of using diverse EA-generated samples to strengthen malware classifiers, thereby improving the robustness of security systems against evolving threats.</p>
	]]></content:encoded>

	<dc:title>Mitigating Metamorphic Malware Through Adversarial Learning Techniques</dc:title>
			<dc:creator>Kehinde O. Babaagba</dc:creator>
			<dc:creator>Zhiyuan Tan</dc:creator>
		<dc:identifier>doi: 10.3390/network6020022</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2026-04-08</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2026-04-08</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>22</prism:startingPage>
		<prism:doi>10.3390/network6020022</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/6/2/22</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/6/2/21">

	<title>Network, Vol. 6, Pages 21: Efficient Serial Systolic Polynomial Multiplier for Lattice-Based Post-Quantum Cryptographic Schemes in IoT Edge Node</title>
	<link>https://www.mdpi.com/2673-8732/6/2/21</link>
	<description>The rapid development of the Internet of Things (IoT) is transforming various economic and industrial sectors by embedding interconnected devices within their operational processes. However, security and privacy risks associated with these interconnected devices pose significant barriers to widespread adoption, particularly in light of potential quantum threats. To mitigate these challenges, it is imperative to employ post-quantum cryptographic schemes. However, essential constraints on IoT edge nodes complicate the effective implementation of such schemes. Among the most promising approaches in post-quantum cryptography are lattice-based schemes, which rely heavily on polynomial multiplication operations at their core. Improving the implementation of polynomial multiplication will significantly enhance the performance of these schemes. Therefore, this paper proposes an efficient low-complexity serial systolic array optimized for polynomial multiplication, particularly tailored for the Binary Ring Learning With Errors (BRLWE) scheme. Designed for cryptographic processors targeting capable IoT edge nodes, the proposed architecture demonstrates remarkable performance improvements, achieving a maximum operating frequency of 280 MHz for a field size of 256, while requiring only 8232 lookup tables (LUTs) and 2616 flip-flops (FFs). These results reflect a 16.8% reduction in LUT usage and a 19% reduction in FFs compared to the nearest competing designs, all while maintaining high throughput and low area utilization. This work significantly advances the establishment of secure and efficient infrastructure for IoT systems, bolstering their resilience against post-quantum attacks and supporting the growth of a robust digital economy. 
Furthermore, it aligns with sustainable development goals 8 and 9 by fostering trust and facilitating the adoption of cutting-edge IoT technologies, ultimately promoting more resilient and innovative economic activities.</description>
	<pubDate>2026-04-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 6, Pages 21: Efficient Serial Systolic Polynomial Multiplier for Lattice-Based Post-Quantum Cryptographic Schemes in IoT Edge Node</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/6/2/21">doi: 10.3390/network6020021</a></p>
	<p>Authors:
		Atef Ibrahim
		Fayez Gebali
		</p>
	<p>The rapid development of the Internet of Things (IoT) is transforming various economic and industrial sectors by embedding interconnected devices within their operational processes. However, security and privacy risks associated with these interconnected devices pose significant barriers to widespread adoption, particularly in light of potential quantum threats. To mitigate these challenges, it is imperative to employ post-quantum cryptographic schemes. However, essential constraints on IoT edge nodes complicate the effective implementation of such schemes. Among the most promising approaches in post-quantum cryptography are lattice-based schemes, which rely heavily on polynomial multiplication operations at their core. Improving the implementation of polynomial multiplication will significantly enhance the performance of these schemes. Therefore, this paper proposes an efficient low-complexity serial systolic array optimized for polynomial multiplication, particularly tailored for the Binary Ring Learning With Errors (BRLWE) scheme. Designed for cryptographic processors targeting capable IoT edge nodes, the proposed architecture demonstrates remarkable performance improvements, achieving a maximum operating frequency of 280 MHz for a field size of 256, while requiring only 8232 lookup tables (LUTs) and 2616 flip-flops (FFs). These results reflect a 16.8% reduction in LUT usage and a 19% reduction in FFs compared to the nearest competing designs, all while maintaining high throughput and low area utilization. This work significantly advances the establishment of secure and efficient infrastructure for IoT systems, bolstering their resilience against post-quantum attacks and supporting the growth of a robust digital economy. Furthermore, it aligns with sustainable development goals 8 and 9 by fostering trust and facilitating the adoption of cutting-edge IoT technologies, ultimately promoting more resilient and innovative economic activities.</p>
	]]></content:encoded>

	<dc:title>Efficient Serial Systolic Polynomial Multiplier for Lattice-Based Post-Quantum Cryptographic Schemes in IoT Edge Node</dc:title>
			<dc:creator>Atef Ibrahim</dc:creator>
			<dc:creator>Fayez Gebali</dc:creator>
		<dc:identifier>doi: 10.3390/network6020021</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2026-04-01</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2026-04-01</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>21</prism:startingPage>
		<prism:doi>10.3390/network6020021</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/6/2/21</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/6/2/20">

	<title>Network, Vol. 6, Pages 20: Techno-Economic and SLA-Aware Control of 5G Cloud-RAN via Multi-Objective and Penalty-Constrained Reinforcement Learning</title>
	<link>https://www.mdpi.com/2673-8732/6/2/20</link>
	<description>Fifth-generation (5G) mobile networks must simultaneously satisfy stringent latency targets, high user density, and energy-aware operation across heterogeneous services. Cloud Radio Access Networks (C-RAN) provide architectural flexibility through centralized baseband processing, but they also introduce new control challenges related to fronthaul constraints, dynamic traffic variations, and joint radio–compute coordination with Mobile Edge Computing (MEC). This paper proposes a unified AI-driven optimization framework for adaptive 5G C-RAN management, where the controller dynamically tunes key system decisions—including functional split selection, TDD downlink ratio, user–RU association, fronthaul load management, and MEC offloading proportion. To enable fair benchmarking under identical simulation settings, a static baseline policy is compared against five adaptive control strategies: Deep Q-Network (DQN), Proximal Policy Optimization (PPO), Deep Deterministic Policy Gradient (DDPG), Multi-Objective Reinforcement Learning (MORL), and a Deterministic Service-Level Agreement (SLA)-aware controller Penalty-Constrained Hierarchical Action Controller (PCHAC). Performance evaluation across techno-economic and service KPIs shows that intelligent control significantly improves operational profit, tail-latency behavior, and energy efficiency while enhancing SLA compliance compared with non-adaptive operation. The results highlight the practicality of multi-objective and constraint-aware learning for next-generation C-RAN orchestration under scaling traffic demand.</description>
	<pubDate>2026-03-31</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 6, Pages 20: Techno-Economic and SLA-Aware Control of 5G Cloud-RAN via Multi-Objective and Penalty-Constrained Reinforcement Learning</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/6/2/20">doi: 10.3390/network6020020</a></p>
	<p>Authors:
		Sherif M. Aboul
		Hala M. Abd El Kader
		Esraa M. Eid
		Shimaa S. Ali
		</p>
	<p>Fifth-generation (5G) mobile networks must simultaneously satisfy stringent latency targets, high user density, and energy-aware operation across heterogeneous services. Cloud Radio Access Networks (C-RAN) provide architectural flexibility through centralized baseband processing, but they also introduce new control challenges related to fronthaul constraints, dynamic traffic variations, and joint radio–compute coordination with Mobile Edge Computing (MEC). This paper proposes a unified AI-driven optimization framework for adaptive 5G C-RAN management, where the controller dynamically tunes key system decisions—including functional split selection, TDD downlink ratio, user–RU association, fronthaul load management, and MEC offloading proportion. To enable fair benchmarking under identical simulation settings, a static baseline policy is compared against five adaptive control strategies: Deep Q-Network (DQN), Proximal Policy Optimization (PPO), Deep Deterministic Policy Gradient (DDPG), Multi-Objective Reinforcement Learning (MORL), and a Deterministic Service-Level Agreement (SLA)-aware controller Penalty-Constrained Hierarchical Action Controller (PCHAC). Performance evaluation across techno-economic and service KPIs shows that intelligent control significantly improves operational profit, tail-latency behavior, and energy efficiency while enhancing SLA compliance compared with non-adaptive operation. The results highlight the practicality of multi-objective and constraint-aware learning for next-generation C-RAN orchestration under scaling traffic demand.</p>
	]]></content:encoded>

	<dc:title>Techno-Economic and SLA-Aware Control of 5G Cloud-RAN via Multi-Objective and Penalty-Constrained Reinforcement Learning</dc:title>
			<dc:creator>Sherif M. Aboul</dc:creator>
			<dc:creator>Hala M. Abd El Kader</dc:creator>
			<dc:creator>Esraa M. Eid</dc:creator>
			<dc:creator>Shimaa S. Ali</dc:creator>
		<dc:identifier>doi: 10.3390/network6020020</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2026-03-31</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2026-03-31</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>20</prism:startingPage>
		<prism:doi>10.3390/network6020020</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/6/2/20</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/6/2/19">

	<title>Network, Vol. 6, Pages 19: An Intelligent Framework for Crowdsource-Based Spectrum Misuse Detection in Shared-Spectrum Networks</title>
	<link>https://www.mdpi.com/2673-8732/6/2/19</link>
	<description>Dynamic Spectrum Access (DSA) has emerged as a viable solution to address spectrum scarcity in shared-spectrum networks. In response, the FCC established the Citizens Broadband Radio Service (CBRS) to manage and facilitate shared use of the federal and non-federal spectrum in a three-tiered access and authorization framework. However, due to the open nature of spectrum access and the usually limited coverage of the monitoring infrastructure, enforcing access rights in a shared-spectrum network becomes a daunting challenge. In this paper, we stipulate the use of crowdsourcing as a viable approach to engaging volunteers in spectrum monitoring in order to enforce spectrum access rights robustly and reliably. The success of this approach, however, hinges strongly on ensuring that spectrum access enforcement is carried out by reliable and trustworthy volunteers within the monitored area. To this end, a hybrid spectrum monitoring framework is proposed, which relies on opportunistically recruiting volunteers to augment the otherwise limited infrastructure of trusted devices. Although a volunteer’s participation has the potential to enhance monitoring significantly, their mobility may become problematic in ensuring reliable coverage of the monitored spectrum area. To ensure continued monitoring, in spite of volunteer mobility, deep learning-based models are used to predict the likelihood that a volunteer will be available within the monitoring area. Three models, namely LSTM, GRU, and Transformer, are explored to assess their feasibility and viability to predict a volunteer’s availability likelihood over an extended time interval, in a given spectrum monitoring area. Recurrent Neural Networks (RNNs) such as GRU and LSTM are effective for tasks involving sequential data, where both spatial and temporal patterns matter, which is the focus of volunteer availability prediction in spectrum monitoring. 
Transformers, on the other hand, excel at handling long-range dependencies and contextual understanding. Furthermore, their parallel processing capabilities allow faster training and inference compared to RNN-based models like GRU and LSTM. A simulation-based study is developed to assess the performance of these models, and carry out a comparative analysis of their ability to predict volunteers’ availability to monitor the spectrum reliably. To this end, a real-world trace dataset of volunteers’ location, collected over five years, is used. The simulation results show that the three models achieve high prediction accuracy of volunteers’ availability, ranging from 0.82 to 0.92. The results also show that a GRU-based model outperforms LSTM and Transformer-based models, in terms of accuracy, Root Mean Square Error (RMSE), geodesic distance, and execution time.</description>
	<pubDate>2026-03-26</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 6, Pages 19: An Intelligent Framework for Crowdsource-Based Spectrum Misuse Detection in Shared-Spectrum Networks</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/6/2/19">doi: 10.3390/network6020019</a></p>
	<p>Authors:
		Debarun Das
		Taieb Znati
		</p>
	<p>Dynamic Spectrum Access (DSA) has emerged as a viable solution to address spectrum scarcity in shared-spectrum networks. In response, the FCC established the Citizens Broadband Radio Service (CBRS) to manage and facilitate shared use of the federal and non-federal spectrum in a three-tiered access and authorization framework. However, due to the open nature of spectrum access and the usually limited coverage of the monitoring infrastructure, enforcing access rights in a shared-spectrum network becomes a daunting challenge. In this paper, we stipulate the use of crowdsourcing as a viable approach to engaging volunteers in spectrum monitoring in order to enforce spectrum access rights robustly and reliably. The success of this approach, however, hinges strongly on ensuring that spectrum access enforcement is carried out by reliable and trustworthy volunteers within the monitored area. To this end, a hybrid spectrum monitoring framework is proposed, which relies on opportunistically recruiting volunteers to augment the otherwise limited infrastructure of trusted devices. Although a volunteer’s participation has the potential to enhance monitoring significantly, their mobility may become problematic in ensuring reliable coverage of the monitored spectrum area. To ensure continued monitoring, in spite of volunteer mobility, deep learning-based models are used to predict the likelihood that a volunteer will be available within the monitoring area. Three models, namely LSTM, GRU, and Transformer, are explored to assess their feasibility and viability to predict a volunteer’s availability likelihood over an extended time interval, in a given spectrum monitoring area. Recurrent Neural Networks (RNNs) such as GRU and LSTM are effective for tasks involving sequential data, where both spatial and temporal patterns matter, which is the focus of volunteer availability prediction in spectrum monitoring. 
Transformers, on the other hand, excel at handling long-range dependencies and contextual understanding. Furthermore, their parallel processing capabilities allow faster training and inference compared to RNN-based models like GRU and LSTM. A simulation-based study is developed to assess the performance of these models, and carry out a comparative analysis of their ability to predict volunteers’ availability to monitor the spectrum reliably. To this end, a real-world trace dataset of volunteers’ location, collected over five years, is used. The simulation results show that the three models achieve high prediction accuracy of volunteers’ availability, ranging from 0.82 to 0.92. The results also show that a GRU-based model outperforms LSTM and Transformer-based models, in terms of accuracy, Root Mean Square Error (RMSE), geodesic distance, and execution time.</p>
	]]></content:encoded>

	<dc:title>An Intelligent Framework for Crowdsource-Based Spectrum Misuse Detection in Shared-Spectrum Networks</dc:title>
			<dc:creator>Debarun Das</dc:creator>
			<dc:creator>Taieb Znati</dc:creator>
		<dc:identifier>doi: 10.3390/network6020019</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2026-03-26</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2026-03-26</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>19</prism:startingPage>
		<prism:doi>10.3390/network6020019</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/6/2/19</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/6/1/18">

	<title>Network, Vol. 6, Pages 18: TAFL-UWSN: A Trust-Aware Federated Learning Framework for Securing Underwater Sensor Networks</title>
	<link>https://www.mdpi.com/2673-8732/6/1/18</link>
	<description>Underwater Acoustic Sensor Networks (UASNs) are pivotal for environmental monitoring, surveillance, and marine data collection. However, their open and largely unattended operational settings, constrained communication capabilities, limited energy resources, and susceptibility to insider attacks make it difficult to achieve safe, secure, and efficient collaborative learning. Federated learning (FL) offers a privacy-preserving method for decentralized model training but is inherently vulnerable to Byzantine threats and malicious participants. This paper proposes trust-aware FL for underwater sensor networks (TAFL-UWSN), a trust-aware FL framework designed to improve security, reliability, and energy efficiency in UASNs by incorporating trust evaluation directly into the FL process. The goal is to mitigate the impact of adversarial nodes while maintaining model performance in low-resource underwater environments. TAFL-UWSN integrates continuous trust scoring based on packet forwarding reliability, sensing consistency, and model deviation. Trust scores are used to weight or filter model updates both at the node level and the edge layer, where Autonomous Underwater Vehicles (AUVs) act as mobile aggregators. A trust-aware federated averaging algorithm is implemented, and extensive simulations are conducted in a custom Python-based environment, comparing TAFL-UWSN to standard FedAvg and Byzantine-resilient FL approaches under various attack conditions. TAFL-UWSN achieved a model accuracy exceeding 92% with up to 30% malicious nodes while maintaining a false positive rate below 5.5%. Communication overhead was reduced by 28%, and energy usage per node dropped by 33% compared to baseline methods. The TAFL-UWSN framework demonstrates that integrating trust into FL enables secure, efficient, and resilient underwater intelligence, validating its potential for broader application in distributed, resource-constrained environments.</description>
	<pubDate>2026-03-19</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 6, Pages 18: TAFL-UWSN: A Trust-Aware Federated Learning Framework for Securing Underwater Sensor Networks</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/6/1/18">doi: 10.3390/network6010018</a></p>
	<p>Authors:
		Raja Waseem Anwar
		Mohammad Abrar
		Abdu Salam
		Faizan Ullah
		</p>
	<p>Underwater Acoustic Sensor Networks (UASNs) are pivotal for environmental monitoring, surveillance, and marine data collection. However, their open and largely unattended operational settings, constrained communication capabilities, limited energy resources, and susceptibility to insider attacks make it difficult to achieve safe, secure, and efficient collaborative learning. Federated learning (FL) offers a privacy-preserving method for decentralized model training but is inherently vulnerable to Byzantine threats and malicious participants. This paper proposes trust-aware FL for underwater sensor networks (TAFL-UWSN), a trust-aware FL framework designed to improve security, reliability, and energy efficiency in UASNs by incorporating trust evaluation directly into the FL process. The goal is to mitigate the impact of adversarial nodes while maintaining model performance in low-resource underwater environments. TAFL-UWSN integrates continuous trust scoring based on packet forwarding reliability, sensing consistency, and model deviation. Trust scores are used to weight or filter model updates both at the node level and the edge layer, where Autonomous Underwater Vehicles (AUVs) act as mobile aggregators. A trust-aware federated averaging algorithm is implemented, and extensive simulations are conducted in a custom Python-based environment, comparing TAFL-UWSN to standard FedAvg and Byzantine-resilient FL approaches under various attack conditions. TAFL-UWSN achieved a model accuracy exceeding 92% with up to 30% malicious nodes while maintaining a false positive rate below 5.5%. Communication overhead was reduced by 28%, and energy usage per node dropped by 33% compared to baseline methods. The TAFL-UWSN framework demonstrates that integrating trust into FL enables secure, efficient, and resilient underwater intelligence, validating its potential for broader application in distributed, resource-constrained environments.</p>
	]]></content:encoded>

	<dc:title>TAFL-UWSN: A Trust-Aware Federated Learning Framework for Securing Underwater Sensor Networks</dc:title>
			<dc:creator>Raja Waseem Anwar</dc:creator>
			<dc:creator>Mohammad Abrar</dc:creator>
			<dc:creator>Abdu Salam</dc:creator>
			<dc:creator>Faizan Ullah</dc:creator>
		<dc:identifier>doi: 10.3390/network6010018</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2026-03-19</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2026-03-19</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>18</prism:startingPage>
		<prism:doi>10.3390/network6010018</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/6/1/18</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/6/1/17">

	<title>Network, Vol. 6, Pages 17: Green Scheduling and Task Offloading in Edge Computing: A Systematic Review</title>
	<link>https://www.mdpi.com/2673-8732/6/1/17</link>
	<description>This paper presents a Systematic Literature Review (SLR) on green scheduling and task offloading strategies for energy optimization in edge computing environments. The evolution of low-latency, high-performance applications has driven the widespread adoption of distributed computing paradigms such as Edge Computing, Fog-Cloud architectures, and the Internet of Things (IoT). In this context, Mobile Edge Computing (MEC) is often combined with Unmanned Aerial Vehicles (UAVs) to extend computational capabilities to areas with limited infrastructure, bringing processing closer to the data source to reduce latency and improve scalability. Nevertheless, these systems encounter substantial energy-related challenges, particularly in battery-powered or resource-constrained environments. To address these concerns, green computing strategies—especially energy-efficient scheduling and task offloading—have emerged as promising approaches to optimize energy usage in edge environments. Green scheduling optimizes task allocation to minimize energy consumption, whereas offloading redistributes workloads from resource-constrained devices to edge or cloud servers. Increasingly, these techniques are enhanced through artificial intelligence (AI) and machine learning (ML), enabling adaptive and context-aware decision-making in dynamic environments. This paper conducts a systematic literature review (SLR) to synthesize the most widely adopted strategies for energy-efficient scheduling and task offloading in edge computing, highlighting their impact on sustainability and performance. The analysis provides a comprehensive view of the state of the art, examines how architectural contexts influence energy-aware decisions, and highlights the role of AI/ML in enabling intelligent and sustainable edge systems. 
The findings reveal current research gaps and outline future directions to advance the development of robust, scalable, and environmentally responsible computing infrastructures.</description>
	<pubDate>2026-03-16</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 6, Pages 17: Green Scheduling and Task Offloading in Edge Computing: A Systematic Review</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/6/1/17">doi: 10.3390/network6010017</a></p>
	<p>Authors:
		Adriana Rangel Ribeiro
		Ana Clara Santos Andrade
		Gabriel Leal dos Santos
		Guilherme Dinarte Marcondes Lopes
		Edvard Martins de Oliveira
		Adler Diniz de Souza
		Jeremias Barbosa Machado
		</p>
	<p>This paper presents a Systematic Literature Review (SLR) on green scheduling and task offloading strategies for energy optimization in edge computing environments. The evolution of low-latency, high-performance applications has driven the widespread adoption of distributed computing paradigms such as Edge Computing, Fog-Cloud architectures, and the Internet of Things (IoT). In this context, Mobile Edge Computing (MEC) is often combined with Unmanned Aerial Vehicles (UAVs) to extend computational capabilities to areas with limited infrastructure, bringing processing closer to the data source to reduce latency and improve scalability. Nevertheless, these systems encounter substantial energy-related challenges, particularly in battery-powered or resource-constrained environments. To address these concerns, green computing strategies—especially energy-efficient scheduling and task offloading—have emerged as promising approaches to optimize energy usage in edge environments. Green scheduling optimizes task allocation to minimize energy consumption, whereas offloading redistributes workloads from resource-constrained devices to edge or cloud servers. Increasingly, these techniques are enhanced through artificial intelligence (AI) and machine learning (ML), enabling adaptive and context-aware decision-making in dynamic environments. This paper conducts a systematic literature review (SLR) to synthesize the most widely adopted strategies for energy-efficient scheduling and task offloading in edge computing, highlighting their impact on sustainability and performance. The analysis provides a comprehensive view of the state of the art, examines how architectural contexts influence energy-aware decisions, and highlights the role of AI/ML in enabling intelligent and sustainable edge systems. 
The findings reveal current research gaps and outline future directions to advance the development of robust, scalable, and environmentally responsible computing infrastructures.</p>
	]]></content:encoded>

	<dc:title>Green Scheduling and Task Offloading in Edge Computing: A Systematic Review</dc:title>
			<dc:creator>Adriana Rangel Ribeiro</dc:creator>
			<dc:creator>Ana Clara Santos Andrade</dc:creator>
			<dc:creator>Gabriel Leal dos Santos</dc:creator>
			<dc:creator>Guilherme Dinarte Marcondes Lopes</dc:creator>
			<dc:creator>Edvard Martins de Oliveira</dc:creator>
			<dc:creator>Adler Diniz de Souza</dc:creator>
			<dc:creator>Jeremias Barbosa Machado</dc:creator>
		<dc:identifier>doi: 10.3390/network6010017</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2026-03-16</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2026-03-16</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Systematic Review</prism:section>
	<prism:startingPage>17</prism:startingPage>
		<prism:doi>10.3390/network6010017</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/6/1/17</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/6/1/16">

	<title>Network, Vol. 6, Pages 16: Accuracy of Fiber Propagation Evaluation Using Phenomenological Attenuation and Raman Scattering Models in Multiband Optical Networks</title>
	<link>https://www.mdpi.com/2673-8732/6/1/16</link>
	<description>The constant growth of IP data traffic, driven by sustained annual increases surpassing 26%, is pushing current optical transport infrastructures towards their capacity limits. Since the deployment of new fiber cables is economically demanding, ultra-wideband transmission is emerging as a promising cost-effective solution, enabled by multi-band amplifiers and transceivers spanning the entire low-loss window of standard single-mode fibers. In this scenario, an accurate modeling of the frequency-dependent fiber parameters is essential to reliably model optical signal propagation. In particular, the combined impact of attenuation variations with frequency and inter-channel stimulated Raman scattering (SRS) fundamentally shapes the power evolution of wide wavelength division multiplexing (WDM) combs and directly affects nonlinear interference (NLI) generation, as well as the amount of ASE noise. In this work, we review a set of analytical approximations, based on phenomenological approaches, for frequency-dependent attenuation and Raman scattering gain, and analyze their impact on achieving an effective balance between computational efficiency and physical fidelity. Through extensive analyses performed with the open-source software GNPy (version 2.12, Telecom Infra Project) on an optical line system exploring multi-band scenarios spanning C+L+S, C+L+E, and U-to-E transmission, we demonstrate that the proposed approximations reproduce the reference SRS power evolution and NLI profiles with root mean square errors (RMSEs) consistently below 0.03 dB, and down to the 10−3–10−2 dB range for the most accurate configurations. 
Although the current implementation does not yet provide a direct reduction in computational time, the proposed framework lays the groundwork for future developments toward closed-form or semi-analytical solutions, enabling more efficient modeling and optimization of ultra-wideband optical transmission.</description>
	<pubDate>2026-03-12</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 6, Pages 16: Accuracy of Fiber Propagation Evaluation Using Phenomenological Attenuation and Raman Scattering Models in Multiband Optical Networks</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/6/1/16">doi: 10.3390/network6010016</a></p>
	<p>Authors:
		Giuseppina Maria Rizzi
		Vittorio Curri
		</p>
	<p>The constant growth of IP data traffic, driven by sustained annual increases surpassing 26%, is pushing current optical transport infrastructures towards their capacity limits. Since the deployment of new fiber cables is economically demanding, ultra-wideband transmission is emerging as a promising cost-effective solution, enabled by multi-band amplifiers and transceivers spanning the entire low-loss window of standard single-mode fibers. In this scenario, an accurate modeling of the frequency-dependent fiber parameters is essential to reliably model optical signal propagation. In particular, the combined impact of attenuation variations with frequency and inter-channel stimulated Raman scattering (SRS) fundamentally shapes the power evolution of wide wavelength division multiplexing (WDM) combs and directly affects nonlinear interference (NLI) generation, as well as the amount of ASE noise. In this work, we review a set of analytical approximations, based on phenomenological approaches, for frequency-dependent attenuation and Raman scattering gain, and analyze their impact on achieving an effective balance between computational efficiency and physical fidelity. Through extensive analyses performed with the open-source software GNPy (version 2.12, Telecom Infra Project) on an optical line system exploring multi-band scenarios spanning C+L+S, C+L+E, and U-to-E transmission, we demonstrate that the proposed approximations reproduce the reference SRS power evolution and NLI profiles with root mean square errors (RMSEs) consistently below 0.03 dB, and down to the 10−3–10−2 dB range for the most accurate configurations. Although the current implementation does not yet provide a direct reduction in computational time, the proposed framework lays the groundwork for future developments toward closed-form or semi-analytical solutions, enabling more efficient modeling and optimization of ultra-wideband optical transmission.</p>
	]]></content:encoded>

	<dc:title>Accuracy of Fiber Propagation Evaluation Using Phenomenological Attenuation and Raman Scattering Models in Multiband Optical Networks</dc:title>
			<dc:creator>Giuseppina Maria Rizzi</dc:creator>
			<dc:creator>Vittorio Curri</dc:creator>
		<dc:identifier>doi: 10.3390/network6010016</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2026-03-12</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2026-03-12</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>16</prism:startingPage>
		<prism:doi>10.3390/network6010016</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/6/1/16</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/6/1/15">

	<title>Network, Vol. 6, Pages 15: Investigation of Underground Communication Quality Using Distributed Antenna Systems Considering Radio-Frequency Signal Propagation Characteristics in Almaty Metro Tunnels</title>
	<link>https://www.mdpi.com/2673-8732/6/1/15</link>
	<description>This study investigates radio-frequency signal propagation in underground metro tunnels with a focus on distributed antenna system (DAS) deployment. Deterministic simulations were performed using Altair WinProp 2024.1 (ProMan) with a 3D ray-tracing engine (GO + UTD) at 2.4 GHz in a reinforced concrete tunnel model of 900 m length. Two antenna configurations (B3: 8 dBi directional; B8: 5 dBi wide-beam) were evaluated under identical geometric and material conditions. Results show that path loss varies from 42 to 65 dB over 850 m, with estimated attenuation exponents lower than free-space values due to quasi-waveguide effects. The B3 configuration provides higher near-field received power (up to −7.5 dBm) but exhibits stronger attenuation over long distances. In contrast, the B8 configuration ensures a more uniform spatial power distribution and a reduced path-loss growth rate beyond 500 m. The findings confirm that antenna radiation pattern significantly influences underground communication performance and demonstrate the engineering suitability of distributed antenna systems for stable metro tunnel coverage.</description>
	<pubDate>2026-03-10</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 6, Pages 15: Investigation of Underground Communication Quality Using Distributed Antenna Systems Considering Radio-Frequency Signal Propagation Characteristics in Almaty Metro Tunnels</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/6/1/15">doi: 10.3390/network6010015</a></p>
	<p>Authors:
		Askar Abdykadyrov
		Moldir Kuatova
		Nurzhigit Smailov
		Zhandos Dosbayev
		Sunggat Marxuly
		Maxat Mamadiyarov
		Ainur Kuttybayeva
		Nurlan Kystaubayev
		Amirkhan Bekmurza
		</p>
	<p>This study investigates radio-frequency signal propagation in underground metro tunnels with a focus on distributed antenna system (DAS) deployment. Deterministic simulations were performed using Altair WinProp 2024.1 (ProMan) with a 3D ray-tracing engine (GO + UTD) at 2.4 GHz in a reinforced concrete tunnel model of 900 m length. Two antenna configurations (B3: 8 dBi directional; B8: 5 dBi wide-beam) were evaluated under identical geometric and material conditions. Results show that path loss varies from 42 to 65 dB over 850 m, with estimated attenuation exponents lower than free-space values due to quasi-waveguide effects. The B3 configuration provides higher near-field received power (up to −7.5 dBm) but exhibits stronger attenuation over long distances. In contrast, the B8 configuration ensures a more uniform spatial power distribution and a reduced path-loss growth rate beyond 500 m. The findings confirm that antenna radiation pattern significantly influences underground communication performance and demonstrate the engineering suitability of distributed antenna systems for stable metro tunnel coverage.</p>
	]]></content:encoded>

	<dc:title>Investigation of Underground Communication Quality Using Distributed Antenna Systems Considering Radio-Frequency Signal Propagation Characteristics in Almaty Metro Tunnels</dc:title>
			<dc:creator>Askar Abdykadyrov</dc:creator>
			<dc:creator>Moldir Kuatova</dc:creator>
			<dc:creator>Nurzhigit Smailov</dc:creator>
			<dc:creator>Zhandos Dosbayev</dc:creator>
			<dc:creator>Sunggat Marxuly</dc:creator>
			<dc:creator>Maxat Mamadiyarov</dc:creator>
			<dc:creator>Ainur Kuttybayeva</dc:creator>
			<dc:creator>Nurlan Kystaubayev</dc:creator>
			<dc:creator>Amirkhan Bekmurza</dc:creator>
		<dc:identifier>doi: 10.3390/network6010015</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2026-03-10</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2026-03-10</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>15</prism:startingPage>
		<prism:doi>10.3390/network6010015</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/6/1/15</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/6/1/14">

	<title>Network, Vol. 6, Pages 14: Experimental Study of Alien Crosstalk Limits in Densely Bundled Commodity 10GBASE-T Ethernet Cables</title>
	<link>https://www.mdpi.com/2673-8732/6/1/14</link>
	<description>In the realm of high-speed Ethernet networks, alien crosstalk (AXT) significantly undermines the integrity and efficiency of data transmission. While existing works mostly focus on modeling and physical-layer mitigation techniques such as PAM16/DSQ128 modulation and LDPC coding, there is a lack of experimental evidence on how severe AXT affects commodity 10GBASE-T equipment in realistic, densely cabled installations. In this study, we assemble and evaluate the experimental testbed that emulates a highly adverse AXT environment by tightly bundling up to seven 60 m twisted-pair Ethernet cables and using only off-the-shelf 10GBASE-T network cards. We quantitatively characterize how increasing cable density leads to automatic speed downgrades, connection failures, and non-linear saturation of the aggregate throughput, and relate these effects to the observed link quality on individual ports. Our results demonstrate that, even in the presence of standard crosstalk mitigation and error-correction mechanisms, severe AXT can force commodity 10GBASE-T links to fall back from 10 Gbit/s to 1 Gbit/s or below. Based on these findings, we derive practical guidelines for dense-cabling deployments and identify key requirements for experimental testbeds that can more reliably quantify AXT severity and its impact on commodity 10GBASE-T link stability (rate fallback and link loss) under realistic conditions.</description>
	<pubDate>2026-03-09</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 6, Pages 14: Experimental Study of Alien Crosstalk Limits in Densely Bundled Commodity 10GBASE-T Ethernet Cables</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/6/1/14">doi: 10.3390/network6010014</a></p>
	<p>Authors:
		Aleksei Demin
		Viktoriia Vasileva
		Dmitrii Chaikovskii
		</p>
	<p>In the realm of high-speed Ethernet networks, alien crosstalk (AXT) significantly undermines the integrity and efficiency of data transmission. While existing works mostly focus on modeling and physical-layer mitigation techniques such as PAM16/DSQ128 modulation and LDPC coding, there is a lack of experimental evidence on how severe AXT affects commodity 10GBASE-T equipment in realistic, densely cabled installations. In this study, we assemble and evaluate the experimental testbed that emulates a highly adverse AXT environment by tightly bundling up to seven 60 m twisted-pair Ethernet cables and using only off-the-shelf 10GBASE-T network cards. We quantitatively characterize how increasing cable density leads to automatic speed downgrades, connection failures, and non-linear saturation of the aggregate throughput, and relate these effects to the observed link quality on individual ports. Our results demonstrate that, even in the presence of standard crosstalk mitigation and error-correction mechanisms, severe AXT can force commodity 10GBASE-T links to fall back from 10 Gbit/s to 1 Gbit/s or below. Based on these findings, we derive practical guidelines for dense-cabling deployments and identify key requirements for experimental testbeds that can more reliably quantify AXT severity and its impact on commodity 10GBASE-T link stability (rate fallback and link loss) under realistic conditions.</p>
	]]></content:encoded>

	<dc:title>Experimental Study of Alien Crosstalk Limits in Densely Bundled Commodity 10GBASE-T Ethernet Cables</dc:title>
			<dc:creator>Aleksei Demin</dc:creator>
			<dc:creator>Viktoriia Vasileva</dc:creator>
			<dc:creator>Dmitrii Chaikovskii</dc:creator>
		<dc:identifier>doi: 10.3390/network6010014</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2026-03-09</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2026-03-09</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>14</prism:startingPage>
		<prism:doi>10.3390/network6010014</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/6/1/14</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/6/1/13">

	<title>Network, Vol. 6, Pages 13: Forecasting-Aware Digital Twin Calibration for Reliable Multi-Horizon Traffic Prediction</title>
	<link>https://www.mdpi.com/2673-8732/6/1/13</link>
	<description>Digital twin systems are becoming an important tool in intelligent transportation management, as they provide simulation-based environments for monitoring, analyzing, and predicting traffic behavior. However, the predictive performance of traffic digital twins is often limited by the quality and temporal consistency of sensor-level data generated from microscopic simulations. Most current calibration methods focus mainly on matching macroscopic traffic indicators, such as vehicle count and speed, without explicitly addressing the requirements of multi-horizon forecasting. This creates a gap between achieving realistic simulations and building reliable predictive models. This research proposes a forecasting-aware digital traffic twin framework that integrates microscopic SUMO simulation, controlled sensor-level observation modeling through geometric misalignment and noise injection, behavioral calibration, and deep temporal forecasting within a unified end-to-end structure. Unlike traditional calibration approaches, the proposed Genetic Algorithm (GA) reformulates calibration as a multi-step predictive optimization task. Simulation parameters are optimized by minimizing forecasting error produced by a lightweight proxy sequence model embedded within the calibration loop. In this way, calibration moves beyond simple statistical matching and instead emphasizes temporal learnability and forecasting stability, enabling the digital twin to generate traffic patterns more suitable for long-term prediction. Based on the calibrated traffic time series, both convolutional and recurrent deep learning models are evaluated under single-step and multi-step forecasting scenarios. To further examine generalizability, external validation is performed using the real-world PEMS-BAY dataset. 
The experimental findings demonstrate that forecasting-aware calibration reduces macroscopic traffic signal errors by around 50% for vehicle count and around 40% for average speed, improves temporal stability, and significantly enhances forecasting accuracy across both short-term and long-term horizons.</description>
	<pubDate>2026-03-06</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 6, Pages 13: Forecasting-Aware Digital Twin Calibration for Reliable Multi-Horizon Traffic Prediction</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/6/1/13">doi: 10.3390/network6010013</a></p>
	<p>Authors:
		Zeyad AlJundi
		Taqwa A. Alhaj
		Fatin A. Elhaj
		Inshirah Idris
		Tasneem Darwish
		</p>
	<p>Digital twin systems are becoming an important tool in intelligent transportation management, as they provide simulation-based environments for monitoring, analyzing, and predicting traffic behavior. However, the predictive performance of traffic digital twins is often limited by the quality and temporal consistency of sensor-level data generated from microscopic simulations. Most current calibration methods focus mainly on matching macroscopic traffic indicators, such as vehicle count and speed, without explicitly addressing the requirements of multi-horizon forecasting. This creates a gap between achieving realistic simulations and building reliable predictive models. This research proposes a forecasting-aware digital traffic twin framework that integrates microscopic SUMO simulation, controlled sensor-level observation modeling through geometric misalignment and noise injection, behavioral calibration, and deep temporal forecasting within a unified end-to-end structure. Unlike traditional calibration approaches, the proposed Genetic Algorithm (GA) reformulates calibration as a multi-step predictive optimization task. Simulation parameters are optimized by minimizing forecasting error produced by a lightweight proxy sequence model embedded within the calibration loop. In this way, calibration moves beyond simple statistical matching and instead emphasizes temporal learnability and forecasting stability, enabling the digital twin to generate traffic patterns more suitable for long-term prediction. Based on the calibrated traffic time series, both convolutional and recurrent deep learning models are evaluated under single-step and multi-step forecasting scenarios. To further examine generalizability, external validation is performed using the real-world PEMS-BAY dataset. 
The experimental findings demonstrate that forecasting-aware calibration reduces macroscopic traffic signal errors by around 50% for vehicle count and around 40% for average speed, improves temporal stability, and significantly enhances forecasting accuracy across both short-term and long-term horizons.</p>
	]]></content:encoded>

	<dc:title>Forecasting-Aware Digital Twin Calibration for Reliable Multi-Horizon Traffic Prediction</dc:title>
			<dc:creator>Zeyad AlJundi</dc:creator>
			<dc:creator>Taqwa A. Alhaj</dc:creator>
			<dc:creator>Fatin A. Elhaj</dc:creator>
			<dc:creator>Inshirah Idris</dc:creator>
			<dc:creator>Tasneem Darwish</dc:creator>
		<dc:identifier>doi: 10.3390/network6010013</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2026-03-06</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2026-03-06</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>13</prism:startingPage>
		<prism:doi>10.3390/network6010013</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/6/1/13</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/6/1/12">

	<title>Network, Vol. 6, Pages 12: Satellite Backhaul for Extending Connectivity in Rural Remote Areas: Deployment and Performance Assessment</title>
	<link>https://www.mdpi.com/2673-8732/6/1/12</link>
	<description>Limited terrestrial network coverage in rural and remote areas constitutes a significant barrier to the digital transformation of the agricultural sector. Smart and precision farming applications, ranging from conventional environmental monitoring systems to advanced Digital Twin solutions, rely on the reliable transmission of sensor data, images, and video streams from geographically isolated farms. Such data-intensive services cannot be effectively supported without a robust communication infrastructure. Non-Terrestrial Networks (NTNs), particularly satellite systems, offer both narrowband and broadband connectivity, enabling the transmission of low-rate sensor measurements, as well as high-throughput multimedia data from the field. This paper presents an experimental performance evaluation of two satellite backhauling solutions: a Geostationary Earth Orbit (GEO) system provided by SES and a Low Earth Orbit (LEO) system from Starlink. The networks were first deployed and tested in a laboratory environment and subsequently validated in an operational agricultural field setting. Their performance is benchmarked against a terrestrial cellular network to assess their suitability for supporting advanced agricultural applications. The performance assessment results indicate that both satellite backhauling solutions are reliable and capable of meeting the bandwidth and latency requirements of delay-tolerant agricultural applications. In addition to the technical evaluation, this work presents a cost–benefit analysis that further underscores the advantages of NTN-based solutions. Despite higher initial expenditures, they provide extended coverage in remote areas and enable cost sharing across multiple users, improving overall economic viability.</description>
	<pubDate>2026-02-24</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 6, Pages 12: Satellite Backhaul for Extending Connectivity in Rural Remote Areas: Deployment and Performance Assessment</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/6/1/12">doi: 10.3390/network6010012</a></p>
	<p>Authors:
		Souhaima Stiri
		Maria Rita Palattella
		Juan David Niebles Castano
		Christos Politis
		</p>
	<p>Limited terrestrial network coverage in rural and remote areas constitutes a significant barrier to the digital transformation of the agricultural sector. Smart and precision farming applications, ranging from conventional environmental monitoring systems to advanced Digital Twin solutions, rely on the reliable transmission of sensor data, images, and video streams from geographically isolated farms. Such data-intensive services cannot be effectively supported without a robust communication infrastructure. Non-Terrestrial Networks (NTNs), particularly satellite systems, offer both narrowband and broadband connectivity, enabling the transmission of low-rate sensor measurements, as well as high-throughput multimedia data from the field. This paper presents an experimental performance evaluation of two satellite backhauling solutions: a Geostationary Earth Orbit (GEO) system provided by SES and a Low Earth Orbit (LEO) system from Starlink. The networks were first deployed and tested in a laboratory environment and subsequently validated in an operational agricultural field setting. Their performance is benchmarked against a terrestrial cellular network to assess their suitability for supporting advanced agricultural applications. The performance assessment results indicate that both satellite backhauling solutions are reliable and capable of meeting the bandwidth and latency requirements of delay-tolerant agricultural applications. In addition to the technical evaluation, this work presents a cost–benefit analysis that further underscores the advantages of NTN-based solutions. Despite higher initial expenditures, they provide extended coverage in remote areas and enable cost sharing across multiple users, improving overall economic viability.</p>
	]]></content:encoded>

	<dc:title>Satellite Backhaul for Extending Connectivity in Rural Remote Areas: Deployment and Performance Assessment</dc:title>
			<dc:creator>Souhaima Stiri</dc:creator>
			<dc:creator>Maria Rita Palattella</dc:creator>
			<dc:creator>Juan David Niebles Castano</dc:creator>
			<dc:creator>Christos Politis</dc:creator>
		<dc:identifier>doi: 10.3390/network6010012</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2026-02-24</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2026-02-24</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>12</prism:startingPage>
		<prism:doi>10.3390/network6010012</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/6/1/12</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/6/1/11">

	<title>Network, Vol. 6, Pages 11: Beyond Attention: Hierarchical Mamba Models for Scalable Spatiotemporal Traffic Forecasting</title>
	<link>https://www.mdpi.com/2673-8732/6/1/11</link>
	<description>Traffic forecasting in cellular networks is a challenging spatiotemporal prediction problem due to strong temporal dependencies, spatial heterogeneity across cells, and the need for scalability to large network deployments. Traditional cell-specific models incur prohibitive training and maintenance costs, while global models often fail to capture heterogeneous spatial dynamics. Recent spatiotemporal architectures based on attention or graph neural networks improve accuracy but introduce high computational overhead, limiting their applicability in large-scale or real-time settings. We propose HiSTM (Hierarchical SpatioTemporal Mamba), a spatiotemporal forecasting architecture built on state-space modeling. HiSTM combines spatial convolutional encoding for local neighborhood interactions with Mamba-based temporal modeling to capture long-range dependencies, followed by attention-based temporal aggregation for prediction. The hierarchical design enables representation learning with linear computational complexity in sequence length and supports both grid-based and correlation-defined spatial structures. Cluster-aware extensions incorporate spatial regime information to handle heterogeneous traffic patterns. Experimental evaluation on large-scale real-world cellular datasets demonstrates that HiSTM achieves better accuracy, outperforming strong baselines. On the Milan dataset, HiSTM reduces MAE by 29.4% compared to STN, while achieving the lowest RMSE and highest R2 score among all evaluated models. In multi-step autoregressive forecasting, HiSTM maintains 36.8% lower MAE than STN and 11.3% lower than STTRE at the 6-step horizon, with a 58% slower error accumulation rate compared to STN. On the unseen Trentino dataset, HiSTM achieves 47.3% MAE reduction over STN and demonstrates better cross-dataset generalization. 
A single HiSTM model outperforms 10,000 independently trained cell-specific LSTMs, demonstrating the advantage of joint spatiotemporal learning. HiSTM maintains best-in-class performance with up to 30% missing data, outperforming all baselines under various missing data scenarios. The model achieves these results while being 45&amp;times; smaller than PredRNNpp, 18&amp;times; smaller than xLSTM, and maintaining competitive inference latency of 1.19 ms, showcasing its effectiveness for scalable 5/6G traffic prediction in resource-constrained environments.</description>
	<pubDate>2026-02-13</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 6, Pages 11: Beyond Attention: Hierarchical Mamba Models for Scalable Spatiotemporal Traffic Forecasting</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/6/1/11">doi: 10.3390/network6010011</a></p>
	<p>Authors:
		Zineddine Bettouche
		Khalid Ali
		Andreas Fischer
		Andreas Kassler
		</p>
	<p>Traffic forecasting in cellular networks is a challenging spatiotemporal prediction problem due to strong temporal dependencies, spatial heterogeneity across cells, and the need for scalability to large network deployments. Traditional cell-specific models incur prohibitive training and maintenance costs, while global models often fail to capture heterogeneous spatial dynamics. Recent spatiotemporal architectures based on attention or graph neural networks improve accuracy but introduce high computational overhead, limiting their applicability in large-scale or real-time settings. We propose HiSTM (Hierarchical SpatioTemporal Mamba), a spatiotemporal forecasting architecture built on state-space modeling. HiSTM combines spatial convolutional encoding for local neighborhood interactions with Mamba-based temporal modeling to capture long-range dependencies, followed by attention-based temporal aggregation for prediction. The hierarchical design enables representation learning with linear computational complexity in sequence length and supports both grid-based and correlation-defined spatial structures. Cluster-aware extensions incorporate spatial regime information to handle heterogeneous traffic patterns. Experimental evaluation on large-scale real-world cellular datasets demonstrates that HiSTM achieves better accuracy, outperforming strong baselines. On the Milan dataset, HiSTM reduces MAE by 29.4% compared to STN, while achieving the lowest RMSE and highest R2 score among all evaluated models. In multi-step autoregressive forecasting, HiSTM maintains 36.8% lower MAE than STN and 11.3% lower than STTRE at the 6-step horizon, with a 58% slower error accumulation rate compared to STN. On the unseen Trentino dataset, HiSTM achieves 47.3% MAE reduction over STN and demonstrates better cross-dataset generalization. A single HiSTM model outperforms 10,000 independently trained cell-specific LSTMs, demonstrating the advantage of joint spatiotemporal learning. 
HiSTM maintains best-in-class performance with up to 30% missing data, outperforming all baselines under various missing data scenarios. The model achieves these results while being 45&times; smaller than PredRNNpp, 18&times; smaller than xLSTM, and maintaining competitive inference latency of 1.19 ms, showcasing its effectiveness for scalable 5/6G traffic prediction in resource-constrained environments.</p>
	]]></content:encoded>

	<dc:title>Beyond Attention: Hierarchical Mamba Models for Scalable Spatiotemporal Traffic Forecasting</dc:title>
			<dc:creator>Zineddine Bettouche</dc:creator>
			<dc:creator>Khalid Ali</dc:creator>
			<dc:creator>Andreas Fischer</dc:creator>
			<dc:creator>Andreas Kassler</dc:creator>
		<dc:identifier>doi: 10.3390/network6010011</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2026-02-13</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2026-02-13</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>11</prism:startingPage>
		<prism:doi>10.3390/network6010011</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/6/1/11</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/6/1/10">

	<title>Network, Vol. 6, Pages 10: Round-Trip Time Estimation Using Enhanced Regularized Extreme Learning Machine</title>
	<link>https://www.mdpi.com/2673-8732/6/1/10</link>
	<description>Reliable Internet connectivity is essential for latency-sensitive services such as video conferencing, media streaming, and online gaming. Round-trip time (RTT) is a key indicator of network performance and is central to setting retransmission timeout (RTO); inaccurate RTT estimates may trigger unnecessary retransmissions or slow loss recovery. This paper proposes an Enhanced Regularized Extreme Learning Machine (RELM) for RTT estimation that improves generalization and efficiency by interleaving a bidirectional log-step heuristic to select the regularization constant C. Unlike manual tuning or fixed-range grid search, the proposed heuristic explores C on a logarithmic scale in both directions (&amp;times;10 and /10) within a single loop and terminates using a tolerance&amp;ndash;patience criterion, reducing redundant evaluations without requiring predefined bounds. A custom RTT dataset is generated using Mininet with a dumbbell topology under controlled delay injections (1&amp;ndash;1000 ms), yielding 1000 supervised samples derived from 100,000 raw RTT measurements. Experiments follow a strict train/validation/test split (6:1:3) with training-only standardization/normalization and validation-only hyperparameter selection. On the controlled Mininet dataset, the best configuration (ReLU, 150 hidden neurons, C=102) achieves R2=0.9999, MAPE=0.0018, MAE=966.04, and RMSE=1589.64 on the test set, while maintaining millisecond-level runtime. Under the same evaluation pipeline, the proposed method demonstrates competitive performance compared to common regression baselines (SVR, GAM, Decision Tree, KNN, Random Forest, GBDT, and ELM), while maintaining lower computational overhead within the controlled simulation setting. 
To assess practical robustness, an additional evaluation on a public real-world WiFi RSS&amp;ndash;RTT dataset shows near-meter accuracy in LOS and mixed LOS/NLOS scenarios, while performance degrades markedly under dominant NLOS conditions, reflecting physical-channel limitations rather than model instability. These results demonstrate the feasibility of the Enhanced RELM and motivate further validation on operational networks with packet loss, jitter, and path variability.</description>
	<pubDate>2026-01-29</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 6, Pages 10: Round-Trip Time Estimation Using Enhanced Regularized Extreme Learning Machine</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/6/1/10">doi: 10.3390/network6010010</a></p>
	<p>Authors:
		Hassan Rizky Putra Sailellah
		Hilal Hudan Nuha
		Aji Gautama Putrada
		</p>
	<p>Reliable Internet connectivity is essential for latency-sensitive services such as video conferencing, media streaming, and online gaming. Round-trip time (RTT) is a key indicator of network performance and is central to setting retransmission timeout (RTO); inaccurate RTT estimates may trigger unnecessary retransmissions or slow loss recovery. This paper proposes an Enhanced Regularized Extreme Learning Machine (RELM) for RTT estimation that improves generalization and efficiency by interleaving a bidirectional log-step heuristic to select the regularization constant C. Unlike manual tuning or fixed-range grid search, the proposed heuristic explores C on a logarithmic scale in both directions (&times;10 and /10) within a single loop and terminates using a tolerance&ndash;patience criterion, reducing redundant evaluations without requiring predefined bounds. A custom RTT dataset is generated using Mininet with a dumbbell topology under controlled delay injections (1&ndash;1000 ms), yielding 1000 supervised samples derived from 100,000 raw RTT measurements. Experiments follow a strict train/validation/test split (6:1:3) with training-only standardization/normalization and validation-only hyperparameter selection. On the controlled Mininet dataset, the best configuration (ReLU, 150 hidden neurons, C=102) achieves R2=0.9999, MAPE=0.0018, MAE=966.04, and RMSE=1589.64 on the test set, while maintaining millisecond-level runtime. Under the same evaluation pipeline, the proposed method demonstrates competitive performance compared to common regression baselines (SVR, GAM, Decision Tree, KNN, Random Forest, GBDT, and ELM), while maintaining lower computational overhead within the controlled simulation setting. 
To assess practical robustness, an additional evaluation on a public real-world WiFi RSS&ndash;RTT dataset shows near-meter accuracy in LOS and mixed LOS/NLOS scenarios, while performance degrades markedly under dominant NLOS conditions, reflecting physical-channel limitations rather than model instability. These results demonstrate the feasibility of the Enhanced RELM and motivate further validation on operational networks with packet loss, jitter, and path variability.</p>
	]]></content:encoded>

	<dc:title>Round-Trip Time Estimation Using Enhanced Regularized Extreme Learning Machine</dc:title>
			<dc:creator>Hassan Rizky Putra Sailellah</dc:creator>
			<dc:creator>Hilal Hudan Nuha</dc:creator>
			<dc:creator>Aji Gautama Putrada</dc:creator>
		<dc:identifier>doi: 10.3390/network6010010</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2026-01-29</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2026-01-29</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>10</prism:startingPage>
		<prism:doi>10.3390/network6010010</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/6/1/10</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/6/1/9">

	<title>Network, Vol. 6, Pages 9: Auditing Inferential Blind Spots: A Framework for Evaluating Forensic Coverage in Network Telemetry Architectures</title>
	<link>https://www.mdpi.com/2673-8732/6/1/9</link>
	<description>Network operators increasingly rely on abstracted telemetry (e.g., flow records and time-aggregated statistics) to achieve scalable monitoring of high-speed networks, but this abstraction fundamentally constrains the forensic and security inferences that can be supported from network data. We present a design-time audit framework that evaluates which threat hypotheses become non-supportable as network evidence is transformed from packet-level traces to flow records and time-aggregated statistics. Our methodology examines three evidence layers (L0: packet headers, L1: IP Flow Information Export (IPFIX) flow records, L2: time-aggregated flows), computes a catalog of 13 network-forensic artifacts (e.g., destination fan-out, inter-arrival time burstiness, SYN-dominant connection patterns) at each layer, and maps artifact availability to tactic support using literature-grounded associations with MITRE Adversarial Tactics, Techniques, and Common Knowledge (ATT&amp;amp;CK). Applied to backbone traffic from the MAWI Day-In-The-Life (DITL) archive, the audit reveals selective inference loss: Execution becomes non-supportable at L1 (due to loss of packet-level timing artifacts), while Lateral Movement and Persistence become non-supportable at L2 (due to loss of entity-linked structural artifacts). Inference coverage decreases from 9 to 7 out of 9 evaluated ATT&amp;amp;CK tactics, while coverage of defensive countermeasures (MITRE D3FEND) increases at L1 (7 &amp;rarr; 8 technique categories) then decreases at L2 (8 &amp;rarr; 7), reflecting a shift from behavioral monitoring to flow-based controls. The framework provides network architects with a practical tool for configuring telemetry systems (e.g., IPFIX exporters, P4 pipelines) to reason about and provision the minimum forensic coverage.</description>
	<pubDate>2026-01-29</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 6, Pages 9: Auditing Inferential Blind Spots: A Framework for Evaluating Forensic Coverage in Network Telemetry Architectures</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/6/1/9">doi: 10.3390/network6010009</a></p>
	<p>Authors:
		Mehrnoush Vaseghipanah
		Sam Jabbehdari
		Hamidreza Navidi
		</p>
	<p>Network operators increasingly rely on abstracted telemetry (e.g., flow records and time-aggregated statistics) to achieve scalable monitoring of high-speed networks, but this abstraction fundamentally constrains the forensic and security inferences that can be supported from network data. We present a design-time audit framework that evaluates which threat hypotheses become non-supportable as network evidence is transformed from packet-level traces to flow records and time-aggregated statistics. Our methodology examines three evidence layers (L0: packet headers, L1: IP Flow Information Export (IPFIX) flow records, L2: time-aggregated flows), computes a catalog of 13 network-forensic artifacts (e.g., destination fan-out, inter-arrival time burstiness, SYN-dominant connection patterns) at each layer, and maps artifact availability to tactic support using literature-grounded associations with MITRE Adversarial Tactics, Techniques, and Common Knowledge (ATT&amp;CK). Applied to backbone traffic from the MAWI Day-In-The-Life (DITL) archive, the audit reveals selective inference loss: Execution becomes non-supportable at L1 (due to loss of packet-level timing artifacts), while Lateral Movement and Persistence become non-supportable at L2 (due to loss of entity-linked structural artifacts). Inference coverage decreases from 9 to 7 out of 9 evaluated ATT&amp;CK tactics, while coverage of defensive countermeasures (MITRE D3FEND) increases at L1 (7 &rarr; 8 technique categories) then decreases at L2 (8 &rarr; 7), reflecting a shift from behavioral monitoring to flow-based controls. The framework provides network architects with a practical tool for configuring telemetry systems (e.g., IPFIX exporters, P4 pipelines) to reason about and provision the minimum forensic coverage.</p>
	]]></content:encoded>

	<dc:title>Auditing Inferential Blind Spots: A Framework for Evaluating Forensic Coverage in Network Telemetry Architectures</dc:title>
			<dc:creator>Mehrnoush Vaseghipanah</dc:creator>
			<dc:creator>Sam Jabbehdari</dc:creator>
			<dc:creator>Hamidreza Navidi</dc:creator>
		<dc:identifier>doi: 10.3390/network6010009</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2026-01-29</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2026-01-29</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>9</prism:startingPage>
		<prism:doi>10.3390/network6010009</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/6/1/9</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/6/1/8">

	<title>Network, Vol. 6, Pages 8: DOTSSA: Directed Acyclic Graph-Based Online Trajectory Simplification with Stay Areas</title>
	<link>https://www.mdpi.com/2673-8732/6/1/8</link>
	<description>Devices equipped with the Global Positioning System (GPS) generate massive volumes of trajectory data on a daily basis, imposing substantial computational, network, and storage burdens. Online trajectory simplification reduces redundant points in a streaming manner while preserving essential spatial and temporal characteristics. A representative method in this line of research is Directed acyclic graph-based Online Trajectory Simplification (DOTS). However, DOTS does not preserve stay-related information and can incur high computational cost. To address these limitations, we propose Directed acyclic graph-based Online Trajectory Simplification with Stay Areas (DOTSSA), a fast online simplification method that integrates DOTS with an online stay area detection algorithm (SA). In DOTSSA, SA continuously monitors movement patterns to detect stay areas and segments the incoming trajectory accordingly, after which DOTS is applied to the extracted segments. This approach ensures the preservation of stay areas while reducing computational overhead through localized DAG construction. Experimental evaluations on a real-world dataset show that, compared with DOTS, DOTSSA can reduce compression time, while achieving comparable compression ratios and preserving key trajectory features.</description>
	<pubDate>2026-01-29</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 6, Pages 8: DOTSSA: Directed Acyclic Graph-Based Online Trajectory Simplification with Stay Areas</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/6/1/8">doi: 10.3390/network6010008</a></p>
	<p>Authors:
		Masaharu Hirota
		</p>
	<p>Devices equipped with the Global Positioning System (GPS) generate massive volumes of trajectory data on a daily basis, imposing substantial computational, network, and storage burdens. Online trajectory simplification reduces redundant points in a streaming manner while preserving essential spatial and temporal characteristics. A representative method in this line of research is Directed acyclic graph-based Online Trajectory Simplification (DOTS). However, DOTS does not preserve stay-related information and can incur high computational cost. To address these limitations, we propose Directed acyclic graph-based Online Trajectory Simplification with Stay Areas (DOTSSA), a fast online simplification method that integrates DOTS with an online stay area detection algorithm (SA). In DOTSSA, SA continuously monitors movement patterns to detect stay areas and segments the incoming trajectory accordingly, after which DOTS is applied to the extracted segments. This approach ensures the preservation of stay areas while reducing computational overhead through localized DAG construction. Experimental evaluations on a real-world dataset show that, compared with DOTS, DOTSSA can reduce compression time, while achieving comparable compression ratios and preserving key trajectory features.</p>
	]]></content:encoded>

	<dc:title>DOTSSA: Directed Acyclic Graph-Based Online Trajectory Simplification with Stay Areas</dc:title>
			<dc:creator>Masaharu Hirota</dc:creator>
		<dc:identifier>doi: 10.3390/network6010008</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2026-01-29</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2026-01-29</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>8</prism:startingPage>
		<prism:doi>10.3390/network6010008</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/6/1/8</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/6/1/7">

	<title>Network, Vol. 6, Pages 7: FANET Routing Protocol for Prioritizing Data Transmission to the Ground Station</title>
	<link>https://www.mdpi.com/2673-8732/6/1/7</link>
	<description>In recent years, with the improvement of unmanned aerial vehicle (UAV) performance, various applications have been explored. In environments such as disaster areas, where existing infrastructure may be damaged, alternative uplink communication for transmitting observation data from UAVs to the ground station (GS) is critical. However, conventional mobile ad hoc network (MANET) routing protocols do not sufficiently account for GS-oriented traffic or the highly mobile UAV topology. This study proposed a flying ad hoc network (FANET) routing protocol that introduces a control option called GS flood, where the GS periodically disseminates routing information, enabling each UAV to efficiently acquire fresh source routes to the GS. Evaluation using NS-3 in a disaster scenario confirmed that the proposed method achieves a higher packet delivery ratio and practical latency compared to the representative MANET routing protocols, namely DSR, AODV, and OLSR, while operating with fewer control IP packets than existing methods. Furthermore, although the multihop throughput between UAVs and the GS in the proposed method plateaued at approximately 40% of the physical-layer maximum, it demonstrated performance exceeding realistic satellite uplink capacities ranging from several hundred kbps to several Mbps.</description>
	<pubDate>2026-01-14</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 6, Pages 7: FANET Routing Protocol for Prioritizing Data Transmission to the Ground Station</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/6/1/7">doi: 10.3390/network6010007</a></p>
	<p>Authors:
		Kaoru Takabatake
		Tomofumi Matsuzawa
		</p>
	<p>In recent years, with the improvement of unmanned aerial vehicle (UAV) performance, various applications have been explored. In environments such as disaster areas, where existing infrastructure may be damaged, alternative uplink communication for transmitting observation data from UAVs to the ground station (GS) is critical. However, conventional mobile ad hoc network (MANET) routing protocols do not sufficiently account for GS-oriented traffic or the highly mobile UAV topology. This study proposed a flying ad hoc network (FANET) routing protocol that introduces a control option called GS flood, where the GS periodically disseminates routing information, enabling each UAV to efficiently acquire fresh source routes to the GS. Evaluation using NS-3 in a disaster scenario confirmed that the proposed method achieves a higher packet delivery ratio and practical latency compared to the representative MANET routing protocols, namely DSR, AODV, and OLSR, while operating with fewer control IP packets than existing methods. Furthermore, although the multihop throughput between UAVs and the GS in the proposed method plateaued at approximately 40% of the physical-layer maximum, it demonstrated performance exceeding realistic satellite uplink capacities ranging from several hundred kbps to several Mbps.</p>
	]]></content:encoded>

	<dc:title>FANET Routing Protocol for Prioritizing Data Transmission to the Ground Station</dc:title>
			<dc:creator>Kaoru Takabatake</dc:creator>
			<dc:creator>Tomofumi Matsuzawa</dc:creator>
		<dc:identifier>doi: 10.3390/network6010007</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2026-01-14</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2026-01-14</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>7</prism:startingPage>
		<prism:doi>10.3390/network6010007</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/6/1/7</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/6/1/6">

	<title>Network, Vol. 6, Pages 6: Securing IoT Networks Using Machine Learning-Resistant Physical Unclonable Functions (PUFs) on Edge Devices</title>
	<link>https://www.mdpi.com/2673-8732/6/1/6</link>
	<description>The Internet of Things (IoT) has transformed global connectivity by linking people, smart devices, and data. However, as the number of connected devices continues to grow, ensuring secure data transmission and communication has become increasingly challenging. IoT security threats arise at the device level due to limited computing resources, mobility, and the large diversity of devices, as well as at the network level, where the use of varied protocols by different vendors introduces further vulnerabilities. Physical Unclonable Functions (PUFs) provide a lightweight, hardware-based security primitive that exploits inherent device-specific variations to ensure uniqueness, unpredictability, and enhanced protection of data and user privacy. Additionally, modeling attacks against PUF architectures is challenging due to the random and unpredictable physical variations inherent in their design, making it nearly impossible for attackers to accurately replicate their unique responses. This study collected approximately 80,000 Challenge Response Pairs (CRPs) from a Ring Oscillator (RO) PUF design to evaluate its resilience against modeling attacks. The predictive performance of five machine learning algorithms, i.e., Support Vector Machines, Logistic Regression, Artificial Neural Networks with a Multilayer Perceptron, K-Nearest Neighbors, and Gradient Boosting, was analyzed, and the results showed an average accuracy of approximately 60%, demonstrating the strong resistance of the RO PUF to these attacks. The NIST statistical test suite was applied to the CRP data of the RO PUF to evaluate its randomness quality. The p-values from the 15 statistical tests confirm that the CRP data exhibit true randomness, with most values exceeding the 0.01 threshold and supporting the null hypothesis of randomness.</description>
	<pubDate>2026-01-12</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 6, Pages 6: Securing IoT Networks Using Machine Learning-Resistant Physical Unclonable Functions (PUFs) on Edge Devices</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/6/1/6">doi: 10.3390/network6010006</a></p>
	<p>Authors:
		Abdul Manan Sheikh
		Md. Rafiqul Islam
		Mohamed Hadi Habaebi
		Suriza Ahmad Zabidi
		Athaur Rahman bin Najeeb
		Mazhar Baloch
		</p>
	<p>The Internet of Things (IoT) has transformed global connectivity by linking people, smart devices, and data. However, as the number of connected devices continues to grow, ensuring secure data transmission and communication has become increasingly challenging. IoT security threats arise at the device level due to limited computing resources, mobility, and the large diversity of devices, as well as at the network level, where the use of varied protocols by different vendors introduces further vulnerabilities. Physical Unclonable Functions (PUFs) provide a lightweight, hardware-based security primitive that exploits inherent device-specific variations to ensure uniqueness, unpredictability, and enhanced protection of data and user privacy. Additionally, modeling attacks against PUF architectures is challenging due to the random and unpredictable physical variations inherent in their design, making it nearly impossible for attackers to accurately replicate their unique responses. This study collected approximately 80,000 Challenge Response Pairs (CRPs) from a Ring Oscillator (RO) PUF design to evaluate its resilience against modeling attacks. The predictive performance of five machine learning algorithms, i.e., Support Vector Machines, Logistic Regression, Artificial Neural Networks with a Multilayer Perceptron, K-Nearest Neighbors, and Gradient Boosting, was analyzed, and the results showed an average accuracy of approximately 60%, demonstrating the strong resistance of the RO PUF to these attacks. The NIST statistical test suite was applied to the CRP data of the RO PUF to evaluate its randomness quality. The p-values from the 15 statistical tests confirm that the CRP data exhibit true randomness, with most values exceeding the 0.01 threshold and supporting the null hypothesis of randomness.</p>
	]]></content:encoded>

	<dc:title>Securing IoT Networks Using Machine Learning-Resistant Physical Unclonable Functions (PUFs) on Edge Devices</dc:title>
			<dc:creator>Abdul Manan Sheikh</dc:creator>
			<dc:creator>Md. Rafiqul Islam</dc:creator>
			<dc:creator>Mohamed Hadi Habaebi</dc:creator>
			<dc:creator>Suriza Ahmad Zabidi</dc:creator>
			<dc:creator>Athaur Rahman bin Najeeb</dc:creator>
			<dc:creator>Mazhar Baloch</dc:creator>
		<dc:identifier>doi: 10.3390/network6010006</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2026-01-12</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2026-01-12</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>6</prism:startingPage>
		<prism:doi>10.3390/network6010006</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/6/1/6</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/6/1/5">

	<title>Network, Vol. 6, Pages 5: Enhanced Wireless Sensor Network Lifetime Using EGWO-Optimized Neural Network Approach</title>
	<link>https://www.mdpi.com/2673-8732/6/1/5</link>
	<description>Efficient clustering is essential for reducing energy consumption and extending the operational lifetime of Wireless Sensor Networks. Classical protocols such as LEACH, PEGASIS, HEED, and EEHC frequently exhibit unbalanced energy usage, resulting in early node failures and reduced communication reliability. This study introduces an Enhanced Grey Wolf Optimization-based Neural Network (EGWO-NN) designed to adaptively select cluster heads by continuously optimizing decision parameters according to real-time network conditions. The proposed method is evaluated against four benchmark protocols using statistical comparisons of node survivability, transmission energy, and communication performance. Results show that EGWO-NN sustains significantly more alive nodes per round, with strong statistical differences compared with LEACH, PEGASIS, HEED, and EEHC (t = 18.27, 9.94, 18.91, 18.93; p &amp;lt; 10&amp;minus;22). Transmission energy analysis similarly indicates significant improvements across all pairwise tests (|t| = 4.12&amp;ndash;46.34; p &amp;lt; 10&amp;minus;4), supported by an overall ANOVA result (F = 14.74, p = 1.42&amp;times;10&amp;minus;10). EGWO-NN also enhances data delivery, outperforming baseline protocols in both packets sent and Packet Delivery Ratio, with highly significant differences (t = 17.62&amp;ndash;19.75 and 11.25&amp;ndash;22.89). These findings demonstrate that EGWO-NN provides a robust and scalable approach for improving energy efficiency and communication reliability in WSNs.</description>
	<pubDate>2026-01-04</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 6, Pages 5: Enhanced Wireless Sensor Network Lifetime Using EGWO-Optimized Neural Network Approach</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/6/1/5">doi: 10.3390/network6010005</a></p>
	<p>Authors:
		Mohamad Nurkamal Fauzan
		Rendy Munadi
		Sony Sumaryo
		Hilal Hudan Nuha
		</p>
	<p>Efficient clustering is essential for reducing energy consumption and extending the operational lifetime of Wireless Sensor Networks. Classical protocols such as LEACH, PEGASIS, HEED, and EEHC frequently exhibit unbalanced energy usage, resulting in early node failures and reduced communication reliability. This study introduces an Enhanced Grey Wolf Optimization-based Neural Network (EGWO-NN) designed to adaptively select cluster heads by continuously optimizing decision parameters according to real-time network conditions. The proposed method is evaluated against four benchmark protocols using statistical comparisons of node survivability, transmission energy, and communication performance. Results show that EGWO-NN sustains significantly more alive nodes per round, with strong statistical differences compared with LEACH, PEGASIS, HEED, and EEHC (t = 18.27, 9.94, 18.91, 18.93; p &lt; 10&minus;22). Transmission energy analysis similarly indicates significant improvements across all pairwise tests (|t| = 4.12&ndash;46.34; p &lt; 10&minus;4), supported by an overall ANOVA result (F = 14.74, p = 1.42&times;10&minus;10). EGWO-NN also enhances data delivery, outperforming baseline protocols in both packets sent and Packet Delivery Ratio, with highly significant differences (t = 17.62&ndash;19.75 and 11.25&ndash;22.89). These findings demonstrate that EGWO-NN provides a robust and scalable approach for improving energy efficiency and communication reliability in WSNs.</p>
	]]></content:encoded>

	<dc:title>Enhanced Wireless Sensor Network Lifetime Using EGWO-Optimized Neural Network Approach</dc:title>
			<dc:creator>Mohamad Nurkamal Fauzan</dc:creator>
			<dc:creator>Rendy Munadi</dc:creator>
			<dc:creator>Sony Sumaryo</dc:creator>
			<dc:creator>Hilal Hudan Nuha</dc:creator>
		<dc:identifier>doi: 10.3390/network6010005</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2026-01-04</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2026-01-04</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>5</prism:startingPage>
		<prism:doi>10.3390/network6010005</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/6/1/5</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/6/1/4">

	<title>Network, Vol. 6, Pages 4: Evaluating AES-128 Segment Encryption in Live HTTP Streaming Under Content Tampering and Packet Loss</title>
	<link>https://www.mdpi.com/2673-8732/6/1/4</link>
	<description>One of the main sources of entertainment is live video streaming platforms, which allow viewers to watch video streams in real time. However, because of the increasing demand for high-quality content, the vulnerability of streaming systems against cyberattacks highlights how crucial it is to implement strong security mechanisms without sacrificing performance. Therefore, the safeguard of video streams against cyberthreats such as content tampering and interception is a top priority while still maintaining robustness against network fluctuations. Two distinct scenarios are proposed to test AES-128 encryption in securing HTTP live streaming segments against content tampering and resilience to packet loss. Results show that AES-128 encryption provides confidentiality and successfully prevents meaningful manipulation of the video content, confirming its reliability as segment encryption does not significantly alter packet loss-induced playback behavior compared to unencrypted streaming under the tested conditions. Performance analysis shows that AES-128 has no significant difference in data loss for up to 4% of network packet loss compared to unencrypted segments.</description>
	<pubDate>2025-12-31</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 6, Pages 4: Evaluating AES-128 Segment Encryption in Live HTTP Streaming Under Content Tampering and Packet Loss</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/6/1/4">doi: 10.3390/network6010004</a></p>
	<p>Authors:
		Bzav Shorsh Sabir
		Aree Ali Mohammed
		</p>
	<p>One of the main sources of entertainment is live video streaming platforms, which allow viewers to watch video streams in real time. However, because of the increasing demand for high-quality content, the vulnerability of streaming systems against cyberattacks highlights how crucial it is to implement strong security mechanisms without sacrificing performance. Therefore, the safeguard of video streams against cyberthreats such as content tampering and interception is a top priority while still maintaining robustness against network fluctuations. Two distinct scenarios are proposed to test AES-128 encryption in securing HTTP live streaming segments against content tampering and resilience to packet loss. Results show that AES-128 encryption provides confidentiality and successfully prevents meaningful manipulation of the video content, confirming its reliability as segment encryption does not significantly alter packet loss-induced playback behavior compared to unencrypted streaming under the tested conditions. Performance analysis shows that AES-128 has no significant difference in data loss for up to 4% of network packet loss compared to unencrypted segments.</p>
	]]></content:encoded>

	<dc:title>Evaluating AES-128 Segment Encryption in Live HTTP Streaming Under Content Tampering and Packet Loss</dc:title>
			<dc:creator>Bzav Shorsh Sabir</dc:creator>
			<dc:creator>Aree Ali Mohammed</dc:creator>
		<dc:identifier>doi: 10.3390/network6010004</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2025-12-31</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2025-12-31</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>4</prism:startingPage>
		<prism:doi>10.3390/network6010004</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/6/1/4</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/6/1/3">

	<title>Network, Vol. 6, Pages 3: Adaptive Real-Time Risk and Impact Assessment for 5G Network Security</title>
	<link>https://www.mdpi.com/2673-8732/6/1/3</link>
	<description>The expansion of 5G networks has led to larger attack surfaces due to more applications and use cases, more IoT connections, and the distributed 5G system architecture. Existing security frameworks often lack the ability to perform real-time, context-aware risk assessments that are specifically adapted to dynamic 5G environments. In this paper, we present an integrated framework that combines Snort intrusion detection with a risk and impact assessment model to evaluate threats in real time. By correlating intrusion alerts with contextual risk metrics tied to 5G core functions, the framework prioritizes incidents and supports timely mitigation. Evaluation in a controlled testbed shows the framework&amp;rsquo;s stability, scalability, and effective risk classification, thereby strengthening cybersecurity for next-generation networks.</description>
	<pubDate>2025-12-24</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 6, Pages 3: Adaptive Real-Time Risk and Impact Assessment for 5G Network Security</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/6/1/3">doi: 10.3390/network6010003</a></p>
	<p>Authors:
		Dionysia Varvarigou
		Kostas Lampropoulos
		Spyros Denazis
		Paris Kitsos
		</p>
	<p>The expansion of 5G networks has led to larger attack surfaces due to more applications and use cases, more IoT connections, and the distributed 5G system architecture. Existing security frameworks often lack the ability to perform real-time, context-aware risk assessments that are specifically adapted to dynamic 5G environments. In this paper, we present an integrated framework that combines Snort intrusion detection with a risk and impact assessment model to evaluate threats in real time. By correlating intrusion alerts with contextual risk metrics tied to 5G core functions, the framework prioritizes incidents and supports timely mitigation. Evaluation in a controlled testbed shows the framework&rsquo;s stability, scalability, and effective risk classification, thereby strengthening cybersecurity for next-generation networks.</p>
	]]></content:encoded>

	<dc:title>Adaptive Real-Time Risk and Impact Assessment for 5G Network Security</dc:title>
			<dc:creator>Dionysia Varvarigou</dc:creator>
			<dc:creator>Kostas Lampropoulos</dc:creator>
			<dc:creator>Spyros Denazis</dc:creator>
			<dc:creator>Paris Kitsos</dc:creator>
		<dc:identifier>doi: 10.3390/network6010003</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2025-12-24</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2025-12-24</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>3</prism:startingPage>
		<prism:doi>10.3390/network6010003</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/6/1/3</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/6/1/2">

	<title>Network, Vol. 6, Pages 2: Multi-Level Multi-Technology Underwater Networks: Challenges and Opportunities for Marine Monitoring</title>
	<link>https://www.mdpi.com/2673-8732/6/1/2</link>
	<description>Underwater networks are crucial for monitoring the marine ecosystem, enabling data collection to support the preservation and protection of natural resources. Among the various technologies available, acoustic and optical communications stand out for their superior performance in underwater environments. Acoustic technologies are suitable for long-range communications, typically operating over hundreds of meters up to several kilometers, albeit with low data rates ranging from a few hundred bps to few tens of kbps. In contrast, optical technologies excel in providing high data rates, often between 1 and 10 Mbps, but only over short distances (e.g., 50 m) in controlled conditions. To leverage the strengths of these technologies, recent research has proposed multi-modal underwater systems; however, these solutions generally rely on single-level or at most dual-level architectures, limiting the benefits of a structured hierarchical approach. In this review paper, after discussing related work on multi-technology acoustic and optical networks, we highlight relevant design guidelines for multi-technology, multi-level underwater architectures, explicitly considering three layers: a deep acoustic layer, an intermediate optical layer, and an upper RF-enabled surface layer. For illustration, we also discuss a PoC of such a hierarchical architecture under development at the University of Catania, Italy, in the Area Marina Isole dei Ciclopi natural reserve. The PoC includes optical nodes capable of transmitting up to 10 Mbps over short ranges and acoustic nodes (both software defined and not) supporting rates of tens of kbps over hundreds of meters and being adaptive to network conditions, interconnected through hybrid multi-technology nodes deployed across the three network levels. By assigning specific technologies to appropriate layers, the architecture enhances scalability, robustness, and adaptability to dynamic underwater conditions. 
This design strategy not only improves data transmission efficiency but also ensures seamless operation across diverse marine scenarios, making it an effective solution for a wide range of underwater monitoring applications.</description>
	<pubDate>2025-12-24</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 6, Pages 2: Multi-Level Multi-Technology Underwater Networks: Challenges and Opportunities for Marine Monitoring</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/6/1/2">doi: 10.3390/network6010002</a></p>
	<p>Authors:
		A. Rehman
		L. Galluccio
		</p>
	<p>Underwater networks are crucial for monitoring the marine ecosystem, enabling data collection to support the preservation and protection of natural resources. Among the various technologies available, acoustic and optical communications stand out for their superior performance in underwater environments. Acoustic technologies are suitable for long-range communications, typically operating over hundreds of meters up to several kilometers, albeit with low data rates ranging from a few hundred bps to few tens of kbps. In contrast, optical technologies excel in providing high data rates, often between 1 and 10 Mbps, but only over short distances (e.g., 50 m) in controlled conditions. To leverage the strengths of these technologies, recent research has proposed multi-modal underwater systems; however, these solutions generally rely on single-level or at most dual-level architectures, limiting the benefits of a structured hierarchical approach. In this review paper, after discussing related work on multi-technology acoustic and optical networks, we highlight relevant design guidelines for multi-technology, multi-level underwater architectures, explicitly considering three layers: a deep acoustic layer, an intermediate optical layer, and an upper RF-enabled surface layer. For illustration, we also discuss a PoC of such a hierarchical architecture under development at the University of Catania, Italy, in the Area Marina Isole dei Ciclopi natural reserve. The PoC includes optical nodes capable of transmitting up to 10 Mbps over short ranges and acoustic nodes (both software defined and not) supporting rates of tens of kbps over hundreds of meters and being adaptive to network conditions, interconnected through hybrid multi-technology nodes deployed across the three network levels. By assigning specific technologies to appropriate layers, the architecture enhances scalability, robustness, and adaptability to dynamic underwater conditions. 
This design strategy not only improves data transmission efficiency but also ensures seamless operation across diverse marine scenarios, making it an effective solution for a wide range of underwater monitoring applications.</p>
	]]></content:encoded>

	<dc:title>Multi-Level Multi-Technology Underwater Networks: Challenges and Opportunities for Marine Monitoring</dc:title>
			<dc:creator>A. Rehman</dc:creator>
			<dc:creator>L. Galluccio</dc:creator>
		<dc:identifier>doi: 10.3390/network6010002</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2025-12-24</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2025-12-24</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>2</prism:startingPage>
		<prism:doi>10.3390/network6010002</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/6/1/2</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/6/1/1">

	<title>Network, Vol. 6, Pages 1: Authentication and Authorisation Method for a Cloud Side Static IoT Application</title>
	<link>https://www.mdpi.com/2673-8732/6/1/1</link>
	<description>IoT applications are increasingly common, yet they often rely on expensive, externally managed authentication services. This paper introduces a novel, self-contained authentication method for IoT applications which leverages fog computing principles to lower operational costs and infrastructure complexity. The proposed system, fogauth, combines device serial numbers with cryptographically generated UUIDs to establish secure identification without third-party services. A static cloud-side architecture coupled with a lightweight, locally hosted API enables secure authentication through object-storage operations. Performance testing demonstrates comparable security performance to commercial cloud-based authentication while reducing long-term operational costs and maintaining latency at below 2 minutes in production conditions. fogauth provides a scalable and economically viable alternative for companies seeking to reduce cloud dependency and minimize long-term costs associated with IoT application security. To support reproducibility, a complete open-source implementation and validation dataset are provided, allowing independent replication and extension of the system.</description>
	<pubDate>2025-12-19</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 6, Pages 1: Authentication and Authorisation Method for a Cloud Side Static IoT Application</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/6/1/1">doi: 10.3390/network6010001</a></p>
	<p>Authors:
		Jose Alvarez
		Matheus Santos
		David May
		Gerard Dooly
		</p>
	<p>IoT applications are increasingly common, yet they often rely on expensive, externally managed authentication services. This paper introduces a novel, self-contained authentication method for IoT applications which leverages fog computing principles to lower operational costs and infrastructure complexity. The proposed system, fogauth, combines device serial numbers with cryptographically generated UUIDs to establish secure identification without third-party services. A static cloud-side architecture coupled with a lightweight, locally hosted API enables secure authentication through object-storage operations. Performance testing demonstrates comparable security performance to commercial cloud-based authentication while reducing long-term operational costs and maintaining latency at below 2 minutes in production conditions. fogauth provides a scalable and economically viable alternative for companies seeking to reduce cloud dependency and minimize long-term costs associated with IoT application security. To support reproducibility, a complete open-source implementation and validation dataset are provided, allowing independent replication and extension of the system.</p>
	]]></content:encoded>

	<dc:title>Authentication and Authorisation Method for a Cloud Side Static IoT Application</dc:title>
			<dc:creator>Jose Alvarez</dc:creator>
			<dc:creator>Matheus Santos</dc:creator>
			<dc:creator>David May</dc:creator>
			<dc:creator>Gerard Dooly</dc:creator>
		<dc:identifier>doi: 10.3390/network6010001</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2025-12-19</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2025-12-19</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1</prism:startingPage>
		<prism:doi>10.3390/network6010001</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/6/1/1</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/5/4/54">

	<title>Network, Vol. 5, Pages 54: Dynamic Predictive Feedback Mechanism for Intelligent Bandwidth Control in Future SDN Networks</title>
	<link>https://www.mdpi.com/2673-8732/5/4/54</link>
	<description>Future programmable networks such as 5G/6G and large-scale IoT deployments demand dynamic and intelligent bandwidth control mechanisms to ensure stable Quality of Service (QoS) under highly variable traffic conditions. Conventional queue-based schedulers and emerging machine learning techniques still struggle with slow reaction to congestion, unstable fairness, and high computational costs. To address these challenges, this paper proposes a Dynamic Predictive Feedback (DPF) mechanism that integrates clustered-LSTM based short-term traffic prediction with meta-control driven adaptive bandwidth adjustment in a Software-Defined Networking (SDN) architecture. The prediction module proactively estimates future queue depth and arrival rates using in-band network telemetry (INT), while the feedback controller continuously adjusts scheduling weights based on congestion risk and fairness metrics. Extensive emulation experiments conducted under Static, Bursty IoT, Mixed, and Stress workloads show that DPF consistently outperforms state-of-the-art solutions, including A-WFQ and DRL-based schedulers, achieving up to 32% higher throughput, up to 40% lower latency, and 10&amp;ndash;12% lower CPU and memory usage. Moreover, DPF demonstrates strong fairness (Jain&amp;rsquo;s Index &amp;ge; 0.96), high adaptability, and minimal performance variance across scenarios. These results confirm that DPF is a scalable and resource-efficient solution capable of supporting the demands of future programmable, 5G/6G-ready network infrastructures.</description>
	<pubDate>2025-12-12</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 5, Pages 54: Dynamic Predictive Feedback Mechanism for Intelligent Bandwidth Control in Future SDN Networks</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/5/4/54">doi: 10.3390/network5040054</a></p>
	<p>Authors:
		Kritsanapong Somsuk
		Suchart Khummanee
		Panida Songram
		</p>
	<p>Future programmable networks such as 5G/6G and large-scale IoT deployments demand dynamic and intelligent bandwidth control mechanisms to ensure stable Quality of Service (QoS) under highly variable traffic conditions. Conventional queue-based schedulers and emerging machine learning techniques still struggle with slow reaction to congestion, unstable fairness, and high computational costs. To address these challenges, this paper proposes a Dynamic Predictive Feedback (DPF) mechanism that integrates clustered-LSTM based short-term traffic prediction with meta-control driven adaptive bandwidth adjustment in a Software-Defined Networking (SDN) architecture. The prediction module proactively estimates future queue depth and arrival rates using in-band network telemetry (INT), while the feedback controller continuously adjusts scheduling weights based on congestion risk and fairness metrics. Extensive emulation experiments conducted under Static, Bursty IoT, Mixed, and Stress workloads show that DPF consistently outperforms state-of-the-art solutions, including A-WFQ and DRL-based schedulers, achieving up to 32% higher throughput, up to 40% lower latency, and 10&ndash;12% lower CPU and memory usage. Moreover, DPF demonstrates strong fairness (Jain&rsquo;s Index &ge; 0.96), high adaptability, and minimal performance variance across scenarios. These results confirm that DPF is a scalable and resource-efficient solution capable of supporting the demands of future programmable, 5G/6G-ready network infrastructures.</p>
	]]></content:encoded>

	<dc:title>Dynamic Predictive Feedback Mechanism for Intelligent Bandwidth Control in Future SDN Networks</dc:title>
			<dc:creator>Kritsanapong Somsuk</dc:creator>
			<dc:creator>Suchart Khummanee</dc:creator>
			<dc:creator>Panida Songram</dc:creator>
		<dc:identifier>doi: 10.3390/network5040054</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2025-12-12</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2025-12-12</prism:publicationDate>
	<prism:volume>5</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>54</prism:startingPage>
		<prism:doi>10.3390/network5040054</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/5/4/54</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/5/4/53">

	<title>Network, Vol. 5, Pages 53: Design and Performance Evaluation of HEPS Data Center Network</title>
	<link>https://www.mdpi.com/2673-8732/5/4/53</link>
	<description>Among the 15 beamlines in the first phase of the High-Energy Photon Source (HEPS) in China, the maximum peak data generation volume can reach 1 PB per day, with the maximum peak data generation rate reaching 3.2 Tb/s. This poses significant challenges to the underlying network system. To meet the storage, computing, and analysis needs of HEPS scientific data, this paper designed a high-performance and scalable network architecture based on RoCE (RDMA over Converged Ethernet). Test results demonstrate that the RoCE-based HEPS data center network system achieves high bandwidth and ultra-low latency, stably maintains reliable transmission performance during the interaction of scientific data storage, computing, and analysis, and exhibits excellent scalability to adapt to the future expansion of HEPS beamlines.</description>
	<pubDate>2025-12-05</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 5, Pages 53: Design and Performance Evaluation of HEPS Data Center Network</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/5/4/53">doi: 10.3390/network5040053</a></p>
	<p>Authors:
		Shan Zeng
		Tao Cui
		Yanming Wang
		Mengyao Qi
		Fazhi Qi
		</p>
	<p>Among the 15 beamlines in the first phase of the High-Energy Photon Source (HEPS) in China, the maximum peak data generation volume can reach 1 PB per day, with the maximum peak data generation rate reaching 3.2 Tb/s. This poses significant challenges to the underlying network system. To meet the storage, computing, and analysis needs of HEPS scientific data, this paper designed a high-performance and scalable network architecture based on RoCE (RDMA over Converged Ethernet). Test results demonstrate that the RoCE-based HEPS data center network system achieves high bandwidth and ultra-low latency, stably maintains reliable transmission performance during the interaction of scientific data storage, computing, and analysis, and exhibits excellent scalability to adapt to the future expansion of HEPS beamlines.</p>
	]]></content:encoded>

	<dc:title>Design and Performance Evaluation of HEPS Data Center Network</dc:title>
			<dc:creator>Shan Zeng</dc:creator>
			<dc:creator>Tao Cui</dc:creator>
			<dc:creator>Yanming Wang</dc:creator>
			<dc:creator>Mengyao Qi</dc:creator>
			<dc:creator>Fazhi Qi</dc:creator>
		<dc:identifier>doi: 10.3390/network5040053</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2025-12-05</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2025-12-05</prism:publicationDate>
	<prism:volume>5</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Communication</prism:section>
	<prism:startingPage>53</prism:startingPage>
		<prism:doi>10.3390/network5040053</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/5/4/53</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/5/4/52">

	<title>Network, Vol. 5, Pages 52: Dynamic Multi-Objective Controller Placement in SD-WAN: A GMM-MARL Hybrid Framework</title>
	<link>https://www.mdpi.com/2673-8732/5/4/52</link>
	<description>Modern Software-Defined Wide Area Networks (SD-WANs) require adaptive controller placement addressing multi-objective optimization where latency minimization, load balancing, and fault tolerance must be simultaneously optimized. Traditional static approaches fail under dynamic network conditions with evolving traffic patterns and topology changes. This paper presents a novel hybrid framework integrating Gaussian Mixture Model (GMM) clustering with Multi-Agent Reinforcement Learning (MARL) for dynamic controller placement. The approach leverages probabilistic clustering for intelligent MARL initialization, reducing exploration requirements. Centralized Training with Decentralized Execution (CTDE) enables distributed optimization through cooperative agents. Experimental evaluation using real-world topologies demonstrates a noticeable reduction in the latency, improvement in network balance, and significant computational efficiency versus existing methods. Dynamic adaptation experiments confirm superior scalability during network changes. The hybrid architecture achieves linear scalability through problem decomposition while maintaining real-time responsiveness, establishing practical viability.</description>
	<pubDate>2025-11-11</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 5, Pages 52: Dynamic Multi-Objective Controller Placement in SD-WAN: A GMM-MARL Hybrid Framework</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/5/4/52">doi: 10.3390/network5040052</a></p>
	<p>Authors:
		Abdulrahman M. Abdulghani
		Azizol Abdullah
		A. R. Rahiman
		Nor Asilah Wati Abdul Hamid
		Bilal Omar Akram
		</p>
	<p>Modern Software-Defined Wide Area Networks (SD-WANs) require adaptive controller placement addressing multi-objective optimization where latency minimization, load balancing, and fault tolerance must be simultaneously optimized. Traditional static approaches fail under dynamic network conditions with evolving traffic patterns and topology changes. This paper presents a novel hybrid framework integrating Gaussian Mixture Model (GMM) clustering with Multi-Agent Reinforcement Learning (MARL) for dynamic controller placement. The approach leverages probabilistic clustering for intelligent MARL initialization, reducing exploration requirements. Centralized Training with Decentralized Execution (CTDE) enables distributed optimization through cooperative agents. Experimental evaluation using real-world topologies demonstrates a noticeable reduction in the latency, improvement in network balance, and significant computational efficiency versus existing methods. Dynamic adaptation experiments confirm superior scalability during network changes. The hybrid architecture achieves linear scalability through problem decomposition while maintaining real-time responsiveness, establishing practical viability.</p>
	]]></content:encoded>

	<dc:title>Dynamic Multi-Objective Controller Placement in SD-WAN: A GMM-MARL Hybrid Framework</dc:title>
			<dc:creator>Abdulrahman M. Abdulghani</dc:creator>
			<dc:creator>Azizol Abdullah</dc:creator>
			<dc:creator>A. R. Rahiman</dc:creator>
			<dc:creator>Nor Asilah Wati Abdul Hamid</dc:creator>
			<dc:creator>Bilal Omar Akram</dc:creator>
		<dc:identifier>doi: 10.3390/network5040052</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2025-11-11</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2025-11-11</prism:publicationDate>
	<prism:volume>5</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>52</prism:startingPage>
		<prism:doi>10.3390/network5040052</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/5/4/52</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/5/4/51">

	<title>Network, Vol. 5, Pages 51: Alohomora: Workflow-Aware Authentication and Authorization in Heterogeneous Systems</title>
	<link>https://www.mdpi.com/2673-8732/5/4/51</link>
	<description>Current federated identity management systems lack contextual awareness of workflows across independent systems, creating security gaps and workflow integrity challenges. This article details the design and implementation of Alohomora, a distributed workflow-aware authentication system that maintains cross-system workflow context through path-bound tokens. Alohomora complements existing identity providers such as OAuth and SAML by adding workflow orchestration capabilities while leveraging standard authentication protocols for initial user verification. The system introduces workflow graphs as a formal model for representing dependencies between functions across heterogeneous systems and employs a distributed caching architecture with collaboration groups for scalable session management. In a typical deployment scenario, an employee onboarding workflow across human resources services, account provisioning, and benefits systems forms a trust group where Alohomora enforces ordered step execution, validates prerequisite completion at each transition, and generates cryptographic completion assertions upon workflow finalization. Extensive performance evaluation under concurrent user requests demonstrates polynomial performance characteristics with superior scalability compared to centralized OAuth introspection. The results show that Alohomora maintains high throughput under heavy load while providing strong, secure access control through workflow path binding and distributed trust orchestration. The prototype implementation is available as open source.</description>
	<pubDate>2025-11-05</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 5, Pages 51: Alohomora: Workflow-Aware Authentication and Authorization in Heterogeneous Systems</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/5/4/51">doi: 10.3390/network5040051</a></p>
	<p>Authors:
		Hussain M. J. Almohri
		</p>
	<p>Current federated identity management systems lack contextual awareness of workflows across independent systems, creating security gaps and workflow integrity challenges. This article details the design and implementation of Alohomora, a distributed workflow-aware authentication system that maintains cross-system workflow context through path-bound tokens. Alohomora complements existing identity providers such as OAuth and SAML by adding workflow orchestration capabilities while leveraging standard authentication protocols for initial user verification. The system introduces workflow graphs as a formal model for representing dependencies between functions across heterogeneous systems and employs a distributed caching architecture with collaboration groups for scalable session management. In a typical deployment scenario, an employee onboarding workflow across human resources services, account provisioning, and benefits systems forms a trust group where Alohomora enforces ordered step execution, validates prerequisite completion at each transition, and generates cryptographic completion assertions upon workflow finalization. Extensive performance evaluation under concurrent user requests demonstrates polynomial performance characteristics with superior scalability compared to centralized OAuth introspection. The results show that Alohomora maintains high throughput under heavy load while providing strong, secure access control through workflow path binding and distributed trust orchestration. The prototype implementation is available as open source.</p>
	]]></content:encoded>

	<dc:title>Alohomora: Workflow-Aware Authentication and Authorization in Heterogeneous Systems</dc:title>
			<dc:creator>Hussain M. J. Almohri</dc:creator>
		<dc:identifier>doi: 10.3390/network5040051</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2025-11-05</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2025-11-05</prism:publicationDate>
	<prism:volume>5</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>51</prism:startingPage>
		<prism:doi>10.3390/network5040051</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/5/4/51</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/5/4/50">

	<title>Network, Vol. 5, Pages 50: A Two-Phase Genetic Algorithm Approach for Sleep Scheduling, Routing, and Clustering in Heterogeneous Wireless Sensor Networks</title>
	<link>https://www.mdpi.com/2673-8732/5/4/50</link>
	<description>Heterogeneous wireless sensor networks (HWSNs), comprising super nodes and normal sensors, offer a promising solution for monitoring diverse environments. However, their deployment is constrained by the limited battery life of sensors. To address this issue, clustering and routing techniques have been employed to conserve energy. Nevertheless, existing approaches often struggle with suboptimal energy distribution and weak network coverage. Additionally, they mostly failed to exploit other energy saving techniques such as sleep scheduling. This paper proposes a novel genetic algorithm (GA)-based approach to optimize sleep scheduling, routing, and clustering in HWSNs. The method comprises two phases, namely join sleep scheduling and tree construction, and clustering of normal nodes. Inspired by the concept of unequal clustering, the HWSN is split into some rings in the first phase, and the number of awake super nodes in each ring keeps the same. This approach addresses the challenges of balancing energy consumption and network lifetime. Furthermore, including network coverage and energy-related criteria in the proposed GA yields long-lasting network operation. Through rigorous simulations, we demonstrate that, on average, our algorithm reduces energy consumption and improves network coverage by 23% and 21.9%, respectively, and extends network lifetime by 501 rounds, compared to the state-of-the-art methods.</description>
	<pubDate>2025-11-04</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 5, Pages 50: A Two-Phase Genetic Algorithm Approach for Sleep Scheduling, Routing, and Clustering in Heterogeneous Wireless Sensor Networks</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/5/4/50">doi: 10.3390/network5040050</a></p>
	<p>Authors:
		Sarah Abdulelah Abbas
		Leili Farzinvash
		Mina Zolfy
		</p>
	<p>Heterogeneous wireless sensor networks (HWSNs), comprising super nodes and normal sensors, offer a promising solution for monitoring diverse environments. However, their deployment is constrained by the limited battery life of sensors. To address this issue, clustering and routing techniques have been employed to conserve energy. Nevertheless, existing approaches often struggle with suboptimal energy distribution and weak network coverage. Additionally, they mostly failed to exploit other energy saving techniques such as sleep scheduling. This paper proposes a novel genetic algorithm (GA)-based approach to optimize sleep scheduling, routing, and clustering in HWSNs. The method comprises two phases, namely join sleep scheduling and tree construction, and clustering of normal nodes. Inspired by the concept of unequal clustering, the HWSN is split into some rings in the first phase, and the number of awake super nodes in each ring keeps the same. This approach addresses the challenges of balancing energy consumption and network lifetime. Furthermore, including network coverage and energy-related criteria in the proposed GA yields long-lasting network operation. Through rigorous simulations, we demonstrate that, on average, our algorithm reduces energy consumption and improves network coverage by 23% and 21.9%, respectively, and extends network lifetime by 501 rounds, compared to the state-of-the-art methods.</p>
	]]></content:encoded>

	<dc:title>A Two-Phase Genetic Algorithm Approach for Sleep Scheduling, Routing, and Clustering in Heterogeneous Wireless Sensor Networks</dc:title>
			<dc:creator>Sarah Abdulelah Abbas</dc:creator>
			<dc:creator>Leili Farzinvash</dc:creator>
			<dc:creator>Mina Zolfy</dc:creator>
		<dc:identifier>doi: 10.3390/network5040050</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2025-11-04</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2025-11-04</prism:publicationDate>
	<prism:volume>5</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>50</prism:startingPage>
		<prism:doi>10.3390/network5040050</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/5/4/50</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/5/4/49">

	<title>Network, Vol. 5, Pages 49: Real-Time Handover in LEO Satellite Networks via Markov Chain-Guided Simulated Annealing</title>
	<link>https://www.mdpi.com/2673-8732/5/4/49</link>
	<description>This paper presents a real-time handover and link assignment framework for low-Earth-orbit (LEO) satellite networks operating in dense urban canyons. The proposed Markov chain-guided simulated annealing (MCSA) algorithm optimizes user-to-satellite assignments under dynamic channel and capacity constraints. By incorporating Markov chains to guide state transitions, MCSA achieves faster convergence and more effective exploration than conventional simulated annealing. Simulations conducted in Ku-band urban canyon environments show that the framework achieves an average user satisfaction of about 97%, providing an approximately 10% improvement over genetic algorithm (GA) results. It also delivers 10–15% higher resource utilization, lower blocking rates comparable to integer linear programming (ILP), and superior runtime scalability with linear complexity O(k·|U|·|S|). These results confirm that MCSA provides a scalable and robust real-time mobility management solution for next-generation LEO satellite systems.</description>
	<pubDate>2025-11-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 5, Pages 49: Real-Time Handover in LEO Satellite Networks via Markov Chain-Guided Simulated Annealing</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/5/4/49">doi: 10.3390/network5040049</a></p>
	<p>Authors:
		Mohammad A. Massad
		Abdallah Y. Alma’aitah
		Hossam S. Hassanein
		</p>
	<p>This paper presents a real-time handover and link assignment framework for low-Earth-orbit (LEO) satellite networks operating in dense urban canyons. The proposed Markov chain-guided simulated annealing (MCSA) algorithm optimizes user-to-satellite assignments under dynamic channel and capacity constraints. By incorporating Markov chains to guide state transitions, MCSA achieves faster convergence and more effective exploration than conventional simulated annealing. Simulations conducted in Ku-band urban canyon environments show that the framework achieves an average user satisfaction of about 97%, providing an approximately 10% improvement over genetic algorithm (GA) results. It also delivers 10–15% higher resource utilization, lower blocking rates comparable to integer linear programming (ILP), and superior runtime scalability with linear complexity O(k·|U|·|S|). These results confirm that MCSA provides a scalable and robust real-time mobility management solution for next-generation LEO satellite systems.</p>
	]]></content:encoded>

	<dc:title>Real-Time Handover in LEO Satellite Networks via Markov Chain-Guided Simulated Annealing</dc:title>
			<dc:creator>Mohammad A. Massad</dc:creator>
			<dc:creator>Abdallah Y. Alma’aitah</dc:creator>
			<dc:creator>Hossam S. Hassanein</dc:creator>
		<dc:identifier>doi: 10.3390/network5040049</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2025-11-03</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2025-11-03</prism:publicationDate>
	<prism:volume>5</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>49</prism:startingPage>
		<prism:doi>10.3390/network5040049</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/5/4/49</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/5/4/48">

	<title>Network, Vol. 5, Pages 48: Intelligent Reflecting-Surface-Aided Orbital Angular Momentum Divergence-Alleviated Wireless Communication Mechanism</title>
	<link>https://www.mdpi.com/2673-8732/5/4/48</link>
	<description>Orbital angular momentum (OAM) beams exhibit divergence during transmission, which constrains the capacity of communication system channels. To address these challenges, intelligent reflecting surfaces (IRSs), which can independently manipulate incident electromagnetic waves by adjustment of their amplitude and phase, are employed to construct IRS-assisted OAM communication systems. By introducing additional information pathways, IRSs enhance diversity gain. We studied the simulations of two placement methods for an IRS: arbitrary placement and standard placement. In the case of arbitrary placement, the beam reflected by the IRS can be decomposed into different OAM modes, producing various reception powers corresponding to each OAM mode component. This improves the signal-to-noise ratio (SNR) at the receiver, thereby enhancing channel capacity. In particular, when the IRS is symmetrically and uniformly positioned at the center of the main transmission axis, its elements can be approximated as a uniform circular array (UCA). This configuration not only achieves optimal reception along the direction of the maximum gain of the orbital angular momentum beam but also reduces the antenna radius required at the receiver to half or even less.</description>
	<pubDate>2025-10-30</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 5, Pages 48: Intelligent Reflecting-Surface-Aided Orbital Angular Momentum Divergence-Alleviated Wireless Communication Mechanism</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/5/4/48">doi: 10.3390/network5040048</a></p>
	<p>Authors:
		Qiuli Wu
		Yufei Zhao
		Shicheng Li
		Yiqi Li
		Deyu Lin
		Xuefeng Jiang
		</p>
	<p>Orbital angular momentum (OAM) beams exhibit divergence during transmission, which constrains the capacity of communication system channels. To address these challenges, intelligent reflecting surfaces (IRSs), which can independently manipulate incident electromagnetic waves by adjustment of their amplitude and phase, are employed to construct IRS-assisted OAM communication systems. By introducing additional information pathways, IRSs enhance diversity gain. We studied the simulations of two placement methods for an IRS: arbitrary placement and standard placement. In the case of arbitrary placement, the beam reflected by the IRS can be decomposed into different OAM modes, producing various reception powers corresponding to each OAM mode component. This improves the signal-to-noise ratio (SNR) at the receiver, thereby enhancing channel capacity. In particular, when the IRS is symmetrically and uniformly positioned at the center of the main transmission axis, its elements can be approximated as a uniform circular array (UCA). This configuration not only achieves optimal reception along the direction of the maximum gain of the orbital angular momentum beam but also reduces the antenna radius required at the receiver to half or even less.</p>
	]]></content:encoded>

	<dc:title>Intelligent Reflecting-Surface-Aided Orbital Angular Momentum Divergence-Alleviated Wireless Communication Mechanism</dc:title>
			<dc:creator>Qiuli Wu</dc:creator>
			<dc:creator>Yufei Zhao</dc:creator>
			<dc:creator>Shicheng Li</dc:creator>
			<dc:creator>Yiqi Li</dc:creator>
			<dc:creator>Deyu Lin</dc:creator>
			<dc:creator>Xuefeng Jiang</dc:creator>
		<dc:identifier>doi: 10.3390/network5040048</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2025-10-30</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2025-10-30</prism:publicationDate>
	<prism:volume>5</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>48</prism:startingPage>
		<prism:doi>10.3390/network5040048</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/5/4/48</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/5/4/47">

	<title>Network, Vol. 5, Pages 47: Adaptive Context-Aware VANET Routing Protocol for Intelligent Transportation Systems</title>
	<link>https://www.mdpi.com/2673-8732/5/4/47</link>
	<description>Vehicular Ad-Hoc Networks (VANETs) play a critical role in Intelligent Transportation Systems (ITS), enabling communication between vehicles and roadside infrastructure. This paper proposes an Adaptive Context-Aware VANET Routing (ACAVR) protocol designed to handle the challenges of high mobility, dynamic topology, and variable vehicle density in urban environments. The proposed protocol integrates context-aware routing, dynamic clustering, and geographic forwarding to enhance performance under diverse traffic conditions. Simulation results demonstrate that ACAVR achieves higher throughput, improved packet delivery ratio, lower end-to-end delay, and reduced routing overhead compared to existing routing schemes. The proposed ACAVR outperforms benchmark protocols such as DyTE, RGoV, and CAEL, improving PDR by 12–18%, reducing delay by 10–15%, and increasing throughput by 15–22%.</description>
	<pubDate>2025-10-27</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 5, Pages 47: Adaptive Context-Aware VANET Routing Protocol for Intelligent Transportation Systems</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/5/4/47">doi: 10.3390/network5040047</a></p>
	<p>Authors:
		Abdul Karim Kazi
		Muhammad Umer Farooq
		Raheela Asif
		Saman Hina
		</p>
	<p>Vehicular Ad-Hoc Networks (VANETs) play a critical role in Intelligent Transportation Systems (ITS), enabling communication between vehicles and roadside infrastructure. This paper proposes an Adaptive Context-Aware VANET Routing (ACAVR) protocol designed to handle the challenges of high mobility, dynamic topology, and variable vehicle density in urban environments. The proposed protocol integrates context-aware routing, dynamic clustering, and geographic forwarding to enhance performance under diverse traffic conditions. Simulation results demonstrate that ACAVR achieves higher throughput, improved packet delivery ratio, lower end-to-end delay, and reduced routing overhead compared to existing routing schemes. The proposed ACAVR outperforms benchmark protocols such as DyTE, RGoV, and CAEL, improving PDR by 12–18%, reducing delay by 10–15%, and increasing throughput by 15–22%.</p>
	]]></content:encoded>

	<dc:title>Adaptive Context-Aware VANET Routing Protocol for Intelligent Transportation Systems</dc:title>
			<dc:creator>Abdul Karim Kazi</dc:creator>
			<dc:creator>Muhammad Umer Farooq</dc:creator>
			<dc:creator>Raheela Asif</dc:creator>
			<dc:creator>Saman Hina</dc:creator>
		<dc:identifier>doi: 10.3390/network5040047</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2025-10-27</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2025-10-27</prism:publicationDate>
	<prism:volume>5</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>47</prism:startingPage>
		<prism:doi>10.3390/network5040047</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/5/4/47</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/5/4/46">

	<title>Network, Vol. 5, Pages 46: A Game-Theoretic Analysis of Cooperation Among Autonomous Systems in Network Federations</title>
	<link>https://www.mdpi.com/2673-8732/5/4/46</link>
	<description>This paper investigates cooperative behavior among Autonomous Systems (ASs) within a federated network environment designed to support collaborative shared-technology deployment. It makes use of the concept of an AS federation, where independently managed systems adhere to a shared standard while maintaining implementation flexibility. Using a systematic game-theoretic framework, the study models various coalition structures—including full cooperation, partial coalitions, and defection—across several canonical cooperative games. The analysis evaluates the effects of different cooperation strategies and resource-sharing schemes on payoff distribution and coalition stability. Simulation results over short- and medium-to-long-term horizons demonstrate that cooperative coalition formation, especially with fair payoff allocation, consistently outperforms solitary strategies. The study also identifies key thresholds affecting partial coalition viability and explores the impact of defection on overall federation performance. By linking theoretical game models with practical deployment challenges in heterogeneous networked systems, this work offers valuable insights for designing mechanisms that promote effective cooperation in complex, resource-constrained environments.</description>
	<pubDate>2025-10-15</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 5, Pages 46: A Game-Theoretic Analysis of Cooperation Among Autonomous Systems in Network Federations</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/5/4/46">doi: 10.3390/network5040046</a></p>
	<p>Authors:
		Rudolf Kovacs
		Bogdan Iancu
		Vasile Dadarlat
		Adrian Peculea
		</p>
	<p>This paper investigates cooperative behavior among Autonomous Systems (ASs) within a federated network environment designed to support collaborative shared-technology deployment. It makes use of the concept of an AS federation, where independently managed systems adhere to a shared standard while maintaining implementation flexibility. Using a systematic game-theoretic framework, the study models various coalition structures—including full cooperation, partial coalitions, and defection—across several canonical cooperative games. The analysis evaluates the effects of different cooperation strategies and resource-sharing schemes on payoff distribution and coalition stability. Simulation results over short- and medium-to-long-term horizons demonstrate that cooperative coalition formation, especially with fair payoff allocation, consistently outperforms solitary strategies. The study also identifies key thresholds affecting partial coalition viability and explores the impact of defection on overall federation performance. By linking theoretical game models with practical deployment challenges in heterogeneous networked systems, this work offers valuable insights for designing mechanisms that promote effective cooperation in complex, resource-constrained environments.</p>
	]]></content:encoded>

	<dc:title>A Game-Theoretic Analysis of Cooperation Among Autonomous Systems in Network Federations</dc:title>
			<dc:creator>Rudolf Kovacs</dc:creator>
			<dc:creator>Bogdan Iancu</dc:creator>
			<dc:creator>Vasile Dadarlat</dc:creator>
			<dc:creator>Adrian Peculea</dc:creator>
		<dc:identifier>doi: 10.3390/network5040046</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2025-10-15</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2025-10-15</prism:publicationDate>
	<prism:volume>5</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>46</prism:startingPage>
		<prism:doi>10.3390/network5040046</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/5/4/46</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/5/4/45">

	<title>Network, Vol. 5, Pages 45: Contrastive Geometric Cross-Entropy: A Unified Explicit-Margin Loss for Classification in Network Automation</title>
	<link>https://www.mdpi.com/2673-8732/5/4/45</link>
	<description>As network automation and self-organizing networks (SONs) rapidly evolve, edge devices increasingly demand lightweight, real-time, and high-precision classification algorithms to support critical tasks such as traffic identification, intrusion detection, and fault diagnosis. In recent years, cross-entropy (CE) loss has been widely adopted in deep learning classification tasks due to its computational efficiency and ease of optimization. However, traditional CE methods primarily focus on class separability without explicitly constraining intra-class compactness and inter-class boundaries in the feature space, thereby limiting their generalization performance on complex classification tasks. To address this issue, we propose a novel classification loss framework—Contrastive Geometric Cross-Entropy (CGCE). Without incurring additional computational or memory overhead, CGCE explicitly introduces learnable class representation vectors and constructs the loss function based on the dot-product similarity between features and these class representations, thus explicitly reinforcing geometric constraints in the feature space. This mechanism effectively enhances intra-class compactness and inter-class separability. Theoretical analysis further demonstrates that minimizing the CGCE loss naturally induces clear and measurable geometric class boundaries in the feature space, a desirable property absent from traditional CE methods. Furthermore, CGCE can seamlessly incorporate the prior knowledge of pretrained models, converging rapidly within only a few training epochs (for example, on the CIFAR-10 dataset using the ViT model, a single training epoch is sufficient to reach 99% of the final training accuracy.) 
Experimental results on both text and image classification tasks show that CGCE achieves accuracy improvements of up to 2% over traditional CE methods, exhibiting stronger generalization capabilities under challenging scenarios such as class imbalance, few-shot learning, and noisy labels. These findings indicate that CGCE has significant potential as a superior alternative to traditional CE methods.</description>
	<pubDate>2025-10-09</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 5, Pages 45: Contrastive Geometric Cross-Entropy: A Unified Explicit-Margin Loss for Classification in Network Automation</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/5/4/45">doi: 10.3390/network5040045</a></p>
	<p>Authors:
		Yifan Wu
		Lei Xiao
		Xia Du
		</p>
	<p>As network automation and self-organizing networks (SONs) rapidly evolve, edge devices increasingly demand lightweight, real-time, and high-precision classification algorithms to support critical tasks such as traffic identification, intrusion detection, and fault diagnosis. In recent years, cross-entropy (CE) loss has been widely adopted in deep learning classification tasks due to its computational efficiency and ease of optimization. However, traditional CE methods primarily focus on class separability without explicitly constraining intra-class compactness and inter-class boundaries in the feature space, thereby limiting their generalization performance on complex classification tasks. To address this issue, we propose a novel classification loss framework—Contrastive Geometric Cross-Entropy (CGCE). Without incurring additional computational or memory overhead, CGCE explicitly introduces learnable class representation vectors and constructs the loss function based on the dot-product similarity between features and these class representations, thus explicitly reinforcing geometric constraints in the feature space. This mechanism effectively enhances intra-class compactness and inter-class separability. Theoretical analysis further demonstrates that minimizing the CGCE loss naturally induces clear and measurable geometric class boundaries in the feature space, a desirable property absent from traditional CE methods. Furthermore, CGCE can seamlessly incorporate the prior knowledge of pretrained models, converging rapidly within only a few training epochs (for example, on the CIFAR-10 dataset using the ViT model, a single training epoch is sufficient to reach 99% of the final training accuracy.) 
Experimental results on both text and image classification tasks show that CGCE achieves accuracy improvements of up to 2% over traditional CE methods, exhibiting stronger generalization capabilities under challenging scenarios such as class imbalance, few-shot learning, and noisy labels. These findings indicate that CGCE has significant potential as a superior alternative to traditional CE methods.</p>
	]]></content:encoded>

	<dc:title>Contrastive Geometric Cross-Entropy: A Unified Explicit-Margin Loss for Classification in Network Automation</dc:title>
			<dc:creator>Yifan Wu</dc:creator>
			<dc:creator>Lei Xiao</dc:creator>
			<dc:creator>Xia Du</dc:creator>
		<dc:identifier>doi: 10.3390/network5040045</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2025-10-09</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2025-10-09</prism:publicationDate>
	<prism:volume>5</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>45</prism:startingPage>
		<prism:doi>10.3390/network5040045</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/5/4/45</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/5/4/44">

	<title>Network, Vol. 5, Pages 44: Hybrid Spatio-Temporal CNN–LSTM/BiLSTM Models for Blocking Prediction in Elastic Optical Networks</title>
	<link>https://www.mdpi.com/2673-8732/5/4/44</link>
	<description>Elastic optical networks (EONs) must allocate resources dynamically to accommodate heterogeneous, high-bandwidth demands. However, the continuous setup and teardown of connections with different bit rates can fragment the spectrum and lead to blocking. The blocking predictors enable proactive defragmentation and resource reallocation within network controllers. In this paper, we propose two novel deep learning models (based on CNN–BiLSTM and CNN–LSTM) to predict blocking in EONs by combining spatial feature extraction from spectrum snapshots using 2D convolutional layers with temporal sequence modeling. This hybrid spatio-temporal design learns how local fragmentation patterns evolve over time, allowing it to detect impending blocking scenarios more accurately than conventional methods. We evaluate our model on the simulated NSFNET topology and compare it against multiple baselines, namely 1D CNN, 2D CNN, k-nearest neighbors (KNN), and support vector machines (SVMs). The results show that the proposed CNN–BiLSTM/LSTM models consistently achieve higher performance. The CNN–BiLSTM model achieved the highest accuracy in blocking prediction, while the CNN–LSTM model shows slightly lower accuracy; however, it has much lower complexity and a faster learning time.</description>
	<pubDate>2025-10-07</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 5, Pages 44: Hybrid Spatio-Temporal CNN–LSTM/BiLSTM Models for Blocking Prediction in Elastic Optical Networks</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/5/4/44">doi: 10.3390/network5040044</a></p>
	<p>Authors:
		Farzaneh Nourmohammadi
		Jaume Comellas
		Uzay Kaymak
		</p>
	<p>Elastic optical networks (EONs) must allocate resources dynamically to accommodate heterogeneous, high-bandwidth demands. However, the continuous setup and teardown of connections with different bit rates can fragment the spectrum and lead to blocking. The blocking predictors enable proactive defragmentation and resource reallocation within network controllers. In this paper, we propose two novel deep learning models (based on CNN–BiLSTM and CNN–LSTM) to predict blocking in EONs by combining spatial feature extraction from spectrum snapshots using 2D convolutional layers with temporal sequence modeling. This hybrid spatio-temporal design learns how local fragmentation patterns evolve over time, allowing it to detect impending blocking scenarios more accurately than conventional methods. We evaluate our model on the simulated NSFNET topology and compare it against multiple baselines, namely 1D CNN, 2D CNN, k-nearest neighbors (KNN), and support vector machines (SVMs). The results show that the proposed CNN–BiLSTM/LSTM models consistently achieve higher performance. The CNN–BiLSTM model achieved the highest accuracy in blocking prediction, while the CNN–LSTM model shows slightly lower accuracy; however, it has much lower complexity and a faster learning time.</p>
	]]></content:encoded>

	<dc:title>Hybrid Spatio-Temporal CNN–LSTM/BiLSTM Models for Blocking Prediction in Elastic Optical Networks</dc:title>
			<dc:creator>Farzaneh Nourmohammadi</dc:creator>
			<dc:creator>Jaume Comellas</dc:creator>
			<dc:creator>Uzay Kaymak</dc:creator>
		<dc:identifier>doi: 10.3390/network5040044</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2025-10-07</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2025-10-07</prism:publicationDate>
	<prism:volume>5</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>44</prism:startingPage>
		<prism:doi>10.3390/network5040044</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/5/4/44</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/5/4/43">

	<title>Network, Vol. 5, Pages 43: Optimized Hybrid Ensemble Intrusion Detection for VANET-Based Autonomous Vehicle Security</title>
	<link>https://www.mdpi.com/2673-8732/5/4/43</link>
	<description>Connected and Autonomous Vehicles are promising for advancing traffic safety and efficiency. However, the increased connectivity makes these vehicles vulnerable to a broad array of cyber threats. This paper presents a novel hybrid approach for intrusion detection in in-vehicle networks, specifically focusing on the Controller Area Network bus. Ensemble learning techniques are combined with sophisticated optimization techniques and dynamic adaptation mechanisms to develop a robust, accurate, and computationally efficient intrusion detection system. The proposed system is evaluated on real-world automotive network datasets that include various attack types (e.g., Denial of Service, fuzzy, and spoofing attacks). With these results, the proposed hybrid adaptive system achieves an unprecedented accuracy of 99.995% with a 0.00001% false positive rate, which is significantly more accurate than traditional methods. In addition, the system is very robust to novel attack patterns and is tolerant to varying computational constraints and is suitable for deployment on a real-time basis in various automotive platforms. As this research represents a significant advancement in automotive cybersecurity, a scalable and proactive defense mechanism is necessary to safely operate next-generation vehicles.</description>
	<pubDate>2025-10-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 5, Pages 43: Optimized Hybrid Ensemble Intrusion Detection for VANET-Based Autonomous Vehicle Security</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/5/4/43">doi: 10.3390/network5040043</a></p>
	<p>Authors:
		Ahmad Aloqaily
		Emad E. Abdallah
		Aladdin Baarah
		Mohammad Alnabhan
		Esra’a Alshdaifat
		Hind Milhem
		</p>
	<p>Connected and Autonomous Vehicles are promising for advancing traffic safety and efficiency. However, the increased connectivity makes these vehicles vulnerable to a broad array of cyber threats. This paper presents a novel hybrid approach for intrusion detection in in-vehicle networks, specifically focusing on the Controller Area Network bus. Ensemble learning techniques are combined with sophisticated optimization techniques and dynamic adaptation mechanisms to develop a robust, accurate, and computationally efficient intrusion detection system. The proposed system is evaluated on real-world automotive network datasets that include various attack types (e.g., Denial of Service, fuzzy, and spoofing attacks). With these results, the proposed hybrid adaptive system achieves an unprecedented accuracy of 99.995% with a 0.00001% false positive rate, which is significantly more accurate than traditional methods. In addition, the system is very robust to novel attack patterns and is tolerant to varying computational constraints and is suitable for deployment on a real-time basis in various automotive platforms. As this research represents a significant advancement in automotive cybersecurity, a scalable and proactive defense mechanism is necessary to safely operate next-generation vehicles.</p>
	]]></content:encoded>

	<dc:title>Optimized Hybrid Ensemble Intrusion Detection for VANET-Based Autonomous Vehicle Security</dc:title>
			<dc:creator>Ahmad Aloqaily</dc:creator>
			<dc:creator>Emad E. Abdallah</dc:creator>
			<dc:creator>Aladdin Baarah</dc:creator>
			<dc:creator>Mohammad Alnabhan</dc:creator>
			<dc:creator>Esra’a Alshdaifat</dc:creator>
			<dc:creator>Hind Milhem</dc:creator>
		<dc:identifier>doi: 10.3390/network5040043</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2025-10-03</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2025-10-03</prism:publicationDate>
	<prism:volume>5</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>43</prism:startingPage>
		<prism:doi>10.3390/network5040043</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/5/4/43</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/5/4/42">

	<title>Network, Vol. 5, Pages 42: Bijective Network-to-Image Encoding for Interpretable CNN-Based Intrusion Detection System</title>
	<link>https://www.mdpi.com/2673-8732/5/4/42</link>
	<description>As 5G and beyond networks grow in heterogeneity, complexity, and scale, traditional Intrusion Detection Systems (IDS) struggle to maintain accurate and precise detection mechanisms. A promising alternative approach to this problem has involved the use of Deep Learning (DL) techniques; however, DL-based IDS suffer from issues relating to interpretation, performance variability, and high computational overheads. These issues limit their practical deployment in real-world applications. In this study, CiNeT is introduced as a novel DL-based IDS employing Convolutional Neural Networks (CNN) within a bijective encoding–decoding framework between network traffic features (such as IPv6, IPv4, Timestamp, MAC addresses, and network data) and their RGB representations. This transformation facilitates our DL IDS in detecting spatial patterns without sacrificing fidelity. The bijective pipeline enables complete traceability from detection decisions to their corresponding network traffic features, enabling a significant initiative towards solving the ‘black-box’ problem inherent in Deep Learning models, thus facilitating digital forensics. Finally, the DL IDS has been evaluated on three datasets, UNSW NB-15, InSDN, and ToN_IoT, with analysis conducted on accuracy, GPU usage, memory utilisation, training, testing, and validation time. To summarise, this study presents a new CNN-based IDS with an end-to-end pipeline between network traffic data and their RGB representation, which offers high performance and enhanced interpretability through revisable transformation.</description>
	<pubDate>2025-09-25</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 5, Pages 42: Bijective Network-to-Image Encoding for Interpretable CNN-Based Intrusion Detection System</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/5/4/42">doi: 10.3390/network5040042</a></p>
	<p>Authors:
		Omesh A. Fernando
		Joseph Spring
		Hannan Xiao
		</p>
	<p>As 5G and beyond networks grow in heterogeneity, complexity, and scale, traditional Intrusion Detection Systems (IDS) struggle to maintain accurate and precise detection mechanisms. A promising alternative approach to this problem has involved the use of Deep Learning (DL) techniques; however, DL-based IDS suffer from issues relating to interpretation, performance variability, and high computational overheads. These issues limit their practical deployment in real-world applications. In this study, CiNeT is introduced as a novel DL-based IDS employing Convolutional Neural Networks (CNN) within a bijective encoding&ndash;decoding framework between network traffic features (such as IPv6, IPv4, Timestamp, MAC addresses, and network data) and their RGB representations. This transformation facilitates our DL IDS in detecting spatial patterns without sacrificing fidelity. The bijective pipeline enables complete traceability from detection decisions to their corresponding network traffic features, enabling a significant initiative towards solving the &lsquo;black-box&rsquo; problem inherent in Deep Learning models, thus facilitating digital forensics. Finally, the DL IDS has been evaluated on three datasets, UNSW NB-15, InSDN, and ToN_IoT, with analysis conducted on accuracy, GPU usage, memory utilisation, training, testing, and validation time. To summarise, this study presents a new CNN-based IDS with an end-to-end pipeline between network traffic data and their RGB representation, which offers high performance and enhanced interpretability through revisable transformation.</p>
	]]></content:encoded>

	<dc:title>Bijective Network-to-Image Encoding for Interpretable CNN-Based Intrusion Detection System</dc:title>
			<dc:creator>Omesh A. Fernando</dc:creator>
			<dc:creator>Joseph Spring</dc:creator>
			<dc:creator>Hannan Xiao</dc:creator>
		<dc:identifier>doi: 10.3390/network5040042</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2025-09-25</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2025-09-25</prism:publicationDate>
	<prism:volume>5</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>42</prism:startingPage>
		<prism:doi>10.3390/network5040042</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/5/4/42</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/5/3/41">

	<title>Network, Vol. 5, Pages 41: Unified Distributed Machine Learning for 6G Intelligent Transportation Systems: A Hierarchical Approach for Terrestrial and Non-Terrestrial Networks</title>
	<link>https://www.mdpi.com/2673-8732/5/3/41</link>
	<description>The successful integration of Terrestrial and Non-Terrestrial Networks (T/NTNs) in 6G is poised to revolutionize demanding domains like Earth Observation (EO) and Intelligent Transportation Systems (ITSs). Still, it requires Distributed Machine Learning (DML) frameworks that are scalable, private, and efficient. Existing methods, such as Federated Learning (FL) and Split Learning (SL), face critical limitations in terms of client computation burden and latency. To address these challenges, this paper proposes a novel hierarchical DML paradigm. We first introduce Federated Split Transfer Learning (FSTL), a foundational framework that synergizes FL, SL, and Transfer Learning (TL) to enable efficient, privacy-preserving learning within a single client group. We then extend this concept to the Generalized FSTL (GFSTL) framework, a scalable, multi-group architecture designed for complex and large-scale networks. GFSTL orchestrates parallel training across multiple client groups managed by intermediate servers (RSUs/HAPs) and aggregates them at a higher-level central server, significantly enhancing performance. We apply this framework to a unified T/NTN architecture that seamlessly integrates vehicular, aerial, and satellite assets, enabling advanced applications in 6G ITS and EO. Comprehensive simulations using the YOLOv5 model on the Cityscapes dataset validate our approach. The results show that GFSTL not only achieves faster convergence and higher detection accuracy but also substantially reduces communication overhead compared to baseline FL, and critically, both detection accuracy and end-to-end latency remain essentially invariant as the number of participating users grows, making GFSTL especially well suited for large-scale heterogeneous 6G ITS deployments. We also provide a formal latency decomposition and analysis that explains this scaling behavior. 
This work establishes GFSTL as a robust and practical solution for enabling the intelligent, connected, and resilient ecosystems required for next-generation transportation and environmental monitoring.</description>
	<pubDate>2025-09-17</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 5, Pages 41: Unified Distributed Machine Learning for 6G Intelligent Transportation Systems: A Hierarchical Approach for Terrestrial and Non-Terrestrial Networks</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/5/3/41">doi: 10.3390/network5030041</a></p>
	<p>Authors:
		David Naseh
		Arash Bozorgchenani
		Swapnil Sadashiv Shinde
		Daniele Tarchi
		</p>
	<p>The successful integration of Terrestrial and Non-Terrestrial Networks (T/NTNs) in 6G is poised to revolutionize demanding domains like Earth Observation (EO) and Intelligent Transportation Systems (ITSs). Still, it requires Distributed Machine Learning (DML) frameworks that are scalable, private, and efficient. Existing methods, such as Federated Learning (FL) and Split Learning (SL), face critical limitations in terms of client computation burden and latency. To address these challenges, this paper proposes a novel hierarchical DML paradigm. We first introduce Federated Split Transfer Learning (FSTL), a foundational framework that synergizes FL, SL, and Transfer Learning (TL) to enable efficient, privacy-preserving learning within a single client group. We then extend this concept to the Generalized FSTL (GFSTL) framework, a scalable, multi-group architecture designed for complex and large-scale networks. GFSTL orchestrates parallel training across multiple client groups managed by intermediate servers (RSUs/HAPs) and aggregates them at a higher-level central server, significantly enhancing performance. We apply this framework to a unified T/NTN architecture that seamlessly integrates vehicular, aerial, and satellite assets, enabling advanced applications in 6G ITS and EO. Comprehensive simulations using the YOLOv5 model on the Cityscapes dataset validate our approach. The results show that GFSTL not only achieves faster convergence and higher detection accuracy but also substantially reduces communication overhead compared to baseline FL, and critically, both detection accuracy and end-to-end latency remain essentially invariant as the number of participating users grows, making GFSTL especially well suited for large-scale heterogeneous 6G ITS deployments. We also provide a formal latency decomposition and analysis that explains this scaling behavior. 
This work establishes GFSTL as a robust and practical solution for enabling the intelligent, connected, and resilient ecosystems required for next-generation transportation and environmental monitoring.</p>
	]]></content:encoded>

	<dc:title>Unified Distributed Machine Learning for 6G Intelligent Transportation Systems: A Hierarchical Approach for Terrestrial and Non-Terrestrial Networks</dc:title>
			<dc:creator>David Naseh</dc:creator>
			<dc:creator>Arash Bozorgchenani</dc:creator>
			<dc:creator>Swapnil Sadashiv Shinde</dc:creator>
			<dc:creator>Daniele Tarchi</dc:creator>
		<dc:identifier>doi: 10.3390/network5030041</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2025-09-17</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2025-09-17</prism:publicationDate>
	<prism:volume>5</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>41</prism:startingPage>
		<prism:doi>10.3390/network5030041</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/5/3/41</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/5/3/40">

	<title>Network, Vol. 5, Pages 40: Orchestrating and Choreographing Distributed Self-Explaining Ambient Applications</title>
	<link>https://www.mdpi.com/2673-8732/5/3/40</link>
	<description>The Internet of Things allows us to implement concepts such as Education 4.0 by connecting sensors, actuators, and applications. In the case of direct and explicit connections, we refer to ensembles that can consist of devices and applications. When realizing spatially distributed applications, there are scenarios in which these ensembles must coordinate with each other. In software development, this process is referred to as orchestration or choreography. This paper describes a software framework that provides orchestration or choreography for self-explaining ensembles using predefined rules based on a self-description of all involved components. The framework is capable of generating user instructions or explanations for smart environments that cover interaction details. The approach also forms a basis to provide information about event-based coordination. In a case study, we investigated the technical perception of a coordinated spatial learning game application (an ambient serious game). Most participants perceived the application as cohesive and found it responsive. These results suggest that our framework provides a solid foundation for implementing coordinated applications within smart environments that appear as unified applications.</description>
	<pubDate>2025-09-17</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 5, Pages 40: Orchestrating and Choreographing Distributed Self-Explaining Ambient Applications</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/5/3/40">doi: 10.3390/network5030040</a></p>
	<p>Authors:
		Börge Kordts
		Lea C. Brandl
		Andreas Schrader
		</p>
	<p>The Internet of Things allows us to implement concepts such as Education 4.0 by connecting sensors, actuators, and applications. In the case of direct and explicit connections, we refer to ensembles that can consist of devices and applications. When realizing spatially distributed applications, there are scenarios in which these ensembles must coordinate with each other. In software development, this process is referred to as orchestration or choreography. This paper describes a software framework that provides orchestration or choreography for self-explaining ensembles using predefined rules based on a self-description of all involved components. The framework is capable of generating user instructions or explanations for smart environments that cover interaction details. The approach also forms a basis to provide information about event-based coordination. In a case study, we investigated the technical perception of a coordinated spatial learning game application (an ambient serious game). Most participants perceived the application as cohesive and found it responsive. These results suggest that our framework provides a solid foundation for implementing coordinated applications within smart environments that appear as unified applications.</p>
	]]></content:encoded>

	<dc:title>Orchestrating and Choreographing Distributed Self-Explaining Ambient Applications</dc:title>
			<dc:creator>Börge Kordts</dc:creator>
			<dc:creator>Lea C. Brandl</dc:creator>
			<dc:creator>Andreas Schrader</dc:creator>
		<dc:identifier>doi: 10.3390/network5030040</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2025-09-17</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2025-09-17</prism:publicationDate>
	<prism:volume>5</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>40</prism:startingPage>
		<prism:doi>10.3390/network5030040</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/5/3/40</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/5/3/39">

	<title>Network, Vol. 5, Pages 39: Integrating Reinforcement Learning and LLM with Self-Optimization Network System</title>
	<link>https://www.mdpi.com/2673-8732/5/3/39</link>
	<description>The rapid expansion of communication networks and increasingly complex service demands have presented significant challenges to the intelligent management of network resources. To address these challenges, we have proposed a network self-optimization framework integrating the predictive capabilities of the Large Language Model (LLM) with the decision-making capabilities of multi-agent Reinforcement Learning (RL). Specifically, historical network traffic data are converted into structured inputs to forecast future traffic patterns using a GPT-2-based prediction module. Concurrently, a Multi-Agent Deep Deterministic Policy Gradient (MADDPG) algorithm leverages real-time sensor data—including link delay and packet loss rates collected by embedded network sensors—to dynamically optimize bandwidth allocation. This sensor-driven mechanism enables the system to perform real-time optimization of bandwidth allocation, ensuring accurate monitoring and proactive resource scheduling. We evaluate our framework in a heterogeneous network simulated using Mininet under diverse traffic scenarios. Experimental results show that the proposed method significantly reduces network latency and packet loss, as well as improves robustness and resource utilization, highlighting the effectiveness of integrating sensor-driven RL optimization with predictive insights from LLMs.</description>
	<pubDate>2025-09-16</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 5, Pages 39: Integrating Reinforcement Learning and LLM with Self-Optimization Network System</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/5/3/39">doi: 10.3390/network5030039</a></p>
	<p>Authors:
		Xing Xu
		Jianbin Zhao
		Yu Zhang
		Rongpeng Li
		</p>
	<p>The rapid expansion of communication networks and increasingly complex service demands have presented significant challenges to the intelligent management of network resources. To address these challenges, we have proposed a network self-optimization framework integrating the predictive capabilities of the Large Language Model (LLM) with the decision-making capabilities of multi-agent Reinforcement Learning (RL). Specifically, historical network traffic data are converted into structured inputs to forecast future traffic patterns using a GPT-2-based prediction module. Concurrently, a Multi-Agent Deep Deterministic Policy Gradient (MADDPG) algorithm leverages real-time sensor data&mdash;including link delay and packet loss rates collected by embedded network sensors&mdash;to dynamically optimize bandwidth allocation. This sensor-driven mechanism enables the system to perform real-time optimization of bandwidth allocation, ensuring accurate monitoring and proactive resource scheduling. We evaluate our framework in a heterogeneous network simulated using Mininet under diverse traffic scenarios. Experimental results show that the proposed method significantly reduces network latency and packet loss, as well as improves robustness and resource utilization, highlighting the effectiveness of integrating sensor-driven RL optimization with predictive insights from LLMs.</p>
	]]></content:encoded>

	<dc:title>Integrating Reinforcement Learning and LLM with Self-Optimization Network System</dc:title>
			<dc:creator>Xing Xu</dc:creator>
			<dc:creator>Jianbin Zhao</dc:creator>
			<dc:creator>Yu Zhang</dc:creator>
			<dc:creator>Rongpeng Li</dc:creator>
		<dc:identifier>doi: 10.3390/network5030039</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2025-09-16</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2025-09-16</prism:publicationDate>
	<prism:volume>5</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>39</prism:startingPage>
		<prism:doi>10.3390/network5030039</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/5/3/39</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/5/3/38">

	<title>Network, Vol. 5, Pages 38: From Counters to Telemetry: A Survey of Programmable Network-Wide Monitoring</title>
	<link>https://www.mdpi.com/2673-8732/5/3/38</link>
	<description>Network monitoring is becoming increasingly challenging as networks grow in scale, speed, and complexity. The evolution of monitoring approaches reflects a shift from device-centric, localized techniques toward network-wide observability enabled by modern networking paradigms. Early methods like SNMP polling and NetFlow provided basic insights but struggled with real-time visibility in large, dynamic environments. The emergence of Software-Defined Networking (SDN) introduced centralized control and a global view of network state, opening the door to more coordinated and programmable measurement strategies. More recently, programmable data planes (e.g., P4-based switches) and in-band telemetry frameworks have allowed fine grained, line rate data collection directly from traffic, reducing overhead and latency compared to traditional polling. These developments mark a move away from single point or per flow analysis toward holistic monitoring woven throughout the network fabric. In this survey, we systematically review the state of the art in network-wide monitoring. We define key concepts (topologies, flows, telemetry, observability) and trace the progression of monitoring architectures from traditional networks to SDN to fully programmable networks. We introduce a taxonomy spanning local device measures, path level techniques, global network-wide methods, and hybrid approaches. Finally, we summarize open research challenges and future directions, highlighting that modern networks demand monitoring frameworks that are not only scalable and real-time but also tightly integrated with network control and automation.</description>
	<pubDate>2025-09-16</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 5, Pages 38: From Counters to Telemetry: A Survey of Programmable Network-Wide Monitoring</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/5/3/38">doi: 10.3390/network5030038</a></p>
	<p>Authors:
		Nofel Yaseen
		</p>
	<p>Network monitoring is becoming increasingly challenging as networks grow in scale, speed, and complexity. The evolution of monitoring approaches reflects a shift from device-centric, localized techniques toward network-wide observability enabled by modern networking paradigms. Early methods like SNMP polling and NetFlow provided basic insights but struggled with real-time visibility in large, dynamic environments. The emergence of Software-Defined Networking (SDN) introduced centralized control and a global view of network state, opening the door to more coordinated and programmable measurement strategies. More recently, programmable data planes (e.g., P4-based switches) and in-band telemetry frameworks have allowed fine grained, line rate data collection directly from traffic, reducing overhead and latency compared to traditional polling. These developments mark a move away from single point or per flow analysis toward holistic monitoring woven throughout the network fabric. In this survey, we systematically review the state of the art in network-wide monitoring. We define key concepts (topologies, flows, telemetry, observability) and trace the progression of monitoring architectures from traditional networks to SDN to fully programmable networks. We introduce a taxonomy spanning local device measures, path level techniques, global network-wide methods, and hybrid approaches. Finally, we summarize open research challenges and future directions, highlighting that modern networks demand monitoring frameworks that are not only scalable and real-time but also tightly integrated with network control and automation.</p>
	]]></content:encoded>

	<dc:title>From Counters to Telemetry: A Survey of Programmable Network-Wide Monitoring</dc:title>
			<dc:creator>Nofel Yaseen</dc:creator>
		<dc:identifier>doi: 10.3390/network5030038</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2025-09-16</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2025-09-16</prism:publicationDate>
	<prism:volume>5</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>38</prism:startingPage>
		<prism:doi>10.3390/network5030038</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/5/3/38</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/5/3/37">

	<title>Network, Vol. 5, Pages 37: Hybrid NFC-VLC Systems: Integration Strategies, Applications, and Future Directions</title>
	<link>https://www.mdpi.com/2673-8732/5/3/37</link>
	<description>The hybridization of Near-Field Communication (NFC) with Visible Light Communication (VLC) presents a promising framework for robust, secure, and efficient wireless transmission. By combining proximity-based authentication of NFC with high-speed and interference-resistant data transfer of VLC, this approach mitigates the inherent limitations of each technology, such as the restricted range of NFC and authentication challenges of VLC. The resulting hybrid system leverages NFC for secure handshaking and VLC for high-throughput communication, enabling scalable, real-time applications across diverse domains. This study examines integration strategies, technical enablers, and potential use cases, including smart street poles for secure citizen engagement, patient authentication and record access systems in healthcare, personalized retail advertising, and automated attendance tracking in education. Additionally, this paper addresses key challenges in hybridization and explores future research directions, such as the integration of Artificial Intelligence and 6G networks.</description>
	<pubDate>2025-09-15</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 5, Pages 37: Hybrid NFC-VLC Systems: Integration Strategies, Applications, and Future Directions</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/5/3/37">doi: 10.3390/network5030037</a></p>
	<p>Authors:
		Vindula L. Jayaweera
		Chamodi Peiris
		Dhanushika Darshani
		Sampath Edirisinghe
		Nishan Dharmaweera
		Uditha Wijewardhana
		</p>
	<p>The hybridization of Near-Field Communication (NFC) with Visible Light Communication (VLC) presents a promising framework for robust, secure, and efficient wireless transmission. By combining proximity-based authentication of NFC with high-speed and interference-resistant data transfer of VLC, this approach mitigates the inherent limitations of each technology, such as the restricted range of NFC and authentication challenges of VLC. The resulting hybrid system leverages NFC for secure handshaking and VLC for high-throughput communication, enabling scalable, real-time applications across diverse domains. This study examines integration strategies, technical enablers, and potential use cases, including smart street poles for secure citizen engagement, patient authentication and record access systems in healthcare, personalized retail advertising, and automated attendance tracking in education. Additionally, this paper addresses key challenges in hybridization and explores future research directions, such as the integration of Artificial Intelligence and 6G networks.</p>
	]]></content:encoded>

	<dc:title>Hybrid NFC-VLC Systems: Integration Strategies, Applications, and Future Directions</dc:title>
			<dc:creator>Vindula L. Jayaweera</dc:creator>
			<dc:creator>Chamodi Peiris</dc:creator>
			<dc:creator>Dhanushika Darshani</dc:creator>
			<dc:creator>Sampath Edirisinghe</dc:creator>
			<dc:creator>Nishan Dharmaweera</dc:creator>
			<dc:creator>Uditha Wijewardhana</dc:creator>
		<dc:identifier>doi: 10.3390/network5030037</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2025-09-15</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2025-09-15</prism:publicationDate>
	<prism:volume>5</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>37</prism:startingPage>
		<prism:doi>10.3390/network5030037</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/5/3/37</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/5/3/36">

	<title>Network, Vol. 5, Pages 36: Efficient, Scalable, and Secure Network Monitoring Platform: Self-Contained Solution for Future SMEs</title>
	<link>https://www.mdpi.com/2673-8732/5/3/36</link>
	<description>In this paper, we introduce a novel, self-hosted Syslog collection platform designed specifically to address the challenges that small and medium enterprises (SMEs) face in implementing comprehensive syslog monitoring solutions. Our analysis begins with an assessment of current network observability practices, evaluating enterprise solutions, on-premises systems, and Software as a Service (SaaS) offerings to identify features crucial for SME environments. The proposed platform represents an advancement in the field through the incorporation of modern practices, including GitOps and continuous integration and continuous delivery/deployment (CI/CD), and its implementation onto a self-managed Kubernetes platform, which is an approach not commonly explored in SME-focused solutions. We will explore its scalability by leveraging dynamic templates, which allow us to select the number and type of nodes when deploying networks of various sizes. This architecture ensures organisations can deploy a pre-designed, scalable network monitoring solution without extensive external support. The resilience of the proposed platform is assessed by providing empirical evidence of the scaling performance and reliability under various failure scenarios, including node failure and high network throughput stress.</description>
	<pubDate>2025-09-10</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 5, Pages 36: Efficient, Scalable, and Secure Network Monitoring Platform: Self-Contained Solution for Future SMEs</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/5/3/36">doi: 10.3390/network5030036</a></p>
	<p>Authors:
		Alfred Stephen Tonge
		Babu Kaji Baniya
		Deepak GC
		</p>
	<p>In this paper, we introduce a novel, self-hosted Syslog collection platform designed specifically to address the challenges that small and medium enterprises (SMEs) face in implementing comprehensive syslog monitoring solutions. Our analysis begins with an assessment of current network observability practices, evaluating enterprise solutions, on-premises systems, and Software as a Service (SaaS) offerings to identify features crucial for SME environments. The proposed platform represents an advancement in the field through the incorporation of modern practices, including GitOps and continuous integration and continuous delivery/deployment (CI/CD), and its implementation onto a self-managed Kubernetes platform, which is an approach not commonly explored in SME-focused solutions. We will explore its scalability by leveraging dynamic templates, which allow us to select the number and type of nodes when deploying networks of various sizes. This architecture ensures organisations can deploy a pre-designed, scalable network monitoring solution without extensive external support. The resilience of the proposed platform is assessed by providing empirical evidence of the scaling performance and reliability under various failure scenarios, including node failure and high network throughput stress.</p>
	]]></content:encoded>

	<dc:title>Efficient, Scalable, and Secure Network Monitoring Platform: Self-Contained Solution for Future SMEs</dc:title>
			<dc:creator>Alfred Stephen Tonge</dc:creator>
			<dc:creator>Babu Kaji Baniya</dc:creator>
			<dc:creator>Deepak GC</dc:creator>
		<dc:identifier>doi: 10.3390/network5030036</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2025-09-10</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2025-09-10</prism:publicationDate>
	<prism:volume>5</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>36</prism:startingPage>
		<prism:doi>10.3390/network5030036</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/5/3/36</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/5/3/35">

	<title>Network, Vol. 5, Pages 35: When Robust Isn&amp;rsquo;t Resilient: Quantifying Budget-Driven Trade-Offs in Connectivity Cascades with Concurrent Self-Healing</title>
	<link>https://www.mdpi.com/2673-8732/5/3/35</link>
	<description>Cascading link failures continue to imperil power grids, transport networks, and cyber-physical systems, yet the relationship between a network&amp;amp;rsquo;s robustness at the moment of attack and its subsequent resiliency remains poorly understood. We introduce a dynamic framework in which connectivity-based cascades and distributed self-healing act concurrently within each time-step. Failure is triggered when a node&amp;amp;rsquo;s active-neighbor ratio falls below a threshold &amp;amp;phi;; healing activates once the global fraction of inactive nodes exceeds trigger T and is limited by budget B. Two real data sets&amp;amp;mdash;a 332-node U.S. airport graph and a 1133-node university e-mail graph&amp;amp;mdash;serve as testbeds. For each graph we sweep the parameter quartet (&amp;amp;phi;,&amp;amp;thinsp;B,&amp;amp;thinsp;T,attackmode) and record (i) immediate robustness R, (ii) 90% recovery time T90, and (iii) cumulative average damage. Results show that targeted hub removal is up to three times more damaging than random failure, but that prompt healing with B&amp;amp;ge;0.12 can halve T90. Scatter-plot analysis reveals a non-monotonic correlation: high-R states recover quickly only when B and T are favorable, whereas low-R states can rebound rapidly under ample budgets. A multiplicative fit T90&amp;amp;prop;B&amp;amp;minus;&amp;amp;beta;g(T)h(R) (with &amp;amp;beta;&amp;amp;asymp;1) captures these interactions. The findings demonstrate that structural hardening alone cannot guarantee fast recovery; resource-aware, early-triggered self-healing is the decisive factor. The proposed model and data-driven insights provide a quantitative basis for designing infrastructure that is both robust to failure and resilient in restoration.</description>
	<pubDate>2025-09-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 5, Pages 35: When Robust Isn&rsquo;t Resilient: Quantifying Budget-Driven Trade-Offs in Connectivity Cascades with Concurrent Self-Healing</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/5/3/35">doi: 10.3390/network5030035</a></p>
	<p>Authors:
		Waseem Al Aqqad
		</p>
	<p>Cascading link failures continue to imperil power grids, transport networks, and cyber-physical systems, yet the relationship between a network&amp;amp;rsquo;s robustness at the moment of attack and its subsequent resiliency remains poorly understood. We introduce a dynamic framework in which connectivity-based cascades and distributed self-healing act concurrently within each time-step. Failure is triggered when a node&amp;amp;rsquo;s active-neighbor ratio falls below a threshold &amp;amp;phi;; healing activates once the global fraction of inactive nodes exceeds trigger T and is limited by budget B. Two real data sets&amp;amp;mdash;a 332-node U.S. airport graph and a 1133-node university e-mail graph&amp;amp;mdash;serve as testbeds. For each graph we sweep the parameter quartet (&amp;amp;phi;,&amp;amp;thinsp;B,&amp;amp;thinsp;T,attackmode) and record (i) immediate robustness R, (ii) 90% recovery time T90, and (iii) cumulative average damage. Results show that targeted hub removal is up to three times more damaging than random failure, but that prompt healing with B&amp;amp;ge;0.12 can halve T90. Scatter-plot analysis reveals a non-monotonic correlation: high-R states recover quickly only when B and T are favorable, whereas low-R states can rebound rapidly under ample budgets. A multiplicative fit T90&amp;amp;prop;B&amp;amp;minus;&amp;amp;beta;g(T)h(R) (with &amp;amp;beta;&amp;amp;asymp;1) captures these interactions. The findings demonstrate that structural hardening alone cannot guarantee fast recovery; resource-aware, early-triggered self-healing is the decisive factor. The proposed model and data-driven insights provide a quantitative basis for designing infrastructure that is both robust to failure and resilient in restoration.</p>
	]]></content:encoded>

	<dc:title>When Robust Isn&amp;rsquo;t Resilient: Quantifying Budget-Driven Trade-Offs in Connectivity Cascades with Concurrent Self-Healing</dc:title>
			<dc:creator>Waseem Al Aqqad</dc:creator>
		<dc:identifier>doi: 10.3390/network5030035</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2025-09-03</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2025-09-03</prism:publicationDate>
	<prism:volume>5</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>35</prism:startingPage>
		<prism:doi>10.3390/network5030035</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/5/3/35</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/5/3/34">

	<title>Network, Vol. 5, Pages 34: Unlocking Blockchain&amp;rsquo;s Potential in Supply Chain Management: A Review of Challenges, Applications, and Emerging Solutions</title>
	<link>https://www.mdpi.com/2673-8732/5/3/34</link>
	<description>Blockchain&amp;rsquo;s decentralized, immutable, and transparent nature offers a promising solution to enhance security, trust, and efficiency in supply chains. While integrating blockchain into the SCM process poses significant challenges, including technical, operational, and regulatory issues, this review analyzes blockchain&amp;rsquo;s potential in SCM with a focus on the key challenges encountered when applying blockchain in this domain&amp;mdash;such as scalability limitations, interoperability barriers, high implementation costs, and privacy as well as data security concerns. The key contributions are as follows: (1) applications of blockchain across major SCM domains&amp;mdash;including pharmaceuticals, healthcare, logistics, and agri-food; (2) SCM functions that benefit from blockchain integration; (3) how blockchain&amp;rsquo;s properties are reshaping modern SCM processes; (4) the challenges faced by businesses while integrating blockchain into supply chains; (5) a critical evaluation of existing solutions and their limitations, categorized into three main domains; (6) unresolved issues highlighted in dedicated &amp;ldquo;Critical Issues to Consider&amp;rdquo; sections; (7) synergies with big data, IoT, and AI for secure and intelligent supply chains, along with challenges of emerging solutions; and (8) unexplored domains for blockchain in SCM. By synthesizing current research and industry insights, this study offers practical guidance and outlines future directions for building scalable and resilient global trade networks.</description>
	<pubDate>2025-08-26</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 5, Pages 34: Unlocking Blockchain&rsquo;s Potential in Supply Chain Management: A Review of Challenges, Applications, and Emerging Solutions</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/5/3/34">doi: 10.3390/network5030034</a></p>
	<p>Authors:
		Mahafuja Khatun
		Tasneem Darwish
		</p>
	<p>Blockchain&rsquo;s decentralized, immutable, and transparent nature offers a promising solution to enhance security, trust, and efficiency in supply chains. While integrating blockchain into the SCM process poses significant challenges, including technical, operational, and regulatory issues, this review analyzes blockchain&rsquo;s potential in SCM with a focus on the key challenges encountered when applying blockchain in this domain&mdash;such as scalability limitations, interoperability barriers, high implementation costs, and privacy as well as data security concerns. The key contributions are as follows: (1) applications of blockchain across major SCM domains&mdash;including pharmaceuticals, healthcare, logistics, and agri-food; (2) SCM functions that benefit from blockchain integration; (3) how blockchain&rsquo;s properties are reshaping modern SCM processes; (4) the challenges faced by businesses while integrating blockchain into supply chains; (5) a critical evaluation of existing solutions and their limitations, categorized into three main domains; (6) unresolved issues highlighted in dedicated &ldquo;Critical Issues to Consider&rdquo; sections; (7) synergies with big data, IoT, and AI for secure and intelligent supply chains, along with challenges of emerging solutions; and (8) unexplored domains for blockchain in SCM. By synthesizing current research and industry insights, this study offers practical guidance and outlines future directions for building scalable and resilient global trade networks.</p>
	]]></content:encoded>

	<dc:title>Unlocking Blockchain&amp;rsquo;s Potential in Supply Chain Management: A Review of Challenges, Applications, and Emerging Solutions</dc:title>
			<dc:creator>Mahafuja Khatun</dc:creator>
			<dc:creator>Tasneem Darwish</dc:creator>
		<dc:identifier>doi: 10.3390/network5030034</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2025-08-26</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2025-08-26</prism:publicationDate>
	<prism:volume>5</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>34</prism:startingPage>
		<prism:doi>10.3390/network5030034</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/5/3/34</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/5/3/33">

	<title>Network, Vol. 5, Pages 33: A Multiple-Input Multiple-Output Transmission System Employing Orbital Angular Momentum Multiplexing for Wireless Backhaul Applications</title>
	<link>https://www.mdpi.com/2673-8732/5/3/33</link>
	<description>This paper presents a long-range experimental demonstration of multi-mode multiple-input multiple-output (MIMO) transmission using orbital angular momentum (OAM) waves for Line-of-Sight (LoS) wireless backhaul applications. A 4 &amp;times; 4 MIMO system employing distinct OAM modes is implemented and shown to support multiplexing data transmission over a single frequency band without inter-channel interference. In contrast, a 2 &amp;times; 2 plane wave MIMO configuration fails to achieve reliable demodulation due to mutual interference, underscoring the spatial limitations of conventional waveforms. The results confirm that OAM provides spatial orthogonality suitable for high-capacity, frequency-efficient wireless backhaul links. Experimental validation is conducted over a 100 m outdoor path, demonstrating the feasibility of OAM-based MIMO in practical wireless backhaul scenarios.</description>
	<pubDate>2025-08-25</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 5, Pages 33: A Multiple-Input Multiple-Output Transmission System Employing Orbital Angular Momentum Multiplexing for Wireless Backhaul Applications</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/5/3/33">doi: 10.3390/network5030033</a></p>
	<p>Authors:
		Afkar Mohamed Ismail
		Yufei Zhao
		Gaohua Ju
		</p>
	<p>This paper presents a long-range experimental demonstration of multi-mode multiple-input multiple-output (MIMO) transmission using orbital angular momentum (OAM) waves for Line-of-Sight (LoS) wireless backhaul applications. A 4 &times; 4 MIMO system employing distinct OAM modes is implemented and shown to support multiplexing data transmission over a single frequency band without inter-channel interference. In contrast, a 2 &times; 2 plane wave MIMO configuration fails to achieve reliable demodulation due to mutual interference, underscoring the spatial limitations of conventional waveforms. The results confirm that OAM provides spatial orthogonality suitable for high-capacity, frequency-efficient wireless backhaul links. Experimental validation is conducted over a 100 m outdoor path, demonstrating the feasibility of OAM-based MIMO in practical wireless backhaul scenarios.</p>
	]]></content:encoded>

	<dc:title>A Multiple-Input Multiple-Output Transmission System Employing Orbital Angular Momentum Multiplexing for Wireless Backhaul Applications</dc:title>
			<dc:creator>Afkar Mohamed Ismail</dc:creator>
			<dc:creator>Yufei Zhao</dc:creator>
			<dc:creator>Gaohua Ju</dc:creator>
		<dc:identifier>doi: 10.3390/network5030033</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2025-08-25</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2025-08-25</prism:publicationDate>
	<prism:volume>5</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>33</prism:startingPage>
		<prism:doi>10.3390/network5030033</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/5/3/33</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/5/3/32">

	<title>Network, Vol. 5, Pages 32: A Comprehensive Review of Satellite Orbital Placement and Coverage Optimization for Low Earth Orbit Satellite Networks: Challenges and Solutions</title>
	<link>https://www.mdpi.com/2673-8732/5/3/32</link>
	<description>Nowadays, internet connectivity suffers from instability and slowness due to optical fiber cable attacks across the seas and oceans. The optimal solution to this problem is using the Low Earth Orbit (LEO) satellite network, which can resolve the problem of internet connectivity and reachability, and it has the power to bring real-time, reliable, low-latency, high-bandwidth, cost-effective internet access to many urban and rural areas in any region of the Earth. However, satellite orbital placement (SOP) and navigation should be carefully designed to reduce signal impairments. The challenges of orbital satellite placement for LEO include constellation development, satellite parameter optimization, bandwidth optimization, consideration of signal impairment, and coverage optimization. This paper presents a comprehensive review of SOP and coverage optimization, examines prevalent issues affecting LEO internet connectivity, evaluates existing solutions, and proposes novel solutions to address these challenges. Furthermore, it recommends a machine learning solution for coverage optimization and SOP that can be used to efficiently enhance internet reliability and reachability for LEO satellite networks. This survey will open the gate for developing an optimal solution for global internet connectivity and reachability.</description>
	<pubDate>2025-08-20</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 5, Pages 32: A Comprehensive Review of Satellite Orbital Placement and Coverage Optimization for Low Earth Orbit Satellite Networks: Challenges and Solutions</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/5/3/32">doi: 10.3390/network5030032</a></p>
	<p>Authors:
		Adel A. Ahmed
		</p>
	<p>Nowadays, internet connectivity suffers from instability and slowness due to optical fiber cable attacks across the seas and oceans. The optimal solution to this problem is using the Low Earth Orbit (LEO) satellite network, which can resolve the problem of internet connectivity and reachability, and it has the power to bring real-time, reliable, low-latency, high-bandwidth, cost-effective internet access to many urban and rural areas in any region of the Earth. However, satellite orbital placement (SOP) and navigation should be carefully designed to reduce signal impairments. The challenges of orbital satellite placement for LEO include constellation development, satellite parameter optimization, bandwidth optimization, consideration of signal impairment, and coverage optimization. This paper presents a comprehensive review of SOP and coverage optimization, examines prevalent issues affecting LEO internet connectivity, evaluates existing solutions, and proposes novel solutions to address these challenges. Furthermore, it recommends a machine learning solution for coverage optimization and SOP that can be used to efficiently enhance internet reliability and reachability for LEO satellite networks. This survey will open the gate for developing an optimal solution for global internet connectivity and reachability.</p>
	]]></content:encoded>

	<dc:title>A Comprehensive Review of Satellite Orbital Placement and Coverage Optimization for Low Earth Orbit Satellite Networks: Challenges and Solutions</dc:title>
			<dc:creator>Adel A. Ahmed</dc:creator>
		<dc:identifier>doi: 10.3390/network5030032</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2025-08-20</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2025-08-20</prism:publicationDate>
	<prism:volume>5</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>32</prism:startingPage>
		<prism:doi>10.3390/network5030032</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/5/3/32</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/5/3/31">

	<title>Network, Vol. 5, Pages 31: Correction: Saxena, U.R.; Kadel, R. RACHEIM: Reinforced Reliable Computing in Cloud by Ensuring Restricted Access Control. Network 2025, 5, 19</title>
	<link>https://www.mdpi.com/2673-8732/5/3/31</link>
	<description>In the original publication [...]</description>
	<pubDate>2025-08-19</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 5, Pages 31: Correction: Saxena, U.R.; Kadel, R. RACHEIM: Reinforced Reliable Computing in Cloud by Ensuring Restricted Access Control. Network 2025, 5, 19</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/5/3/31">doi: 10.3390/network5030031</a></p>
	<p>Authors:
		Urvashi Rahul Saxena
		Rajan Kadel
		</p>
	<p>In the original publication [...]</p>
	]]></content:encoded>

	<dc:title>Correction: Saxena, U.R.; Kadel, R. RACHEIM: Reinforced Reliable Computing in Cloud by Ensuring Restricted Access Control. Network 2025, 5, 19</dc:title>
			<dc:creator>Urvashi Rahul Saxena</dc:creator>
			<dc:creator>Rajan Kadel</dc:creator>
		<dc:identifier>doi: 10.3390/network5030031</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2025-08-19</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2025-08-19</prism:publicationDate>
	<prism:volume>5</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Correction</prism:section>
	<prism:startingPage>31</prism:startingPage>
		<prism:doi>10.3390/network5030031</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/5/3/31</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/5/3/30">

	<title>Network, Vol. 5, Pages 30: A Service Recommendation Model in Cloud Environment Based on Trusted Graph-Based Collaborative Filtering Recommender System</title>
	<link>https://www.mdpi.com/2673-8732/5/3/30</link>
	<description>Cloud computing has increasingly adopted multi-tenant infrastructures to enhance cost efficiency and resource utilization by enabling the shared use of computational resources. However, this shared model introduces several security and privacy concerns, including unauthorized access, data redundancy, and susceptibility to malicious activities. In such environments, the effectiveness of cloud-based recommendation systems largely depends on the trustworthiness of participating nodes. Traditional collaborative filtering techniques often suffer from limitations such as data sparsity and the cold-start problem, which significantly degrade rating prediction accuracy. To address these challenges, this study proposes a Trusted Graph-Based Collaborative Filtering Recommender System (TGBCF). The model integrates graph-based trust relationships with collaborative filtering to construct a trust-aware user network capable of generating reliable service recommendations. Each node&amp;amp;rsquo;s reliability is quantitatively assessed using a trust metric, thereby improving both the accuracy and robustness of the recommendation process. Simulation results show that TGBCF achieves a rating prediction accuracy of 93%, outperforming the baseline collaborative filtering approach (82%). Moreover, the model reduces the influence of malicious nodes by 40&amp;amp;ndash;60%, demonstrating its applicability in dynamic and security-sensitive cloud service environments.</description>
	<pubDate>2025-08-13</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 5, Pages 30: A Service Recommendation Model in Cloud Environment Based on Trusted Graph-Based Collaborative Filtering Recommender System</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/5/3/30">doi: 10.3390/network5030030</a></p>
	<p>Authors:
		Urvashi Rahul Saxena
		Yogita Khatri
		Rajan Kadel
		Samar Shailendra
		</p>
	<p>Cloud computing has increasingly adopted multi-tenant infrastructures to enhance cost efficiency and resource utilization by enabling the shared use of computational resources. However, this shared model introduces several security and privacy concerns, including unauthorized access, data redundancy, and susceptibility to malicious activities. In such environments, the effectiveness of cloud-based recommendation systems largely depends on the trustworthiness of participating nodes. Traditional collaborative filtering techniques often suffer from limitations such as data sparsity and the cold-start problem, which significantly degrade rating prediction accuracy. To address these challenges, this study proposes a Trusted Graph-Based Collaborative Filtering Recommender System (TGBCF). The model integrates graph-based trust relationships with collaborative filtering to construct a trust-aware user network capable of generating reliable service recommendations. Each node&amp;amp;rsquo;s reliability is quantitatively assessed using a trust metric, thereby improving both the accuracy and robustness of the recommendation process. Simulation results show that TGBCF achieves a rating prediction accuracy of 93%, outperforming the baseline collaborative filtering approach (82%). Moreover, the model reduces the influence of malicious nodes by 40&amp;amp;ndash;60%, demonstrating its applicability in dynamic and security-sensitive cloud service environments.</p>
	]]></content:encoded>

	<dc:title>A Service Recommendation Model in Cloud Environment Based on Trusted Graph-Based Collaborative Filtering Recommender System</dc:title>
			<dc:creator>Urvashi Rahul Saxena</dc:creator>
			<dc:creator>Yogita Khatri</dc:creator>
			<dc:creator>Rajan Kadel</dc:creator>
			<dc:creator>Samar Shailendra</dc:creator>
		<dc:identifier>doi: 10.3390/network5030030</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2025-08-13</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2025-08-13</prism:publicationDate>
	<prism:volume>5</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>30</prism:startingPage>
		<prism:doi>10.3390/network5030030</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/5/3/30</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/5/3/29">

	<title>Network, Vol. 5, Pages 29: Encrypted Client Hello Is Coming: A View from Passive Measurements</title>
	<link>https://www.mdpi.com/2673-8732/5/3/29</link>
	<description>The Encrypted Client Hello (ECH) extension to Transport Layer Security (TLS) and the new type of Domain Name System (DNS) records called HTTPS represent the latest efforts to improve user privacy by encrypting the server&amp;amp;rsquo;s domain name during the TLS handshake. While prior studies have assessed ECH adoption from the server perspective, little is known about its usage in the wild from a passive network standpoint. In this paper, we present the first passive analysis of ECH and HTTPS DNS adoption using a month-long dataset collected from an operational network. We find that HTTPS DNS queries already make up approximately 8% of total DNS traffic, although responses to those queries are often incomplete, leading to increased query volume. Furthermore, 59% of QUIC flows include ECH, although only a negligible fraction is directed to servers supporting it. The remaining ECH flows are composed of GREASE values, intended to prevent protocol ossification. Our findings provide new insights into the current state and challenges in deploying privacy-enhancing protocols at scale.</description>
	<pubDate>2025-08-08</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 5, Pages 29: Encrypted Client Hello Is Coming: A View from Passive Measurements</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/5/3/29">doi: 10.3390/network5030029</a></p>
	<p>Authors:
		Gabriele Merlach
		Martino Trevisan
		Danilo Giordano
		</p>
	<p>The Encrypted Client Hello (ECH) extension to Transport Layer Security (TLS) and the new type of Domain Name System (DNS) records called HTTPS represent the latest efforts to improve user privacy by encrypting the server&amp;amp;rsquo;s domain name during the TLS handshake. While prior studies have assessed ECH adoption from the server perspective, little is known about its usage in the wild from a passive network standpoint. In this paper, we present the first passive analysis of ECH and HTTPS DNS adoption using a month-long dataset collected from an operational network. We find that HTTPS DNS queries already make up approximately 8% of total DNS traffic, although responses to those queries are often incomplete, leading to increased query volume. Furthermore, 59% of QUIC flows include ECH, although only a negligible fraction is directed to servers supporting it. The remaining ECH flows are composed of GREASE values, intended to prevent protocol ossification. Our findings provide new insights into the current state and challenges in deploying privacy-enhancing protocols at scale.</p>
	]]></content:encoded>

	<dc:title>Encrypted Client Hello Is Coming: A View from Passive Measurements</dc:title>
			<dc:creator>Gabriele Merlach</dc:creator>
			<dc:creator>Martino Trevisan</dc:creator>
			<dc:creator>Danilo Giordano</dc:creator>
		<dc:identifier>doi: 10.3390/network5030029</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2025-08-08</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2025-08-08</prism:publicationDate>
	<prism:volume>5</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>29</prism:startingPage>
		<prism:doi>10.3390/network5030029</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/5/3/29</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/5/3/28">

	<title>Network, Vol. 5, Pages 28: Towards Intelligent 5G Infrastructures: Performance Evaluation of a Novel SDN-Enabled VANET Framework</title>
	<link>https://www.mdpi.com/2673-8732/5/3/28</link>
	<description>Critical Internet of Things (IoT) data in Fifth Generation Vehicular Ad Hoc Networks (5G VANETs) demands Ultra-Reliable Low-Latency Communication (URLLC) to support mission-critical vehicular applications such as autonomous driving and collision avoidance. Achieving the stringent Quality of Service (QoS) requirements for these applications remains a significant challenge. This paper proposes a novel framework integrating Software-Defined Networking (SDN) and Network Functions Virtualisation (NFV) as embedded functionalities in connected vehicles. A lightweight SDN Controller model, implemented via vehicle on-board computing resources, optimised QoS for communications between connected vehicles and the Next-Generation Node B (gNB), achieving a consistent packet delivery rate of 100%, compared to 81&amp;amp;ndash;96% for existing solutions leveraging SDN. Furthermore, a Software-Defined Wide-Area Network (SD-WAN) model deployed at the gNB enabled the efficient management of data, network, identity, and server access. Performance evaluations indicate that SDN and NFV are reliable and scalable technologies for virtualised and distributed 5G VANET infrastructures. Our SDN-based in-vehicle traffic classification model for dynamic resource allocation achieved 100% accuracy, outperforming existing Artificial Intelligence (AI)-based methods with 88&amp;amp;ndash;99% accuracy. In addition, a significant increase of 187% in flow rates over time highlights the framework&amp;amp;rsquo;s decreasing latency, adaptability, and scalability in supporting URLLC class guarantees for critical vehicular services.</description>
	<pubDate>2025-08-05</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 5, Pages 28: Towards Intelligent 5G Infrastructures: Performance Evaluation of a Novel SDN-Enabled VANET Framework</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/5/3/28">doi: 10.3390/network5030028</a></p>
	<p>Authors:
		Abiola Ifaloye
		Haifa Takruri
		Rabab Al-Zaidi
		</p>
	<p>Critical Internet of Things (IoT) data in Fifth Generation Vehicular Ad Hoc Networks (5G VANETs) demands Ultra-Reliable Low-Latency Communication (URLLC) to support mission-critical vehicular applications such as autonomous driving and collision avoidance. Achieving the stringent Quality of Service (QoS) requirements for these applications remains a significant challenge. This paper proposes a novel framework integrating Software-Defined Networking (SDN) and Network Functions Virtualisation (NFV) as embedded functionalities in connected vehicles. A lightweight SDN Controller model, implemented via vehicle on-board computing resources, optimised QoS for communications between connected vehicles and the Next-Generation Node B (gNB), achieving a consistent packet delivery rate of 100%, compared to 81&ndash;96% for existing solutions leveraging SDN. Furthermore, a Software-Defined Wide-Area Network (SD-WAN) model deployed at the gNB enabled the efficient management of data, network, identity, and server access. Performance evaluations indicate that SDN and NFV are reliable and scalable technologies for virtualised and distributed 5G VANET infrastructures. Our SDN-based in-vehicle traffic classification model for dynamic resource allocation achieved 100% accuracy, outperforming existing Artificial Intelligence (AI)-based methods with 88&ndash;99% accuracy. In addition, a significant increase of 187% in flow rates over time highlights the framework&rsquo;s decreasing latency, adaptability, and scalability in supporting URLLC class guarantees for critical vehicular services.</p>
	]]></content:encoded>

	<dc:title>Towards Intelligent 5G Infrastructures: Performance Evaluation of a Novel SDN-Enabled VANET Framework</dc:title>
			<dc:creator>Abiola Ifaloye</dc:creator>
			<dc:creator>Haifa Takruri</dc:creator>
			<dc:creator>Rabab Al-Zaidi</dc:creator>
		<dc:identifier>doi: 10.3390/network5030028</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2025-08-05</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2025-08-05</prism:publicationDate>
	<prism:volume>5</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>28</prism:startingPage>
		<prism:doi>10.3390/network5030028</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/5/3/28</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/5/3/27">

	<title>Network, Vol. 5, Pages 27: A Novel Cloud Energy Consumption Heuristic Based on a Network Slicing&amp;ndash;Ring Fencing Ratio</title>
	<link>https://www.mdpi.com/2673-8732/5/3/27</link>
	<description>The widespread adoption of cloud computing has amplified the demand for electric power. It is strategically important to address the limitations of reliable sources and sustainability of power. Research and investment in data centres and power infrastructure are therefore critically important for our digital economy. A novel heuristic for the minimisation of energy consumption in cloud computing is presented. It draws similarities to the concept of &amp;ldquo;network slices&amp;rdquo;, in which an orchestrator enables multiplexing to reduce the network &amp;ldquo;churn&amp;rdquo; often associated with significant losses of energy consumption. The novel network slicing&amp;ndash;ring fencing ratio is a heuristic calculated through an iterative procedure for the reduction in cloud energy consumption. Simulation results show how the non-convex equation optimises power by reducing energy from 10,680 kJ to 912 kJ, which is a 91.46% efficiency gain. In comparison, the Heuristic AUGMENT Non-Convex algorithm (HA-NC, by Hossain and Ansari) reported a 312.74% increase in energy consumption from 2464 kJ to 10,168 kJ, while the Priority Selection Offloading algorithm (PSO, by Anajemba et al.) also reported a 150% increase in energy consumption, from 10,738 kJ to 26,845 kJ. The proposed network slicing&amp;ndash;ring fencing ratio is seen to successfully balance energy consumption and computing performance. We therefore think the novel approach could be of interest to network architects and cloud operators.</description>
	<pubDate>2025-07-25</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 5, Pages 27: A Novel Cloud Energy Consumption Heuristic Based on a Network Slicing&ndash;Ring Fencing Ratio</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/5/3/27">doi: 10.3390/network5030027</a></p>
	<p>Authors:
		Vinay Sriram Iyer
		Yasantha Samarawickrama
		Giovani Estrada
		</p>
	<p>The widespread adoption of cloud computing has amplified the demand for electric power. It is strategically important to address the limitations of reliable sources and sustainability of power. Research and investment in data centres and power infrastructure are therefore critically important for our digital economy. A novel heuristic for the minimisation of energy consumption in cloud computing is presented. It draws similarities to the concept of &ldquo;network slices&rdquo;, in which an orchestrator enables multiplexing to reduce the network &ldquo;churn&rdquo; often associated with significant losses of energy consumption. The novel network slicing&ndash;ring fencing ratio is a heuristic calculated through an iterative procedure for the reduction in cloud energy consumption. Simulation results show how the non-convex equation optimises power by reducing energy from 10,680 kJ to 912 kJ, which is a 91.46% efficiency gain. In comparison, the Heuristic AUGMENT Non-Convex algorithm (HA-NC, by Hossain and Ansari) reported a 312.74% increase in energy consumption from 2464 kJ to 10,168 kJ, while the Priority Selection Offloading algorithm (PSO, by Anajemba et al.) also reported a 150% increase in energy consumption, from 10,738 kJ to 26,845 kJ. The proposed network slicing&ndash;ring fencing ratio is seen to successfully balance energy consumption and computing performance. We therefore think the novel approach could be of interest to network architects and cloud operators.</p>
	]]></content:encoded>

	<dc:title>A Novel Cloud Energy Consumption Heuristic Based on a Network Slicing&amp;ndash;Ring Fencing Ratio</dc:title>
			<dc:creator>Vinay Sriram Iyer</dc:creator>
			<dc:creator>Yasantha Samarawickrama</dc:creator>
			<dc:creator>Giovani Estrada</dc:creator>
		<dc:identifier>doi: 10.3390/network5030027</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2025-07-25</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2025-07-25</prism:publicationDate>
	<prism:volume>5</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>27</prism:startingPage>
		<prism:doi>10.3390/network5030027</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/5/3/27</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/5/3/26">

	<title>Network, Vol. 5, Pages 26: Applying Machine Learning to DEEC Protocol: Improved Cluster Formation in Wireless Sensor Networks</title>
	<link>https://www.mdpi.com/2673-8732/5/3/26</link>
	<description>Wireless Sensor Networks (WSNs) are specialised ad hoc networks composed of small, low-power, and often battery-operated sensor nodes with various sensors and wireless communication capabilities. These nodes collaborate to monitor and collect data from the physical environment, transmitting it to a central location or sink node for further processing and analysis. This study proposes two machine learning-based enhancements to the DEEC protocol for Wireless Sensor Networks (WSNs) by integrating the K-Nearest Neighbours (K-NN) and K-Means (K-M) machine learning (ML) algorithms. The Distributed Energy-Efficient Clustering with K-NN (DEEC-KNN) and with K-Means (DEEC-KM) approaches dynamically optimize cluster head selection to improve energy efficiency and network lifetime. These methods are validated through extensive simulations, demonstrating up to 110% improvement in packet delivery and significant gains in network stability compared with the original DEEC protocol. The adaptive clustering enabled by K-NN and K-Means is particularly effective for large-scale and dynamic WSN deployments where node failures and topology changes are frequent. These findings suggest that integrating ML with clustering protocols is a promising direction for future WSN design.</description>
	<pubDate>2025-07-24</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 5, Pages 26: Applying Machine Learning to DEEC Protocol: Improved Cluster Formation in Wireless Sensor Networks</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/5/3/26">doi: 10.3390/network5030026</a></p>
	<p>Authors:
		Abdulla Juwaied
		Lidia Jackowska-Strumillo
		</p>
	<p>Wireless Sensor Networks (WSNs) are specialised ad hoc networks composed of small, low-power, and often battery-operated sensor nodes with various sensors and wireless communication capabilities. These nodes collaborate to monitor and collect data from the physical environment, transmitting it to a central location or sink node for further processing and analysis. This study proposes two machine learning-based enhancements to the DEEC protocol for Wireless Sensor Networks (WSNs) by integrating the K-Nearest Neighbours (K-NN) and K-Means (K-M) machine learning (ML) algorithms. The Distributed Energy-Efficient Clustering with K-NN (DEEC-KNN) and with K-Means (DEEC-KM) approaches dynamically optimize cluster head selection to improve energy efficiency and network lifetime. These methods are validated through extensive simulations, demonstrating up to 110% improvement in packet delivery and significant gains in network stability compared with the original DEEC protocol. The adaptive clustering enabled by K-NN and K-Means is particularly effective for large-scale and dynamic WSN deployments where node failures and topology changes are frequent. These findings suggest that integrating ML with clustering protocols is a promising direction for future WSN design.</p>
	]]></content:encoded>

	<dc:title>Applying Machine Learning to DEEC Protocol: Improved Cluster Formation in Wireless Sensor Networks</dc:title>
			<dc:creator>Abdulla Juwaied</dc:creator>
			<dc:creator>Lidia Jackowska-Strumillo</dc:creator>
		<dc:identifier>doi: 10.3390/network5030026</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2025-07-24</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2025-07-24</prism:publicationDate>
	<prism:volume>5</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>26</prism:startingPage>
		<prism:doi>10.3390/network5030026</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/5/3/26</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/5/3/25">

	<title>Network, Vol. 5, Pages 25: Exploring the Performance of Transparent 5G NTN Architectures Based on Operational Mega-Constellations</title>
	<link>https://www.mdpi.com/2673-8732/5/3/25</link>
	<description>The evolution of 3GPP non-terrestrial networks (NTNs) is enabling new avenues for broadband connectivity via satellite, especially within the scope of 5G. The parallel rise in satellite mega-constellations has further fueled efforts toward ubiquitous global Internet access. This convergence has fostered collaboration between mobile network operators and satellite providers, allowing the former to leverage mature space infrastructure and the latter to integrate with terrestrial mobile standards. However, integrating these technologies presents significant architectural challenges. This study investigates 5G NTN architectures using satellite mega-constellations, focusing on transparent architectures where Starlink is employed to relay the backhaul, midhaul, and new radio (NR) links. The performance of these architectures is assessed through a testbed utilizing OpenAirInterface (OAI) and Open5GS, which collects key user-experience metrics such as round-trip time (RTT) and jitter when pinging the User Plane Function (UPF) in the 5G core (5GC). Results show that backhaul and midhaul relays maintain delays of 50&amp;ndash;60 ms, while NR relays incur delays exceeding one second due to traffic overload introduced by the RFSimulator tool, which is indispensable to transmit the NR signal over Starlink. These findings suggest that while transparent architectures provide valuable insights and utility, regenerative architectures are essential for addressing current time issues and fully realizing the capabilities of space-based broadband services.</description>
	<pubDate>2025-07-18</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 5, Pages 25: Exploring the Performance of Transparent 5G NTN Architectures Based on Operational Mega-Constellations</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/5/3/25">doi: 10.3390/network5030025</a></p>
	<p>Authors:
		Oscar Baselga
		Anna Calveras
		Joan Adrià Ruiz-de-Azua
		</p>
	<p>The evolution of 3GPP non-terrestrial networks (NTNs) is enabling new avenues for broadband connectivity via satellite, especially within the scope of 5G. The parallel rise in satellite mega-constellations has further fueled efforts toward ubiquitous global Internet access. This convergence has fostered collaboration between mobile network operators and satellite providers, allowing the former to leverage mature space infrastructure and the latter to integrate with terrestrial mobile standards. However, integrating these technologies presents significant architectural challenges. This study investigates 5G NTN architectures using satellite mega-constellations, focusing on transparent architectures where Starlink is employed to relay the backhaul, midhaul, and new radio (NR) links. The performance of these architectures is assessed through a testbed utilizing OpenAirInterface (OAI) and Open5GS, which collects key user-experience metrics such as round-trip time (RTT) and jitter when pinging the User Plane Function (UPF) in the 5G core (5GC). Results show that backhaul and midhaul relays maintain delays of 50&ndash;60 ms, while NR relays incur delays exceeding one second due to traffic overload introduced by the RFSimulator tool, which is indispensable to transmit the NR signal over Starlink. These findings suggest that while transparent architectures provide valuable insights and utility, regenerative architectures are essential for addressing current time issues and fully realizing the capabilities of space-based broadband services.</p>
	]]></content:encoded>

	<dc:title>Exploring the Performance of Transparent 5G NTN Architectures Based on Operational Mega-Constellations</dc:title>
			<dc:creator>Oscar Baselga</dc:creator>
			<dc:creator>Anna Calveras</dc:creator>
			<dc:creator>Joan Adrià Ruiz-de-Azua</dc:creator>
		<dc:identifier>doi: 10.3390/network5030025</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2025-07-18</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2025-07-18</prism:publicationDate>
	<prism:volume>5</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>25</prism:startingPage>
		<prism:doi>10.3390/network5030025</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/5/3/25</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/5/3/24">

	<title>Network, Vol. 5, Pages 24: Architectural Design for Digital Twin Networks</title>
	<link>https://www.mdpi.com/2673-8732/5/3/24</link>
	<description>Digital Twin Networks are advanced digital replicas of physical network infrastructures, offering real-time monitoring, analysis, and optimization capabilities. Despite their potential, the absence of a standardized definition and implementation guidelines complicates practical deployment. The existing literature often lacks clarity on tool selection and implementation specifics. In response, this paper aims to address these challenges by providing a complete guide and reference list of essential tools to implement Digital Twin Networks. Following the current research and work-in-progress from the definition initiative, including our own contributions, we propose a structured approach to Digital Twin Network implementation. Our methodology integrates insights from diverse sources to establish a coherent framework for developers and researchers. By synthesizing insights from the literature and practical experience, we define key components and functionalities critical to Digital Twin Network architecture. Additionally, we highlight challenges inherent to Digital Twin Network implementation and offer strategic approaches and mindsets for addressing them. This includes considerations for scalability, interoperability, real-time communication, data modeling, and security, ensuring a holistic approach to building effective Digital Twin Network systems.</description>
	<pubDate>2025-07-09</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 5, Pages 24: Architectural Design for Digital Twin Networks</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/5/3/24">doi: 10.3390/network5030024</a></p>
	<p>Authors:
		Jorg Wieme
		Mathias Baert
		Jeroen Hoebeke
		</p>
	<p>Digital Twin Networks are advanced digital replicas of physical network infrastructures, offering real-time monitoring, analysis, and optimization capabilities. Despite their potential, the absence of a standardized definition and implementation guidelines complicates practical deployment. The existing literature often lacks clarity on tool selection and implementation specifics. In response, this paper aims to address these challenges by providing a complete guide and reference list of essential tools to implement Digital Twin Networks. Following the current research and work-in-progress from the definition initiative, including our own contributions, we propose a structured approach to Digital Twin Network implementation. Our methodology integrates insights from diverse sources to establish a coherent framework for developers and researchers. By synthesizing insights from the literature and practical experience, we define key components and functionalities critical to Digital Twin Network architecture. Additionally, we highlight challenges inherent to Digital Twin Network implementation and offer strategic approaches and mindsets for addressing them. This includes considerations for scalability, interoperability, real-time communication, data modeling, and security, ensuring a holistic approach to building effective Digital Twin Network systems.</p>
	]]></content:encoded>

	<dc:title>Architectural Design for Digital Twin Networks</dc:title>
			<dc:creator>Jorg Wieme</dc:creator>
			<dc:creator>Mathias Baert</dc:creator>
			<dc:creator>Jeroen Hoebeke</dc:creator>
		<dc:identifier>doi: 10.3390/network5030024</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2025-07-09</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2025-07-09</prism:publicationDate>
	<prism:volume>5</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>24</prism:startingPage>
		<prism:doi>10.3390/network5030024</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/5/3/24</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/5/3/23">

	<title>Network, Vol. 5, Pages 23: IoT Applications in Agriculture and Environment: A Systematic Review Based on Bibliometric Study in West Africa</title>
	<link>https://www.mdpi.com/2673-8732/5/3/23</link>
	<description>The Internet of Things (IoT) is an upcoming technology that is increasingly being used for monitoring and analysing environmental parameters and supports the progress of farm machinery. Agriculture is the main source of living for many people, including, for instance, farmers, agronomists and transporters. It can raise incomes, improve food security and benefit the environment. However, food systems are responsible for many environmental problems. While the use of IoT in agriculture and environment is widely deployed in many developed countries, it is underdeveloped in Africa, particularly in West Africa. This paper aims to provide a systematic review on this technology adoption for agriculture and environment in West African countries. To achieve this goal, the analysis of scientific contributions is performed by performing first a bibliometric study, focusing on the selected articles obtained using the Preferred Reporting Items for Systematic Reviews and Meta-Analyses (PRISMA) method, and second a qualitative study. The PRISMA analysis was performed based on 226 publications recorded from one database: Web Of Science (WoS). It has been demonstrated that the annual scientific production significantly increased during this last decade. Our conclusions highlight promising directions where IoT could significantly progress sustainability.</description>
	<pubDate>2025-07-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 5, Pages 23: IoT Applications in Agriculture and Environment: A Systematic Review Based on Bibliometric Study in West Africa</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/5/3/23">doi: 10.3390/network5030023</a></p>
	<p>Authors:
		Michel Dossou
		Steaven Chédé
		Anne-Carole Honfoga
		Marianne Balogoun
		Péniel Dassi
		François Rottenberg
		</p>
	<p>The Internet of Things (IoT) is an upcoming technology that is increasingly being used for monitoring and analysing environmental parameters and supports the progress of farm machinery. Agriculture is the main source of living for many people, including, for instance, farmers, agronomists and transporters. It can raise incomes, improve food security and benefit the environment. However, food systems are responsible for many environmental problems. While the use of IoT in agriculture and environment is widely deployed in many developed countries, it is underdeveloped in Africa, particularly in West Africa. This paper aims to provide a systematic review on this technology adoption for agriculture and environment in West African countries. To achieve this goal, the analysis of scientific contributions is performed by performing first a bibliometric study, focusing on the selected articles obtained using the Preferred Reporting Items for Systematic Reviews and Meta-Analyses (PRISMA) method, and second a qualitative study. The PRISMA analysis was performed based on 226 publications recorded from one database: Web Of Science (WoS). It has been demonstrated that the annual scientific production significantly increased during this last decade. Our conclusions highlight promising directions where IoT could significantly progress sustainability.</p>
	]]></content:encoded>

	<dc:title>IoT Applications in Agriculture and Environment: A Systematic Review Based on Bibliometric Study in West Africa</dc:title>
			<dc:creator>Michel Dossou</dc:creator>
			<dc:creator>Steaven Chédé</dc:creator>
			<dc:creator>Anne-Carole Honfoga</dc:creator>
			<dc:creator>Marianne Balogoun</dc:creator>
			<dc:creator>Péniel Dassi</dc:creator>
			<dc:creator>François Rottenberg</dc:creator>
		<dc:identifier>doi: 10.3390/network5030023</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2025-07-02</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2025-07-02</prism:publicationDate>
	<prism:volume>5</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Systematic Review</prism:section>
	<prism:startingPage>23</prism:startingPage>
		<prism:doi>10.3390/network5030023</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/5/3/23</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/5/3/22">

	<title>Network, Vol. 5, Pages 22: Experimental Studies on Low-Latency RIS Beam Tracking: Edge-Integrated and Visually Steered</title>
	<link>https://www.mdpi.com/2673-8732/5/3/22</link>
	<description>In this study, to address the problems of high feedback latency and redundant codebook traversal in traditional Reconfigurable Intelligent Surface (RIS) beam tracking systems, two novel experimental schemes are proposed: the Edge-Integrated RIS Control Mechanism (EIR-CM) and the Visually Steered RIS Control Mechanism (VSR-CM). The EIR-CM eliminates the feedback latency of the remote server and optimizes the local computation by integrating the RIS control system and the User Equipment (UE) into the same edge server to reduce the beam tuning time by 50%. The VSR-CM realizes beam tracking based on visual perception, and directly maps the UE position to the optimal RIS codebook with a response speed as low as milliseconds. Experimental results show that the EIR-CM reduces the RIS feedback latency to 1&amp;ndash;2 s, and the VSR-CM can be further optimized to less than 0.5 s. The two mechanisms are applicable to 6G communications, smart transport, and drone networks, providing feasibility verification for low-latency and efficient RIS deployment.</description>
	<pubDate>2025-07-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 5, Pages 22: Experimental Studies on Low-Latency RIS Beam Tracking: Edge-Integrated and Visually Steered</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/5/3/22">doi: 10.3390/network5030022</a></p>
	<p>Authors:
		Zekai Wang
		Yuming Nie
		</p>
	<p>In this study, to address the problems of high feedback latency and redundant codebook traversal in traditional Reconfigurable Intelligent Surface (RIS) beam tracking systems, two novel experimental schemes are proposed: the Edge-Integrated RIS Control Mechanism (EIR-CM) and the Visually Steered RIS Control Mechanism (VSR-CM). The EIR-CM eliminates the feedback latency of the remote server and optimizes the local computation by integrating the RIS control system and the User Equipment (UE) into the same edge server to reduce the beam tuning time by 50%. The VSR-CM realizes beam tracking based on visual perception, and directly maps the UE position to the optimal RIS codebook with a response speed as low as milliseconds. Experimental results show that the EIR-CM reduces the RIS feedback latency to 1&ndash;2 s, and the VSR-CM can be further optimized to less than 0.5 s. The two mechanisms are applicable to 6G communications, smart transport, and drone networks, providing feasibility verification for low-latency and efficient RIS deployment.</p>
	]]></content:encoded>

	<dc:title>Experimental Studies on Low-Latency RIS Beam Tracking: Edge-Integrated and Visually Steered</dc:title>
			<dc:creator>Zekai Wang</dc:creator>
			<dc:creator>Yuming Nie</dc:creator>
		<dc:identifier>doi: 10.3390/network5030022</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2025-07-01</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2025-07-01</prism:publicationDate>
	<prism:volume>5</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>22</prism:startingPage>
		<prism:doi>10.3390/network5030022</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/5/3/22</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/5/2/21">

	<title>Network, Vol. 5, Pages 21: A Performance Evaluation for Software Defined Networks with P4</title>
	<link>https://www.mdpi.com/2673-8732/5/2/21</link>
	<description>The exponential growth in the number of devices connected via the internet has led to the need to achieve granular programmability for increased performance, resilience, reduced latency, and jitter. Software Defined Networking (SDN) and Programming Protocol independent Packet Processing (P4) are designed to introduce programmability into the control and data plane of networks, respectively. Despite their individual potential and capabilities, the performance of combining SDN and P4 remains underexplored. This study presents a comprehensive evaluation of SDN with data plane programmability using P4 (SDN+P4) against traditional SDN with Open vSwitch (SDN+OvS), aimed at answering the hypothesis that combining SDN and P4 strengthens the control and data plane programmability and offers improved management and adaptability, which would provide a platform with faster packet processing with reduced jitter, loss, and processing overhead. Mininet was employed to emulate three distinct topologies: multi-path, grid, and transit-stub. Various traffic types were transmitted to assess performance metrics across the three topologies. Our results demonstrate that SDN+P4 outperform SDN+OvS significantly due to parallel processing, flexible parsing, and reduced overhead. The evaluation demonstrates the potential of SDN+P4 to provide a more resilient and stringent service with improved network performance for the future internet and its heterogeneity of applications.</description>
	<pubDate>2025-06-11</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 5, Pages 21: A Performance Evaluation for Software Defined Networks with P4</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/5/2/21">doi: 10.3390/network5020021</a></p>
	<p>Authors:
		Omesh A. Fernando
		Hannan Xiao
		Joseph Spring
		Xianhui Che
		</p>
	<p>The exponential growth in the number of devices connected via the internet has led to the need to achieve granular programmability for increased performance, resilience, reduced latency, and jitter. Software Defined Networking (SDN) and Programming Protocol independent Packet Processing (P4) are designed to introduce programmability into the control and data plane of networks, respectively. Despite their individual potential and capabilities, the performance of combining SDN and P4 remains underexplored. This study presents a comprehensive evaluation of SDN with data plane programmability using P4 (SDN+P4) against traditional SDN with Open vSwitch (SDN+OvS), aimed at answering the hypothesis that combining SDN and P4 strengthens the control and data plane programmability and offers improved management and adaptability, which would provide a platform with faster packet processing with reduced jitter, loss, and processing overhead. Mininet was employed to emulate three distinct topologies: multi-path, grid, and transit-stub. Various traffic types were transmitted to assess performance metrics across the three topologies. Our results demonstrate that SDN+P4 outperform SDN+OvS significantly due to parallel processing, flexible parsing, and reduced overhead. The evaluation demonstrates the potential of SDN+P4 to provide a more resilient and stringent service with improved network performance for the future internet and its heterogeneity of applications.</p>
	]]></content:encoded>

	<dc:title>A Performance Evaluation for Software Defined Networks with P4</dc:title>
			<dc:creator>Omesh A. Fernando</dc:creator>
			<dc:creator>Hannan Xiao</dc:creator>
			<dc:creator>Joseph Spring</dc:creator>
			<dc:creator>Xianhui Che</dc:creator>
		<dc:identifier>doi: 10.3390/network5020021</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2025-06-11</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2025-06-11</prism:publicationDate>
	<prism:volume>5</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>21</prism:startingPage>
		<prism:doi>10.3390/network5020021</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/5/2/21</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/5/2/20">

	<title>Network, Vol. 5, Pages 20: A Practical Implementation of Post-Quantum Cryptography for Secure Wireless Communication</title>
	<link>https://www.mdpi.com/2673-8732/5/2/20</link>
	<description>Recent advances in quantum computing have prompted urgent consideration of the migration of classical cryptographic systems to post-quantum alternatives. However, it is impossible to fully understand the impact that migrating to current Post-Quantum Cryptography (PQC) algorithms will have on various applications without the actual implementation of quantum-resistant cryptography. On the other hand, PQC algorithms come with complexity and long processing times, which may impact the quality of service (QoS) of many applications. Therefore, PQC-based protocols with practical implementations across various applications are essential. This paper introduces a new framework for PQC standalone and PQC&#8211;AES (Advanced Encryption Standard) hybrid public-key encryption (PKE) protocols. Building on prior results, we focus on securing applications such as file transfer, video streaming, and chat-based communication using enhanced PQC-based protocols. The extended PQC-based protocols use a sequence number-based mechanism to effectively counter replay and man-in-the-middle attacks and mitigate standard cybersecurity attack vectors. Experimental evaluations examined encryption/decryption speeds, throughput, and processing overhead for the standalone PQC and the PQC&#8211;AES hybrid schemes, benchmarking them against traditional AES-256 in an existing client&#8211;server environment. The results demonstrate that the new approaches achieve a significant balance between security and system performance compared to conventional deployments. Furthermore, a comprehensive security analysis confirms the robustness and effectiveness of the proposed PQC-based protocols across diverse attack scenarios. 
Notably, the PQC&#8211;AES hybrid protocol demonstrates greater efficiency for applications handling larger data volumes (e.g., 10&#8211;100 KB) with reduced latency, underscoring the practical necessity of carefully balancing security and operational efficiency in the post-quantum migration process.</description>
	<pubDate>2025-06-10</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 5, Pages 20: A Practical Implementation of Post-Quantum Cryptography for Secure Wireless Communication</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/5/2/20">doi: 10.3390/network5020020</a></p>
	<p>Authors:
		Babatunde Ojetunde
		Takuya Kurihara
		Kazuto Yano
		Toshikazu Sakano
		Hiroyuki Yokoyama
		</p>
	<p>Recent advances in quantum computing have prompted urgent consideration of the migration of classical cryptographic systems to post-quantum alternatives. However, it is impossible to fully understand the impact that migrating to current Post-Quantum Cryptography (PQC) algorithms will have on various applications without the actual implementation of quantum-resistant cryptography. On the other hand, PQC algorithms come with complexity and long processing times, which may impact the quality of service (QoS) of many applications. Therefore, PQC-based protocols with practical implementations across various applications are essential. This paper introduces a new framework for PQC standalone and PQC&ndash;AES (Advanced Encryption Standard) hybrid public-key encryption (PKE) protocols. Building on prior results, we focus on securing applications such as file transfer, video streaming, and chat-based communication using enhanced PQC-based protocols. The extended PQC-based protocols use a sequence number-based mechanism to effectively counter replay and man-in-the-middle attacks and mitigate standard cybersecurity attack vectors. Experimental evaluations examined encryption/decryption speeds, throughput, and processing overhead for the standalone PQC and the PQC&ndash;AES hybrid schemes, benchmarking them against traditional AES-256 in an existing client&ndash;server environment. The results demonstrate that the new approaches achieve a significant balance between security and system performance compared to conventional deployments. Furthermore, a comprehensive security analysis confirms the robustness and effectiveness of the proposed PQC-based protocols across diverse attack scenarios. 
Notably, the PQC&ndash;AES hybrid protocol demonstrates greater efficiency for applications handling larger data volumes (e.g., 10&ndash;100 KB) with reduced latency, underscoring the practical necessity of carefully balancing security and operational efficiency in the post-quantum migration process.</p>
	]]></content:encoded>

	<dc:title>A Practical Implementation of Post-Quantum Cryptography for Secure Wireless Communication</dc:title>
			<dc:creator>Babatunde Ojetunde</dc:creator>
			<dc:creator>Takuya Kurihara</dc:creator>
			<dc:creator>Kazuto Yano</dc:creator>
			<dc:creator>Toshikazu Sakano</dc:creator>
			<dc:creator>Hiroyuki Yokoyama</dc:creator>
		<dc:identifier>doi: 10.3390/network5020020</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2025-06-10</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2025-06-10</prism:publicationDate>
	<prism:volume>5</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>20</prism:startingPage>
		<prism:doi>10.3390/network5020020</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/5/2/20</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/5/2/19">

	<title>Network, Vol. 5, Pages 19: RACHEIM: Reinforced Reliable Computing in Cloud by Ensuring Restricted Access Control</title>
	<link>https://www.mdpi.com/2673-8732/5/2/19</link>
	<description>Cloud computing has witnessed rapid growth and notable technological progress in recent years. Nevertheless, it is still regarded as being in its early developmental phase, with substantial potential remaining to be explored&#8212;particularly through integration with emerging technologies such as the Metaverse, Augmented Reality (AR), and Virtual Reality (VR). As the number of service users increases, so does the demand for computational resources, leading data owners to outsource processing tasks to remote cloud servers. The internet-based delivery of cloud computing services consequently expands the attack surface and impacts the trust relationship between the service user and the service provider. To address these challenges, this study proposes a restricted access control framework based on homomorphic encryption (HE) and identity-based encryption (IBE) mechanisms. A formal analysis of the proposed model is also conducted under an unauthenticated communication model. Simulation results indicate that the proposed approach achieves a 20&#8211;40% reduction in encryption and decryption times, respectively, compared with existing state-of-the-art homomorphic encryption schemes. The simulation was performed using a 2048-bit key and data size, consistent with current industry standards, to improve key management efficiency. Additionally, the role-based hierarchy was implemented in a Salesforce cloud environment to ensure secure and restricted access control.</description>
	<pubDate>2025-06-09</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 5, Pages 19: RACHEIM: Reinforced Reliable Computing in Cloud by Ensuring Restricted Access Control</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/5/2/19">doi: 10.3390/network5020019</a></p>
	<p>Authors:
		Urvashi Rahul Saxena
		Rajan Kadel
		</p>
	<p>Cloud computing has witnessed rapid growth and notable technological progress in recent years. Nevertheless, it is still regarded as being in its early developmental phase, with substantial potential remaining to be explored&mdash;particularly through integration with emerging technologies such as the Metaverse, Augmented Reality (AR), and Virtual Reality (VR). As the number of service users increases, so does the demand for computational resources, leading data owners to outsource processing tasks to remote cloud servers. The internet-based delivery of cloud computing services consequently expands the attack surface and impacts the trust relationship between the service user and the service provider. To address these challenges, this study proposes a restricted access control framework based on homomorphic encryption (HE) and identity-based encryption (IBE) mechanisms. A formal analysis of the proposed model is also conducted under an unauthenticated communication model. Simulation results indicate that the proposed approach achieves a 20&ndash;40% reduction in encryption and decryption times, respectively, compared with existing state-of-the-art homomorphic encryption schemes. The simulation was performed using a 2048-bit key and data size, consistent with current industry standards, to improve key management efficiency. Additionally, the role-based hierarchy was implemented in a Salesforce cloud environment to ensure secure and restricted access control.</p>
	]]></content:encoded>

	<dc:title>RACHEIM: Reinforced Reliable Computing in Cloud by Ensuring Restricted Access Control</dc:title>
			<dc:creator>Urvashi Rahul Saxena</dc:creator>
			<dc:creator>Rajan Kadel</dc:creator>
		<dc:identifier>doi: 10.3390/network5020019</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2025-06-09</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2025-06-09</prism:publicationDate>
	<prism:volume>5</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>19</prism:startingPage>
		<prism:doi>10.3390/network5020019</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/5/2/19</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/5/2/18">

	<title>Network, Vol. 5, Pages 18: The Role of Topological Parameters in Wavelength Requirements for Survivable Optical Backbone Networks</title>
	<link>https://www.mdpi.com/2673-8732/5/2/18</link>
	<description>As optical networks operate using light-based transmission, assigning wavelengths to the paths taken by traffic demands is a key aspect of their design. This paper revisits the wavelength assignment problem in optical backbone networks, focusing on survivability via 1 + 1 Optical Channel (OCh) protection, which ensures fault tolerance by duplicating data over two disjoint optical paths. The analysis gives great emphasis to studying the influence of topological parameters on wavelength requirements, with algebraic connectivity being identified as the most significant parameter. The results show that, across a set of 27 real-world networks, the wavelength increment factor, defined as the ratio between the number of wavelengths required with protection and without protection, ranges from 1.49 to 3.07, with a mean value of 2.26. Using synthetic data, formulas were derived to estimate this factor from network parameters, resulting in a mean relative error of 12.7% and errors below 15% in 70% of the real-world cases studied.</description>
	<pubDate>2025-06-04</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 5, Pages 18: The Role of Topological Parameters in Wavelength Requirements for Survivable Optical Backbone Networks</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/5/2/18">doi: 10.3390/network5020018</a></p>
	<p>Authors:
		Filipe Carmo
		João Pires
		</p>
	<p>As optical networks operate using light-based transmission, assigning wavelengths to the paths taken by traffic demands is a key aspect of their design. This paper revisits the wavelength assignment problem in optical backbone networks, focusing on survivability via 1 + 1 Optical Channel (OCh) protection, which ensures fault tolerance by duplicating data over two disjoint optical paths. The analysis gives great emphasis to studying the influence of topological parameters on wavelength requirements, with algebraic connectivity being identified as the most significant parameter. The results show that, across a set of 27 real-world networks, the wavelength increment factor, defined as the ratio between the number of wavelengths required with protection and without protection, ranges from 1.49 to 3.07, with a mean value of 2.26. Using synthetic data, formulas were derived to estimate this factor from network parameters, resulting in a mean relative error of 12.7% and errors below 15% in 70% of the real-world cases studied.</p>
	]]></content:encoded>

	<dc:title>The Role of Topological Parameters in Wavelength Requirements for Survivable Optical Backbone Networks</dc:title>
			<dc:creator>Filipe Carmo</dc:creator>
			<dc:creator>João Pires</dc:creator>
		<dc:identifier>doi: 10.3390/network5020018</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2025-06-04</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2025-06-04</prism:publicationDate>
	<prism:volume>5</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>18</prism:startingPage>
		<prism:doi>10.3390/network5020018</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/5/2/18</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/5/2/17">

	<title>Network, Vol. 5, Pages 17: Optimizing Energy Efficiency in Cloud Data Centers: A Reinforcement Learning-Based Virtual Machine Placement Strategy</title>
	<link>https://www.mdpi.com/2673-8732/5/2/17</link>
	<description>Cloud computing faces growing challenges in energy consumption due to the increasing demand for services and resource usage in data centers. To address this issue, we propose a novel energy-efficient virtual machine (VM) placement strategy that integrates reinforcement learning (Q-learning), a Firefly optimization algorithm, and a VM sensitivity classification model based on random forest and self-organizing map. The proposed method, RLVMP, classifies VMs as sensitive or insensitive and dynamically allocates resources to minimize energy consumption while ensuring compliance with service level agreements (SLAs). Experimental results using the CloudSim simulator, adapted with data from Microsoft Azure, show that our model significantly reduces energy consumption. Specifically, under the lr_1.2_mmt strategy, our model achieves a 5.4% reduction in energy consumption compared to PABFD, 12.8% compared to PSO, and 12% compared to genetic algorithms. Under the iqr_1.5_mc strategy, the reductions are even more significant: 12.11% compared to PABFD, 15.6% compared to PSO, and 18.67% compared to genetic algorithms. Furthermore, our model reduces the number of live migrations, which helps minimize SLA violations. Overall, the combination of Q-learning and the Firefly algorithm enables adaptive, SLA-compliant VM placement with improved energy efficiency.</description>
	<pubDate>2025-05-27</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 5, Pages 17: Optimizing Energy Efficiency in Cloud Data Centers: A Reinforcement Learning-Based Virtual Machine Placement Strategy</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/5/2/17">doi: 10.3390/network5020017</a></p>
	<p>Authors:
		Abdelhadi Amahrouch
		Youssef Saadi
		Said El Kafhali
		</p>
	<p>Cloud computing faces growing challenges in energy consumption due to the increasing demand for services and resource usage in data centers. To address this issue, we propose a novel energy-efficient virtual machine (VM) placement strategy that integrates reinforcement learning (Q-learning), a Firefly optimization algorithm, and a VM sensitivity classification model based on random forest and self-organizing map. The proposed method, RLVMP, classifies VMs as sensitive or insensitive and dynamically allocates resources to minimize energy consumption while ensuring compliance with service level agreements (SLAs). Experimental results using the CloudSim simulator, adapted with data from Microsoft Azure, show that our model significantly reduces energy consumption. Specifically, under the lr_1.2_mmt strategy, our model achieves a 5.4% reduction in energy consumption compared to PABFD, 12.8% compared to PSO, and 12% compared to genetic algorithms. Under the iqr_1.5_mc strategy, the reductions are even more significant: 12.11% compared to PABFD, 15.6% compared to PSO, and 18.67% compared to genetic algorithms. Furthermore, our model reduces the number of live migrations, which helps minimize SLA violations. Overall, the combination of Q-learning and the Firefly algorithm enables adaptive, SLA-compliant VM placement with improved energy efficiency.</p>
	]]></content:encoded>

	<dc:title>Optimizing Energy Efficiency in Cloud Data Centers: A Reinforcement Learning-Based Virtual Machine Placement Strategy</dc:title>
			<dc:creator>Abdelhadi Amahrouch</dc:creator>
			<dc:creator>Youssef Saadi</dc:creator>
			<dc:creator>Said El Kafhali</dc:creator>
		<dc:identifier>doi: 10.3390/network5020017</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2025-05-27</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2025-05-27</prism:publicationDate>
	<prism:volume>5</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>17</prism:startingPage>
		<prism:doi>10.3390/network5020017</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/5/2/17</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/5/2/16">

	<title>Network, Vol. 5, Pages 16: A Survey on Software Defined Network-Enabled Edge Cloud Networks: Challenges and Future Research Directions</title>
	<link>https://www.mdpi.com/2673-8732/5/2/16</link>
	<description>The explosion of connected devices and data transmission in the Internet of Things (IoT) era brings substantial burden on the capability of cloud computing. Moreover, these IoT devices are mostly positioned at the edge of a network and limited in resources. To address these challenges, edge cloud-distributed computing networks emerge. Because of the distributed nature of edge cloud networks, many research works considering software defined networks (SDNs) and network&#8211;function&#8211;virtualization (NFV) could be key enablers for managing, orchestrating, and load balancing resources. This article provides a comprehensive survey of these emerging technologies, focusing on SDN controllers, orchestration, and the function of artificial intelligence (AI) in enhancing the capabilities of controllers within the edge cloud computing networks. More specifically, we present an extensive survey on the research proposals on the integration of SDN controllers and orchestration with the edge cloud networks. We further introduce a holistic overview of SDN-enabled edge cloud networks and an inclusive summary of edge cloud use cases and their key challenges. Finally, we address some challenges and potential research directions for further exploration in this vital research area.</description>
	<pubDate>2025-05-20</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 5, Pages 16: A Survey on Software Defined Network-Enabled Edge Cloud Networks: Challenges and Future Research Directions</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/5/2/16">doi: 10.3390/network5020016</a></p>
	<p>Authors:
		Baha Uddin Kazi
		Md Kawsarul Islam
		Muhammad Mahmudul Haque Siddiqui
		Muhammad Jaseemuddin
		</p>
	<p>The explosion of connected devices and data transmission in the Internet of Things (IoT) era brings substantial burden on the capability of cloud computing. Moreover, these IoT devices are mostly positioned at the edge of a network and limited in resources. To address these challenges, edge cloud-distributed computing networks emerge. Because of the distributed nature of edge cloud networks, many research works considering software defined networks (SDNs) and network&ndash;function&ndash;virtualization (NFV) could be key enablers for managing, orchestrating, and load balancing resources. This article provides a comprehensive survey of these emerging technologies, focusing on SDN controllers, orchestration, and the function of artificial intelligence (AI) in enhancing the capabilities of controllers within the edge cloud computing networks. More specifically, we present an extensive survey on the research proposals on the integration of SDN controllers and orchestration with the edge cloud networks. We further introduce a holistic overview of SDN-enabled edge cloud networks and an inclusive summary of edge cloud use cases and their key challenges. Finally, we address some challenges and potential research directions for further exploration in this vital research area.</p>
	]]></content:encoded>

	<dc:title>A Survey on Software Defined Network-Enabled Edge Cloud Networks: Challenges and Future Research Directions</dc:title>
			<dc:creator>Baha Uddin Kazi</dc:creator>
			<dc:creator>Md Kawsarul Islam</dc:creator>
			<dc:creator>Muhammad Mahmudul Haque Siddiqui</dc:creator>
			<dc:creator>Muhammad Jaseemuddin</dc:creator>
		<dc:identifier>doi: 10.3390/network5020016</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2025-05-20</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2025-05-20</prism:publicationDate>
	<prism:volume>5</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>16</prism:startingPage>
		<prism:doi>10.3390/network5020016</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/5/2/16</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/5/2/15">

	<title>Network, Vol. 5, Pages 15: Evaluation of TOPSIS Algorithm for Multi-Criteria Handover in LEO Satellite Networks: A Sensitivity Analysis</title>
	<link>https://www.mdpi.com/2673-8732/5/2/15</link>
	<description>The Technique for Order Preference by Similarity to Ideal Solution (TOPSIS) is widely recognized as an effective multi-criteria decision-making algorithm for handover management in terrestrial cellular networks, especially in scenarios involving dynamic and multi-faceted criteria. While TOPSIS is widely adopted in terrestrial cellular networks for handover management, its application in satellite networks, particularly in Low Earth Orbit (LEO) constellations, remains limited and underexplored. In this work, the performance of three TOPSIS algorithms is evaluated for handover management in LEO satellite networks, where efficient handover management is crucial due to rapid changes in satellite positions and network conditions. Sensitivity analysis is conducted on Standard Deviation TOPSIS (SD-TOPSIS), Entropy-TOPSIS, and Importance-TOPSIS in the context of LEO satellite networks, assessing their responsiveness to small variations in key performance metrics such as upload speed, download speed, ping, and packet loss. This study uses real-world data from &#8220;Starlink-on-the-road-Dataset&#8221;. Results show that SD-TOPSIS effectively optimizes handover management in dynamic LEO satellite networks thanks to its lower standard deviation scores and reduced score variation rate, thus demonstrating superior stability and lower sensitivity to small variations in performance metrics values compared to both Entropy-TOPSIS and Importance-TOPSIS. This ensures more consistent decision-making, avoidance of unnecessary handovers, and enhanced robustness in rapidly-changing network conditions, making it particularly suitable for real-time services that require stable, low-latency, and reliable connectivity.</description>
	<pubDate>2025-05-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 5, Pages 15: Evaluation of TOPSIS Algorithm for Multi-Criteria Handover in LEO Satellite Networks: A Sensitivity Analysis</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/5/2/15">doi: 10.3390/network5020015</a></p>
	<p>Authors:
		Pascal Buhinyori Ngango
		Marie-Line Lufua Binda
		Michel Matalatala Tamasala
		Pierre Sedi Nzakuna
		Vincenzo Paciello
		Angelo Kuti Lusala
		</p>
	<p>The Technique for Order Preference by Similarity to Ideal Solution (TOPSIS) is widely recognized as an effective multi-criteria decision-making algorithm for handover management in terrestrial cellular networks, especially in scenarios involving dynamic and multi-faceted criteria. While TOPSIS is widely adopted in terrestrial cellular networks for handover management, its application in satellite networks, particularly in Low Earth Orbit (LEO) constellations, remains limited and underexplored. In this work, the performance of three TOPSIS algorithms is evaluated for handover management in LEO satellite networks, where efficient handover management is crucial due to rapid changes in satellite positions and network conditions. Sensitivity analysis is conducted on Standard Deviation TOPSIS (SD-TOPSIS), Entropy-TOPSIS, and Importance-TOPSIS in the context of LEO satellite networks, assessing their responsiveness to small variations in key performance metrics such as upload speed, download speed, ping, and packet loss. This study uses real-world data from &ldquo;Starlink-on-the-road-Dataset&rdquo;. Results show that SD-TOPSIS effectively optimizes handover management in dynamic LEO satellite networks thanks to its lower standard deviation scores and reduced score variation rate, thus demonstrating superior stability and lower sensitivity to small variations in performance metrics values compared to both Entropy-TOPSIS and Importance-TOPSIS. This ensures more consistent decision-making, avoidance of unnecessary handovers, and enhanced robustness in rapidly-changing network conditions, making it particularly suitable for real-time services that require stable, low-latency, and reliable connectivity.</p>
	]]></content:encoded>

	<dc:title>Evaluation of TOPSIS Algorithm for Multi-Criteria Handover in LEO Satellite Networks: A Sensitivity Analysis</dc:title>
			<dc:creator>Pascal Buhinyori Ngango</dc:creator>
			<dc:creator>Marie-Line Lufua Binda</dc:creator>
			<dc:creator>Michel Matalatala Tamasala</dc:creator>
			<dc:creator>Pierre Sedi Nzakuna</dc:creator>
			<dc:creator>Vincenzo Paciello</dc:creator>
			<dc:creator>Angelo Kuti Lusala</dc:creator>
		<dc:identifier>doi: 10.3390/network5020015</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2025-05-02</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2025-05-02</prism:publicationDate>
	<prism:volume>5</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>15</prism:startingPage>
		<prism:doi>10.3390/network5020015</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/5/2/15</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/5/2/14">

	<title>Network, Vol. 5, Pages 14: State of the Art in Internet of Things Standards and Protocols for Precision Agriculture with an Approach to Semantic Interoperability</title>
	<link>https://www.mdpi.com/2673-8732/5/2/14</link>
	<description>The integration of Internet of Things (IoT) technology into the agricultural sector enables the collection and analysis of large amounts of data, facilitating greater control over internal processes, resulting in cost reduction and improved quality of the final product. One of the main challenges in designing an IoT system is the need for interoperability among devices: different sensors collect information in non-homogeneous formats, which are often incompatible with each other. Therefore, the user of the system is forced to use different platforms and software to consult the data, making the analysis complex and cumbersome. The solution to this problem lies in the adoption of an IoT standard that standardizes the output of the data. This paper first provides an overview of the standards and protocols used in precision farming and then presents a system architecture designed to collect measurements from sensors and translate them into a standard. The standard is selected based on an analysis of the state of the art and tailored to meet the specific needs of precision agriculture. With the introduction of a connector device, the system can accommodate any number of different sensors while maintaining the output data in a uniform format. Each type of sensor is associated with a specific connector that intercepts the data intended for the database and translates it into the standard format before forwarding it to the central server. Finally, examples with real sensors are presented to illustrate the operation of the connectors and their role in an interoperable architecture, aiming to combine flexibility and ease of use with low implementation costs.</description>
	<pubDate>2025-04-21</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 5, Pages 14: State of the Art in Internet of Things Standards and Protocols for Precision Agriculture with an Approach to Semantic Interoperability</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/5/2/14">doi: 10.3390/network5020014</a></p>
	<p>Authors:
		Eduard Roccatello
		Antonino Pagano
		Nicolò Levorato
		Massimo Rumor
		</p>
	<p>The integration of Internet of Things (IoT) technology into the agricultural sector enables the collection and analysis of large amounts of data, facilitating greater control over internal processes, resulting in cost reduction and improved quality of the final product. One of the main challenges in designing an IoT system is the need for interoperability among devices: different sensors collect information in non-homogeneous formats, which are often incompatible with each other. Therefore, the user of the system is forced to use different platforms and software to consult the data, making the analysis complex and cumbersome. The solution to this problem lies in the adoption of an IoT standard that standardizes the output of the data. This paper first provides an overview of the standards and protocols used in precision farming and then presents a system architecture designed to collect measurements from sensors and translate them into a standard. The standard is selected based on an analysis of the state of the art and tailored to meet the specific needs of precision agriculture. With the introduction of a connector device, the system can accommodate any number of different sensors while maintaining the output data in a uniform format. Each type of sensor is associated with a specific connector that intercepts the data intended for the database and translates it into the standard format before forwarding it to the central server. Finally, examples with real sensors are presented to illustrate the operation of the connectors and their role in an interoperable architecture, aiming to combine flexibility and ease of use with low implementation costs.</p>
	]]></content:encoded>

	<dc:title>State of the Art in Internet of Things Standards and Protocols for Precision Agriculture with an Approach to Semantic Interoperability</dc:title>
			<dc:creator>Eduard Roccatello</dc:creator>
			<dc:creator>Antonino Pagano</dc:creator>
			<dc:creator>Nicolò Levorato</dc:creator>
			<dc:creator>Massimo Rumor</dc:creator>
		<dc:identifier>doi: 10.3390/network5020014</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2025-04-21</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2025-04-21</prism:publicationDate>
	<prism:volume>5</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>14</prism:startingPage>
		<prism:doi>10.3390/network5020014</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/5/2/14</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/5/2/13">

	<title>Network, Vol. 5, Pages 13: Design and Analysis of an Effective Architecture for Machine Learning Based Intrusion Detection Systems</title>
	<link>https://www.mdpi.com/2673-8732/5/2/13</link>
	<description>The increase in new cyber threats is the result of the rapid growth of using the Internet, thus raising questions about the effectiveness of traditional Intrusion Detection Systems (IDSs). Machine learning (ML) technology is used to enhance cybersecurity in general and especially for reactive approaches, such as traditional IDSs. In several instances, it is seen that a single assailant may direct their efforts towards different servers belonging to an organization. This behavior is often perceived by IDSs as infrequent attacks, thus diminishing the effectiveness of detection. In this context, this paper aims to create a machine learning-based IDS model able to detect malicious traffic received by different organizational network interfaces. A centralized proxy server is designed to receive all the incoming traffic at the organization&amp;rsquo;s servers, scan the traffic by using the proposed IDS, and then redirect the traffic to the requested server. The proposed IDS was evaluated by using three datasets: CIC-MalMem-2022, CIC-IDS-2018, and CIC-IDS-2017. The XGBoost model showed exceptional performance in rapid detection, achieving 99.96%, 99.73%, and 99.84% accuracy rates within short time intervals. The Stacking model achieved the highest level of accuracy among the evaluated models. The developed IDS demonstrated superior accuracy and detection time outcomes compared with previous research in the field.</description>
	<pubDate>2025-04-14</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 5, Pages 13: Design and Analysis of an Effective Architecture for Machine Learning Based Intrusion Detection Systems</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/5/2/13">doi: 10.3390/network5020013</a></p>
	<p>Authors:
		Noora Alromaihi
		Mohsen Rouached
		Aymen Akremi
		</p>
	<p>The increase in new cyber threats is the result of the rapid growth of using the Internet, thus raising questions about the effectiveness of traditional Intrusion Detection Systems (IDSs). Machine learning (ML) technology is used to enhance cybersecurity in general and especially for reactive approaches, such as traditional IDSs. In several instances, it is seen that a single assailant may direct their efforts towards different servers belonging to an organization. This behavior is often perceived by IDSs as infrequent attacks, thus diminishing the effectiveness of detection. In this context, this paper aims to create a machine learning-based IDS model able to detect malicious traffic received by different organizational network interfaces. A centralized proxy server is designed to receive all the incoming traffic at the organization&rsquo;s servers, scan the traffic by using the proposed IDS, and then redirect the traffic to the requested server. The proposed IDS was evaluated by using three datasets: CIC-MalMem-2022, CIC-IDS-2018, and CIC-IDS-2017. The XGBoost model showed exceptional performance in rapid detection, achieving 99.96%, 99.73%, and 99.84% accuracy rates within short time intervals. The Stacking model achieved the highest level of accuracy among the evaluated models. The developed IDS demonstrated superior accuracy and detection time outcomes compared with previous research in the field.</p>
	]]></content:encoded>

	<dc:title>Design and Analysis of an Effective Architecture for Machine Learning Based Intrusion Detection Systems</dc:title>
			<dc:creator>Noora Alromaihi</dc:creator>
			<dc:creator>Mohsen Rouached</dc:creator>
			<dc:creator>Aymen Akremi</dc:creator>
		<dc:identifier>doi: 10.3390/network5020013</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2025-04-14</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2025-04-14</prism:publicationDate>
	<prism:volume>5</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>13</prism:startingPage>
		<prism:doi>10.3390/network5020013</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/5/2/13</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/5/2/12">

	<title>Network, Vol. 5, Pages 12: Age of Information Minimization in Vehicular Edge Computing Networks: A Mask-Assisted Hybrid PPO-Based Method</title>
	<link>https://www.mdpi.com/2673-8732/5/2/12</link>
	<description>With the widespread deployment of various emerging intelligent applications, information timeliness is crucial for intelligent decision-making in vehicular networks, where vehicular edge computing (VEC) has become an important paradigm to enhance computing capabilities by offloading tasks to edge nodes. To promote the information timeliness in VEC, an optimization problem is formulated to minimize the age of information (AoI) by jointly optimizing task offloading and subcarrier allocation. Due to the time-varying channel and the coupling of the continuous and discrete optimization variables, the problem exhibits non-convexity, which is difficult to solve using traditional mathematical optimization methods. To efficiently tackle this challenge, we employ a hybrid proximal policy optimization (HPPO)-based deep reinforcement learning (DRL) method by designing the mixed action space involving both continuous and discrete variables. Moreover, an action masking mechanism is designed to filter out invalid actions in the action space caused by limitations in the effective communication distance between vehicles. As a result, a mask-assisted HPPO (MHPPO) method is proposed by integrating the action masking mechanism into the HPPO. Simulation results show that the proposed MHPPO method achieves an approximately 28.9% reduction in AoI compared with the HPPO method and about a 23% reduction compared with the mask-assisted deep deterministic policy gradient (MDDPG).</description>
	<pubDate>2025-04-14</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 5, Pages 12: Age of Information Minimization in Vehicular Edge Computing Networks: A Mask-Assisted Hybrid PPO-Based Method</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/5/2/12">doi: 10.3390/network5020012</a></p>
	<p>Authors:
		Xiaoli Qin
		Zhifei Zhang
		Chanyuan Meng
		Rui Dong
		Ke Xiong
		Pingyi Fan
		</p>
	<p>With the widespread deployment of various emerging intelligent applications, information timeliness is crucial for intelligent decision-making in vehicular networks, where vehicular edge computing (VEC) has become an important paradigm to enhance computing capabilities by offloading tasks to edge nodes. To promote the information timeliness in VEC, an optimization problem is formulated to minimize the age of information (AoI) by jointly optimizing task offloading and subcarrier allocation. Due to the time-varying channel and the coupling of the continuous and discrete optimization variables, the problem exhibits non-convexity, which is difficult to solve using traditional mathematical optimization methods. To efficiently tackle this challenge, we employ a hybrid proximal policy optimization (HPPO)-based deep reinforcement learning (DRL) method by designing the mixed action space involving both continuous and discrete variables. Moreover, an action masking mechanism is designed to filter out invalid actions in the action space caused by limitations in the effective communication distance between vehicles. As a result, a mask-assisted HPPO (MHPPO) method is proposed by integrating the action masking mechanism into the HPPO. Simulation results show that the proposed MHPPO method achieves an approximately 28.9% reduction in AoI compared with the HPPO method and about a 23% reduction compared with the mask-assisted deep deterministic policy gradient (MDDPG).</p>
	]]></content:encoded>

	<dc:title>Age of Information Minimization in Vehicular Edge Computing Networks: A Mask-Assisted Hybrid PPO-Based Method</dc:title>
			<dc:creator>Xiaoli Qin</dc:creator>
			<dc:creator>Zhifei Zhang</dc:creator>
			<dc:creator>Chanyuan Meng</dc:creator>
			<dc:creator>Rui Dong</dc:creator>
			<dc:creator>Ke Xiong</dc:creator>
			<dc:creator>Pingyi Fan</dc:creator>
		<dc:identifier>doi: 10.3390/network5020012</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2025-04-14</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2025-04-14</prism:publicationDate>
	<prism:volume>5</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>12</prism:startingPage>
		<prism:doi>10.3390/network5020012</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/5/2/12</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/5/2/11">

	<title>Network, Vol. 5, Pages 11: An Experimental Comparison of Basic Device Localization Systems in Wireless Sensor Networks</title>
	<link>https://www.mdpi.com/2673-8732/5/2/11</link>
	<description>Localization plays a crucial role in wireless sensor networks (WSNs) and it has sparked significant research interest. GPSs provide quite accurate positioning estimations, but they are ineffective indoors and in environments like underwater. Power usage and cost are further disadvantages, and so many alternatives have been proposed. Many works in the literature still base localization on RSSI measurements and often rely on methods to mitigate the effects of fluctuations in values, so it is important to know real values of RSSIs measured using common devices. This work presents the main localization techniques and exploits a real testbed to collect and evaluate RSSI measurements. An accuracy evaluation and a comparison among several localization techniques are also provided.</description>
	<pubDate>2025-04-14</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 5, Pages 11: An Experimental Comparison of Basic Device Localization Systems in Wireless Sensor Networks</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/5/2/11">doi: 10.3390/network5020011</a></p>
	<p>Authors:
		Maurizio D’Arienzo
		</p>
	<p>Localization plays a crucial role in wireless sensor networks (WSNs) and it has sparked significant research interest. GPSs provide quite accurate positioning estimations, but they are ineffective indoors and in environments like underwater. Power usage and cost are further disadvantages, and so many alternatives have been proposed. Many works in the literature still base localization on RSSI measurements and often rely on methods to mitigate the effects of fluctuations in values, so it is important to know real values of RSSIs measured using common devices. This work presents the main localization techniques and exploits a real testbed to collect and evaluate RSSI measurements. An accuracy evaluation and a comparison among several localization techniques are also provided.</p>
	]]></content:encoded>

	<dc:title>An Experimental Comparison of Basic Device Localization Systems in Wireless Sensor Networks</dc:title>
			<dc:creator>Maurizio D’Arienzo</dc:creator>
		<dc:identifier>doi: 10.3390/network5020011</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2025-04-14</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2025-04-14</prism:publicationDate>
	<prism:volume>5</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>11</prism:startingPage>
		<prism:doi>10.3390/network5020011</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/5/2/11</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/5/2/10">

	<title>Network, Vol. 5, Pages 10: A Survey of Quality-of-Service and Quality-of-Experience Provisioning in Information-Centric Networks</title>
	<link>https://www.mdpi.com/2673-8732/5/2/10</link>
	<description>Information-centric networking (ICN) is a promising approach to address the limitations of current host-centric IP-based networking. ICN models feature ubiquitous in-network caching to provide faster and more reliable content delivery, name-based routing to provide better scalability, and self-certifying contents to ensure better security. Due to the differences in the core architecture of ICN compared to existing IP-based networks, it requires special considerations to provide quality-of-service (QoS) or quality-of-experience (QoE) support for applications based on ICNs. This paper discusses the latest advances in QoS and QoE research for ICNs. First, an overview of ICN architectures is given, followed by a summary of different factors that influence QoS and QoE. Approaches for improving QoS and QoE in ICNs are then discussed in five main categories: in-network caching, name resolution and routing, transmission and flow control, software-defined networking, and media-streaming-based strategies. Finally, open research questions for providing QoS and QoE support in ICNs are outlined for future research.</description>
	<pubDate>2025-04-14</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 5, Pages 10: A Survey of Quality-of-Service and Quality-of-Experience Provisioning in Information-Centric Networks</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/5/2/10">doi: 10.3390/network5020010</a></p>
	<p>Authors:
		Nazmus Sadat
		Rui Dai
		</p>
	<p>Information-centric networking (ICN) is a promising approach to address the limitations of current host-centric IP-based networking. ICN models feature ubiquitous in-network caching to provide faster and more reliable content delivery, name-based routing to provide better scalability, and self-certifying contents to ensure better security. Due to the differences in the core architecture of ICN compared to existing IP-based networks, it requires special considerations to provide quality-of-service (QoS) or quality-of-experience (QoE) support for applications based on ICNs. This paper discusses the latest advances in QoS and QoE research for ICNs. First, an overview of ICN architectures is given, followed by a summary of different factors that influence QoS and QoE. Approaches for improving QoS and QoE in ICNs are then discussed in five main categories: in-network caching, name resolution and routing, transmission and flow control, software-defined networking, and media-streaming-based strategies. Finally, open research questions for providing QoS and QoE support in ICNs are outlined for future research.</p>
	]]></content:encoded>

	<dc:title>A Survey of Quality-of-Service and Quality-of-Experience Provisioning in Information-Centric Networks</dc:title>
			<dc:creator>Nazmus Sadat</dc:creator>
			<dc:creator>Rui Dai</dc:creator>
		<dc:identifier>doi: 10.3390/network5020010</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2025-04-14</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2025-04-14</prism:publicationDate>
	<prism:volume>5</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>10</prism:startingPage>
		<prism:doi>10.3390/network5020010</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/5/2/10</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/5/2/9">

	<title>Network, Vol. 5, Pages 9: Bounce: A High Performance Satellite-Based Blockchain System</title>
	<link>https://www.mdpi.com/2673-8732/5/2/9</link>
	<description>Blockchains are designed to produce a secure, append-only sequence of transactions. Establishing transaction sequentiality is typically achieved by underlying consensus protocols that either prevent forks entirely (no-forking-ever) or make forks short-lived. The main challenges facing blockchains are to achieve this no-forking condition while achieving high throughput, low response time, and low energy costs. This paper presents the Bounce blockchain protocol along with throughput and response time experiments. The core of the Bounce system is a set of satellites that partition time slots. The satellite for slot i signs a commit record that includes the hash of the commit record of slot i&amp;minus;1 as well as a sequence of zero or more Merkle tree roots whose corresponding Merkle trees each has thousands or millions of transactions. The ledger consists of the transactions in the sequence of the Merkle trees corresponding to the roots of the sequence of commit records. Thus, the satellites work as arbiters that decide the next block(s) for the blockchain. Satellites orbiting around the Earth are harder to tamper with and harder to isolate than terrestrial data centers, though our protocol could work with terrestrial data centers as well. Under reasonable assumptions&amp;mdash;intermittently failing but non-Byzantine (i.e., non-traitorous) satellites, possibly Byzantine Ground Stations, and &amp;ldquo;exposure-averse&amp;rdquo; administrators&amp;mdash;the Bounce System achieves high availability and a no-fork-ever blockchain. Our experiments show that the protocol achieves high transactional throughput (5.2 million transactions per two-second slot), low response time (less than three seconds for &amp;ldquo;premium&amp;rdquo; transactions and less than ten seconds for &amp;ldquo;economy&amp;rdquo; transactions), and minimal energy consumption (under 0.05 joules per transaction). 
Moreover, given five more cloud sites of the kinds currently available in CloudLab, Clemson, we show how the design could achieve throughputs of 15.2 million transactions per two second slot with the same response time profile.</description>
	<pubDate>2025-03-31</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 5, Pages 9: Bounce: A High Performance Satellite-Based Blockchain System</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/5/2/9">doi: 10.3390/network5020009</a></p>
	<p>Authors:
		Xiaoteng Liu
		Taegyun Kim
		Dennis E. Shasha
		</p>
	<p>Blockchains are designed to produce a secure, append-only sequence of transactions. Establishing transaction sequentiality is typically achieved by underlying consensus protocols that either prevent forks entirely (no-forking-ever) or make forks short-lived. The main challenges facing blockchains are to achieve this no-forking condition while achieving high throughput, low response time, and low energy costs. This paper presents the Bounce blockchain protocol along with throughput and response time experiments. The core of the Bounce system is a set of satellites that partition time slots. The satellite for slot i signs a commit record that includes the hash of the commit record of slot i&minus;1 as well as a sequence of zero or more Merkle tree roots whose corresponding Merkle trees each has thousands or millions of transactions. The ledger consists of the transactions in the sequence of the Merkle trees corresponding to the roots of the sequence of commit records. Thus, the satellites work as arbiters that decide the next block(s) for the blockchain. Satellites orbiting around the Earth are harder to tamper with and harder to isolate than terrestrial data centers, though our protocol could work with terrestrial data centers as well. Under reasonable assumptions&mdash;intermittently failing but non-Byzantine (i.e., non-traitorous) satellites, possibly Byzantine Ground Stations, and &ldquo;exposure-averse&rdquo; administrators&mdash;the Bounce System achieves high availability and a no-fork-ever blockchain. Our experiments show that the protocol achieves high transactional throughput (5.2 million transactions per two-second slot), low response time (less than three seconds for &ldquo;premium&rdquo; transactions and less than ten seconds for &ldquo;economy&rdquo; transactions), and minimal energy consumption (under 0.05 joules per transaction). 
Moreover, given five more cloud sites of the kinds currently available in CloudLab, Clemson, we show how the design could achieve throughputs of 15.2 million transactions per two second slot with the same response time profile.</p>
	]]></content:encoded>

	<dc:title>Bounce: A High Performance Satellite-Based Blockchain System</dc:title>
			<dc:creator>Xiaoteng Liu</dc:creator>
			<dc:creator>Taegyun Kim</dc:creator>
			<dc:creator>Dennis E. Shasha</dc:creator>
		<dc:identifier>doi: 10.3390/network5020009</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2025-03-31</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2025-03-31</prism:publicationDate>
	<prism:volume>5</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>9</prism:startingPage>
		<prism:doi>10.3390/network5020009</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/5/2/9</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/5/1/8">

	<title>Network, Vol. 5, Pages 8: A Machine Learning-Based Hybrid Encryption Approach for Securing Messages in Software-Defined Networking</title>
	<link>https://www.mdpi.com/2673-8732/5/1/8</link>
	<description>The security of a network is based on the foundation of confidentiality, integrity, and availability, often referred to as the CIA triad. The privacy of data over a network, maintained by confidentiality, has long been one of the major issues in network settings. With the decoupling of the data plane and control plane in the software-defined networking (SDN) environment, this challenge is significantly amplified. This paper aims to address the challenges of confidentiality in SDN by introducing a genetic algorithm-based hybrid encryption network policy to secure messages across the network. The proposed approach achieved an average entropy of 0.989, revealing a significant improvement in the strength of the encryption with the hybrid mechanism. However, the method exhibited processing overhead, significantly increasing the transmission time for encrypted messages compared to unencrypted transmission. Compared to standalone AES, DES, and RSA, this approach shows better encryption randomness, but a trade-off between security and network performance is evident in the absence of load-balancing techniques.</description>
	<pubDate>2025-03-11</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 5, Pages 8: A Machine Learning-Based Hybrid Encryption Approach for Securing Messages in Software-Defined Networking</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/5/1/8">doi: 10.3390/network5010008</a></p>
	<p>Authors:
		Chitran Pokhrel
		Roshani Ghimire
		Babu R. Dawadi
		Pietro Manzoni
		</p>
	<p>The security of a network is based on the foundation of confidentiality, integrity, and availability, often referred to as the CIA triad. The privacy of data over a network, maintained by confidentiality, has long been one of the major issues in network settings. With the decoupling of the data plane and control plane in the software-defined networking (SDN) environment, this challenge is significantly amplified. This paper aims to address the challenges of confidentiality in SDN by introducing a genetic algorithm-based hybrid encryption network policy to secure messages across the network. The proposed approach achieved an average entropy of 0.989, revealing a significant improvement in the strength of the encryption with the hybrid mechanism. However, the method exhibited processing overhead, significantly increasing the transmission time for encrypted messages compared to unencrypted transmission. Compared to standalone AES, DES, and RSA, this approach shows better encryption randomness, but a trade-off between security and network performance is evident in the absence of load-balancing techniques.</p>
	]]></content:encoded>

	<dc:title>A Machine Learning-Based Hybrid Encryption Approach for Securing Messages in Software-Defined Networking</dc:title>
			<dc:creator>Chitran Pokhrel</dc:creator>
			<dc:creator>Roshani Ghimire</dc:creator>
			<dc:creator>Babu R. Dawadi</dc:creator>
			<dc:creator>Pietro Manzoni</dc:creator>
		<dc:identifier>doi: 10.3390/network5010008</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2025-03-11</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2025-03-11</prism:publicationDate>
	<prism:volume>5</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>8</prism:startingPage>
		<prism:doi>10.3390/network5010008</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/5/1/8</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/5/1/7">

	<title>Network, Vol. 5, Pages 7: Network Tower Sharing Analysis in Greece: A Structure&amp;ndash;Conduct&amp;ndash;Performance Approach</title>
	<link>https://www.mdpi.com/2673-8732/5/1/7</link>
	<description>The paper intends to contribute to readers&amp;rsquo; comprehension of the Greek telecommunications market, focusing on the strategic decisions associated with network tower-sharing analysis in Greece. The Greek telecommunications industry is described for the first time following the Structure&amp;ndash;Conduct&amp;ndash;Performance (SCP) paradigm of Industrial Organisation (IO), as a methodological tool of analysis. In that respect, an SCP model in its extended form is constructed, aiming to examine how structure, conduct, and performance interrelate to each other. More precisely, the SCP model explains how strategic decisions regarding tower infrastructure sharing between 2013&amp;ndash;2022 were developed, as a result of a series of interactions and feedback effects, amongst market structure, operators&amp;rsquo; conducts, and performances, resulting in strengthening competition and reshaping market structure with the entrance of a new player in the Greek mobile market, an independent TowerCo (Athens, Greece) in Greece. International tendencies and competition issues influencing domestic growth potentialities and alternative operators&amp;rsquo; concentration will be addressed, too. The paper concludes with presenting a basically qualitative, explanatory interpretive analysis of the perspectives of network tower-sharing analysis in the Greek telecommunication industry, including policy recommendations for the near future and thoughts on future research, as well.</description>
	<pubDate>2025-02-20</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 5, Pages 7: Network Tower Sharing Analysis in Greece: A Structure&ndash;Conduct&ndash;Performance Approach</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/5/1/7">doi: 10.3390/network5010007</a></p>
	<p>Authors:
		Vasileios Argyroulis
		Antonios Kargas
		Dimitris Varoutas
		</p>
	<p>The paper intends to contribute to readers&rsquo; comprehension of the Greek telecommunications market, focusing on the strategic decisions associated with network tower-sharing analysis in Greece. The Greek telecommunications industry is described for the first time following the Structure&ndash;Conduct&ndash;Performance (SCP) paradigm of Industrial Organisation (IO), as a methodological tool of analysis. In that respect, an SCP model in its extended form is constructed, aiming to examine how structure, conduct, and performance interrelate to each other. More precisely, the SCP model explains how strategic decisions regarding tower infrastructure sharing between 2013&ndash;2022 were developed, as a result of a series of interactions and feedback effects, amongst market structure, operators&rsquo; conducts, and performances, resulting in strengthening competition and reshaping market structure with the entrance of a new player in the Greek mobile market, an independent TowerCo (Athens, Greece) in Greece. International tendencies and competition issues influencing domestic growth potentialities and alternative operators&rsquo; concentration will be addressed, too. The paper concludes with presenting a basically qualitative, explanatory interpretive analysis of the perspectives of network tower-sharing analysis in the Greek telecommunication industry, including policy recommendations for the near future and thoughts on future research, as well.</p>
	]]></content:encoded>

	<dc:title>Network Tower Sharing Analysis in Greece: A Structure&amp;ndash;Conduct&amp;ndash;Performance Approach</dc:title>
			<dc:creator>Vasileios Argyroulis</dc:creator>
			<dc:creator>Antonios Kargas</dc:creator>
			<dc:creator>Dimitris Varoutas</dc:creator>
		<dc:identifier>doi: 10.3390/network5010007</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2025-02-20</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2025-02-20</prism:publicationDate>
	<prism:volume>5</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>7</prism:startingPage>
		<prism:doi>10.3390/network5010007</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/5/1/7</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/5/1/5">

	<title>Network, Vol. 5, Pages 5: Signature-Based Security Analysis and Detection of IoT Threats in Advanced Message Queuing Protocol</title>
	<link>https://www.mdpi.com/2673-8732/5/1/5</link>
	<description>The Advanced Message Queuing Protocol (AMQP) is a widely used communication standard in IoT systems due to its robust and reliable message delivery capabilities. However, its increasing adoption has made it a target for various cyber threats, including Distributed Denial of Service (DDoS), Man-in-the-Middle (MitM), and brute force attacks. This study presents a comprehensive analysis of AMQP-specific vulnerabilities and introduces a statistical model for the detection and classification of malicious activities in IoT networks. Leveraging a custom-designed IoT testbed, realistic attack scenarios were simulated, and a dataset encompassing normal, malicious, and mixed traffic was generated. Unique attack signatures were identified and validated through repeated experiments, forming the foundation of a signature-based detection mechanism tailored for AMQP networks. The proposed model demonstrated high accuracy in detecting and classifying attack-specific traffic while maintaining a low false positive rate for benign traffic. Notable results include effective detection of RST packets in DDoS scenarios, precise classification of MitM attack patterns, and identification of brute force attempts on AMQP systems. This research highlights the efficacy of signature-based approaches in enhancing IoT security and offers a benchmark for future machine learning-driven detection systems. By addressing AMQP-specific challenges, the study contributes to the development of resilient and secure IoT ecosystems.</description>
	<pubDate>2025-02-17</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 5, Pages 5: Signature-Based Security Analysis and Detection of IoT Threats in Advanced Message Queuing Protocol</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/5/1/5">doi: 10.3390/network5010005</a></p>
	<p>Authors:
		Mohammad Emran Hashimyar
		Mahdi Aiash
		Ali Khoshkholghi
		Giacomo Nalli
		</p>
	<p>The Advanced Message Queuing Protocol (AMQP) is a widely used communication standard in IoT systems due to its robust and reliable message delivery capabilities. However, its increasing adoption has made it a target for various cyber threats, including Distributed Denial of Service (DDoS), Man-in-the-Middle (MitM), and brute force attacks. This study presents a comprehensive analysis of AMQP-specific vulnerabilities and introduces a statistical model for the detection and classification of malicious activities in IoT networks. Leveraging a custom-designed IoT testbed, realistic attack scenarios were simulated, and a dataset encompassing normal, malicious, and mixed traffic was generated. Unique attack signatures were identified and validated through repeated experiments, forming the foundation of a signature-based detection mechanism tailored for AMQP networks. The proposed model demonstrated high accuracy in detecting and classifying attack-specific traffic while maintaining a low false positive rate for benign traffic. Notable results include effective detection of RST packets in DDoS scenarios, precise classification of MitM attack patterns, and identification of brute force attempts on AMQP systems. This research highlights the efficacy of signature-based approaches in enhancing IoT security and offers a benchmark for future machine learning-driven detection systems. By addressing AMQP-specific challenges, the study contributes to the development of resilient and secure IoT ecosystems.</p>
	]]></content:encoded>

	<dc:title>Signature-Based Security Analysis and Detection of IoT Threats in Advanced Message Queuing Protocol</dc:title>
			<dc:creator>Mohammad Emran Hashimyar</dc:creator>
			<dc:creator>Mahdi Aiash</dc:creator>
			<dc:creator>Ali Khoshkholghi</dc:creator>
			<dc:creator>Giacomo Nalli</dc:creator>
		<dc:identifier>doi: 10.3390/network5010005</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2025-02-17</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2025-02-17</prism:publicationDate>
	<prism:volume>5</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>5</prism:startingPage>
		<prism:doi>10.3390/network5010005</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/5/1/5</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/5/1/6">

	<title>Network, Vol. 5, Pages 6: GAOR: Genetic Algorithm-Based Optimization for Machine Learning Robustness in Communication Networks</title>
	<link>https://www.mdpi.com/2673-8732/5/1/6</link>
	<description>Machine learning (ML) promises advances in automation and threat detection for the future generations of communication networks. However, new threats are introduced, as adversaries target ML systems with malicious data. Adversarial attacks on tree-based ML models involve crafting input perturbations that exploit non-smooth decision boundaries, causing misclassifications. These so-called evasion attacks are imperceptible, as they do not significantly alter the input data distribution and have been shown to degrade the performance of tree-based models across various tasks. Adversarial training and genetic algorithms have been proposed as potential defenses against these attacks. In this paper, we explore the robustness of tree-based models for network intrusion detection systems. This study evaluates an optimization approach inspired by genetic algorithms to generate adversarial samples and studies the impact of adversarial training on the accuracy of attack detection. This paper exposed random forest and extreme gradient boosting classifiers to various adversarial samples generated from communication network-related CIC-IDS2019 and 5G-NIDD datasets. The results indicate that the improvements of robustness to adversarial attacks come with a cost to the accuracy of the network intrusion detection models. These costs can be optimized with intelligent, use case-specific feature engineering.</description>
	<pubDate>2025-02-17</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 5, Pages 6: GAOR: Genetic Algorithm-Based Optimization for Machine Learning Robustness in Communication Networks</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/5/1/6">doi: 10.3390/network5010006</a></p>
	<p>Authors:
		Aderonke Thompson
		Jani Suomalainen
		</p>
	<p>Machine learning (ML) promises advances in automation and threat detection for the future generations of communication networks. However, new threats are introduced, as adversaries target ML systems with malicious data. Adversarial attacks on tree-based ML models involve crafting input perturbations that exploit non-smooth decision boundaries, causing misclassifications. These so-called evasion attacks are imperceptible, as they do not significantly alter the input data distribution and have been shown to degrade the performance of tree-based models across various tasks. Adversarial training and genetic algorithms have been proposed as potential defenses against these attacks. In this paper, we explore the robustness of tree-based models for network intrusion detection systems. This study evaluates an optimization approach inspired by genetic algorithms to generate adversarial samples and studies the impact of adversarial training on the accuracy of attack detection. This paper exposed random forest and extreme gradient boosting classifiers to various adversarial samples generated from communication network-related CIC-IDS2019 and 5G-NIDD datasets. The results indicate that the improvements of robustness to adversarial attacks come with a cost to the accuracy of the network intrusion detection models. These costs can be optimized with intelligent, use case-specific feature engineering.</p>
	]]></content:encoded>

	<dc:title>GAOR: Genetic Algorithm-Based Optimization for Machine Learning Robustness in Communication Networks</dc:title>
			<dc:creator>Aderonke Thompson</dc:creator>
			<dc:creator>Jani Suomalainen</dc:creator>
		<dc:identifier>doi: 10.3390/network5010006</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2025-02-17</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2025-02-17</prism:publicationDate>
	<prism:volume>5</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>6</prism:startingPage>
		<prism:doi>10.3390/network5010006</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/5/1/6</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/5/1/4">

	<title>Network, Vol. 5, Pages 4: Simulation-Based Evaluation of V2X System with Variable Computational Infrastructure</title>
	<link>https://www.mdpi.com/2673-8732/5/1/4</link>
	<description>The issue of organizing efficient interaction between vehicle-to-everything (V2X) system elements has become increasingly critical in recent years. Utilizing V2X technology enables achieving the necessary balance of safety, reducing system load, and ensuring a high degree of vehicle automation. This study aims to develop a simulation system for V2X applications in various element placement configurations and conduct a numerical analysis of several V2X system interaction schemes. The research analyzes various methods, including clustering, edge computing, and fog computing, aimed at minimizing system losses. The results demonstrate that each proposed model can be effectively implemented on mobile nodes. The results also provide insights into the average expected request processing times, thereby enhancing the organization of the V2X system. The authors propose a model that enables the distribution of system parameters and resources for diverse computational tasks, which is essential for the successful implementation and utilization of V2X technology.</description>
	<pubDate>2025-02-14</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 5, Pages 4: Simulation-Based Evaluation of V2X System with Variable Computational Infrastructure</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/5/1/4">doi: 10.3390/network5010004</a></p>
	<p>Authors:
		Andrei Vladyko
		Pavel Plotnikov
		Gleb Tambovtsev
		</p>
	<p>The issue of organizing efficient interaction between vehicle-to-everything (V2X) system elements has become increasingly critical in recent years. Utilizing V2X technology enables achieving the necessary balance of safety, reducing system load, and ensuring a high degree of vehicle automation. This study aims to develop a simulation system for V2X applications in various element placement configurations and conduct a numerical analysis of several V2X system interaction schemes. The research analyzes various methods, including clustering, edge computing, and fog computing, aimed at minimizing system losses. The results demonstrate that each proposed model can be effectively implemented on mobile nodes. The results also provide insights into the average expected request processing times, thereby enhancing the organization of the V2X system. The authors propose a model that enables the distribution of system parameters and resources for diverse computational tasks, which is essential for the successful implementation and utilization of V2X technology.</p>
	]]></content:encoded>

	<dc:title>Simulation-Based Evaluation of V2X System with Variable Computational Infrastructure</dc:title>
			<dc:creator>Andrei Vladyko</dc:creator>
			<dc:creator>Pavel Plotnikov</dc:creator>
			<dc:creator>Gleb Tambovtsev</dc:creator>
		<dc:identifier>doi: 10.3390/network5010004</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2025-02-14</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2025-02-14</prism:publicationDate>
	<prism:volume>5</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>4</prism:startingPage>
		<prism:doi>10.3390/network5010004</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/5/1/4</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/5/1/3">

	<title>Network, Vol. 5, Pages 3: Modified Index Policies for Multi-Armed Bandits with Network-like Markovian Dependencies</title>
	<link>https://www.mdpi.com/2673-8732/5/1/3</link>
	<description>Sequential decision-making in dynamic and interconnected environments is a cornerstone of numerous applications, ranging from communication networks and finance to distributed blockchain systems and IoT frameworks. The multi-armed bandit (MAB) problem is a fundamental model in this domain that traditionally assumes independent and identically distributed (iid) rewards, which limits its effectiveness in capturing the inherent dependencies and state dynamics present in some real-world scenarios. In this paper, we lay a theoretical framework for a modified MAB model in which each arm’s reward is generated by a hidden Markov process. In our model, each arm undergoes Markov state transitions independent of play in a way that results in varying reward distributions and heightened uncertainty in reward observations. The number of states for each arm can be up to three states. A key challenge arises from the fact that the underlying states governing each arm’s rewards remain hidden at the time of selection. To address this, we adapt traditional index-based policies and develop a modified index approach tailored to accommodate Markovian transitions and enhance selection efficiency for our model. Our proposed Markovian Upper Confidence Bound (MC-UCB) policy achieves logarithmic regret. Comparative analysis with the classical UCB algorithm reveals that MC-UCB consistently achieves approximately a 15% reduction in cumulative regret. This work provides significant theoretical insights and lays a robust foundation for future research aimed at optimizing decision-making processes in complex, networked systems with hidden state dependencies.</description>
	<pubDate>2025-01-29</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 5, Pages 3: Modified Index Policies for Multi-Armed Bandits with Network-like Markovian Dependencies</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/5/1/3">doi: 10.3390/network5010003</a></p>
	<p>Authors:
		Abdalaziz Sawwan
		Jie Wu
		</p>
	<p>Sequential decision-making in dynamic and interconnected environments is a cornerstone of numerous applications, ranging from communication networks and finance to distributed blockchain systems and IoT frameworks. The multi-armed bandit (MAB) problem is a fundamental model in this domain that traditionally assumes independent and identically distributed (iid) rewards, which limits its effectiveness in capturing the inherent dependencies and state dynamics present in some real-world scenarios. In this paper, we lay a theoretical framework for a modified MAB model in which each arm’s reward is generated by a hidden Markov process. In our model, each arm undergoes Markov state transitions independent of play in a way that results in varying reward distributions and heightened uncertainty in reward observations. The number of states for each arm can be up to three states. A key challenge arises from the fact that the underlying states governing each arm’s rewards remain hidden at the time of selection. To address this, we adapt traditional index-based policies and develop a modified index approach tailored to accommodate Markovian transitions and enhance selection efficiency for our model. Our proposed Markovian Upper Confidence Bound (MC-UCB) policy achieves logarithmic regret. Comparative analysis with the classical UCB algorithm reveals that MC-UCB consistently achieves approximately a 15% reduction in cumulative regret. This work provides significant theoretical insights and lays a robust foundation for future research aimed at optimizing decision-making processes in complex, networked systems with hidden state dependencies.</p>
	]]></content:encoded>

	<dc:title>Modified Index Policies for Multi-Armed Bandits with Network-like Markovian Dependencies</dc:title>
			<dc:creator>Abdalaziz Sawwan</dc:creator>
			<dc:creator>Jie Wu</dc:creator>
		<dc:identifier>doi: 10.3390/network5010003</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2025-01-29</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2025-01-29</prism:publicationDate>
	<prism:volume>5</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>3</prism:startingPage>
		<prism:doi>10.3390/network5010003</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/5/1/3</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/5/1/2">

	<title>Network, Vol. 5, Pages 2: GreenNav: Spatiotemporal Prediction of CO2 Emissions in Paris Road Traffic Using a Hybrid CNN-LSTM Model</title>
	<link>https://www.mdpi.com/2673-8732/5/1/2</link>
	<description>In a global context where reducing the carbon footprint has become an urgent necessity, this article presents a hybrid CNN-LSTM prediction model to estimate CO2 emission rates of Paris road traffic using spatio-temporal data. Our hybrid prediction model relies on a real-time road traffic database that we built by fusing several APIs and datasets. In particular, we trained two specialized models: a CNN to extract spatial patterns and an LSTM to capture temporal dynamics. By merging their outputs, we leverage both spatial and temporal dependencies, ensuring more accurate predictions. Thus, this article aims to compare various strategies and configurations, allowing us to identify the optimal architecture and parameters for our CNN-LSTM model. Moreover, to refine the predictive learning evolution of our hybrid model, we used optimization techniques like gradient descent to monitor the learning progress. The results show that our hybrid CNN-LSTM model achieved an R2 value of 0.91 and an RMSE of 0.086, outperforming conventional models regarding CO2 emission rate prediction accuracy. These results validate the efficiency and relevance of using hybrid CNN-LSTM models for the spatio-temporal modelling of CO2 emissions in the context of road traffic.</description>
	<pubDate>2025-01-10</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 5, Pages 2: GreenNav: Spatiotemporal Prediction of CO2 Emissions in Paris Road Traffic Using a Hybrid CNN-LSTM Model</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/5/1/2">doi: 10.3390/network5010002</a></p>
	<p>Authors:
		Youssef Mekouar
		Imad Saleh
		Mohammed Karim
		</p>
	<p>In a global context where reducing the carbon footprint has become an urgent necessity, this article presents a hybrid CNN-LSTM prediction model to estimate CO2 emission rates of Paris road traffic using spatio-temporal data. Our hybrid prediction model relies on a real-time road traffic database that we built by fusing several APIs and datasets. In particular, we trained two specialized models: a CNN to extract spatial patterns and an LSTM to capture temporal dynamics. By merging their outputs, we leverage both spatial and temporal dependencies, ensuring more accurate predictions. Thus, this article aims to compare various strategies and configurations, allowing us to identify the optimal architecture and parameters for our CNN-LSTM model. Moreover, to refine the predictive learning evolution of our hybrid model, we used optimization techniques like gradient descent to monitor the learning progress. The results show that our hybrid CNN-LSTM model achieved an R2 value of 0.91 and an RMSE of 0.086, outperforming conventional models regarding CO2 emission rate prediction accuracy. These results validate the efficiency and relevance of using hybrid CNN-LSTM models for the spatio-temporal modelling of CO2 emissions in the context of road traffic.</p>
	]]></content:encoded>

	<dc:title>GreenNav: Spatiotemporal Prediction of CO2 Emissions in Paris Road Traffic Using a Hybrid CNN-LSTM Model</dc:title>
			<dc:creator>Youssef Mekouar</dc:creator>
			<dc:creator>Imad Saleh</dc:creator>
			<dc:creator>Mohammed Karim</dc:creator>
		<dc:identifier>doi: 10.3390/network5010002</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2025-01-10</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2025-01-10</prism:publicationDate>
	<prism:volume>5</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2</prism:startingPage>
		<prism:doi>10.3390/network5010002</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/5/1/2</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/5/1/1">

	<title>Network, Vol. 5, Pages 1: Enhancing Communication Networks in the New Era with Artificial Intelligence: Techniques, Applications, and Future Directions</title>
	<link>https://www.mdpi.com/2673-8732/5/1/1</link>
	<description>Artificial intelligence (AI) transforms communication networks by enabling more efficient data management, enhanced security, and optimized performance across diverse environments, from dense urban 5G/6G networks to expansive IoT and cloud-based systems. Motivated by the increasing need for reliable, high-speed, and secure connectivity, this study explores key AI applications, including traffic prediction, load balancing, intrusion detection, and self-organizing network capabilities. Through detailed case studies, I illustrate AI’s effectiveness in managing bandwidth in high-density urban networks, securing IoT devices and edge networks, and enhancing security in cloud-based communications through real-time intrusion and anomaly detection. The findings demonstrate AI’s substantial impact on creating adaptive, secure, and efficient communication networks, addressing current and future challenges. Key directions for future work include advancing AI-driven network resilience, refining predictive models, and exploring ethical considerations for AI deployment in network management.</description>
	<pubDate>2025-01-06</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 5, Pages 1: Enhancing Communication Networks in the New Era with Artificial Intelligence: Techniques, Applications, and Future Directions</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/5/1/1">doi: 10.3390/network5010001</a></p>
	<p>Authors:
		Mohammed El-Hajj
		</p>
	<p>Artificial intelligence (AI) transforms communication networks by enabling more efficient data management, enhanced security, and optimized performance across diverse environments, from dense urban 5G/6G networks to expansive IoT and cloud-based systems. Motivated by the increasing need for reliable, high-speed, and secure connectivity, this study explores key AI applications, including traffic prediction, load balancing, intrusion detection, and self-organizing network capabilities. Through detailed case studies, I illustrate AI’s effectiveness in managing bandwidth in high-density urban networks, securing IoT devices and edge networks, and enhancing security in cloud-based communications through real-time intrusion and anomaly detection. The findings demonstrate AI’s substantial impact on creating adaptive, secure, and efficient communication networks, addressing current and future challenges. Key directions for future work include advancing AI-driven network resilience, refining predictive models, and exploring ethical considerations for AI deployment in network management.</p>
	]]></content:encoded>

	<dc:title>Enhancing Communication Networks in the New Era with Artificial Intelligence: Techniques, Applications, and Future Directions</dc:title>
			<dc:creator>Mohammed El-Hajj</dc:creator>
		<dc:identifier>doi: 10.3390/network5010001</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2025-01-06</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2025-01-06</prism:publicationDate>
	<prism:volume>5</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>1</prism:startingPage>
		<prism:doi>10.3390/network5010001</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/5/1/1</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/4/4/29">

	<title>Network, Vol. 4, Pages 586-608: Evaluation of Battery Management Systems for Electric Vehicles Using Traditional and Modern Estimation Methods</title>
	<link>https://www.mdpi.com/2673-8732/4/4/29</link>
	<description>This paper presents the development of an advanced battery management system (BMS) for electric vehicles (EVs), designed to enhance battery performance, safety, and longevity. Central to the BMS is its precise monitoring of critical parameters, including voltage, current, and temperature, enabled by dedicated sensors. These sensors facilitate accurate calculations of the state of charge (SOC) and state of health (SOH), with real-time data displayed through an IoT cloud interface. The proposed BMS employs data-driven approaches, like advanced Kalman filters (KF), for battery state estimation, allowing continuous updates to the battery state with improved accuracy and adaptability during each charging cycle. Simulation tests conducted in MATLAB’s Simulink across multiple charging and discharging cycles demonstrate the superior accuracy of the advanced Kalman filter (KF), in handling non-linear battery behaviours. Results indicate that the proposed BMS achieves a significantly lower error margin in SOC tracking, ranging from 0.32% to 1%, compared to traditional methods with error margins up to 5%. These findings underscore the importance of integrating robust sensor systems in BMSs to optimise EV battery management, reduce maintenance costs, and improve battery sustainability.</description>
	<pubDate>2024-12-21</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 4, Pages 586-608: Evaluation of Battery Management Systems for Electric Vehicles Using Traditional and Modern Estimation Methods</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/4/4/29">doi: 10.3390/network4040029</a></p>
	<p>Authors:
		Muhammad Talha Mumtaz Noreen
		Mohammad Hossein Fouladfar
		Nagham Saeed
		</p>
	<p>This paper presents the development of an advanced battery management system (BMS) for electric vehicles (EVs), designed to enhance battery performance, safety, and longevity. Central to the BMS is its precise monitoring of critical parameters, including voltage, current, and temperature, enabled by dedicated sensors. These sensors facilitate accurate calculations of the state of charge (SOC) and state of health (SOH), with real-time data displayed through an IoT cloud interface. The proposed BMS employs data-driven approaches, like advanced Kalman filters (KF), for battery state estimation, allowing continuous updates to the battery state with improved accuracy and adaptability during each charging cycle. Simulation tests conducted in MATLAB’s Simulink across multiple charging and discharging cycles demonstrate the superior accuracy of the advanced Kalman filter (KF), in handling non-linear battery behaviours. Results indicate that the proposed BMS achieves a significantly lower error margin in SOC tracking, ranging from 0.32% to 1%, compared to traditional methods with error margins up to 5%. These findings underscore the importance of integrating robust sensor systems in BMSs to optimise EV battery management, reduce maintenance costs, and improve battery sustainability.</p>
	]]></content:encoded>

	<dc:title>Evaluation of Battery Management Systems for Electric Vehicles Using Traditional and Modern Estimation Methods</dc:title>
			<dc:creator>Muhammad Talha Mumtaz Noreen</dc:creator>
			<dc:creator>Mohammad Hossein Fouladfar</dc:creator>
			<dc:creator>Nagham Saeed</dc:creator>
		<dc:identifier>doi: 10.3390/network4040029</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2024-12-21</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2024-12-21</prism:publicationDate>
	<prism:volume>4</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>586</prism:startingPage>
		<prism:doi>10.3390/network4040029</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/4/4/29</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/4/4/28">

	<title>Network, Vol. 4, Pages 567-585: Secured Real-Time Machine Communication Protocol</title>
	<link>https://www.mdpi.com/2673-8732/4/4/28</link>
	<description>In this paper, we introduce the Secured Real-Time Machine Communication Protocol (SRMCP), a novel industrial communication protocol designed to address the increasing demand for security and performance in Industry 4.0 environments. SRMCP integrates post-quantum cryptographic techniques, including the Kyber Key Encapsulation Mechanism (Kyber-KEM) and AES-GCM encryption, to ensure robust protection against both current and future cryptographic threats. We also present an innovative “Port Hopping” mechanism inspired by frequency hopping, enhancing security by distributing communication across multiple channels. Comparative performance analysis was conducted with widely-used protocols such as ModBus and the OPC UA, focusing on key metrics such as connection, reading, and writing times across local and remote networks. Results demonstrate that SRMCP outperforms ModBus in reading and writing operations while offering enhanced security, although it has a higher connection time due to its dual-layer encryption. The OPC UA, while secure, lags significantly in performance, making it less suitable for real-time applications. The findings suggest that SRMCP is a viable solution for secure and efficient machine communication in modern industrial settings, particularly where quantum-safe security is a concern.</description>
	<pubDate>2024-12-12</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 4, Pages 567-585: Secured Real-Time Machine Communication Protocol</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/4/4/28">doi: 10.3390/network4040028</a></p>
	<p>Authors:
		Yifei Ren
		Lakmal Rupasinghe
		Siavash Khaksar
		Nasim Ferdosian
		Iain Murray
		</p>
	<p>In this paper, we introduce the Secured Real-Time Machine Communication Protocol (SRMCP), a novel industrial communication protocol designed to address the increasing demand for security and performance in Industry 4.0 environments. SRMCP integrates post-quantum cryptographic techniques, including the Kyber Key Encapsulation Mechanism (Kyber-KEM) and AES-GCM encryption, to ensure robust protection against both current and future cryptographic threats. We also present an innovative “Port Hopping” mechanism inspired by frequency hopping, enhancing security by distributing communication across multiple channels. Comparative performance analysis was conducted with widely-used protocols such as ModBus and the OPC UA, focusing on key metrics such as connection, reading, and writing times across local and remote networks. Results demonstrate that SRMCP outperforms ModBus in reading and writing operations while offering enhanced security, although it has a higher connection time due to its dual-layer encryption. The OPC UA, while secure, lags significantly in performance, making it less suitable for real-time applications. The findings suggest that SRMCP is a viable solution for secure and efficient machine communication in modern industrial settings, particularly where quantum-safe security is a concern.</p>
	]]></content:encoded>

	<dc:title>Secured Real-Time Machine Communication Protocol</dc:title>
			<dc:creator>Yifei Ren</dc:creator>
			<dc:creator>Lakmal Rupasinghe</dc:creator>
			<dc:creator>Siavash Khaksar</dc:creator>
			<dc:creator>Nasim Ferdosian</dc:creator>
			<dc:creator>Iain Murray</dc:creator>
		<dc:identifier>doi: 10.3390/network4040028</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2024-12-12</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2024-12-12</prism:publicationDate>
	<prism:volume>4</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>567</prism:startingPage>
		<prism:doi>10.3390/network4040028</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/4/4/28</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/4/4/27">

	<title>Network, Vol. 4, Pages 545-566: Advancements in Indoor Precision Positioning: A Comprehensive Survey of UWB and Wi-Fi RTT Positioning Technologies</title>
	<link>https://www.mdpi.com/2673-8732/4/4/27</link>
	<description>High-precision indoor positioning is essential for various applications, such as the Internet of Things, robotics, and smart manufacturing, requiring accuracy better than 1 m. Conventional indoor positioning methods, like Wi-Fi or Bluetooth fingerprinting, typically provide low accuracy within a range of several meters, while techniques such as laser or visual odometry often require fusion with absolute positioning methods. Ultra-wideband (UWB) and Wi-Fi Round-Trip Time (RTT) are emerging radio positioning technologies supported by industry leaders like Apple and Google, respectively, both capable of achieving high-precision indoor positioning. This paper offers a comprehensive survey of UWB and Wi-Fi positioning, beginning with an overview of UWB and Wi-Fi RTT ranging, followed by an explanation of the fundamental principles of UWB and Wi-Fi RTT-based geometric positioning. Additionally, it compares the strengths and limitations of UWB and Wi-Fi RTT technologies and reviews advanced studies that address practical challenges in UWB and Wi-Fi RTT positioning, such as accuracy, reliability, continuity, and base station coordinate calibration issues. These challenges are primarily addressed through a multi-sensor fusion approach that integrates relative and absolute positioning. Finally, this paper highlights future directions for the development of UWB- and Wi-Fi RTT-based indoor positioning technologies.</description>
	<pubDate>2024-11-29</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 4, Pages 545-566: Advancements in Indoor Precision Positioning: A Comprehensive Survey of UWB and Wi-Fi RTT Positioning Technologies</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/4/4/27">doi: 10.3390/network4040027</a></p>
	<p>Authors:
		Jiageng Qiao
		Fan Yang
		Jingbin Liu
		Gege Huang
		Wei Zhang
		Mengxiang Li
		</p>
	<p>High-precision indoor positioning is essential for various applications, such as the Internet of Things, robotics, and smart manufacturing, requiring accuracy better than 1 m. Conventional indoor positioning methods, like Wi-Fi or Bluetooth fingerprinting, typically provide low accuracy within a range of several meters, while techniques such as laser or visual odometry often require fusion with absolute positioning methods. Ultra-wideband (UWB) and Wi-Fi Round-Trip Time (RTT) are emerging radio positioning technologies supported by industry leaders like Apple and Google, respectively, both capable of achieving high-precision indoor positioning. This paper offers a comprehensive survey of UWB and Wi-Fi positioning, beginning with an overview of UWB and Wi-Fi RTT ranging, followed by an explanation of the fundamental principles of UWB and Wi-Fi RTT-based geometric positioning. Additionally, it compares the strengths and limitations of UWB and Wi-Fi RTT technologies and reviews advanced studies that address practical challenges in UWB and Wi-Fi RTT positioning, such as accuracy, reliability, continuity, and base station coordinate calibration issues. These challenges are primarily addressed through a multi-sensor fusion approach that integrates relative and absolute positioning. Finally, this paper highlights future directions for the development of UWB- and Wi-Fi RTT-based indoor positioning technologies.</p>
	]]></content:encoded>

	<dc:title>Advancements in Indoor Precision Positioning: A Comprehensive Survey of UWB and Wi-Fi RTT Positioning Technologies</dc:title>
			<dc:creator>Jiageng Qiao</dc:creator>
			<dc:creator>Fan Yang</dc:creator>
			<dc:creator>Jingbin Liu</dc:creator>
			<dc:creator>Gege Huang</dc:creator>
			<dc:creator>Wei Zhang</dc:creator>
			<dc:creator>Mengxiang Li</dc:creator>
		<dc:identifier>doi: 10.3390/network4040027</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2024-11-29</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2024-11-29</prism:publicationDate>
	<prism:volume>4</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>545</prism:startingPage>
		<prism:doi>10.3390/network4040027</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/4/4/27</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/4/4/26">

	<title>Network, Vol. 4, Pages 523-544: Traffic-Driven Controller-Load-Balancing over Multi-Controller Software-Defined Networking Environment</title>
	<link>https://www.mdpi.com/2673-8732/4/4/26</link>
	<description>Currently, more studies are focusing on traffic classification in software-defined networks (SDNs). Accurate classification and selecting the appropriate controller have benefited from the application of machine learning (ML) in practice. In this research, we study different classification models to see which one best classifies the generated dataset and goes on to be implemented for real-time classification. In our case, the classification and regression tree (CART) classifier produces the best classification results for the generated dataset, and logistic regression is also considerable. Based on the evaluation of various algorithmic outputs for the training and validation datasets, and also when execution time is taken into account, the CART is found to be the best algorithm. While testing the impact of load balancing in a multi-controller SDN environment, in different load case scenarios, we observe network performance parameters like bit rate, packet rate, and jitter. Here, the use of traffic classification-based load balancing improves the bit rate as well as the packet rate of traffic flow on a network and thus considerably enhances throughput. Finally, the reduction in jitter while increasing the controllers confirms the improvement in QoS in a balanced multi-controller SDN environment.</description>
	<pubDate>2024-11-15</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 4, Pages 523-544: Traffic-Driven Controller-Load-Balancing over Multi-Controller Software-Defined Networking Environment</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/4/4/26">doi: 10.3390/network4040026</a></p>
	<p>Authors:
		Binod Sapkota
		Babu R. Dawadi
		Shashidhar R. Joshi
		Gopal Karn
		</p>
	<p>Currently, more studies are focusing on traffic classification in software-defined networks (SDNs). Accurate classification and selecting the appropriate controller have benefited from the application of machine learning (ML) in practice. In this research, we study different classification models to see which one best classifies the generated dataset and goes on to be implemented for real-time classification. In our case, the classification and regression tree (CART) classifier produces the best classification results for the generated dataset, and logistic regression is also considerable. Based on the evaluation of various algorithmic outputs for the training and validation datasets, and also when execution time is taken into account, the CART is found to be the best algorithm. While testing the impact of load balancing in a multi-controller SDN environment, in different load case scenarios, we observe network performance parameters like bit rate, packet rate, and jitter. Here, the use of traffic classification-based load balancing improves the bit rate as well as the packet rate of traffic flow on a network and thus considerably enhances throughput. Finally, the reduction in jitter while increasing the controllers confirms the improvement in QoS in a balanced multi-controller SDN environment.</p>
	]]></content:encoded>

	<dc:title>Traffic-Driven Controller-Load-Balancing over Multi-Controller Software-Defined Networking Environment</dc:title>
			<dc:creator>Binod Sapkota</dc:creator>
			<dc:creator>Babu R. Dawadi</dc:creator>
			<dc:creator>Shashidhar R. Joshi</dc:creator>
			<dc:creator>Gopal Karn</dc:creator>
		<dc:identifier>doi: 10.3390/network4040026</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2024-11-15</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2024-11-15</prism:publicationDate>
	<prism:volume>4</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>523</prism:startingPage>
		<prism:doi>10.3390/network4040026</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/4/4/26</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/4/4/25">

	<title>Network, Vol. 4, Pages 498-522: Exploring the Impact of Resource Management Strategies on Simulated Edge Cloud Performance: An Experimental Study</title>
	<link>https://www.mdpi.com/2673-8732/4/4/25</link>
	<description>Edge computing has emerged as a critical technology for meeting the needs of latency-sensitive applications and reducing network congestion. This goal is achieved mainly by distributing computational resources closer to end users and away from traditional data centers. Optimizing the utilization of limited edge cloud resources and improving the performance of edge computing systems requires efficient resource-management techniques. In this paper, we primarily discuss the use of simulation tools&amp;mdash;EdgeSimPy in particular&amp;mdash;to assess edge cloud resource management methods. We give a summary of the main difficulties in managing a limited pool of resources in edge cloud computing, and we go over how simulation programs like EdgeSimPy work and evaluate resource management algorithms. The scenarios we consider for this evaluation involve edge computing while taking into account variables like user location, resource availability, and network structure. We evaluate four resource management algorithms in a fixed, simulated edge computing environment to determine their performance regarding their CPU usage, memory usage, disk usage, power consumption, and latency performance metrics to determine which method performs better in a fixed scenario. This allows us to determine the optimal algorithm for tasks that prioritize minimal resource use, low latency, or a combination of the two. Furthermore, we outline areas of unfilled research needs and potential paths forward for improving the reliability and realism of edge cloud simulation tools.</description>
	<pubDate>2024-11-06</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 4, Pages 498-522: Exploring the Impact of Resource Management Strategies on Simulated Edge Cloud Performance: An Experimental Study</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/4/4/25">doi: 10.3390/network4040025</a></p>
	<p>Authors:
		Nikolaos Kaftantzis
		Dimitrios G. Kogias
		Charalampos Z. Patrikakis
		</p>
	<p>Edge computing has emerged as a critical technology for meeting the needs of latency-sensitive applications and reducing network congestion. This goal is achieved mainly by distributing computational resources closer to end users and away from traditional data centers. Optimizing the utilization of limited edge cloud resources and improving the performance of edge computing systems requires efficient resource-management techniques. In this paper, we primarily discuss the use of simulation tools&amp;mdash;EdgeSimPy in particular&amp;mdash;to assess edge cloud resource management methods. We give a summary of the main difficulties in managing a limited pool of resources in edge cloud computing, and we go over how simulation programs like EdgeSimPy work and evaluate resource management algorithms. The scenarios we consider for this evaluation involve edge computing while taking into account variables like user location, resource availability, and network structure. We evaluate four resource management algorithms in a fixed, simulated edge computing environment to determine their performance regarding their CPU usage, memory usage, disk usage, power consumption, and latency performance metrics to determine which method performs better in a fixed scenario. This allows us to determine the optimal algorithm for tasks that prioritize minimal resource use, low latency, or a combination of the two. Furthermore, we outline areas of unfilled research needs and potential paths forward for improving the reliability and realism of edge cloud simulation tools.</p>
	]]></content:encoded>

	<dc:title>Exploring the Impact of Resource Management Strategies on Simulated Edge Cloud Performance: An Experimental Study</dc:title>
			<dc:creator>Nikolaos Kaftantzis</dc:creator>
			<dc:creator>Dimitrios G. Kogias</dc:creator>
			<dc:creator>Charalampos Z. Patrikakis</dc:creator>
		<dc:identifier>doi: 10.3390/network4040025</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2024-11-06</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2024-11-06</prism:publicationDate>
	<prism:volume>4</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>498</prism:startingPage>
		<prism:doi>10.3390/network4040025</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/4/4/25</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/4/4/24">

	<title>Network, Vol. 4, Pages 468-497: Multi-Phase Adaptive Recoding: An Analogue of Partial Retransmission in Batched Network Coding</title>
	<link>https://www.mdpi.com/2673-8732/4/4/24</link>
	<description>Batched network coding (BNC) is a practical realization of random linear network coding (RLNC) designed for reliable network transmission in multi-hop networks with packet loss. By grouping coded packets into batches and restricting the use of RLNC within the same batch, BNC resolves the issue of RLNC that has high computational and storage costs at the intermediate nodes. A simple and common way to apply BNC is to fire and forget the recoded packets at the intermediate nodes, as BNC can act as an erasure code for data recovery. Due to the finiteness of batch size, the recoding strategy is a critical design that affects the throughput, the storage requirements, and the computational cost of BNC. The gain of the recoding strategy can be enhanced with the aid of a feedback mechanism, however the utilization and development of this mechanism is not yet standardized. In this paper, we investigate a multi-phase recoding mechanism for BNC. In each phase, recoding depends on the amount of innovative information remained at the current node after the transmission of the previous phases was completed. Relevant information can be obtained via hop-by-hop feedback; then, a more precise recoding scheme that allocates networking resources can be established. Unlike hop-by-hop retransmission schemes, the reception status of individual packets does not need to be known and packets to be sent in the next phase may not be the lost packets in the previous phase. Further, due to the loss-tolerance feature of BNC, it is unnecessary to pass all innovative information to the next node. This study illustrates that multi-phase recoding can significantly boost the throughput and reduce the decoding time as compared with the traditional single-phase recoding approach. This opens a new window in developing better strategies for designing BNC rather than sending more batches in a blind manner.</description>
	<pubDate>2024-10-30</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 4, Pages 468-497: Multi-Phase Adaptive Recoding: An Analogue of Partial Retransmission in Batched Network Coding</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/4/4/24">doi: 10.3390/network4040024</a></p>
	<p>Authors:
		Hoover H. F. Yin
		Mehrdad Tahernia
		Hugo Wai Leung Mak
		</p>
	<p>Batched network coding (BNC) is a practical realization of random linear network coding (RLNC) designed for reliable network transmission in multi-hop networks with packet loss. By grouping coded packets into batches and restricting the use of RLNC within the same batch, BNC resolves the issue of RLNC that has high computational and storage costs at the intermediate nodes. A simple and common way to apply BNC is to fire and forget the recoded packets at the intermediate nodes, as BNC can act as an erasure code for data recovery. Due to the finiteness of batch size, the recoding strategy is a critical design that affects the throughput, the storage requirements, and the computational cost of BNC. The gain of the recoding strategy can be enhanced with the aid of a feedback mechanism, however the utilization and development of this mechanism is not yet standardized. In this paper, we investigate a multi-phase recoding mechanism for BNC. In each phase, recoding depends on the amount of innovative information remained at the current node after the transmission of the previous phases was completed. Relevant information can be obtained via hop-by-hop feedback; then, a more precise recoding scheme that allocates networking resources can be established. Unlike hop-by-hop retransmission schemes, the reception status of individual packets does not need to be known and packets to be sent in the next phase may not be the lost packets in the previous phase. Further, due to the loss-tolerance feature of BNC, it is unnecessary to pass all innovative information to the next node. This study illustrates that multi-phase recoding can significantly boost the throughput and reduce the decoding time as compared with the traditional single-phase recoding approach. This opens a new window in developing better strategies for designing BNC rather than sending more batches in a blind manner.</p>
	]]></content:encoded>

	<dc:title>Multi-Phase Adaptive Recoding: An Analogue of Partial Retransmission in Batched Network Coding</dc:title>
			<dc:creator>Hoover H. F. Yin</dc:creator>
			<dc:creator>Mehrdad Tahernia</dc:creator>
			<dc:creator>Hugo Wai Leung Mak</dc:creator>
		<dc:identifier>doi: 10.3390/network4040024</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2024-10-30</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2024-10-30</prism:publicationDate>
	<prism:volume>4</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>468</prism:startingPage>
		<prism:doi>10.3390/network4040024</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/4/4/24</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/4/4/23">

	<title>Network, Vol. 4, Pages 453-467: Advanced Security Framework for 6G Networks: Integrating Deep Learning and Physical Layer Security</title>
	<link>https://www.mdpi.com/2673-8732/4/4/23</link>
	<description>This paper presents an advanced framework for securing 6G communication by integrating deep learning and physical layer security (PLS). The proposed model incorporates multi-stage detection mechanisms to enhance security against various attacks on the 6G air interface. Deep neural networks and a hybrid model are employed for sequential learning to improve classification accuracy and handle complex data patterns. Additionally, spoofing, jamming, and eavesdropping attacks are simulated to refine detection mechanisms. An anomaly detection system is developed to identify unusual signal patterns indicating potential attacks. The results demonstrate that machine learning (ML) and hybrid models outperform conventional approaches, showing improvements of up to 85% in bit error rate (BER) and 24% in accuracy, especially under attack conditions. This research contributes to the advancement of secure 6G communication systems, offering details on effective defence mechanisms against physical layer attacks.</description>
	<pubDate>2024-10-23</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 4, Pages 453-467: Advanced Security Framework for 6G Networks: Integrating Deep Learning and Physical Layer Security</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/4/4/23">doi: 10.3390/network4040023</a></p>
	<p>Authors:
		Haitham Mahmoud
		Tawfik Ismail
		Tobi Baiyekusi
		Moad Idrissi
		</p>
	<p>This paper presents an advanced framework for securing 6G communication by integrating deep learning and physical layer security (PLS). The proposed model incorporates multi-stage detection mechanisms to enhance security against various attacks on the 6G air interface. Deep neural networks and a hybrid model are employed for sequential learning to improve classification accuracy and handle complex data patterns. Additionally, spoofing, jamming, and eavesdropping attacks are simulated to refine detection mechanisms. An anomaly detection system is developed to identify unusual signal patterns indicating potential attacks. The results demonstrate that machine learning (ML) and hybrid models outperform conventional approaches, showing improvements of up to 85% in bit error rate (BER) and 24% in accuracy, especially under attack conditions. This research contributes to the advancement of secure 6G communication systems, offering details on effective defence mechanisms against physical layer attacks.</p>
	]]></content:encoded>

	<dc:title>Advanced Security Framework for 6G Networks: Integrating Deep Learning and Physical Layer Security</dc:title>
			<dc:creator>Haitham Mahmoud</dc:creator>
			<dc:creator>Tawfik Ismail</dc:creator>
			<dc:creator>Tobi Baiyekusi</dc:creator>
			<dc:creator>Moad Idrissi</dc:creator>
		<dc:identifier>doi: 10.3390/network4040023</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2024-10-23</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2024-10-23</prism:publicationDate>
	<prism:volume>4</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>453</prism:startingPage>
		<prism:doi>10.3390/network4040023</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/4/4/23</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/4/4/22">

	<title>Network, Vol. 4, Pages 443-452: Development of Graphical User Interface for Link Budget Analysis of Point-to-Point Communications at 5 GHz and 11 GHz</title>
	<link>https://www.mdpi.com/2673-8732/4/4/22</link>
	<description>It is well known that simulation tools are essential for the design and optimization of wireless communication systems. This paper proposes a Python script that can be used for planning and predicting a connection link budget by analyzing its basic parameters. Our proposal consists of an application that calculates the connection budget for point-to-point links operating at 5 GHz and 11 GHz, taking into account all the necessary microwave parameters. For validating the efficiency of the proposed tool, this paper presents comprehensive simulation results derived from comparing our tool to a couple of other simulation tools by means of calculating the same parameters.</description>
	<pubDate>2024-10-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 4, Pages 443-452: Development of Graphical User Interface for Link Budget Analysis of Point-to-Point Communications at 5 GHz and 11 GHz</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/4/4/22">doi: 10.3390/network4040022</a></p>
	<p>Authors:
		Konstantinos Zarkadas
		Apollon Smyrnaios
		George Dimitrakopoulos
		</p>
	<p>It is well known that simulation tools are essential for the design and optimization of wireless communication systems. This paper proposes a Python script that can be used for planning and predicting a connection link budget by analyzing its basic parameters. Our proposal consists of an application that calculates the connection budget for point-to-point links operating at 5 GHz and 11 GHz, taking into account all the necessary microwave parameters. For validating the efficiency of the proposed tool, this paper presents comprehensive simulation results derived from comparing our tool to a couple of other simulation tools by means of calculating the same parameters.</p>
	]]></content:encoded>

	<dc:title>Development of Graphical User Interface for Link Budget Analysis of Point-to-Point Communications at 5 GHz and 11 GHz</dc:title>
			<dc:creator>Konstantinos Zarkadas</dc:creator>
			<dc:creator>Apollon Smyrnaios</dc:creator>
			<dc:creator>George Dimitrakopoulos</dc:creator>
		<dc:identifier>doi: 10.3390/network4040022</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2024-10-01</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2024-10-01</prism:publicationDate>
	<prism:volume>4</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>443</prism:startingPage>
		<prism:doi>10.3390/network4040022</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/4/4/22</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/4/4/21">

	<title>Network, Vol. 4, Pages 426-442: Blockchain-Based E-Voting Mechanisms: A Survey and a Proposal</title>
	<link>https://www.mdpi.com/2673-8732/4/4/21</link>
	<description>Advancements in blockchain technology and network technology are bringing in a new era in electronic voting systems. These systems are characterized by enhanced security, efficiency, and accessibility. In this paper, we compose a comparative analysis of blockchain-based electronic voting (e-voting) systems using blockchain technology, cryptographic techniques, counting methods, and security requirements. The core of the analysis involves a detailed examination of blockchain-based electronic voting systems, focusing on the variations in architecture, cryptographic techniques, vote counting methods, and security. We also introduce a novel blockchain-based e-voting system, which integrates advanced methodologies, including the Borda count and Condorcet method, into e-voting systems for improved accuracy and representation in vote tallying. The system&amp;rsquo;s design features a flexible and amendable blockchain structure, ensuring robustness and security. Practical implementation on a Raspberry Pi 3 Model B+ demonstrates the system&amp;rsquo;s feasibility and adaptability in diverse environments. Our study of the evolution of e-voting systems and the incorporation of blockchain technology contributes to the development of secure, transparent, and efficient solutions for modern democratic governance.</description>
	<pubDate>2024-09-26</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 4, Pages 426-442: Blockchain-Based E-Voting Mechanisms: A Survey and a Proposal</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/4/4/21">doi: 10.3390/network4040021</a></p>
	<p>Authors:
		Matthew Sharp
		Laurent Njilla
		Chin-Tser Huang
		Tieming Geng
		</p>
	<p>Advancements in blockchain technology and network technology are bringing in a new era in electronic voting systems. These systems are characterized by enhanced security, efficiency, and accessibility. In this paper, we compose a comparative analysis of blockchain-based electronic voting (e-voting) systems using blockchain technology, cryptographic techniques, counting methods, and security requirements. The core of the analysis involves a detailed examination of blockchain-based electronic voting systems, focusing on the variations in architecture, cryptographic techniques, vote counting methods, and security. We also introduce a novel blockchain-based e-voting system, which integrates advanced methodologies, including the Borda count and Condorcet method, into e-voting systems for improved accuracy and representation in vote tallying. The system&amp;rsquo;s design features a flexible and amendable blockchain structure, ensuring robustness and security. Practical implementation on a Raspberry Pi 3 Model B+ demonstrates the system&amp;rsquo;s feasibility and adaptability in diverse environments. Our study of the evolution of e-voting systems and the incorporation of blockchain technology contributes to the development of secure, transparent, and efficient solutions for modern democratic governance.</p>
	]]></content:encoded>

	<dc:title>Blockchain-Based E-Voting Mechanisms: A Survey and a Proposal</dc:title>
			<dc:creator>Matthew Sharp</dc:creator>
			<dc:creator>Laurent Njilla</dc:creator>
			<dc:creator>Chin-Tser Huang</dc:creator>
			<dc:creator>Tieming Geng</dc:creator>
		<dc:identifier>doi: 10.3390/network4040021</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2024-09-26</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2024-09-26</prism:publicationDate>
	<prism:volume>4</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>426</prism:startingPage>
		<prism:doi>10.3390/network4040021</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/4/4/21</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/4/4/20">

	<title>Network, Vol. 4, Pages 405-425: An Optimization Strategy for Security and Reliability in a Diamond Untrusted Relay Network with Cooperative Jamming</title>
	<link>https://www.mdpi.com/2673-8732/4/4/20</link>
	<description>This paper tackles the challenge of secure and reliable data transmission in diamond network configurations featuring two untrusted relays with low-security clearance. We propose an innovative approach that employs lossy-decode and -forward relaying at these untrusted relays to boost transmission reliability while safeguarding the source information from potential eavesdroppers. An essential contribution of this work is the introduction of the reliable and secure probability (RSP) metric. This metric assesses the likelihood of the destination successfully retrieving the original information while maintaining its confidentiality from untrusted relays. Our analysis shows that the integration of cooperative jamming signals markedly enhances the RSP, resulting in superior security and reliability. Simulation results confirm that optimal power distribution among the source, relays, and destination further maximizes the RSP. These findings underscore the effectiveness of our proposed scheme in ensuring secure and reliable communication in environments with untrusted relays, suggesting its potential as a robust solution for secure communications in diamond network configurations.</description>
	<pubDate>2024-09-25</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 4, Pages 405-425: An Optimization Strategy for Security and Reliability in a Diamond Untrusted Relay Network with Cooperative Jamming</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/4/4/20">doi: 10.3390/network4040020</a></p>
	<p>Authors:
		Shen Qian
		Meng Cheng
		</p>
	<p>This paper tackles the challenge of secure and reliable data transmission in diamond network configurations featuring two untrusted relays with low-security clearance. We propose an innovative approach that employs lossy-decode and -forward relaying at these untrusted relays to boost transmission reliability while safeguarding the source information from potential eavesdroppers. An essential contribution of this work is the introduction of the reliable and secure probability (RSP) metric. This metric assesses the likelihood of the destination successfully retrieving the original information while maintaining its confidentiality from untrusted relays. Our analysis shows that the integration of cooperative jamming signals markedly enhances the RSP, resulting in superior security and reliability. Simulation results confirm that optimal power distribution among the source, relays, and destination further maximizes the RSP. These findings underscore the effectiveness of our proposed scheme in ensuring secure and reliable communication in environments with untrusted relays, suggesting its potential as a robust solution for secure communications in diamond network configurations.</p>
	]]></content:encoded>

	<dc:title>An Optimization Strategy for Security and Reliability in a Diamond Untrusted Relay Network with Cooperative Jamming</dc:title>
			<dc:creator>Shen Qian</dc:creator>
			<dc:creator>Meng Cheng</dc:creator>
		<dc:identifier>doi: 10.3390/network4040020</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2024-09-25</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2024-09-25</prism:publicationDate>
	<prism:volume>4</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>405</prism:startingPage>
		<prism:doi>10.3390/network4040020</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/4/4/20</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/4/3/19">

	<title>Network, Vol. 4, Pages 404: Correction: Zhu et al. Energy Efficient Access Point Placement for Distributed Massive MIMO. Network 2022, 2, 288&amp;ndash;310</title>
	<link>https://www.mdpi.com/2673-8732/4/3/19</link>
	<description>Following publication, concerns were raised regarding the peer-review process related to the publication of this article [...]</description>
	<pubDate>2024-09-11</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 4, Pages 404: Correction: Zhu et al. Energy Efficient Access Point Placement for Distributed Massive MIMO. Network 2022, 2, 288&amp;ndash;310</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/4/3/19">doi: 10.3390/network4030019</a></p>
	<p>Authors:
		Yi-Hang Zhu
		Gilles Callebaut
		Hatice Çalık
		Liesbet Van der Perre
		François Rottenberg
		</p>
	<p>Following publication, concerns were raised regarding the peer-review process related to the publication of this article [...]</p>
	]]></content:encoded>

	<dc:title>Correction: Zhu et al. Energy Efficient Access Point Placement for Distributed Massive MIMO. Network 2022, 2, 288&amp;ndash;310</dc:title>
			<dc:creator>Yi-Hang Zhu</dc:creator>
			<dc:creator>Gilles Callebaut</dc:creator>
			<dc:creator>Hatice Çalık</dc:creator>
			<dc:creator>Liesbet Van der Perre</dc:creator>
			<dc:creator>François Rottenberg</dc:creator>
		<dc:identifier>doi: 10.3390/network4030019</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2024-09-11</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2024-09-11</prism:publicationDate>
	<prism:volume>4</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Correction</prism:section>
	<prism:startingPage>404</prism:startingPage>
		<prism:doi>10.3390/network4030019</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/4/3/19</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/4/3/18">

	<title>Network, Vol. 4, Pages 390-403: Efficient Collaborative Edge Computing for Vehicular Network Using Clustering Service</title>
	<link>https://www.mdpi.com/2673-8732/4/3/18</link>
	<description>Internet of Vehicles applications are known to be critical and time-sensitive. The value proposition of edge computing comprises its lower latency, advantageous bandwidth consumption, privacy, management, efficiency of treatments, and mobility, which aim to improve vehicular and traffic services. Successful stories have been observed between IoV and edge computing to support smooth mobility and the use of local resources. However, vehicle travel, especially due to high-speed movement and intersections, can result in IoV devices losing connection and/or processing with high latency. This paper proposes a Cluster Collaboration Vehicular Edge Computing (CCVEC) framework that aims to guarantee and enhance the connectivity between vehicle sensors and the cloud by utilizing the edge computing paradigm in the middle. The objectives are achieved by utilizing the cluster management strategies deployed between cloud and edge computing servers. The framework is implemented in OpenStack cloud servers and evaluated by measuring the throughput, latency, and memory parameters in two different scenarios. The results obtained show promising indications in terms of latency (approximately 390 ms of the ideal status) and throughput (30 kB/s) values, and thus appears acceptable in terms of performance as well as memory.</description>
	<pubDate>2024-09-06</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 4, Pages 390-403: Efficient Collaborative Edge Computing for Vehicular Network Using Clustering Service</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/4/3/18">doi: 10.3390/network4030018</a></p>
	<p>Authors:
		Ali Al-Allawee
		Pascal Lorenz
		Alhamza Munther
		</p>
	<p>Internet of Vehicles applications are known to be critical and time-sensitive. The value proposition of edge computing comprises its lower latency, advantageous bandwidth consumption, privacy, management, efficiency of treatments, and mobility, which aim to improve vehicular and traffic services. Successful stories have been observed between IoV and edge computing to support smooth mobility and the use of local resources. However, vehicle travel, especially due to high-speed movement and intersections, can result in IoV devices losing connection and/or processing with high latency. This paper proposes a Cluster Collaboration Vehicular Edge Computing (CCVEC) framework that aims to guarantee and enhance the connectivity between vehicle sensors and the cloud by utilizing the edge computing paradigm in the middle. The objectives are achieved by utilizing the cluster management strategies deployed between cloud and edge computing servers. The framework is implemented in OpenStack cloud servers and evaluated by measuring the throughput, latency, and memory parameters in two different scenarios. The results obtained show promising indications in terms of latency (approximately 390 ms of the ideal status) and throughput (30 kB/s) values, and thus appears acceptable in terms of performance as well as memory.</p>
	]]></content:encoded>

	<dc:title>Efficient Collaborative Edge Computing for Vehicular Network Using Clustering Service</dc:title>
			<dc:creator>Ali Al-Allawee</dc:creator>
			<dc:creator>Pascal Lorenz</dc:creator>
			<dc:creator>Alhamza Munther</dc:creator>
		<dc:identifier>doi: 10.3390/network4030018</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2024-09-06</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2024-09-06</prism:publicationDate>
	<prism:volume>4</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>390</prism:startingPage>
		<prism:doi>10.3390/network4030018</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/4/3/18</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/4/3/17">

	<title>Network, Vol. 4, Pages 367-389: Dynamic Framing and Power Allocation for Real-Time Wireless Networks with Variable-Length Coding: A Tandem Queue Approach</title>
	<link>https://www.mdpi.com/2673-8732/4/3/17</link>
	<description>Ensuring high reliability and low latency poses challenges for numerous applications that require rigid performance guarantees, such as industrial automation and autonomous vehicles. Our research primarily concentrates on addressing the real-time requirements of ultra-reliable low-latency communication (URLLC). Specifically, we tackle the challenge of hard delay constraints in real-time transmission systems, overcoming this obstacle through a finite blocklength coding scheme. In the physical layer, we encode randomly arriving packets using a variable-length coding scheme and transmit the encoded symbols by truncated channel inversion over parallel channels. In the network layer, we model the encoding and transmission processes as tandem queues. These queues backlog the data bits waiting to be encoded and the encoded symbols to be transmitted, respectively. This way, we represent the system as a two-dimensional Markov chain. By focusing on instances when the symbol queue is empty, we simplify the Markov chain into a one-dimensional Markov chain, with the packet queue being the system state. This approach allows us to analytically express power consumption and formulate a power minimization problem under hard delay constraints. Finally, we propose a heuristic algorithm to solve the problem and provide an extensive evaluation of the trade-offs between the hard delay constraint and power consumption.</description>
	<pubDate>2024-08-27</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 4, Pages 367-389: Dynamic Framing and Power Allocation for Real-Time Wireless Networks with Variable-Length Coding: A Tandem Queue Approach</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/4/3/17">doi: 10.3390/network4030017</a></p>
	<p>Authors:
		Yuanrui Liu
		Xiaoyu Zhao
		Wei Chen
		Ying-Jun Angela Zhang
		</p>
	<p>Ensuring high reliability and low latency poses challenges for numerous applications that require rigid performance guarantees, such as industrial automation and autonomous vehicles. Our research primarily concentrates on addressing the real-time requirements of ultra-reliable low-latency communication (URLLC). Specifically, we tackle the challenge of hard delay constraints in real-time transmission systems, overcoming this obstacle through a finite blocklength coding scheme. In the physical layer, we encode randomly arriving packets using a variable-length coding scheme and transmit the encoded symbols by truncated channel inversion over parallel channels. In the network layer, we model the encoding and transmission processes as tandem queues. These queues backlog the data bits waiting to be encoded and the encoded symbols to be transmitted, respectively. This way, we represent the system as a two-dimensional Markov chain. By focusing on instances when the symbol queue is empty, we simplify the Markov chain into a one-dimensional Markov chain, with the packet queue being the system state. This approach allows us to analytically express power consumption and formulate a power minimization problem under hard delay constraints. Finally, we propose a heuristic algorithm to solve the problem and provide an extensive evaluation of the trade-offs between the hard delay constraint and power consumption.</p>
	]]></content:encoded>

	<dc:title>Dynamic Framing and Power Allocation for Real-Time Wireless Networks with Variable-Length Coding: A Tandem Queue Approach</dc:title>
			<dc:creator>Yuanrui Liu</dc:creator>
			<dc:creator>Xiaoyu Zhao</dc:creator>
			<dc:creator>Wei Chen</dc:creator>
			<dc:creator>Ying-Jun Angela Zhang</dc:creator>
		<dc:identifier>doi: 10.3390/network4030017</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2024-08-27</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2024-08-27</prism:publicationDate>
	<prism:volume>4</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>367</prism:startingPage>
		<prism:doi>10.3390/network4030017</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/4/3/17</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/4/3/16">

	<title>Network, Vol. 4, Pages 338-366: Securing IPv6 Neighbor Discovery Address Resolution with Voucher-Based Addressing</title>
	<link>https://www.mdpi.com/2673-8732/4/3/16</link>
	<description>The majority of local IPv6 networks continue to remain insecure and vulnerable to neighbor spoofing attacks. The Secure Neighbor Discovery (SEND) standard and its concomitant Cryptographically Generated Addressing (CGA) scheme were accepted by large standard bodies to codify practical mitigations. SEND and CGA have never seen widespread adoption due to their complexities, obscurity, costs, compatibility issues, and continued lack of mature implementations. In light of their poor adoption, research since their standardization has continued to find new perspectives and proffer new ideas. The orthodox solutions for securing Neighbor Discovery have historically struggled to successfully harmonize three core ideals: simplicity, flexibility, and privacy preservation. This research introduces Voucher-Based Addressing, a low-configuration, low-cost, and high-impact alternative to IPv6 address generation methods. It secures the Neighbor Discovery address resolution process while remaining simple, highly adaptable, indistinguishable, and privacy-focused. Applying a unique concoction of cryptographic key derivation functions, link-layer address binding, and neighbor consensus on the parameters of address generation, the resolved address bindings are verifiable without the need for complex techniques that have hindered the adoption of canonical specifications.</description>
	<pubDate>2024-08-14</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 4, Pages 338-366: Securing IPv6 Neighbor Discovery Address Resolution with Voucher-Based Addressing</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/4/3/16">doi: 10.3390/network4030016</a></p>
	<p>Authors:
		Zachary T. Puhl
		Jinhua Guo
		</p>
	<p>The majority of local IPv6 networks continue to remain insecure and vulnerable to neighbor spoofing attacks. The Secure Neighbor Discovery (SEND) standard and its concomitant Cryptographically Generated Addressing (CGA) scheme were accepted by large standard bodies to codify practical mitigations. SEND and CGA have never seen widespread adoption due to their complexities, obscurity, costs, compatibility issues, and continued lack of mature implementations. In light of their poor adoption, research since their standardization has continued to find new perspectives and proffer new ideas. The orthodox solutions for securing Neighbor Discovery have historically struggled to successfully harmonize three core ideals: simplicity, flexibility, and privacy preservation. This research introduces Voucher-Based Addressing, a low-configuration, low-cost, and high-impact alternative to IPv6 address generation methods. It secures the Neighbor Discovery address resolution process while remaining simple, highly adaptable, indistinguishable, and privacy-focused. Applying a unique concoction of cryptographic key derivation functions, link-layer address binding, and neighbor consensus on the parameters of address generation, the resolved address bindings are verifiable without the need for complex techniques that have hindered the adoption of canonical specifications.</p>
	]]></content:encoded>

	<dc:title>Securing IPv6 Neighbor Discovery Address Resolution with Voucher-Based Addressing</dc:title>
			<dc:creator>Zachary T. Puhl</dc:creator>
			<dc:creator>Jinhua Guo</dc:creator>
		<dc:identifier>doi: 10.3390/network4030016</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2024-08-14</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2024-08-14</prism:publicationDate>
	<prism:volume>4</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>338</prism:startingPage>
		<prism:doi>10.3390/network4030016</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/4/3/16</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/4/3/15">

	<title>Network, Vol. 4, Pages 313-337: Polar Codes with Differential Phase Shift Keying for Selective Detect-and-Forward Multi-Way Relaying Systems</title>
	<link>https://www.mdpi.com/2673-8732/4/3/15</link>
	<description>Relaying with network coding forms a basis for a variety of collaborative communication systems. A linear block coding framework for multi-way relaying using network codes introduced in the literature shows great promise for understanding, analyzing, and designing such systems. So far, this technique has been used with low-density parity check (LDPC) codes and belief propagation (BP) decoding. Polar codes have drawn significant interest in recent years because of their low decoding complexity and good performance. Our paper considers the use of polar codes also as network codes with differential binary phase shift keying (DBPSK), bypassing the need for channel state estimation in multi-way selective detect-and-forward (DetF) cooperative relaying. We demonstrate that polar codes are suitable for such applications. The encoding and decoding complexity of such systems for linear block codes is analyzed using maximum likelihood (ML) decoding for LDPC codes with log-BP decoding and polar codes with successive cancellation (SC) as well as successive cancellation list (SCL) decoding. We present Monte-Carlo simulation results for the performance of such a multi-way relaying system, employing polar codes with different lengths and code rates. The results demonstrate a significant performance gain compared to an uncoded scheme. The simulation results show that the error performance of such a system employing polar codes is comparable to LDPC codes with log-BP decoding, while the decoding complexity is much lower. Furthermore, we consider a hard threshold technique at user terminals for determining whether a relay transmits or not. This technique makes the system practical without increasing the complexity and can significantly reduce the degradation from intermittent relay transmissions that is associated with such a multi-way relaying protocol.</description>
	<pubDate>2024-08-08</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 4, Pages 313-337: Polar Codes with Differential Phase Shift Keying for Selective Detect-and-Forward Multi-Way Relaying Systems</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/4/3/15">doi: 10.3390/network4030015</a></p>
	<p>Authors:
		Ruilin Ji
		Harry Leib
		</p>
	<p>Relaying with network coding forms a basis for a variety of collaborative communication systems. A linear block coding framework for multi-way relaying using network codes introduced in the literature shows great promise for understanding, analyzing, and designing such systems. So far, this technique has been used with low-density parity check (LDPC) codes and belief propagation (BP) decoding. Polar codes have drawn significant interest in recent years because of their low decoding complexity and good performance. Our paper considers the use of polar codes also as network codes with differential binary phase shift keying (DBPSK), bypassing the need for channel state estimation in multi-way selective detect-and-forward (DetF) cooperative relaying. We demonstrate that polar codes are suitable for such applications. The encoding and decoding complexity of such systems for linear block codes is analyzed using maximum likelihood (ML) decoding for LDPC codes with log-BP decoding and polar codes with successive cancellation (SC) as well as successive cancellation list (SCL) decoding. We present Monte-Carlo simulation results for the performance of such a multi-way relaying system, employing polar codes with different lengths and code rates. The results demonstrate a significant performance gain compared to an uncoded scheme. The simulation results show that the error performance of such a system employing polar codes is comparable to LDPC codes with log-BP decoding, while the decoding complexity is much lower. Furthermore, we consider a hard threshold technique at user terminals for determining whether a relay transmits or not. This technique makes the system practical without increasing the complexity and can significantly reduce the degradation from intermittent relay transmissions that is associated with such a multi-way relaying protocol.</p>
	]]></content:encoded>

	<dc:title>Polar Codes with Differential Phase Shift Keying for Selective Detect-and-Forward Multi-Way Relaying Systems</dc:title>
			<dc:creator>Ruilin Ji</dc:creator>
			<dc:creator>Harry Leib</dc:creator>
		<dc:identifier>doi: 10.3390/network4030015</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2024-08-08</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2024-08-08</prism:publicationDate>
	<prism:volume>4</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>313</prism:startingPage>
		<prism:doi>10.3390/network4030015</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/4/3/15</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/4/3/14">

	<title>Network, Vol. 4, Pages 295-312: A User Location Reset Method through Object Recognition in Indoor Navigation System Using Unity and a Smartphone (INSUS)</title>
	<link>https://www.mdpi.com/2673-8732/4/3/14</link>
	<description>To enhance user experiences of reaching destinations in large, complex buildings, we have developed an indoor navigation system using Unity and a smartphone called INSUS. It can reset the user location using a quick response (QR) code to reduce the loss of direction of the user during navigation. However, this approach needs a number of QR code sheets to be prepared in the field, causing extra loads at implementation. In this paper, we propose another reset method to reduce loads by recognizing information of naturally installed signs in the field using object detection and Optical Character Recognition (OCR) technologies. A lot of signs exist in a building, containing texts such as room numbers, room names, and floor numbers. In the proposal, the Sign Image is taken with a smartphone, the sign is detected by YOLOv8, the text inside the sign is recognized by PaddleOCR, and it is compared with each record in the Room Database using Levenshtein distance. For evaluations, we applied the proposal in two buildings in Okayama University, Japan. The results show that YOLOv8 achieved mAP@0.5 0.995 and mAP@0.5:0.95 0.978, and PaddleOCR could extract text in the sign image accurately with an averaged CER% lower than 10%. The combination of both YOLOv8 and PaddleOCR decreases the execution time by 6.71s compared to the previous method. The results confirmed the effectiveness of the proposal.</description>
	<pubDate>2024-07-22</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 4, Pages 295-312: A User Location Reset Method through Object Recognition in Indoor Navigation System Using Unity and a Smartphone (INSUS)</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/4/3/14">doi: 10.3390/network4030014</a></p>
	<p>Authors:
		Evianita Dewi Fajrianti
		Yohanes Yohanie Fridelin Panduman
		Nobuo Funabiki
		Amma Liesvarastranta Haz
		Komang Candra Brata
		Sritrusta Sukaridhoto
		</p>
	<p>To enhance user experiences of reaching destinations in large, complex buildings, we have developed an indoor navigation system using Unity and a smartphone called INSUS. It can reset the user location using a quick response (QR) code to reduce the loss of direction of the user during navigation. However, this approach needs a number of QR code sheets to be prepared in the field, causing extra loads at implementation. In this paper, we propose another reset method to reduce loads by recognizing information of naturally installed signs in the field using object detection and Optical Character Recognition (OCR) technologies. A lot of signs exist in a building, containing texts such as room numbers, room names, and floor numbers. In the proposal, the Sign Image is taken with a smartphone, the sign is detected by YOLOv8, the text inside the sign is recognized by PaddleOCR, and it is compared with each record in the Room Database using Levenshtein distance. For evaluations, we applied the proposal in two buildings in Okayama University, Japan. The results show that YOLOv8 achieved mAP@0.5 0.995 and mAP@0.5:0.95 0.978, and PaddleOCR could extract text in the sign image accurately with an averaged CER% lower than 10%. The combination of both YOLOv8 and PaddleOCR decreases the execution time by 6.71s compared to the previous method. The results confirmed the effectiveness of the proposal.</p>
	]]></content:encoded>

	<dc:title>A User Location Reset Method through Object Recognition in Indoor Navigation System Using Unity and a Smartphone (INSUS)</dc:title>
			<dc:creator>Evianita Dewi Fajrianti</dc:creator>
			<dc:creator>Yohanes Yohanie Fridelin Panduman</dc:creator>
			<dc:creator>Nobuo Funabiki</dc:creator>
			<dc:creator>Amma Liesvarastranta Haz</dc:creator>
			<dc:creator>Komang Candra Brata</dc:creator>
			<dc:creator>Sritrusta Sukaridhoto</dc:creator>
		<dc:identifier>doi: 10.3390/network4030014</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2024-07-22</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2024-07-22</prism:publicationDate>
	<prism:volume>4</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>295</prism:startingPage>
		<prism:doi>10.3390/network4030014</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/4/3/14</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/4/3/13">

	<title>Network, Vol. 4, Pages 260-294: Enhancing Resilience in Digital Twins: ASCON-Based Security Solutions for Industry 4.0</title>
	<link>https://www.mdpi.com/2673-8732/4/3/13</link>
	<description>Persistent security challenges in Industry 4.0 due to the limited resources of IoT devices necessitate innovative solutions. Addressing this, this study introduces the ASCON algorithm for lightweight authenticated encryption with associated data, enhancing confidentiality, integrity, and authenticity within IoT limitations. By integrating Digital Twins, the framework emphasizes the need for robust security in Industry 4.0, with ASCON ensuring secure data transmission and bolstering system resilience against cyber threats. Practical validation using the MQTT protocol confirms ASCON&amp;rsquo;s efficacy over AES-GCM, highlighting its potential for enhanced security in Industry 4.0. Future research should focus on optimizing ASCON for microprocessors and developing secure remote access tailored to resource-constrained devices, ensuring adaptability in the digital era.</description>
	<pubDate>2024-07-19</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 4, Pages 260-294: Enhancing Resilience in Digital Twins: ASCON-Based Security Solutions for Industry 4.0</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/4/3/13">doi: 10.3390/network4030013</a></p>
	<p>Authors:
		Mohammed El-Hajj
		Teklit Haftu Gebremariam
		</p>
	<p>Persistent security challenges in Industry 4.0 due to the limited resources of IoT devices necessitate innovative solutions. Addressing this, this study introduces the ASCON algorithm for lightweight authenticated encryption with associated data, enhancing confidentiality, integrity, and authenticity within IoT limitations. By integrating Digital Twins, the framework emphasizes the need for robust security in Industry 4.0, with ASCON ensuring secure data transmission and bolstering system resilience against cyber threats. Practical validation using the MQTT protocol confirms ASCON&amp;rsquo;s efficacy over AES-GCM, highlighting its potential for enhanced security in Industry 4.0. Future research should focus on optimizing ASCON for microprocessors and developing secure remote access tailored to resource-constrained devices, ensuring adaptability in the digital era.</p>
	]]></content:encoded>

	<dc:title>Enhancing Resilience in Digital Twins: ASCON-Based Security Solutions for Industry 4.0</dc:title>
			<dc:creator>Mohammed El-Hajj</dc:creator>
			<dc:creator>Teklit Haftu Gebremariam</dc:creator>
		<dc:identifier>doi: 10.3390/network4030013</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2024-07-19</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2024-07-19</prism:publicationDate>
	<prism:volume>4</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>260</prism:startingPage>
		<prism:doi>10.3390/network4030013</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/4/3/13</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2673-8732/4/3/12">

	<title>Network, Vol. 4, Pages 237-259: Delay and Disruption Tolerant Networking for Terrestrial and TCP/IP Applications: A Systematic Literature Review</title>
	<link>https://www.mdpi.com/2673-8732/4/3/12</link>
	<description>Delay and Disruption Tolerant Networking (DTN) is a network architecture created basically to overcome non-continuing connectivity. There has been a great deal of research on this topic, from space communication to terrestrial applications. Since there are still many places on earth where there is no means of communication, the focus of this work is on the latest. A systematic literature review (SLR) was performed to know the main issues and advances related to the implementation of DTN for terrestrial and TCP/IP applications, especially in places where telecommunication infrastructure is lacking. The result is a classification of papers based on key aspects, such as architecture, performance, routing, and applications. A matrix of all the papers about these aspects is included to help researchers find the missing piece and concrete terrestrial solutions. The matrix uses three colors, green, yellow, and red according to the focus, either high, medium, or low, so that it is easy to identify specific papers.</description>
	<pubDate>2024-07-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Network, Vol. 4, Pages 237-259: Delay and Disruption Tolerant Networking for Terrestrial and TCP/IP Applications: A Systematic Literature Review</b></p>
	<p>Network <a href="https://www.mdpi.com/2673-8732/4/3/12">doi: 10.3390/network4030012</a></p>
	<p>Authors:
		Aris Castillo
		Carlos Juiz
		Belen Bermejo
		</p>
	<p>Delay and Disruption Tolerant Networking (DTN) is a network architecture created basically to overcome non-continuing connectivity. There has been a great deal of research on this topic, from space communication to terrestrial applications. Since there are still many places on earth where there is no means of communication, the focus of this work is on the latest. A systematic literature review (SLR) was performed to know the main issues and advances related to the implementation of DTN for terrestrial and TCP/IP applications, especially in places where telecommunication infrastructure is lacking. The result is a classification of papers based on key aspects, such as architecture, performance, routing, and applications. A matrix of all the papers about these aspects is included to help researchers find the missing piece and concrete terrestrial solutions. The matrix uses three colors, green, yellow, and red according to the focus, either high, medium, or low, so that it is easy to identify specific papers.</p>
	]]></content:encoded>

	<dc:title>Delay and Disruption Tolerant Networking for Terrestrial and TCP/IP Applications: A Systematic Literature Review</dc:title>
			<dc:creator>Aris Castillo</dc:creator>
			<dc:creator>Carlos Juiz</dc:creator>
			<dc:creator>Belen Bermejo</dc:creator>
		<dc:identifier>doi: 10.3390/network4030012</dc:identifier>
	<dc:source>Network</dc:source>
	<dc:date>2024-07-01</dc:date>

	<prism:publicationName>Network</prism:publicationName>
	<prism:publicationDate>2024-07-01</prism:publicationDate>
	<prism:volume>4</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>237</prism:startingPage>
		<prism:doi>10.3390/network4030012</prism:doi>
	<prism:url>https://www.mdpi.com/2673-8732/4/3/12</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
    
<cc:License rdf:about="https://creativecommons.org/licenses/by/4.0/">
	<cc:permits rdf:resource="https://creativecommons.org/ns#Reproduction" />
	<cc:permits rdf:resource="https://creativecommons.org/ns#Distribution" />
	<cc:permits rdf:resource="https://creativecommons.org/ns#DerivativeWorks" />
</cc:License>

</rdf:RDF>
