<?xml version="1.0" encoding="UTF-8"?>
<rdf:RDF xmlns="http://purl.org/rss/1.0/"
 xmlns:dc="http://purl.org/dc/elements/1.1/"
 xmlns:dcterms="http://purl.org/dc/terms/"
 xmlns:cc="http://web.resource.org/cc/"
 xmlns:prism="http://prismstandard.org/namespaces/basic/2.0/"
 xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
 xmlns:admin="http://webns.net/mvcb/"
 xmlns:content="http://purl.org/rss/1.0/modules/content/">
    <channel rdf:about="https://www.mdpi.com/rss/journal/remotesensing">
		<title>Remote Sensing</title>
		<description>Latest open access articles published in Remote Sens. at https://www.mdpi.com/journal/remotesensing</description>
		<link>https://www.mdpi.com/journal/remotesensing</link>
		<admin:generatorAgent rdf:resource="https://www.mdpi.com/journal/remotesensing"/>
		<admin:errorReportsTo rdf:resource="mailto:support@mdpi.com"/>
		<dc:publisher>MDPI</dc:publisher>
		<dc:language>en</dc:language>
		<dc:rights>Creative Commons Attribution (CC-BY)</dc:rights>
						<prism:copyright>MDPI</prism:copyright>
		<prism:rightsAgent>support@mdpi.com</prism:rightsAgent>
		<image rdf:resource="https://pub.mdpi-res.com/img/design/mdpi-pub-logo.png?13cf3b5bd783e021&amp;1778040678"/>
				<items>
			<rdf:Seq>
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1447" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1441" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1446" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1445" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1444" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1443" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1442" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1440" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1439" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1437" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1435" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1438" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1436" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1434" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1433" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1432" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1431" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1430" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1429" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1428" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1427" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1426" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1425" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1424" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1423" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1422" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1419" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1420" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1421" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1418" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1417" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1416" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1415" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1414" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1411" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1413" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1412" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1409" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1410" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1408" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1406" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1407" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1405" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1403" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1400" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1404" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1402" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1401" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1399" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1398" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1397" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1396" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1395" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1394" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1393" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1392" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1390" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1391" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1388" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1389" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1387" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1384" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1386" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1385" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1383" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1382" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1381" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1379" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1380" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1376" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1378" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1377" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1375" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1374" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1373" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1372" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1370" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1371" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1368" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1369" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1366" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1367" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1365" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1364" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1361" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1363" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1360" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1362" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1359" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1358" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1357" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1355" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1356" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1354" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1351" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1353" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1350" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1352" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1349" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2072-4292/18/9/1348" />
                    	</rdf:Seq>
		</items>
				<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/" />
	</channel>

        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1447">

	<title>Remote Sensing, Vol. 18, Pages 1447: LiteScan-Net: A Lightweight Scanning Network and a Large-Scale Dataset for Cropland Change Detection</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1447</link>
	<description>Aiming at the dual dilemma in high-resolution cropland change detection, where CNNs are constrained by limited local receptive fields and Transformers suffer from heavy computational costs, we propose LiteScan-Net, a lightweight and robust network architecture incorporating scanning principles from state-space modeling. The network innovatively introduces the Multi-Directional Global Scanning (MDGS) mechanism as an efficient engineering surrogate, which simulates the selective scanning process using large-kernel 1D convolutions. This achieves global context modeling with linear complexity while avoiding the hardware limitations imposed by recurrent computations. Based on this mechanism, a three-stage collaborative architecture is constructed: the Coordinate-Aware Feature Purification (CAFP) module is designed to mitigate shallow phenological noise via coordinate sensitivity; the Context Difference Verification (CDV) module aims to alleviate pseudo-changes caused by registration errors through global alignment; and the State-Space Guided Refinement (SSGR) module promotes the generation of change masks with precise boundaries and compact interiors. To verify the model generalization, we construct a Massive Specialized Cropland Change Detection dataset named MSCC, which exhibits significant cross-scale characteristics. Experimental results demonstrate that LiteScan-Net achieves state-of-the-art (SOTA) performance across the CLCD, Hi-CNA, and MSCC datasets, with F1-scores of 79.43%, 84.82%, and 89.62%, respectively. With a low computational cost of only 1.78 GFLOPs and a real-time inference speed of 37.9 FPS, LiteScan-Net demonstrates high potential for future deployment on resource-constrained edge devices.</description>
	<pubDate>2026-05-06</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1447: LiteScan-Net: A Lightweight Scanning Network and a Large-Scale Dataset for Cropland Change Detection</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1447">doi: 10.3390/rs18091447</a></p>
	<p>Authors:
		Zhengfang Lou
		Xiaoping Lu
		Yao Lu
		Siyi Li
		Guosheng Cai
		Ling Song
		</p>
	<p>Aiming at the dual dilemma in high-resolution cropland change detection, where CNNs are constrained by limited local receptive fields and Transformers suffer from heavy computational costs, we propose LiteScan-Net, a lightweight and robust network architecture incorporating scanning principles from state-space modeling. The network innovatively introduces the Multi-Directional Global Scanning (MDGS) mechanism as an efficient engineering surrogate, which simulates the selective scanning process using large-kernel 1D convolutions. This achieves global context modeling with linear complexity while avoiding the hardware limitations imposed by recurrent computations. Based on this mechanism, a three-stage collaborative architecture is constructed: the Coordinate-Aware Feature Purification (CAFP) module is designed to mitigate shallow phenological noise via coordinate sensitivity; the Context Difference Verification (CDV) module aims to alleviate pseudo-changes caused by registration errors through global alignment; and the State-Space Guided Refinement (SSGR) module promotes the generation of change masks with precise boundaries and compact interiors. To verify the model generalization, we construct a Massive Specialized Cropland Change Detection dataset named MSCC, which exhibits significant cross-scale characteristics. Experimental results demonstrate that LiteScan-Net achieves state-of-the-art (SOTA) performance across the CLCD, Hi-CNA, and MSCC datasets, with F1-scores of 79.43%, 84.82%, and 89.62%, respectively. With a low computational cost of only 1.78 GFLOPs and a real-time inference speed of 37.9 FPS, LiteScan-Net demonstrates high potential for future deployment on resource-constrained edge devices.</p>
	]]></content:encoded>

	<dc:title>LiteScan-Net: A Lightweight Scanning Network and a Large-Scale Dataset for Cropland Change Detection</dc:title>
			<dc:creator>Zhengfang Lou</dc:creator>
			<dc:creator>Xiaoping Lu</dc:creator>
			<dc:creator>Yao Lu</dc:creator>
			<dc:creator>Siyi Li</dc:creator>
			<dc:creator>Guosheng Cai</dc:creator>
			<dc:creator>Ling Song</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091447</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-05-06</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-05-06</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1447</prism:startingPage>
		<prism:doi>10.3390/rs18091447</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1447</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1441">

	<title>Remote Sensing, Vol. 18, Pages 1441: A 2025 High-Resolution Glacier Inventory of the Greater Caucasus Reveals Accelerated Area Loss</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1441</link>
	<description>The Greater Caucasus is one of the most extensively glacierized mountain systems in mid-latitude Eurasia and has experienced substantial glacier retreat in recent decades. Continuous monitoring using high-resolution satellite observations is therefore essential for accurately quantifying ongoing and future changes. In this study, we present a new glacier inventory for 2025 derived from high-resolution (3 m) PlanetScope satellite imagery combined with topographic information from the 30 m Advanced Land Observing Satellite (ALOS) Global Digital Surface Model (2006&amp;ndash;2011). A total of 101 cloud-free PlanetScope scenes, acquired primarily during August&amp;ndash;September 2025, were manually delineated to ensure precise glacier boundary detection. Regional climatic data, including summer temperature and winter precipitation from the ERA5 reanalysis, were compiled to support interpretation of glacier changes since the 1960s. The new inventory identifies 2341 glaciers covering 964.0 &amp;plusmn; 22.8 km2 across the Greater Caucasus. Glacier distribution is highly uneven: most of the glacier-covered area is found in the Central Caucasus (730.2 &amp;plusmn; 15.5 km2), whereas considerably smaller glacierized areas occur in the Western and Eastern sectors. Most glaciers are located on northern slopes (687.7 &amp;plusmn; 16.0 km2), reflecting strong topographic and climatic asymmetry. Mean glacier elevations range from ~3300 to 3600 m a.s.l., increasing eastward in response to decreasing precipitation. Size-class analysis shows that small glaciers (&amp;lt;0.5 km2) dominate numerically, whereas a limited number of large valley glaciers (&amp;gt;5.0 km2) contribute disproportionately to total glacier area. Comparison with previous inventories indicates continued and accelerated glacier retreat, particularly since 2014, with a mean area loss rate of &amp;minus;1.8% yr&amp;minus;1. 
These comparisons further show that a total of 965 glaciers (~122.9 km2) have become extinct across the Greater Caucasus since the 1960s. This trend is primarily driven by increasing summer temperatures and declining winter precipitation. This high-resolution inventory provides the most detailed glacier dataset currently available for the Greater Caucasus and establishes an updated benchmark for future glacier monitoring, climate change studies, and hydrological assessments.</description>
	<pubDate>2026-05-06</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1441: A 2025 High-Resolution Glacier Inventory of the Greater Caucasus Reveals Accelerated Area Loss</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1441">doi: 10.3390/rs18091441</a></p>
	<p>Authors:
		Levan G. Tielidze
		Gennady A. Nosenko
		Akaki Nadaraia
		Tatiana E. Khromova
		Roman M. Kumladze
		Caroline C. Clason
		Mikheil Elashvili
		Lela Gadrani
		</p>
	<p>The Greater Caucasus is one of the most extensively glacierized mountain systems in mid-latitude Eurasia and has experienced substantial glacier retreat in recent decades. Continuous monitoring using high-resolution satellite observations is therefore essential for accurately quantifying ongoing and future changes. In this study, we present a new glacier inventory for 2025 derived from high-resolution (3 m) PlanetScope satellite imagery combined with topographic information from the 30 m Advanced Land Observing Satellite (ALOS) Global Digital Surface Model (2006&amp;amp;ndash;2011). A total of 101 cloud-free PlanetScope scenes, acquired primarily during August&amp;amp;ndash;September 2025, were manually delineated to ensure precise glacier boundary detection. Regional climatic data, including summer temperature and winter precipitation from the ERA5 reanalysis, were compiled to support interpretation of glacier changes since the 1960s. The new inventory identifies 2341 glaciers covering 964.0 &amp;amp;plusmn; 22.8 km2 across the Greater Caucasus. Glacier distribution is highly uneven: most of the glacier-covered area is found in the Central Caucasus (730.2 &amp;amp;plusmn; 15.5 km2), whereas considerably smaller glacierized areas occur in the Western and Eastern sectors. Most glaciers are located on northern slopes (687.7 &amp;amp;plusmn; 16.0 km2), reflecting strong topographic and climatic asymmetry. Mean glacier elevations range from ~3300 to 3600 m a.s.l., increasing eastward in response to decreasing precipitation. Size-class analysis shows that small glaciers (&amp;amp;lt;0.5 km2) dominate numerically, whereas a limited number of large valley glaciers (&amp;amp;gt;5.0 km2) contribute disproportionately to total glacier area. Comparison with previous inventories indicates continued and accelerated glacier retreat, particularly since 2014, with a mean area loss rate of &amp;amp;minus;1.8% yr&amp;amp;minus;1. 
These comparisons further show that a total of 965 glaciers (~122.9 km2) have become extinct across the Greater Caucasus since the 1960s. This trend is primarily driven by increasing summer temperatures and declining winter precipitation. This high-resolution inventory provides the most detailed glacier dataset currently available for the Greater Caucasus and establishes an updated benchmark for future glacier monitoring, climate change studies, and hydrological assessments.</p>
	]]></content:encoded>

	<dc:title>A 2025 High-Resolution Glacier Inventory of the Greater Caucasus Reveals Accelerated Area Loss</dc:title>
			<dc:creator>Levan G. Tielidze</dc:creator>
			<dc:creator>Gennady A. Nosenko</dc:creator>
			<dc:creator>Akaki Nadaraia</dc:creator>
			<dc:creator>Tatiana E. Khromova</dc:creator>
			<dc:creator>Roman M. Kumladze</dc:creator>
			<dc:creator>Caroline C. Clason</dc:creator>
			<dc:creator>Mikheil Elashvili</dc:creator>
			<dc:creator>Lela Gadrani</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091441</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-05-06</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-05-06</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1441</prism:startingPage>
		<prism:doi>10.3390/rs18091441</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1441</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1446">

	<title>Remote Sensing, Vol. 18, Pages 1446: Improving Satellite-Based Estimation and Mapping of Soil Lead by Using an Enhanced Spectral Feature Set and XGBoost Model</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1446</link>
	<description>Satellite hyperspectral remote sensing offers an efficient and cost-effective approach for estimating and mapping soil lead (Pb), thereby supporting pollution identification and environmental sustainability. However, the development of satellite-based spectral estimation models depends on the availability of a robust spectral feature set for soil Pb as input, which is difficult to obtain under field conditions due to interference from moisture, particle size, and light scattering. To address this issue, controlled spectral experiments were conducted on laboratory-prepared soil samples with varying Pb contamination levels. The spectral characteristics associated with Pb contamination were analyzed, and an enhanced spectral feature set (ESFS) was constructed using the successive projections algorithm&amp;ndash;Shapley additive explanations method. Two new spectral indices for Pb-contaminated soils, named SPPI-2 and SPPI-3, were developed and incorporated into the ESFS. The ESFS was then applied to satellite hyperspectral data calibrated via direct standardization, with its spectral parameters used as input variables and measured Pb concentrations from field soil samples as the dependent variable. Finally, a satellite-based spectral model for soil Pb estimation was developed using the XGBoost (eXtreme Gradient Boosting) algorithm. Results showed that the spectral parameters in the ESFS included four characteristic bands (R840, R1013, R1215, and R2239) and two newly developed spectral indices (SPPI-2 and SPPI-3). The satellite-based spectral estimation model based on the ESFS and XGBoost algorithm achieved the best performance, with R2 (coefficient of determination) and RPD (ratio of performance to deviation) values of 0.78 and 2.10, respectively, representing a maximum improvement of 164.10% and a minimum improvement of 12.86% (in terms of RPD values) compared to common methods. 
Hotspot areas of Pb-contaminated soils were mainly found in the eastern part of the abandoned coal mining area, which is associated with improper coal mining and transportation activities. This study presents a satellite hyperspectral framework for effectively estimating the distribution pattern of soil Pb and supporting the regional-scale soil management and environmental sustainability.</description>
	<pubDate>2026-05-06</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1446: Improving Satellite-Based Estimation and Mapping of Soil Lead by Using an Enhanced Spectral Feature Set and XGBoost Model</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1446">doi: 10.3390/rs18091446</a></p>
	<p>Authors:
		Xibo Xu
		Ying Wang
		Xinrui Dai
		Qi Shen
		Quanyuan Wu
		Zeqiang Wang
		Jianfei Cao
		</p>
	<p>Satellite hyperspectral remote sensing offers an efficient and cost-effective approach for estimating and mapping soil lead (Pb), thereby supporting pollution identification and environmental sustainability. However, the development of satellite-based spectral estimation models depends on the availability of a robust spectral feature set for soil Pb as input, which is difficult to obtain under field conditions due to interference from moisture, particle size, and light scattering. To address this issue, controlled spectral experiments were conducted on laboratory-prepared soil samples with varying Pb contamination levels. The spectral characteristics associated with Pb contamination were analyzed, and an enhanced spectral feature set (ESFS) was constructed using the successive projections algorithm&amp;amp;ndash;Shapley additive explanations method. Two new spectral indices for Pb-contaminated soils, named SPPI-2 and SPPI-3, were developed and incorporated into the ESFS. The ESFS was then applied to satellite hyperspectral data calibrated via direct standardization, with its spectral parameters used as input variables and measured Pb concentrations from field soil samples as the dependent variable. Finally, a satellite-based spectral model for soil Pb estimation was developed using the XGBoost (eXtreme Gradient Boosting) algorithm. Results showed that the spectral parameters in the ESFS included four characteristic bands (R840, R1013, R1215, and R2239) and two newly developed spectral indices (SPPI-2 and SPPI-3). The satellite-based spectral estimation model based on the ESFS and XGBoost algorithm achieved the best performance, with R2 (coefficient of determination) and RPD (ratio of performance to deviation) values of 0.78 and 2.10, respectively, representing a maximum improvement of 164.10% and a minimum improvement of 12.86% (in terms of RPD values) compared to common methods. 
Hotspot areas of Pb-contaminated soils were mainly found in the eastern part of the abandoned coal mining area, which is associated with improper coal mining and transportation activities. This study presents a satellite hyperspectral framework for effectively estimating the distribution pattern of soil Pb and supporting the regional-scale soil management and environmental sustainability.</p>
	]]></content:encoded>

	<dc:title>Improving Satellite-Based Estimation and Mapping of Soil Lead by Using an Enhanced Spectral Feature Set and XGBoost Model</dc:title>
			<dc:creator>Xibo Xu</dc:creator>
			<dc:creator>Ying Wang</dc:creator>
			<dc:creator>Xinrui Dai</dc:creator>
			<dc:creator>Qi Shen</dc:creator>
			<dc:creator>Quanyuan Wu</dc:creator>
			<dc:creator>Zeqiang Wang</dc:creator>
			<dc:creator>Jianfei Cao</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091446</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-05-06</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-05-06</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1446</prism:startingPage>
		<prism:doi>10.3390/rs18091446</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1446</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1445">

	<title>Remote Sensing, Vol. 18, Pages 1445: Glacial Lake Changes in the Donglin Tsangpo Watershed of China&amp;ndash;Nepal Economic Corridor from 2016 to 2024</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1445</link>
	<description>Glacial lake dynamics in high-mountain regions serve as a sensitive proxy for cryospheric responses to climate warming. This study utilizes multi-temporal Sentinel-2 imagery and digital elevation model (DEM) data to quantify glacial lake evolution in the Donglin Tsangpo Watershed, a strategically important section of the China&amp;ndash;Nepal Economic Corridor, from 2016 to 2024. The results show a significant expansion in both the number (from 43 to 56) and total area (from 3.97 km2 to 4.94 km2, +24.43%) of glacial lakes, primarily driven by the rapid emergence of very small lakes (0.02&amp;ndash;0.05 km2) and a clear upward shift in elevation distribution, with new lakes forming above 5300 m and extending to elevations exceeding 5500 m. Analysis of Moderate Resolution Imaging Spectroradiometer (MODIS) land surface temperature (LST) reveals that this expansion coincided with pronounced positive thermal anomalies, particularly the 2020 extreme warm event (daytime +3.88 &amp;deg;C, nighttime +1.61 &amp;deg;C). Mechanistic analysis using the ERA5-Land reanalysis dataset further demonstrates that persistent positive downward longwave radiation (LW) anomalies (peaking at +10.71 W/m2 in 2021) effectively compensated for reduced shortwave input, inhibiting nocturnal refreezing and extending the effective ablation period. Furthermore, a rising liquid-to-solid precipitation ratio and extreme melt-day anomalies (up to +39.36 days) provided intensified hydrothermal inputs, driving the pronounced expansion of glacier-contact lakes despite non-linear interannual responses. This study also estimates individual lake volumes, identifying a transition toward rapid lake development that elevates potential downstream hazard exposure. These findings provide a high-resolution dataset and a robust physical framework for transboundary environmental monitoring and risk assessment in this climate-sensitive region.</description>
	<pubDate>2026-05-06</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1445: Glacial Lake Changes in the Donglin Tsangpo Watershed of China&amp;ndash;Nepal Economic Corridor from 2016 to 2024</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1445">doi: 10.3390/rs18091445</a></p>
	<p>Authors:
		Zhe Chen
		Changlu Cui
		Daxiang Xiang
		Ying Jiang
		</p>
	<p>Glacial lake dynamics in high-mountain regions serve as a sensitive proxy for cryospheric responses to climate warming. This study utilizes multi-temporal Sentinel-2 imagery and digital elevation model (DEM) data to quantify glacial lake evolution in the Donglin Tsangpo Watershed, a strategically important section of the China&amp;amp;ndash;Nepal Economic Corridor, from 2016 to 2024. The results show a significant expansion in both the number (from 43 to 56) and total area (from 3.97 km2 to 4.94 km2, +24.43%) of glacial lakes, primarily driven by the rapid emergence of very small lakes (0.02&amp;amp;ndash;0.05 km2) and a clear upward shift in elevation distribution, with new lakes forming above 5300 m and extending to elevations exceeding 5500 m. Analysis of Moderate Resolution Imaging Spectroradiometer (MODIS) land surface temperature (LST) reveals that this expansion coincided with pronounced positive thermal anomalies, particularly the 2020 extreme warm event (daytime +3.88 &amp;amp;deg;C, nighttime +1.61 &amp;amp;deg;C). Mechanistic analysis using the ERA5-Land reanalysis dataset further demonstrates that persistent positive downward longwave radiation (LW) anomalies (peaking at +10.71 W/m2 in 2021) effectively compensated for reduced shortwave input, inhibiting nocturnal refreezing and extending the effective ablation period. Furthermore, a rising liquid-to-solid precipitation ratio and extreme melt-day anomalies (up to +39.36 days) provided intensified hydrothermal inputs, driving the pronounced expansion of glacier-contact lakes despite non-linear interannual responses. This study also estimates individual lake volumes, identifying a transition toward rapid lake development that elevates potential downstream hazard exposure. These findings provide a high-resolution dataset and a robust physical framework for transboundary environmental monitoring and risk assessment in this climate-sensitive region.</p>
	]]></content:encoded>

	<dc:title>Glacial Lake Changes in the Donglin Tsangpo Watershed of China&amp;ndash;Nepal Economic Corridor from 2016 to 2024</dc:title>
			<dc:creator>Zhe Chen</dc:creator>
			<dc:creator>Changlu Cui</dc:creator>
			<dc:creator>Daxiang Xiang</dc:creator>
			<dc:creator>Ying Jiang</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091445</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-05-06</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-05-06</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1445</prism:startingPage>
		<prism:doi>10.3390/rs18091445</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1445</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1444">

	<title>Remote Sensing, Vol. 18, Pages 1444: SASR: Sensor-Agnostic Semantic Representation Unification for Cross-Modal RGB and Hyperspectral Aerial Scene Recognition</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1444</link>
	<description>Aerial scene recognition has progressed substantially with deep learning methods for RGB and hyperspectral imagery; however, existing approaches typically operate on single modalities or rely on explicit multimodal fusion, limiting scalability, flexibility, and deployment in heterogeneous sensing environments. To address this limitation, we propose a sensor-agnostic semantic representation learning framework that formulates multimodal learning as the unification of semantic representations rather than feature-level fusion. The proposed architecture employs modality-specific encoders and projection heads to map spatial and spectral&amp;amp;ndash;spatial features into a shared semantic embedding space, enabling modality-invariant representation learning while preserving discriminative characteristics of each sensing modality. A composite objective integrating cross-spectral alignment, intra-class compactness regularization, and prototype-based semantic anchoring is introduced to enforce consistent embedding geometry and improve class separability across modalities. A unified classifier operating within this shared space enables reliable inference from a single modality input without requiring paired data or explicit fusion. Extensive evaluations on multiple benchmark datasets, including Houston 2013 for cross-modality RGB&amp;amp;ndash;hyperspectral analysis, UC Merced for independent RGB aerial scene classification, and Indian Pines for hyperspectral land-cover recognition, demonstrate the robustness and generalization capability of the proposed framework. In Houston 2013, the method achieves 96.4% (RGB) and 97.3% (hyperspectral) overall accuracy, with cross-modality transfer performance of 87.2% (RGB &amp;amp;rarr; HSI) and 88.7% (HSI &amp;amp;rarr; RGB), further improving to 97.0% and 97.8% under joint training. On UC Merced and Indian Pines, the model attains 98.7% and 97.6% overall accuracy, respectively. 
These results establish semantic representation unification as a scalable and effective alternative to conventional multimodal fusion for heterogeneous remote sensing environments.</description>
	<pubDate>2026-05-06</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1444: SASR: Sensor-Agnostic Semantic Representation Unification for Cross-Modal RGB and Hyperspectral Aerial Scene Recognition</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1444">doi: 10.3390/rs18091444</a></p>
	<p>Authors:
		Muhammad Zaheer Sajid
		Muhammad Fareed Hamid
		Kamran Bashir Taas
		Muhammad Attique Khan
		Latifah Almuqren
		Mohammad Alhefdi
		Yunyoung Nam
		Zepa Yang
		</p>
	<p>Aerial scene recognition has progressed substantially with deep learning methods for RGB and hyperspectral imagery; however, existing approaches typically operate on single modalities or rely on explicit multimodal fusion, limiting scalability, flexibility, and deployment in heterogeneous sensing environments. To address this limitation, we propose a sensor-agnostic semantic representation learning framework that formulates multimodal learning as the unification of semantic representations rather than feature-level fusion. The proposed architecture employs modality-specific encoders and projection heads to map spatial and spectral&amp;amp;ndash;spatial features into a shared semantic embedding space, enabling modality-invariant representation learning while preserving discriminative characteristics of each sensing modality. A composite objective integrating cross-spectral alignment, intra-class compactness regularization, and prototype-based semantic anchoring is introduced to enforce consistent embedding geometry and improve class separability across modalities. A unified classifier operating within this shared space enables reliable inference from a single modality input without requiring paired data or explicit fusion. Extensive evaluations on multiple benchmark datasets, including Houston 2013 for cross-modality RGB&amp;amp;ndash;hyperspectral analysis, UC Merced for independent RGB aerial scene classification, and Indian Pines for hyperspectral land-cover recognition, demonstrate the robustness and generalization capability of the proposed framework. In Houston 2013, the method achieves 96.4% (RGB) and 97.3% (hyperspectral) overall accuracy, with cross-modality transfer performance of 87.2% (RGB &amp;amp;rarr; HSI) and 88.7% (HSI &amp;amp;rarr; RGB), further improving to 97.0% and 97.8% under joint training. On UC Merced and Indian Pines, the model attains 98.7% and 97.6% overall accuracy, respectively. 
These results establish semantic representation unification as a scalable and effective alternative to conventional multimodal fusion for heterogeneous remote sensing environments.</p>
	]]></content:encoded>

	<dc:title>SASR: Sensor-Agnostic Semantic Representation Unification for Cross-Modal RGB and Hyperspectral Aerial Scene Recognition</dc:title>
			<dc:creator>Muhammad Zaheer Sajid</dc:creator>
			<dc:creator>Muhammad Fareed Hamid</dc:creator>
			<dc:creator>Kamran Bashir Taas</dc:creator>
			<dc:creator>Muhammad Attique Khan</dc:creator>
			<dc:creator>Latifah Almuqren</dc:creator>
			<dc:creator>Mohammad Alhefdi</dc:creator>
			<dc:creator>Yunyoung Nam</dc:creator>
			<dc:creator>Zepa Yang</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091444</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-05-06</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-05-06</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1444</prism:startingPage>
		<prism:doi>10.3390/rs18091444</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1444</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1443">

	<title>Remote Sensing, Vol. 18, Pages 1443: Backpack System Development and Image-LiDAR Integration for Improved Geospatial Data Alignment in Forest Mapping</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1443</link>
	<description>Backpack mobile mapping systems (MMS) equipped with LiDAR and RGB cameras, as well as an optional GNSS/INS direct georeferencing unit, are increasingly utilized in forest inventory applications. In general, LiDAR point clouds provide detailed structural information, whereas imagery offers visual specifics of surface features. However, cameras typically operate at lower acquisition rates compared to LiDAR. In proximal mapping, another challenge is the inconsistent reception of GNSS signals beneath forest canopies. Additionally, georeferencing accuracy may differ between LiDAR and imagery due to biases in the system calibration parameters and variations in post-processing approaches. To address these challenges, this study introduces a Backpack MMS that uses cameras configured at elevated frame rates to enhance image overlap. Concurrently, this study presents an algorithmic approach to addressing georeferencing issues by integrating imagery and LiDAR data, thereby enhancing system calibration and improving platform trajectory. The method is based on the hypothesis that forest environments are rich with geometrically well-defined features, such as tree trunks and ground patches. By identifying conjugate primitives in point clouds from both imagery and LiDAR, the procedure optimizes feature models while simultaneously minimizing calibration biases and/or trajectory errors. The proposed approach is validated using multiple field datasets collected in diverse forest environments. Quantitative results show that the procedure reduces image&amp;amp;ndash;LiDAR feature misalignment across all datasets from up to 1.1 m in the planimetric direction and 2 m in the vertical direction to within 5 cm in both. The feature fitting accuracy also improves from 2.9 cm to 0.85 cm for LiDAR point clouds and from 10 cm to 0.9 cm for image-based point clouds. 
However, the results indicate that despite increased data availability, imagery alone remains less reliable than LiDAR for extracting structural information. Nevertheless, the proposed image&amp;amp;ndash;LiDAR alignment strategy represents a crucial step toward developing a comprehensive tree inventory.</description>
	<pubDate>2026-05-06</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1443: Backpack System Development and Image-LiDAR Integration for Improved Geospatial Data Alignment in Forest Mapping</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1443">doi: 10.3390/rs18091443</a></p>
	<p>Authors:
		Raja Manish
		Songlin Fei
		Ayman Habib
		</p>
	<p>Backpack mobile mapping systems (MMS) equipped with LiDAR and RGB cameras, as well as an optional GNSS/INS direct georeferencing unit, are increasingly utilized in forest inventory applications. In general, LiDAR point clouds provide detailed structural information, whereas imagery offers visual specifics of surface features. However, cameras typically operate at lower acquisition rates compared to LiDAR. In proximal mapping, another challenge is the inconsistent reception of GNSS signals beneath forest canopies. Additionally, georeferencing accuracy may differ between LiDAR and imagery due to biases in the system calibration parameters and variations in post-processing approaches. To address these challenges, this study introduces a Backpack MMS that uses cameras configured at elevated frame rates to enhance image overlap. Concurrently, this study presents an algorithmic approach to addressing georeferencing issues by integrating imagery and LiDAR data, thereby enhancing system calibration and improving platform trajectory. The method is based on the hypothesis that forest environments are rich with geometrically well-defined features, such as tree trunks and ground patches. By identifying conjugate primitives in point clouds from both imagery and LiDAR, the procedure optimizes feature models while simultaneously minimizing calibration biases and/or trajectory errors. The proposed approach is validated using multiple field datasets collected in diverse forest environments. Quantitative results show that the procedure reduces image&amp;amp;ndash;LiDAR feature misalignment across all datasets from up to 1.1 m in the planimetric direction and 2 m in the vertical direction to within 5 cm in both. The feature fitting accuracy also improves from 2.9 cm to 0.85 cm for LiDAR point clouds and from 10 cm to 0.9 cm for image-based point clouds. 
However, the results indicate that despite increased data availability, imagery alone remains less reliable than LiDAR for extracting structural information. Nevertheless, the proposed image&amp;amp;ndash;LiDAR alignment strategy represents a crucial step toward developing a comprehensive tree inventory.</p>
	]]></content:encoded>

	<dc:title>Backpack System Development and Image-LiDAR Integration for Improved Geospatial Data Alignment in Forest Mapping</dc:title>
			<dc:creator>Raja Manish</dc:creator>
			<dc:creator>Songlin Fei</dc:creator>
			<dc:creator>Ayman Habib</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091443</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-05-06</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-05-06</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1443</prism:startingPage>
		<prism:doi>10.3390/rs18091443</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1443</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1442">

	<title>Remote Sensing, Vol. 18, Pages 1442: Assessing Debris-Flow Susceptibility at Local and Global Scales: A Deep-Learning-Based Comparative Study of Sichuan, China, and Worldwide</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1442</link>
	<description>Debris flows pose a significant global geohazard, causing a large number of deaths and infrastructure damage every year. Effective protection and land-use planning in the affected regions requires understanding susceptibility to these events. Although a global phenomenon, previous studies have focused extensively on local areas with specialized models and accordingly complex feature selections. In this study, we investigate whether a unified debris-flow susceptibility prediction paradigm can be achieved regardless of regional scale, using only very few global public remote sensing data sources. To this end, this work contributes in the following ways: (1) A novel two-step negative sample generation scheme is proposed, and two open debris-flow datasets are constructed based on global debris-flow locations and locations in Sichuan, China. (2) An open-source end-to-end machine learning platform using remote sensing features directly is proposed, which achieves state-of-the-art results with 0.947 and 0.957 AUC in both scales compared to 0.88 for previous methods on the same location data, while using far fewer features. (3) A comparative feature importance analysis shows that, given the significant feature distribution difference on global vs local datasets, alleviating the scale-level gap is possible by leveraging the advanced deep learning technologies. This allows our unified framework to be easily applied to any regional study of debris-flow susceptibility prediction.</description>
	<pubDate>2026-05-06</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1442: Assessing Debris-Flow Susceptibility at Local and Global Scales: A Deep-Learning-Based Comparative Study of Sichuan, China, and Worldwide</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1442">doi: 10.3390/rs18091442</a></p>
	<p>Authors:
		Andreas Nienkötter
		Ang Bian
		Baofeng Di
		Jierui Li
		Tian Deng
		</p>
	<p>Debris flows pose a significant global geohazard, causing a large number of deaths and infrastructure damage every year. Effective protection and land-use planning in the affected regions requires understanding susceptibility to these events. Although a global phenomenon, previous studies have focused extensively on local areas with specialized models and accordingly complex feature selections. In this study, we investigate whether a unified debris-flow susceptibility prediction paradigm can be achieved regardless of regional scale, using only very few global public remote sensing data sources. To this end, this work contributes in the following ways: (1) A novel two-step negative sample generation scheme is proposed, and two open debris-flow datasets are constructed based on global debris-flow locations and locations in Sichuan, China. (2) An open-source end-to-end machine learning platform using remote sensing features directly is proposed, which achieves state-of-the-art results with 0.947 and 0.957 AUC in both scales compared to 0.88 for previous methods on the same location data, while using far fewer features. (3) A comparative feature importance analysis shows that, given the significant feature distribution difference on global vs local datasets, alleviating the scale-level gap is possible by leveraging the advanced deep learning technologies. This allows our unified framework to be easily applied to any regional study of debris-flow susceptibility prediction.</p>
	]]></content:encoded>

	<dc:title>Assessing Debris-Flow Susceptibility at Local and Global Scales: A Deep-Learning-Based Comparative Study of Sichuan, China, and Worldwide</dc:title>
			<dc:creator>Andreas Nienkötter</dc:creator>
			<dc:creator>Ang Bian</dc:creator>
			<dc:creator>Baofeng Di</dc:creator>
			<dc:creator>Jierui Li</dc:creator>
			<dc:creator>Tian Deng</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091442</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-05-06</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-05-06</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1442</prism:startingPage>
		<prism:doi>10.3390/rs18091442</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1442</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1440">

	<title>Remote Sensing, Vol. 18, Pages 1440: Hyperspectral Band Selection for Ground Fuel Classification for Prescribed Fires</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1440</link>
	<description>Hyperspectral image (HSI) analysis plays a central role in remote sensing tasks requiring fine-grained material discrimination, vegetation health assessment, and post-disturbance monitoring. Yet, the high dimensionality and strong spectral redundancy in HSIs often reduce the efficiency and reliability of machine learning models. These challenges are especially important in wildfire science and prescribed-fire monitoring, where spectral responses vary due to burn severity, char deposition, canopy structure, and early vegetation recovery. Benchmark datasets such as Indian Pines and Pavia University and others provide controlled environments for algorithms&amp;amp;rsquo; evaluation, but real-world post-fire forest conditions pose additional complexity. This study presents a unified and comprehensive evaluation of five dimensionality reduction strategies: Principal Component Analysis (PCA), Spatial&amp;amp;ndash;Spectral Edge Preservation (SSEP), Spectral-Redundancy Penalized Attention (SRPA), and a Deep Reinforcement Learning (DRL)-based selector together with a clustering based baseline, K-Means Clustering-Based Band Selection (KMCBS). These strategies are combined with classical machine learning and deep learning classifiers: Random Forest (RF), Support Vector Machines (SVMs), K-Nearest Neighbors (KNNs), and 3D Convolutional Neural Networks (3D-CNN). The full pipeline includes exploratory data analysis, preprocessing, patch-based spatial&amp;amp;ndash;spectral modeling, consistent train&amp;amp;ndash;validation protocols, and multi-dataset evaluation across Indian Pines, Pavia University, and a new custom VNIR hyperspectral dataset collected after prescribed burns at the Lubrecht Experimental Forest in Montana, USA. 
By systematically comparing statistical, edge-aware, attention-guided, and reinforcement learning-based band-selection strategies, this work identifies compact yet informative spectral subsets that enhance classification performance while reducing computational cost. Importantly, the inclusion of the Montana prescribed-burn dataset provides a unique real-world testbed for understanding band selection behavior in fire-affected forest environments. Overall, this study contributes a generalizable and extensible framework for HSI dimensionality reduction and classification, laying the groundwork for future applications in wildfire assessment, vegetation recovery monitoring, and remote sensing.</description>
	<pubDate>2026-05-06</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1440: Hyperspectral Band Selection for Ground Fuel Classification for Prescribed Fires</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1440">doi: 10.3390/rs18091440</a></p>
	<p>Authors:
		Mahmad Isaq Karankot
		Ethan M. Glenn
		Muhammad Umer Masood
		Xiaobing Zhou
		Bradley M. Whitaker
		</p>
	<p>Hyperspectral image (HSI) analysis plays a central role in remote sensing tasks requiring fine-grained material discrimination, vegetation health assessment, and post-disturbance monitoring. Yet, the high dimensionality and strong spectral redundancy in HSIs often reduce the efficiency and reliability of machine learning models. These challenges are especially important in wildfire science and prescribed-fire monitoring, where spectral responses vary due to burn severity, char deposition, canopy structure, and early vegetation recovery. Benchmark datasets such as Indian Pines and Pavia University and others provide controlled environments for algorithms&amp;amp;rsquo; evaluation, but real-world post-fire forest conditions pose additional complexity. This study presents a unified and comprehensive evaluation of five dimensionality reduction strategies: Principal Component Analysis (PCA), Spatial&amp;amp;ndash;Spectral Edge Preservation (SSEP), Spectral-Redundancy Penalized Attention (SRPA), and a Deep Reinforcement Learning (DRL)-based selector together with a clustering based baseline, K-Means Clustering-Based Band Selection (KMCBS). These strategies are combined with classical machine learning and deep learning classifiers: Random Forest (RF), Support Vector Machines (SVMs), K-Nearest Neighbors (KNNs), and 3D Convolutional Neural Networks (3D-CNN). The full pipeline includes exploratory data analysis, preprocessing, patch-based spatial&amp;amp;ndash;spectral modeling, consistent train&amp;amp;ndash;validation protocols, and multi-dataset evaluation across Indian Pines, Pavia University, and a new custom VNIR hyperspectral dataset collected after prescribed burns at the Lubrecht Experimental Forest in Montana, USA. 
By systematically comparing statistical, edge-aware, attention-guided, and reinforcement learning-based band-selection strategies, this work identifies compact yet informative spectral subsets that enhance classification performance while reducing computational cost. Importantly, the inclusion of the Montana prescribed-burn dataset provides a unique real-world testbed for understanding band selection behavior in fire-affected forest environments. Overall, this study contributes a generalizable and extensible framework for HSI dimensionality reduction and classification, laying the groundwork for future applications in wildfire assessment, vegetation recovery monitoring, and remote sensing.</p>
	]]></content:encoded>

	<dc:title>Hyperspectral Band Selection for Ground Fuel Classification for Prescribed Fires</dc:title>
			<dc:creator>Mahmad Isaq Karankot</dc:creator>
			<dc:creator>Ethan M. Glenn</dc:creator>
			<dc:creator>Muhammad Umer Masood</dc:creator>
			<dc:creator>Xiaobing Zhou</dc:creator>
			<dc:creator>Bradley M. Whitaker</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091440</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-05-06</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-05-06</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1440</prism:startingPage>
		<prism:doi>10.3390/rs18091440</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1440</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1439">

	<title>Remote Sensing, Vol. 18, Pages 1439: Rid-HRNet: A Lightweight Multi-Scale Network for Sand Ridge Line Extraction from Landsat Imagery</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1439</link>
	<description>Sand ridge lines serve as key geomorphological indicators for interpreting aeolian dynamics and assessing desertification intensity. However, automated extraction of continuous ridge structures from remote sensing imagery remains challenging due to the multi-scale morphology of dunes, complex surface textures, and strong shadow interference. Conventional edge detection models often rely on computationally heavy backbones or suffer from structural discontinuities in subtle ridge branches, limiting their applicability in large-scale desert monitoring. To address these challenges, we propose Rid-HRNet, a lightweight high-resolution network specifically designed for efficient and structurally coherent sand ridge extraction. Unlike traditional encoder&amp;amp;ndash;decoder architectures, Rid-HRNet maintains parallel high-resolution representations throughout the network to preserve fine spatial details. A Multi-Scale Information Aggregation (MSIA) module enhances cross-scale feature interaction by integrating shallow structural cues with deeper semantic representations. In addition, an Improved Contextual Fusion Module (ICFM) employs pixel-wise attention to adaptively fuse multi-level predictions, reinforcing ridge continuity while suppressing background interference. Experiments on Landsat-8 desert imagery demonstrate that Rid-HRNet achieves an Optimal Dataset Scale (ODS) of 0.790, an Optimal Image Scale (OIS) of 0.806, an Average Precision (AP) of 0.710, and an AC(R50) score of 0.744. The proposed model outperforms classical VGG-based detectors, including HED and RCF, as well as recent lightweight baselines such as PiDiNet and LDC, in terms of overall accuracy and structural consistency. Notably, Rid-HRNet contains only 0.20M parameters and requires 0.55 GFLOPs, operating at 279.23 FPS with a GPU memory footprint of 0.02 GB. 
These results indicate that Rid-HRNet achieves a favorable balance between detection performance and computational efficiency, supporting large-scale geomorphological mapping and operational desert monitoring based on high-resolution satellite imagery.</description>
	<pubDate>2026-05-06</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1439: Rid-HRNet: A Lightweight Multi-Scale Network for Sand Ridge Line Extraction from Landsat Imagery</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1439">doi: 10.3390/rs18091439</a></p>
	<p>Authors:
		Xuanjing Huang
		Xinchao Liu
		Jiayue Mu
		Ye Zhu
		Zhaobin Wang
		Yaonan Zhang
		</p>
	<p>Sand ridge lines serve as key geomorphological indicators for interpreting aeolian dynamics and assessing desertification intensity. However, automated extraction of continuous ridge structures from remote sensing imagery remains challenging due to the multi-scale morphology of dunes, complex surface textures, and strong shadow interference. Conventional edge detection models often rely on computationally heavy backbones or suffer from structural discontinuities in subtle ridge branches, limiting their applicability in large-scale desert monitoring. To address these challenges, we propose Rid-HRNet, a lightweight high-resolution network specifically designed for efficient and structurally coherent sand ridge extraction. Unlike traditional encoder&amp;amp;ndash;decoder architectures, Rid-HRNet maintains parallel high-resolution representations throughout the network to preserve fine spatial details. A Multi-Scale Information Aggregation (MSIA) module enhances cross-scale feature interaction by integrating shallow structural cues with deeper semantic representations. In addition, an Improved Contextual Fusion Module (ICFM) employs pixel-wise attention to adaptively fuse multi-level predictions, reinforcing ridge continuity while suppressing background interference. Experiments on Landsat-8 desert imagery demonstrate that Rid-HRNet achieves an Optimal Dataset Scale (ODS) of 0.790, an Optimal Image Scale (OIS) of 0.806, an Average Precision (AP) of 0.710, and an AC(R50) score of 0.744. The proposed model outperforms classical VGG-based detectors, including HED and RCF, as well as recent lightweight baselines such as PiDiNet and LDC, in terms of overall accuracy and structural consistency. Notably, Rid-HRNet contains only 0.20M parameters and requires 0.55 GFLOPs, operating at 279.23 FPS with a GPU memory footprint of 0.02 GB. 
These results indicate that Rid-HRNet achieves a favorable balance between detection performance and computational efficiency, supporting large-scale geomorphological mapping and operational desert monitoring based on high-resolution satellite imagery.</p>
	]]></content:encoded>

	<dc:title>Rid-HRNet: A Lightweight Multi-Scale Network for Sand Ridge Line Extraction from Landsat Imagery</dc:title>
			<dc:creator>Xuanjing Huang</dc:creator>
			<dc:creator>Xinchao Liu</dc:creator>
			<dc:creator>Jiayue Mu</dc:creator>
			<dc:creator>Ye Zhu</dc:creator>
			<dc:creator>Zhaobin Wang</dc:creator>
			<dc:creator>Yaonan Zhang</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091439</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-05-06</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-05-06</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1439</prism:startingPage>
		<prism:doi>10.3390/rs18091439</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1439</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1437">

	<title>Remote Sensing, Vol. 18, Pages 1437: Optical River Ice Spectral Subclassification on the Tibetan Plateau: A Landsat 5&amp;ndash;9 and Sentinel-2 Benchmark with Interpretable Machine Learning</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1437</link>
	<description>River ice products from optical satellites are still dominated by binary ice&amp;amp;ndash;water or ice&amp;amp;ndash;snow discrimination, leaving within-ice spectral heterogeneity largely unresolved. This study benchmarks how far river ice can be subclassified from multispectral reflectance alone on the Tibetan Plateau using Landsat 5/7, Landsat 8/9, and Sentinel-2 surface-reflectance imagery. We compiled 356 winter scenes acquired between 2000 and 2024 across eight Tibetan Plateau basins, delineated river ice using NDSI and RDRI, and extracted 24,674 pixel-level spectra. To define reproducible subclasses, we applied K-means clustering guided by the Silhouette Coefficient, Davies&amp;amp;ndash;Bouldin index, Calinski&amp;amp;ndash;Harabasz index, and Gap Statistic. Combined with stratified visual interpretation, this approach consistently supported four optical spectral subclasses: thin-snow-covered ice, thick ice cover, thin ice, and frazil ice. Within-sensor classification accuracy remained extremely high (overall accuracy &amp;amp;ge; 0.948; kappa &amp;amp;ge; 0.929), with the Backpropagation Neural Network (BPNN) and tree ensembles performing best. Crucially, evaluating the optimal BPNN architecture revealed exceptional multi-dimensional generalizability: a Leave-One-Basin-Out spatial cross-validation yielded a stable average OA &amp;amp;gt; 99% with an average Kappa &amp;amp;gt; 0.98, while a unified multi-sensor model achieved a robust OA of 90.14% and a Kappa of 0.86. The most stable discriminative cues were visible-band brightness, reflectance turnover near ~0.7 &amp;amp;mu;m, and shortwave-infrared sensitivity to effective thickness and surface wetness. These results provide a sensor-aware benchmark for practical optical river ice spectral subclassification and clarify which multispectral bands most strongly constrain subclass separability.</description>
	<pubDate>2026-05-06</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1437: Optical River Ice Spectral Subclassification on the Tibetan Plateau: A Landsat 5&ndash;9 and Sentinel-2 Benchmark with Interpretable Machine Learning</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1437">doi: 10.3390/rs18091437</a></p>
	<p>Authors:
		Hanwen Zhang
		Hongyi Li
		</p>
	<p>River ice products from optical satellites are still dominated by binary ice&ndash;water or ice&ndash;snow discrimination, leaving within-ice spectral heterogeneity largely unresolved. This study benchmarks how far river ice can be subclassified from multispectral reflectance alone on the Tibetan Plateau using Landsat 5/7, Landsat 8/9, and Sentinel-2 surface-reflectance imagery. We compiled 356 winter scenes acquired between 2000 and 2024 across eight Tibetan Plateau basins, delineated river ice using NDSI and RDRI, and extracted 24,674 pixel-level spectra. To define reproducible subclasses, we applied K-means clustering guided by the Silhouette Coefficient, Davies&ndash;Bouldin index, Calinski&ndash;Harabasz index, and Gap Statistic. Combined with stratified visual interpretation, this approach consistently supported four optical spectral subclasses: thin-snow-covered ice, thick ice cover, thin ice, and frazil ice. Within-sensor classification accuracy remained extremely high (overall accuracy &ge; 0.948; kappa &ge; 0.929), with the Backpropagation Neural Network (BPNN) and tree ensembles performing best. Crucially, evaluating the optimal BPNN architecture revealed exceptional multi-dimensional generalizability: a Leave-One-Basin-Out spatial cross-validation yielded a stable average OA &gt; 99% with an average Kappa &gt; 0.98, while a unified multi-sensor model achieved a robust OA of 90.14% and a Kappa of 0.86. The most stable discriminative cues were visible-band brightness, reflectance turnover near ~0.7 &mu;m, and shortwave-infrared sensitivity to effective thickness and surface wetness. These results provide a sensor-aware benchmark for practical optical river ice spectral subclassification and clarify which multispectral bands most strongly constrain subclass separability.</p>
	]]></content:encoded>

	<dc:title>Optical River Ice Spectral Subclassification on the Tibetan Plateau: A Landsat 5&amp;ndash;9 and Sentinel-2 Benchmark with Interpretable Machine Learning</dc:title>
			<dc:creator>Hanwen Zhang</dc:creator>
			<dc:creator>Hongyi Li</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091437</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-05-06</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-05-06</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1437</prism:startingPage>
		<prism:doi>10.3390/rs18091437</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1437</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1435">

	<title>Remote Sensing, Vol. 18, Pages 1435: Characterizing Rainfall Discrepancies Between Landslide Sites and the Nearest Rain Gauges Using Radar Estimates: A Case Study from Italy</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1435</link>
	<description>The spatial representativeness of rain gauges is critical for accurately estimating rainfall that triggers landslides and for defining operational thresholds. This study evaluates the potential error in conventional rain-gauge-based methods for estimating landslide-triggering rainfall, using 548 landslide events across Italy from the e-ITALICA database, which reports the duration of each rainfall event and the location of the nearest available rain gauge. A radar-based assessment, using the Surface Rainfall Intensity (SRI) product (1 km2 resolution) provided by the Italian Department of Civil Protection, quantified discrepancies between rainfall at landslide locations and at the nearest rain gauges. Seasonal analysis was performed, considering summer events (April&amp;ndash;September), typically associated with convective and spatially variable rainfall, and winter events (October&amp;ndash;March), generally more stratiform and uniform rainfall. Results indicate that the probability of large discrepancies increases with distance. Summer events show larger discrepancies at short distances compared to winter events, but seasonal distributions converge at larger distances. These findings provide useful insights into rain gauge representativeness in studies of rainfall-induced landslides.</description>
	<pubDate>2026-05-06</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1435: Characterizing Rainfall Discrepancies Between Landslide Sites and the Nearest Rain Gauges Using Radar Estimates: A Case Study from Italy</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1435">doi: 10.3390/rs18091435</a></p>
	<p>Authors:
		Carmela Vennari
		Francesco Chiaravalloti
		Roberto Coscarelli
		</p>
	<p>The spatial representativeness of rain gauges is critical for accurately estimating rainfall that triggers landslides and for defining operational thresholds. This study evaluates the potential error in conventional rain-gauge-based methods for estimating landslide-triggering rainfall, using 548 landslide events across Italy from the e-ITALICA database, which reports the duration of each rainfall event and the location of the nearest available rain gauge. A radar-based assessment, using the Surface Rainfall Intensity (SRI) product (1 km2 resolution) provided by the Italian Department of Civil Protection, quantified discrepancies between rainfall at landslide locations and at the nearest rain gauges. Seasonal analysis was performed, considering summer events (April&ndash;September), typically associated with convective and spatially variable rainfall, and winter events (October&ndash;March), generally more stratiform and uniform rainfall. Results indicate that the probability of large discrepancies increases with distance. Summer events show larger discrepancies at short distances compared to winter events, but seasonal distributions converge at larger distances. These findings provide useful insights into rain gauge representativeness in studies of rainfall-induced landslides.</p>
	]]></content:encoded>

	<dc:title>Characterizing Rainfall Discrepancies Between Landslide Sites and the Nearest Rain Gauges Using Radar Estimates: A Case Study from Italy</dc:title>
			<dc:creator>Carmela Vennari</dc:creator>
			<dc:creator>Francesco Chiaravalloti</dc:creator>
			<dc:creator>Roberto Coscarelli</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091435</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-05-06</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-05-06</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1435</prism:startingPage>
		<prism:doi>10.3390/rs18091435</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1435</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1438">

	<title>Remote Sensing, Vol. 18, Pages 1438: Long-Term Wildfire Emissions and Smoke-Plume Dynamics in Greece</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1438</link>
	<description>This study investigates long-term wildfire emissions and smoke-plume geospatial characteristics in Greece by analyzing a multi-pollutant dataset spanning January 2003 to August 2025. Details of emissions of carbon monoxide (CO), carbon dioxide (CO2), methane (CH4), particulate matter (PM2.5), organic carbon (OC), and black carbon (BC) were derived from the Global Fire Assimilation System (GFAS), which converts MODIS fire radiative power into trace gas and aerosol fluxes at 0.1&amp;deg; resolution, and also accounts for the land type. Burned-area statistics from the European Forest Fire Information System (EFFIS) were used for cross-validation. Data were processed into daily, monthly, annual, and cumulative time series, with spatial mapping at the municipality scale and information regarding long-term trends. The analysis shows that while there are several sizeable wildfire events in the country every year, the bulk of the total of Greek wildfire emissions for the last 23 years is attributable to a few extreme fire seasons (2007, 2021, and 2023) that produced abrupt emission surges and accounted for a disproportionate share of national totals. Analysis of spatial data identifies the areas of Evia, East Attica, Messinia, and Evros as persistent emission hotspots. Although wildfire CO2 emissions are generally a minor fraction of Greece&amp;rsquo;s anthropogenic totals (&amp;lt;5%), they reached 15&amp;ndash;17% during peak fire years. Plume-injection height analysis reveals that most smoke remains below ~1 km but can reach 3&amp;ndash;6 km during extreme events, facilitating long-range transport. Overall, the dataset demonstrates a shift toward more intense and concentrated wildfire events in recent years, highlighting both their growing climatic relevance and their acute impacts on regional air quality.</description>
	<pubDate>2026-05-05</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1438: Long-Term Wildfire Emissions and Smoke-Plume Dynamics in Greece</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1438">doi: 10.3390/rs18091438</a></p>
	<p>Authors:
		Thanos Kourantos
		Anna Kampouri
		Marios Mermigkas
		Konstantinos Michailidis
		Apostolos Voulgarakis
		Mark Parrington
		Dimitris Vallianatos
		Dimitris Melas
		Ioannis Kioutsioukis
		Vassilis Amiridis
		</p>
	<p>This study investigates long-term wildfire emissions and smoke-plume geospatial characteristics in Greece by analyzing a multi-pollutant dataset spanning January 2003 to August 2025. Details of emissions of carbon monoxide (CO), carbon dioxide (CO2), methane (CH4), particulate matter (PM2.5), organic carbon (OC), and black carbon (BC) were derived from the Global Fire Assimilation System (GFAS), which converts MODIS fire radiative power into trace gas and aerosol fluxes at 0.1&deg; resolution, and also accounts for the land type. Burned-area statistics from the European Forest Fire Information System (EFFIS) were used for cross-validation. Data were processed into daily, monthly, annual, and cumulative time series, with spatial mapping at the municipality scale and information regarding long-term trends. The analysis shows that while there are several sizeable wildfire events in the country every year, the bulk of the total of Greek wildfire emissions for the last 23 years is attributable to a few extreme fire seasons (2007, 2021, and 2023) that produced abrupt emission surges and accounted for a disproportionate share of national totals. Analysis of spatial data identifies the areas of Evia, East Attica, Messinia, and Evros as persistent emission hotspots. Although wildfire CO2 emissions are generally a minor fraction of Greece&rsquo;s anthropogenic totals (&lt;5%), they reached 15&ndash;17% during peak fire years. Plume-injection height analysis reveals that most smoke remains below ~1 km but can reach 3&ndash;6 km during extreme events, facilitating long-range transport. Overall, the dataset demonstrates a shift toward more intense and concentrated wildfire events in recent years, highlighting both their growing climatic relevance and their acute impacts on regional air quality.</p>
	]]></content:encoded>

	<dc:title>Long-Term Wildfire Emissions and Smoke-Plume Dynamics in Greece</dc:title>
			<dc:creator>Thanos Kourantos</dc:creator>
			<dc:creator>Anna Kampouri</dc:creator>
			<dc:creator>Marios Mermigkas</dc:creator>
			<dc:creator>Konstantinos Michailidis</dc:creator>
			<dc:creator>Apostolos Voulgarakis</dc:creator>
			<dc:creator>Mark Parrington</dc:creator>
			<dc:creator>Dimitris Vallianatos</dc:creator>
			<dc:creator>Dimitris Melas</dc:creator>
			<dc:creator>Ioannis Kioutsioukis</dc:creator>
			<dc:creator>Vassilis Amiridis</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091438</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-05-05</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-05-05</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1438</prism:startingPage>
		<prism:doi>10.3390/rs18091438</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1438</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1436">

	<title>Remote Sensing, Vol. 18, Pages 1436: Changes in Glaciers of the Vakhsh River Basin, Tajikistan Under Global Climate Change</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1436</link>
	<description>The VRB represents one of the most important glacierized regions in the upper Amu Darya Basin (UADB), where glacier and snow dynamics play a key role in regional water resources. This study investigates glacier changes in the VRB during 2000&amp;ndash;2025 based on multi-source remote sensing and GIS analysis, while long-term climatic variability since 1970 is used to provide background context for regional climate conditions. The results show a significant reduction in glacier area from 4440.9 km2 in 2000 to 3955.2 km2 in 2025, corresponding to a loss of 485.7 km2 (10.94%). The glaciers are mainly distributed on northern and northeastern slopes at elevations between 4000 and 5000 m a.s.l., where climatic conditions favor their preservation. The basin also contains numerous surge-type glaciers, accounting for approximately 60% of all surge-type glaciers in the Pamir region, with advances ranging from 0.4 to 3.6 km. Climatic analysis indicates a warming trend of 0.15&amp;ndash;0.31 &amp;deg;C per decade during 1970&amp;ndash;2025, accompanied by pronounced seasonal variability in snow cover and gradual decreases in surface albedo associated with increased dust and black carbon concentrations. Glacier thinning is particularly evident in the lower glacier zones, while hydrological analysis shows that glacier and snow meltwater strongly influence river runoff. These results highlight the sensitivity of glaciers in the VRB to climatic and environmental changes and emphasize the importance of continued monitoring and adaptive water resource management in the VRB.</description>
	<pubDate>2026-05-05</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1436: Changes in Glaciers of the Vakhsh River Basin, Tajikistan Under Global Climate Change</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1436">doi: 10.3390/rs18091436</a></p>
	<p>Authors:
		Farhod Nasrulloev
		Yaning Chen
		Aminjon Gulakhmadov
		Amirkhamza Murodov
		Xueqi Zhang
		</p>
	<p>The VRB represents one of the most important glacierized regions in the upper Amu Darya Basin (UADB), where glacier and snow dynamics play a key role in regional water resources. This study investigates glacier changes in the VRB during 2000&ndash;2025 based on multi-source remote sensing and GIS analysis, while long-term climatic variability since 1970 is used to provide background context for regional climate conditions. The results show a significant reduction in glacier area from 4440.9 km2 in 2000 to 3955.2 km2 in 2025, corresponding to a loss of 485.7 km2 (10.94%). The glaciers are mainly distributed on northern and northeastern slopes at elevations between 4000 and 5000 m a.s.l., where climatic conditions favor their preservation. The basin also contains numerous surge-type glaciers, accounting for approximately 60% of all surge-type glaciers in the Pamir region, with advances ranging from 0.4 to 3.6 km. Climatic analysis indicates a warming trend of 0.15&ndash;0.31 &deg;C per decade during 1970&ndash;2025, accompanied by pronounced seasonal variability in snow cover and gradual decreases in surface albedo associated with increased dust and black carbon concentrations. Glacier thinning is particularly evident in the lower glacier zones, while hydrological analysis shows that glacier and snow meltwater strongly influence river runoff. These results highlight the sensitivity of glaciers in the VRB to climatic and environmental changes and emphasize the importance of continued monitoring and adaptive water resource management in the VRB.</p>
	]]></content:encoded>

	<dc:title>Changes in Glaciers of the Vakhsh River Basin, Tajikistan Under Global Climate Change</dc:title>
			<dc:creator>Farhod Nasrulloev</dc:creator>
			<dc:creator>Yaning Chen</dc:creator>
			<dc:creator>Aminjon Gulakhmadov</dc:creator>
			<dc:creator>Amirkhamza Murodov</dc:creator>
			<dc:creator>Xueqi Zhang</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091436</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-05-05</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-05-05</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1436</prism:startingPage>
		<prism:doi>10.3390/rs18091436</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1436</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1434">

	<title>Remote Sensing, Vol. 18, Pages 1434: Assessing Satellite-Based Data Products Estimating Daily Means of Solar Irradiance at Surface over South Cameroon Plateau and Potential Improvements</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1434</link>
	<description>The assessment of satellite-derived solar radiation products has been performed over several parts of the world by various authors. The case of Central Africa has so far hardly been addressed. This study takes a step forward by evaluating the performance of three existing state-of-the-art satellite-based data products, namely CAMS-RAD 4.6, CERES SYN1deg Ed4.2 and SARAH-3, in estimating the daily mean surface solar irradiance at five ground-based stations located in the South Cameroon plateau. The correlation coefficient varies between 0.59 and 0.92, with the highest level always seen for CAMS-RAD 4.6 data at each station. The bias (RMSE) is large and always positive for satellite products, confirming a general overestimation. It ranges between 34 W m&amp;ndash;2 and 77 W m&amp;ndash;2 (40 W m&amp;ndash;2 and 86 W m&amp;ndash;2), i.e., 22 % and 53 % (26 % and 60 %) in relative value. The lowest (highest) bias is seen with CERES SYN1deg Ed4.2 (SARAH-3) data when each station is taken individually. An approach for improvement based on a simple linear regression model without the intercept was developed with CAMS data. In general, the approach significantly reduces the bias by at least 30 W m&amp;ndash;2, i.e., 20 % in relative value at each station. The RMSE also clearly reduces, by at least 30 W m&amp;ndash;2, i.e., a reduction of at least 20 % in relative value. This work shows the way toward further improvements.</description>
	<pubDate>2026-05-05</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1434: Assessing Satellite-Based Data Products Estimating Daily Means of Solar Irradiance at Surface over South Cameroon Plateau and Potential Improvements</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1434">doi: 10.3390/rs18091434</a></p>
	<p>Authors:
		Delphin Aymar Ngah Onana
		Pascal Brice Owona Atangana
		Murielle Mbuko Tcheutchoua
		William Wandji Nyamsi
		</p>
	<p>The assessment of satellite-derived solar radiation products has been performed over several parts of the world by various authors. The case of Central Africa has so far hardly been addressed. This study takes a step forward by evaluating the performance of three existing state-of-the-art satellite-based data products, namely CAMS-RAD 4.6, CERES SYN1deg Ed4.2 and SARAH-3, in estimating the daily mean surface solar irradiance at five ground-based stations located in the South Cameroon plateau. The correlation coefficient varies between 0.59 and 0.92, with the highest level always seen for CAMS-RAD 4.6 data at each station. The bias (RMSE) is large and always positive for satellite products, confirming a general overestimation. It ranges between 34 W m&ndash;2 and 77 W m&ndash;2 (40 W m&ndash;2 and 86 W m&ndash;2), i.e., 22 % and 53 % (26 % and 60 %) in relative value. The lowest (highest) bias is seen with CERES SYN1deg Ed4.2 (SARAH-3) data when each station is taken individually. An approach for improvement based on a simple linear regression model without the intercept was developed with CAMS data. In general, the approach significantly reduces the bias by at least 30 W m&ndash;2, i.e., 20 % in relative value at each station. The RMSE also clearly reduces, by at least 30 W m&ndash;2, i.e., a reduction of at least 20 % in relative value. This work shows the way toward further improvements.</p>
	]]></content:encoded>

	<dc:title>Assessing Satellite-Based Data Products Estimating Daily Means of Solar Irradiance at Surface over South Cameroon Plateau and Potential Improvements</dc:title>
			<dc:creator>Delphin Aymar Ngah Onana</dc:creator>
			<dc:creator>Pascal Brice Owona Atangana</dc:creator>
			<dc:creator>Murielle Mbuko Tcheutchoua</dc:creator>
			<dc:creator>William Wandji Nyamsi</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091434</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-05-05</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-05-05</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1434</prism:startingPage>
		<prism:doi>10.3390/rs18091434</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1434</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1433">

	<title>Remote Sensing, Vol. 18, Pages 1433: Impact of Data Modality and Batch Normalization Layers on Very High-Resolution Impervious Surface Mapping Using DeepLabv3+ and U-Net Under Regional Cross-City and Cross-Season Domain Shifts</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1433</link>
	<description>Urban planning, climatology, or hydrology require continuous and spatially explicit information about impervious surfaces. Semantic segmentation using very high-resolution remote sensing data increased the performance of their detection. However, semantic segmentation models (SSMs) suffer from domains shifts when applied across cities or seasons. While domain adaptation (DA) techniques exist, the current literature provides little information on the level of sensitivity expected for baseline SSMs in mapping impervious surfaces in such scenarios. This study evaluates how data modality (e. g. spectral or height information) and adaptive batch normalization (AdaBN) affect the robustness of SSMs in cross-city and cross-season scenarios. Potsdam and Vaihingen benchmark datasets were used and merged into classes of impervious surfaces, buildings, and background. The impervious surface class was found to be the most sensitive to cross-domain shifts. Multimodal datasets and AdaBN increased model robustness, while in comparison, the impact of AdaBN was 3.46 percentage points lower regarding the mean intersection over union (mIoU). The combination of multimodal datasets and AdaBN exhibited the best results throughout the experiments, increasing mIoU by an additional 10.06 percentage points compared to the multimodal model versions. When DA techniques are unavailable, using multimodal datasets in combination with AdaBN holds a practical approach for cross-domain scenarios in impervious surface mapping.</description>
	<pubDate>2026-05-04</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1433: Impact of Data Modality and Batch Normalization Layers on Very High-Resolution Impervious Surface Mapping Using DeepLabv3+ and U-Net Under Regional Cross-City and Cross-Season Domain Shifts</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1433">doi: 10.3390/rs18091433</a></p>
	<p>Authors:
		Jan-Philipp Langenkamp
		Andreas Rienow
		</p>
	<p>Urban planning, climatology, or hydrology require continuous and spatially explicit information about impervious surfaces. Semantic segmentation using very high-resolution remote sensing data increased the performance of their detection. However, semantic segmentation models (SSMs) suffer from domains shifts when applied across cities or seasons. While domain adaptation (DA) techniques exist, the current literature provides little information on the level of sensitivity expected for baseline SSMs in mapping impervious surfaces in such scenarios. This study evaluates how data modality (e. g. spectral or height information) and adaptive batch normalization (AdaBN) affect the robustness of SSMs in cross-city and cross-season scenarios. Potsdam and Vaihingen benchmark datasets were used and merged into classes of impervious surfaces, buildings, and background. The impervious surface class was found to be the most sensitive to cross-domain shifts. Multimodal datasets and AdaBN increased model robustness, while in comparison, the impact of AdaBN was 3.46 percentage points lower regarding the mean intersection over union (mIoU). The combination of multimodal datasets and AdaBN exhibited the best results throughout the experiments, increasing mIoU by an additional 10.06 percentage points compared to the multimodal model versions. When DA techniques are unavailable, using multimodal datasets in combination with AdaBN holds a practical approach for cross-domain scenarios in impervious surface mapping.</p>
	]]></content:encoded>

	<dc:title>Impact of Data Modality and Batch Normalization Layers on Very High-Resolution Impervious Surface Mapping Using DeepLabv3+ and U-Net Under Regional Cross-City and Cross-Season Domain Shifts</dc:title>
			<dc:creator>Jan-Philipp Langenkamp</dc:creator>
			<dc:creator>Andreas Rienow</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091433</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-05-04</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-05-04</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1433</prism:startingPage>
		<prism:doi>10.3390/rs18091433</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1433</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1432">

	<title>Remote Sensing, Vol. 18, Pages 1432: DSFNet: A Directional Statistical Fusion Network for Cloud and Cloud Shadow Segmentation</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1432</link>
	<description>Accurate cloud and cloud shadow segmentation is a critical prerequisite for remote sensing image preprocessing. However, this task remains challenging due to the directional continuity of projected cloud shadows, the radiometric ambiguity between low-reflectance shadows and other dark surfaces, and the difficulty of preserving semantic consistency and fine boundaries in complex scenes. To address these issues, this paper proposes a Directional Statistical Fusion Network (DSFNet) based on an enhanced DeepLabV3+ architecture. Specifically, a Directional Scale Refinement Module (DSRM) is introduced in parallel with Atrous Spatial Pyramid Pooling to strengthen the representation of direction-sensitive cloud-shadow structures and multi-scale cloud regions. An Adaptive Statistical Context Attention (ASCA) module is further designed to perform robust feature modulation by jointly exploiting global statistics, edge-aware statistics, and median-based normalization, thereby suppressing anomalous responses under heterogeneous backgrounds. In the decoder, an Adaptive Grouped Multi-scale Fusion (AGMF) module is employed to adaptively fuse shallow detail features and high-level semantic features through discrepancy-guided grouped gating, improving structural consistency and boundary recovery. In addition, a hybrid loss is adopted to further optimize segmentation. Experiments on the GF1_WHU dataset show that DSFNet achieves 76.97% mIoU, demonstrating strong effectiveness and robustness in complex remote sensing scenes.</description>
	<pubDate>2026-05-04</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1432: DSFNet: A Directional Statistical Fusion Network for Cloud and Cloud Shadow Segmentation</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1432">doi: 10.3390/rs18091432</a></p>
	<p>Authors:
		Yuqi Fang
		Zhiyong Fan
		Min Xia
		Ni Li
		Xiaolin Yang
		</p>
	<p>Accurate cloud and cloud shadow segmentation is a critical prerequisite for remote sensing image preprocessing. However, this task remains challenging due to the directional continuity of projected cloud shadows, the radiometric ambiguity between low-reflectance shadows and other dark surfaces, and the difficulty of preserving semantic consistency and fine boundaries in complex scenes. To address these issues, this paper proposes a Directional Statistical Fusion Network (DSFNet) based on an enhanced DeepLabV3+ architecture. Specifically, a Directional Scale Refinement Module (DSRM) is introduced in parallel with Atrous Spatial Pyramid Pooling to strengthen the representation of direction-sensitive cloud-shadow structures and multi-scale cloud regions. An Adaptive Statistical Context Attention (ASCA) module is further designed to perform robust feature modulation by jointly exploiting global statistics, edge-aware statistics, and median-based normalization, thereby suppressing anomalous responses under heterogeneous backgrounds. In the decoder, an Adaptive Grouped Multi-scale Fusion (AGMF) module is employed to adaptively fuse shallow detail features and high-level semantic features through discrepancy-guided grouped gating, improving structural consistency and boundary recovery. In addition, a hybrid loss is adopted to further optimize segmentation. Experiments on the GF1_WHU dataset show that DSFNet achieves 76.97% mIoU, demonstrating strong effectiveness and robustness in complex remote sensing scenes.</p>
	]]></content:encoded>

	<dc:title>DSFNet: A Directional Statistical Fusion Network for Cloud and Cloud Shadow Segmentation</dc:title>
			<dc:creator>Yuqi Fang</dc:creator>
			<dc:creator>Zhiyong Fan</dc:creator>
			<dc:creator>Min Xia</dc:creator>
			<dc:creator>Ni Li</dc:creator>
			<dc:creator>Xiaolin Yang</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091432</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-05-04</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-05-04</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1432</prism:startingPage>
		<prism:doi>10.3390/rs18091432</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1432</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1431">

	<title>Remote Sensing, Vol. 18, Pages 1431: Precision Estimation of Aboveground Carbon Stock in Acidosasa edulis Bamboo Forests: A Fusion Approach with UAV-LiDAR, Allometric Equations, and Machine Learning</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1431</link>
	<description>As a fast-growing and multifunctional crop, bamboo plays a pivotal role in food security and climate change mitigation by leveraging its high carbon sequestration potential. Monitoring aboveground carbon (AGC) stock in bamboo forests is crucial for guiding field management, growth observation, and yield prediction. Unmanned aerial vehicle (UAV)-based point cloud sensors offer a rapid and scalable solution for measuring bamboo AGC. This study evaluates the potential of UAV-LiDAR and machine learning (ML) for organ-level AGC estimation in bamboo forests. From LiDAR point clouds, we extracted structural features&amp;mdash;including height, density, canopy, and intensity metrics&amp;mdash;aggregated by mean plot-level metric (Mean-PM) and maximum plot-level metric (Max-PM) values at a 1 m2 grid scale. Key predictors were selected using ML-based recursive feature elimination (ML-RFE) to develop organ-specific AGC inversion models. Results showed that organ-specific carbon content and allometric equations effectively eliminated biases associated with a uniform coefficient. Max-PM features outperformed Mean-PM features in stem and leaf AGCs, with the XGBoost and Random Forest models achieving the highest accuracy (R2 = 0.82 for stems, 0.73 for leaves). Height percentiles and canopy structural metrics emerged as dominant predictors. This UAV-LiDAR-ML framework provides a cost-effective solution for precise bamboo carbon estimation, offering critical insights for carbon neutrality management and informed decision-making in bamboo forest ecosystems.</description>
	<pubDate>2026-05-04</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1431: Precision Estimation of Aboveground Carbon Stock in Acidosasa edulis Bamboo Forests: A Fusion Approach with UAV-LiDAR, Allometric Equations, and Machine Learning</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1431">doi: 10.3390/rs18091431</a></p>
	<p>Authors:
		Xiaoyu Guo
		Weisen Wang
		Zhanghua Xu
		Mingjing Li
		Kele Yang
		Yan Tan
		Ze Shi
		Haohao Yue
		Juncheng Zhang
		</p>
	<p>As a fast-growing and multifunctional crop, bamboo plays a pivotal role in food security and climate change mitigation by leveraging its high carbon sequestration potential. Monitoring aboveground carbon (AGC) stock in bamboo forests is crucial for guiding field management, growth observation, and yield prediction. Unmanned aerial vehicle (UAV)-based point cloud sensors offer a rapid and scalable solution for measuring bamboo AGC. This study evaluates the potential of UAV-LiDAR and machine learning (ML) for organ-level AGC estimation in bamboo forests. From LiDAR point clouds, we extracted structural features&amp;mdash;including height, density, canopy, and intensity metrics&amp;mdash;aggregated by mean plot-level metric (Mean-PM) and maximum plot-level metric (Max-PM) values at a 1 m2 grid scale. Key predictors were selected using ML-based recursive feature elimination (ML-RFE) to develop organ-specific AGC inversion models. Results showed that organ-specific carbon content and allometric equations effectively eliminated biases associated with a uniform coefficient. Max-PM features outperformed Mean-PM features in stem and leaf AGCs, with the XGBoost and Random Forest models achieving the highest accuracy (R2 = 0.82 for stems, 0.73 for leaves). Height percentiles and canopy structural metrics emerged as dominant predictors. This UAV-LiDAR-ML framework provides a cost-effective solution for precise bamboo carbon estimation, offering critical insights for carbon neutrality management and informed decision-making in bamboo forest ecosystems.</p>
	]]></content:encoded>

	<dc:title>Precision Estimation of Aboveground Carbon Stock in Acidosasa edulis Bamboo Forests: A Fusion Approach with UAV-LiDAR, Allometric Equations, and Machine Learning</dc:title>
			<dc:creator>Xiaoyu Guo</dc:creator>
			<dc:creator>Weisen Wang</dc:creator>
			<dc:creator>Zhanghua Xu</dc:creator>
			<dc:creator>Mingjing Li</dc:creator>
			<dc:creator>Kele Yang</dc:creator>
			<dc:creator>Yan Tan</dc:creator>
			<dc:creator>Ze Shi</dc:creator>
			<dc:creator>Haohao Yue</dc:creator>
			<dc:creator>Juncheng Zhang</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091431</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-05-04</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-05-04</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1431</prism:startingPage>
		<prism:doi>10.3390/rs18091431</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1431</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1430">

	<title>Remote Sensing, Vol. 18, Pages 1430: TVLightFormer: A Lightweight Cross-Modal Transformer for Language-Guided Target Localization in SAR Imagery</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1430</link>
	<description>We study language-guided target localization in synthetic aperture radar (SAR) imagery for deployment on resource-constrained platforms. Existing vision-language models either rely on heavy backbones unsuitable for edge devices or are designed for natural images, overlooking SAR-specific characteristics such as speckle noise, weak scattering responses, and geometric distortions. The proposed model, TVLightFormer, combines a lightweight dual-modal encoder (MobileNetV3 and TinyBERT) with a grouped-query attention (GQA) mechanism for efficient cross-modal interaction and an activation-free lightweight feature pyramid network (LFPN) to handle scale variation while preserving weak scattering signals. The individual modules are not claimed as newly invented components; the main contribution lies in their SAR-aware integration for edge-oriented cross-modal localization. We evaluate the model on five remote sensing datasets&amp;mdash;SOMA-1M, ATRNet-STAR, GAIA, MLRSNet, and SODAS&amp;mdash;under a unified localization setting, and we explicitly discuss the limitations introduced by weak or scene-level annotations. The results show that TVLightFormer achieves a favorable trade-off between accuracy and efficiency, reaching an average mIoU of 69.8% with 27.4 M parameters and 9.7 GFLOPs. Ablation studies quantify the contribution of each component. The model is suited for edge-oriented scenarios where computational resources are limited. We also provide a critical analysis of failure cases, SAR-specific disturbance factors, loss-function choices, and dataset-protocol sensitivity.</description>
	<pubDate>2026-05-04</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1430: TVLightFormer: A Lightweight Cross-Modal Transformer for Language-Guided Target Localization in SAR Imagery</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1430">doi: 10.3390/rs18091430</a></p>
	<p>Authors:
		Yuqiao Zhong
		Haoqi Quan
		Chenyu Nie
		Yingmei Wei
		Yanming Guo
		</p>
	<p>We study language-guided target localization in synthetic aperture radar (SAR) imagery for deployment on resource-constrained platforms. Existing vision-language models either rely on heavy backbones unsuitable for edge devices or are designed for natural images, overlooking SAR-specific characteristics such as speckle noise, weak scattering responses, and geometric distortions. The proposed model, TVLightFormer, combines a lightweight dual-modal encoder (MobileNetV3 and TinyBERT) with a grouped-query attention (GQA) mechanism for efficient cross-modal interaction and an activation-free lightweight feature pyramid network (LFPN) to handle scale variation while preserving weak scattering signals. The individual modules are not claimed as newly invented components; the main contribution lies in their SAR-aware integration for edge-oriented cross-modal localization. We evaluate the model on five remote sensing datasets&amp;mdash;SOMA-1M, ATRNet-STAR, GAIA, MLRSNet, and SODAS&amp;mdash;under a unified localization setting, and we explicitly discuss the limitations introduced by weak or scene-level annotations. The results show that TVLightFormer achieves a favorable trade-off between accuracy and efficiency, reaching an average mIoU of 69.8% with 27.4 M parameters and 9.7 GFLOPs. Ablation studies quantify the contribution of each component. The model is suited for edge-oriented scenarios where computational resources are limited. We also provide a critical analysis of failure cases, SAR-specific disturbance factors, loss-function choices, and dataset-protocol sensitivity.</p>
	]]></content:encoded>

	<dc:title>TVLightFormer: A Lightweight Cross-Modal Transformer for Language-Guided Target Localization in SAR Imagery</dc:title>
			<dc:creator>Yuqiao Zhong</dc:creator>
			<dc:creator>Haoqi Quan</dc:creator>
			<dc:creator>Chenyu Nie</dc:creator>
			<dc:creator>Yingmei Wei</dc:creator>
			<dc:creator>Yanming Guo</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091430</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-05-04</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-05-04</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1430</prism:startingPage>
		<prism:doi>10.3390/rs18091430</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1430</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1429">

	<title>Remote Sensing, Vol. 18, Pages 1429: Attribution of Evapotranspiration Variation in the Yellow River Basin with a Simplified Water&amp;ndash;Energy Partitioning Method Based on Multi-Source Datasets</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1429</link>
	<description>Terrestrial evapotranspiration (ET) serves as a critical nexus between the hydrological cycle and energy process, which is highly sensitive to climate change (CC) and underlying characteristic change (ULCC), particularly in the regions with rapid environmental changes. This study designed a data combination scheme for investigating the ET variation and quantifying its drivers in the Yellow River Basin (YRB), using a simplified water&amp;ndash;energy partitioning (WEP) method based on nine multi-source ET, precipitation and potential ET datasets. Results reveal that all ET datasets demonstrate significant increasing trends with the rates of 0.82&amp;ndash;2.04 mm/yr2 during the period of 1982&amp;ndash;2022, and the ET increments are 13.4&amp;ndash;45.2 mm/yr from the base period (1982&amp;ndash;2000) to the change period (2001&amp;ndash;2022). For the whole YRB, ULCC has slightly larger averaged absolute and relative contribution (15.8 mm/yr and 52.9%) than those of CC (12.2 mm/yr and 47.1%) to ET increases among the different dataset triplets. For most sub-basins, ULCC exhibits higher contributions than CC, with relative contributions of nearly two-thirds, although considerable variabilities exist in their absolute contributions. However, the opposite results occur in the source region of the YRB, where CC has a primary contribution to ET variation. In summary, while ULCC is the primary driver of ET increases, its estimated contributions entail substantial uncertainty. In contrast, CC acts as a secondary driver, exhibiting greater robustness and lower sensitivity to multi-source dataset variability. This study provides a valuable multi-source-dataset-based ET attribution framework with the WEP method that advances our understanding of hydrological responses to the changing environment in the YRB.</description>
	<pubDate>2026-05-04</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1429: Attribution of Evapotranspiration Variation in the Yellow River Basin with a Simplified Water&amp;ndash;Energy Partitioning Method Based on Multi-Source Datasets</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1429">doi: 10.3390/rs18091429</a></p>
	<p>Authors:
		Dayang Wang
		Yanyu Ma
		Ya Huang
		Kaihao Long
		Shaobo Liu
		Xiaohang Ma
		Minghao Song
		Zequn Lin
		</p>
	<p>Terrestrial evapotranspiration (ET) serves as a critical nexus between the hydrological cycle and energy process, which is highly sensitive to climate change (CC) and underlying characteristic change (ULCC), particularly in the regions with rapid environmental changes. This study designed a data combination scheme for investigating the ET variation and quantifying its drivers in the Yellow River Basin (YRB), using a simplified water&amp;ndash;energy partitioning (WEP) method based on nine multi-source ET, precipitation and potential ET datasets. Results reveal that all ET datasets demonstrate significant increasing trends with the rates of 0.82&amp;ndash;2.04 mm/yr2 during the period of 1982&amp;ndash;2022, and the ET increments are 13.4&amp;ndash;45.2 mm/yr from the base period (1982&amp;ndash;2000) to the change period (2001&amp;ndash;2022). For the whole YRB, ULCC has slightly larger averaged absolute and relative contribution (15.8 mm/yr and 52.9%) than those of CC (12.2 mm/yr and 47.1%) to ET increases among the different dataset triplets. For most sub-basins, ULCC exhibits higher contributions than CC, with relative contributions of nearly two-thirds, although considerable variabilities exist in their absolute contributions. However, the opposite results occur in the source region of the YRB, where CC has a primary contribution to ET variation. In summary, while ULCC is the primary driver of ET increases, its estimated contributions entail substantial uncertainty. In contrast, CC acts as a secondary driver, exhibiting greater robustness and lower sensitivity to multi-source dataset variability. This study provides a valuable multi-source-dataset-based ET attribution framework with the WEP method that advances our understanding of hydrological responses to the changing environment in the YRB.</p>
	]]></content:encoded>

	<dc:title>Attribution of Evapotranspiration Variation in the Yellow River Basin with a Simplified Water&amp;ndash;Energy Partitioning Method Based on Multi-Source Datasets</dc:title>
			<dc:creator>Dayang Wang</dc:creator>
			<dc:creator>Yanyu Ma</dc:creator>
			<dc:creator>Ya Huang</dc:creator>
			<dc:creator>Kaihao Long</dc:creator>
			<dc:creator>Shaobo Liu</dc:creator>
			<dc:creator>Xiaohang Ma</dc:creator>
			<dc:creator>Minghao Song</dc:creator>
			<dc:creator>Zequn Lin</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091429</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-05-04</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-05-04</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1429</prism:startingPage>
		<prism:doi>10.3390/rs18091429</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1429</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1428">

	<title>Remote Sensing, Vol. 18, Pages 1428: Understanding the Optical Behavior and Spectral Signature of Dredging-Induced Plumes in Coastal Waters</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1428</link>
	<description>Dredging activities regularly occurring in near-shore and coastal waters generate turbid waters within the surface layer with high concentrations of suspended particulate matter collected in bottom sediments. The potential impact of these dredge plumes on natural ecosystems must be monitored using cost-effective methods and observations. Here, we investigate the biogeochemical and optical properties of dredge plumes selected mainly in European and African coastal waters. Laboratory analyses realized on numerous water samples collected in dredge plumes reveal (extremely) high water turbidity and high concentrations of inorganic particles in suspension, sometimes mixed with high concentrations of phytoplankton particles. The most peculiar optical property of these particles is a spectral light absorption coefficient significantly flatter than that of suspended particles in natural turbid waters (e.g., river plumes or estuarine maximum turbidity zones). This peculiar optical property is also detected on ocean color satellite data corrected for atmospheric effects, with a water reflectance signal higher than natural turbid waters at short visible wavebands (400&amp;ndash;550 nm). Such an atypical spectral signature, which can be detected and mapped from space, makes the operational monitoring of dredge plumes in coastal waters using high-spatial-resolution (e.g., Sentinel2-MSI) satellite data possible.</description>
	<pubDate>2026-05-04</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1428: Understanding the Optical Behavior and Spectral Signature of Dredging-Induced Plumes in Coastal Waters</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1428">doi: 10.3390/rs18091428</a></p>
	<p>Authors:
		David Doxaran
		Isabella Mayot
		Liesbeth De Keukelaere
		Robrecht Moelans
		Niels Verdoodt
		Els Knaeps
		</p>
	<p>Dredging activities regularly occurring in near-shore and coastal waters generate turbid waters within the surface layer with high concentrations of suspended particulate matter collected in bottom sediments. The potential impact of these dredge plumes on natural ecosystems must be monitored using cost-effective methods and observations. Here, we investigate the biogeochemical and optical properties of dredge plumes selected mainly in European and African coastal waters. Laboratory analyses realized on numerous water samples collected in dredge plumes reveal (extremely) high water turbidity and high concentrations of inorganic particles in suspension, sometimes mixed with high concentrations of phytoplankton particles. The most peculiar optical property of these particles is a spectral light absorption coefficient significantly flatter than that of suspended particles in natural turbid waters (e.g., river plumes or estuarine maximum turbidity zones). This peculiar optical property is also detected on ocean color satellite data corrected for atmospheric effects, with a water reflectance signal higher than natural turbid waters at short visible wavebands (400&amp;ndash;550 nm). Such an atypical spectral signature, which can be detected and mapped from space, makes the operational monitoring of dredge plumes in coastal waters using high-spatial-resolution (e.g., Sentinel2-MSI) satellite data possible.</p>
	]]></content:encoded>

	<dc:title>Understanding the Optical Behavior and Spectral Signature of Dredging-Induced Plumes in Coastal Waters</dc:title>
			<dc:creator>David Doxaran</dc:creator>
			<dc:creator>Isabella Mayot</dc:creator>
			<dc:creator>Liesbeth De Keukelaere</dc:creator>
			<dc:creator>Robrecht Moelans</dc:creator>
			<dc:creator>Niels Verdoodt</dc:creator>
			<dc:creator>Els Knaeps</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091428</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-05-04</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-05-04</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1428</prism:startingPage>
		<prism:doi>10.3390/rs18091428</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1428</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1427">

	<title>Remote Sensing, Vol. 18, Pages 1427: SCNAnet: Structure-Aware Contrastive with Noise-Augmented Network for Unsupervised Change Detection</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1427</link>
	<description>Unsupervised change detection (UCD) is a key technique in Earth observation, aiming to identify and quantify surface changes over time by analyzing multi-temporal remote sensing images without manual annotations. Unlike supervised approaches that rely on ground reference to directly guide discriminative semantic learning, UCD methods must construct their own reference. A mainstream strategy employs one temporal image as the reference and uses transformation models (e.g., style transfer networks) to align the other image in unchanged regions. Loss is then reduced by labeling hard-to-align pixels as &amp;ldquo;changes&amp;rdquo; and excluding them from the objective. However, this optimization process is dominated by style losses, which cause the model to learn to exclude regions that make only limited contributions to style-loss minimization, rather than to acquire discriminative representations of true geospatial changes. Such shortcut-driven optimization results in insufficient modeling of genuine change features and frequent misclassification of unchanged yet stylistically similar regions. To address these limitations, we propose SCNAnet, a novel framework that integrates three modules: a noise-perturbation consistency branch to suppress shortcut-driven learning, a structure-aware style transformation encoder to strengthen semantic representations of structural changes, and a frequency-attention decoder to refine the delineation of change regions. Extensive experiments on three benchmark datasets (GF-2, OSCD, and QuickBird) demonstrate the effectiveness of SCNAnet. Specifically, SCNAnet improves the F1 score by approximately 8% on the Montpellier dataset compared with the second-best method, demonstrating its effectiveness under challenging conditions.</description>
	<pubDate>2026-05-04</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1427: SCNAnet: Structure-Aware Contrastive with Noise-Augmented Network for Unsupervised Change Detection</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1427">doi: 10.3390/rs18091427</a></p>
	<p>Authors:
		Yijie Sun
		Qingxi Wu
		Nan Wang
		</p>
	<p>Unsupervised change detection (UCD) is a key technique in Earth observation, aiming to identify and quantify surface changes over time by analyzing multi-temporal remote sensing images without manual annotations. Unlike supervised approaches that rely on ground reference to directly guide discriminative semantic learning, UCD methods must construct their own reference. A mainstream strategy employs one temporal image as the reference and uses transformation models (e.g., style transfer networks) to align the other image in unchanged regions. Loss is then reduced by labeling hard-to-align pixels as &amp;ldquo;changes&amp;rdquo; and excluding them from the objective. However, this optimization process is dominated by style losses, which cause the model to learn to exclude regions that make only limited contributions to style-loss minimization, rather than to acquire discriminative representations of true geospatial changes. Such shortcut-driven optimization results in insufficient modeling of genuine change features and frequent misclassification of unchanged yet stylistically similar regions. To address these limitations, we propose SCNAnet, a novel framework that integrates three modules: a noise-perturbation consistency branch to suppress shortcut-driven learning, a structure-aware style transformation encoder to strengthen semantic representations of structural changes, and a frequency-attention decoder to refine the delineation of change regions. Extensive experiments on three benchmark datasets (GF-2, OSCD, and QuickBird) demonstrate the effectiveness of SCNAnet. Specifically, SCNAnet improves the F1 score by approximately 8% on the Montpellier dataset compared with the second-best method, demonstrating its effectiveness under challenging conditions.</p>
	]]></content:encoded>

	<dc:title>SCNAnet: Structure-Aware Contrastive with Noise-Augmented Network for Unsupervised Change Detection</dc:title>
			<dc:creator>Yijie Sun</dc:creator>
			<dc:creator>Qingxi Wu</dc:creator>
			<dc:creator>Nan Wang</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091427</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-05-04</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-05-04</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1427</prism:startingPage>
		<prism:doi>10.3390/rs18091427</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1427</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1426">

	<title>Remote Sensing, Vol. 18, Pages 1426: Long-Term Assessment of Inter-Sensor Radiometric Biases Among SNPP, NOAA-20, NOAA-21 ATMS, and NOAA-19 AMSU-A Instruments Using the NOAA ICVS Framework</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1426</link>
	<description>This study evaluates mission-long inter-sensor radiometric calibration biases in Sensor Data Record (SDR) and/or Temperature Data Record (TDR) radiances from NOAA microwave sounders, including Advanced Technology Microwave Sounder (ATMS) (Suomi National Polar-orbiting Partnership or SNPP, NOAA-20, NOAA-21) and Advanced Microwave Sounding Unit-A (AMSU-A) (NOAA-19). Using four complementary validation techniques within the Inter-Sensor Radiometric Bias Assessment (iSensor-RCBA) system&amp;mdash;32-day averaging, Community Radiative Transfer Model (CRTM) Double Difference (DD), Simultaneously Nadir Overpass (SNO), and sensor-DD via SNO&amp;mdash;we characterize long-term performance. Results indicate that the SDR/TDR radiance quality remains stable and generally meets scientific requirements throughout their operational lifetimes with minimal anomalies; observed anomalies were infrequent and primarily correlated with calibration-table updates or spacecraft events or instrument degradation. Moreover, this research examines how radiometric calibration biases for the three ATMS instruments vary with Earth scene radiance or temperatures using the CRTM and SNO methods, as well as the radiance-dependency of inter-sensor calibration biases across the three instruments. Notably, due to its exceptional stability over 14 years, despite an approximate two-month data gap, the SNPP ATMS TDR and SDR datasets are recommended as the ideal reference to link legacy AMSU-A and Microwave Humidity Sounder (MHS) with Joint Polar Satellite System (JPSS), QuickSounder, and MetOp-Second Generation (MetOp-SG) microwave instruments. Beyond quantifying data quality, our multi-method framework with iSensor-RCBA effectively diagnosed critical issues, including a simulation error for CRTM ATMS radiance related to the CRTM spectral-response approximation and a NOAA-19 AMSU-A channel-8 performance anomaly. 
These findings confirm the long-term integrity of NOAA microwave sounder records and reinforce the value of integrated cross-sensor calibration assessments.</description>
	<pubDate>2026-05-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1426: Long-Term Assessment of Inter-Sensor Radiometric Biases Among SNPP, NOAA-20, NOAA-21 ATMS, and NOAA-19 AMSU-A Instruments Using the NOAA ICVS Framework</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1426">doi: 10.3390/rs18091426</a></p>
	<p>Authors:
		Banghua Yan
		Ninghai Sun
		Flavio Iturbide-Sanchez
		Changyong Cao
		Lihang Zhou
		</p>
	<p>This study evaluates mission-long inter-sensor radiometric calibration biases in Sensor Data Record (SDR) and/or Temperature Data Record (TDR) radiances from NOAA microwave sounders, including Advanced Technology Microwave Sounder (ATMS) (Suomi National Polar-orbiting Partnership or SNPP, NOAA-20, NOAA-21) and Advanced Microwave Sounding Unit-A (AMSU-A) (NOAA-19). Using four complementary validation techniques within the Inter-Sensor Radiometric Bias Assessment (iSensor-RCBA) system&amp;mdash;32-day averaging, Community Radiative Transfer Model (CRTM) Double Difference (DD), Simultaneously Nadir Overpass (SNO), and sensor-DD via SNO&amp;mdash;we characterize long-term performance. Results indicate that the SDR/TDR radiance quality remains stable and generally meets scientific requirements throughout their operational lifetimes with minimal anomalies; observed anomalies were infrequent and primarily correlated with calibration-table updates or spacecraft events or instrument degradation. Moreover, this research examines how radiometric calibration biases for the three ATMS instruments vary with Earth scene radiance or temperatures using the CRTM and SNO methods, as well as the radiance-dependency of inter-sensor calibration biases across the three instruments. Notably, due to its exceptional stability over 14 years, despite an approximate two-month data gap, the SNPP ATMS TDR and SDR datasets are recommended as the ideal reference to link legacy AMSU-A and Microwave Humidity Sounder (MHS) with Joint Polar Satellite System (JPSS), QuickSounder, and MetOp-Second Generation (MetOp-SG) microwave instruments. Beyond quantifying data quality, our multi-method framework with iSensor-RCBA effectively diagnosed critical issues, including a simulation error for CRTM ATMS radiance related to the CRTM spectral-response approximation and a NOAA-19 AMSU-A channel-8 performance anomaly. 
These findings confirm the long-term integrity of NOAA microwave sounder records and reinforce the value of integrated cross-sensor calibration assessments.</p>
	]]></content:encoded>

	<dc:title>Long-Term Assessment of Inter-Sensor Radiometric Biases Among SNPP, NOAA-20, NOAA-21 ATMS, and NOAA-19 AMSU-A Instruments Using the NOAA ICVS Framework</dc:title>
			<dc:creator>Banghua Yan</dc:creator>
			<dc:creator>Ninghai Sun</dc:creator>
			<dc:creator>Flavio Iturbide-Sanchez</dc:creator>
			<dc:creator>Changyong Cao</dc:creator>
			<dc:creator>Lihang Zhou</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091426</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-05-03</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-05-03</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1426</prism:startingPage>
		<prism:doi>10.3390/rs18091426</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1426</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1425">

	<title>Remote Sensing, Vol. 18, Pages 1425: Global Flood Vulnerability Model: Building-Level Assessment Using Multi-Source Remote Sensing</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1425</link>
	<description>Remote sensing enables building-level flood vulnerability assessment without field surveys, yet existing approaches require site-specific calibration or produce categorical outputs without physical interpretability. We present the Global Flood Vulnerability Model (GFVM), integrating six remotely sensed components (elevation, slope, topographic position index, distance to water, building height, and basement depth) through geographic context classification to quantify vulnerability from terrain and structural characteristics across coastal, fluvial, and pluvial settings. Building heights are extracted primarily from the Global Building Atlas, with gaps filled using a ConvNeXt neural network trained on high-resolution Light Detection and Ranging (LiDAR) ground truth from four cities (within-city MAE 1.35&amp;ndash;1.91 m, cross-city MAE 2.05&amp;ndash;3.47 m). Terrain metrics are derived from a combination of hierarchical digital elevation models (DEM) (USGS 3DEP 10 m, AHN LiDAR 0.5 m, UK Environment Agency DTM 1 m, Australia 5 m) and global datasets (NASADEM 30 m, Copernicus GLO-30). Hydrographic networks are sourced from OpenStreetMap and Natural Earth. Implementation through Google Earth Engine requires only coordinates as input, returning a five-level vulnerability index with multi-hazard decomposition (fluvial, coastal, pluvial) and SHapley Additive exPlanations (SHAP)-based attribution identifying dominant drivers. Validation across 183 independent locations in Germany, UK, and USA demonstrates robust performance: Area Under Curve 0.855 for separating flooded from non-flooded sites, weighted Cohen&amp;rsquo;s kappa 0.493 across regulatory zones, and Spearman &amp;rho; 0.746 against Federal Emergency Management Agency (FEMA) classifications. 
Sensitivity analysis across 625 parameter configurations confirms stability, and DEM resolution experiments show that global 30 m elevation data produces category reclassification in only 5.3&amp;amp;ndash;8.6% of locations compared to high-resolution sources. Application to the 2024 Kazakhstan floods identifies 118 high-vulnerability locations across 581 assessment points, with vulnerability patterns matching documented inundation. GFVM advances remote sensing applications for disaster risk assessment by demonstrating that multi-source geospatial data fusion enables building-level vulnerability screening without local calibration or field surveys.</description>
	<pubDate>2026-05-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1425: Global Flood Vulnerability Model: Building-Level Assessment Using Multi-Source Remote Sensing</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1425">doi: 10.3390/rs18091425</a></p>
	<p>Authors:
		Sakiru Olarewaju Olagunju
		Ademi Sharipova
		Adina Serikkyzy
		Dariga Satybaldiyeva
		Huseyin Atakan Varol
		Ferhat Karaca
		</p>
	<p>Remote sensing enables building-level flood vulnerability assessment without field surveys, yet existing approaches require site-specific calibration or produce categorical outputs without physical interpretability. We present the Global Flood Vulnerability Model (GFVM), integrating six remotely sensed components (elevation, slope, topographic position index, distance to water, building height, and basement depth) through geographic context classification to quantify vulnerability from terrain and structural characteristics across coastal, fluvial, and pluvial settings. Building heights are extracted primarily from the Global Building Atlas, with gaps filled using a ConvNeXt neural network trained on high-resolution Light Detection and Ranging (LiDAR) ground truth from four cities (within-city MAE 1.35&amp;amp;ndash;1.91 m, cross-city MAE 2.05&amp;amp;ndash;3.47 m). Terrain metrics are derived from a combination of hierarchical digital elevation models (DEM) (USGS 3DEP 10 m, AHN LiDAR 0.5 m, UK Environment Agency DTM 1 m, Australia 5 m) and global datasets (NASADEM 30 m, Copernicus GLO-30). Hydrographic networks are sourced from OpenStreetMap and Natural Earth. Implementation through Google Earth Engine requires only coordinates as input, returning a five-level vulnerability index with multi-hazard decomposition (fluvial, coastal, pluvial) and SHapley Additive exPlanations (SHAP)-based attribution identifying dominant drivers. Validation across 183 independent locations in Germany, UK, and USA demonstrates robust performance: Area Under Curve 0.855 for separating flooded from non-flooded sites, weighted Cohen&amp;amp;rsquo;s kappa 0.493 across regulatory zones, and Spearman &amp;amp;rho; 0.746 against Federal Emergency Management Agency (FEMA) classifications. 
Sensitivity analysis across 625 parameter configurations confirms stability, and DEM resolution experiments show that global 30 m elevation data produces category reclassification in only 5.3&amp;amp;ndash;8.6% of locations compared to high-resolution sources. Application to the 2024 Kazakhstan floods identifies 118 high-vulnerability locations across 581 assessment points, with vulnerability patterns matching documented inundation. GFVM advances remote sensing applications for disaster risk assessment by demonstrating that multi-source geospatial data fusion enables building-level vulnerability screening without local calibration or field surveys.</p>
	]]></content:encoded>

	<dc:title>Global Flood Vulnerability Model: Building-Level Assessment Using Multi-Source Remote Sensing</dc:title>
			<dc:creator>Sakiru Olarewaju Olagunju</dc:creator>
			<dc:creator>Ademi Sharipova</dc:creator>
			<dc:creator>Adina Serikkyzy</dc:creator>
			<dc:creator>Dariga Satybaldiyeva</dc:creator>
			<dc:creator>Huseyin Atakan Varol</dc:creator>
			<dc:creator>Ferhat Karaca</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091425</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-05-03</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-05-03</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1425</prism:startingPage>
		<prism:doi>10.3390/rs18091425</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1425</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1424">

	<title>Remote Sensing, Vol. 18, Pages 1424: Long-Term Surface Uplift Driven by Groundwater Recovery in Xi&amp;rsquo;an, China: InSAR Constraints on Aquifer Storage and Hydraulic Diffusivity</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1424</link>
	<description>Vertical land motion in urban areas is a critical manifestation of groundwater, directly affecting infrastructure stability and groundwater sustainability. While land subsidence caused by groundwater extraction has been widely investigated, the opposite process&amp;amp;mdash;surface uplift induced by groundwater recovery&amp;amp;mdash;remains poorly documented or understood, particularly regarding its hydrological mechanisms and potential hazards. Here, we integrate InSAR time-series analysis of Sentinel-1 imagery (2017&amp;amp;ndash;2025) with groundwater well records to quantify the spatial&amp;amp;ndash;temporal characteristics of uplift in Xi&amp;amp;rsquo;an, China, and to evaluate its hydrogeological drivers. Results reveal a persistent surface uplift zone south of the ancient city in Xi&amp;amp;rsquo;an, with rates up to 20 mm/yr. The uplift correlates closely with rising groundwater levels in the shallow confined aquifer, indicating a strong coupling between aquifer recharge and surface uplift. Calculated storage coefficients and hydraulic diffusivity values highlight marked spatial variations, constrained by some ground fissures that act as both mechanical discontinuities and hydrological barriers controlling pressure diffusion. Time-series analysis further identifies the eastward propagation of subsidence-to-uplift reversal in Yuhuazhai, an urban village with groundwater injection, which is used to quantify the diffusivity coefficients. Field investigations show that rapid groundwater rebound can lead to uplift-related hazards, such as basement seepage, underscoring that surface uplift must be considered alongside subsidence in urban water management.</description>
	<pubDate>2026-05-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1424: Long-Term Surface Uplift Driven by Groundwater Recovery in Xi&rsquo;an, China: InSAR Constraints on Aquifer Storage and Hydraulic Diffusivity</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1424">doi: 10.3390/rs18091424</a></p>
	<p>Authors:
		Weilai Sun
		Rongrong Zhou
		Xiaojuan Wu
		Teng Wang
		</p>
	<p>Vertical land motion in urban areas is a critical manifestation of groundwater, directly affecting infrastructure stability and groundwater sustainability. While land subsidence caused by groundwater extraction has been widely investigated, the opposite process&amp;amp;mdash;surface uplift induced by groundwater recovery&amp;amp;mdash;remains poorly documented or understood, particularly regarding its hydrological mechanisms and potential hazards. Here, we integrate InSAR time-series analysis of Sentinel-1 imagery (2017&amp;amp;ndash;2025) with groundwater well records to quantify the spatial&amp;amp;ndash;temporal characteristics of uplift in Xi&amp;amp;rsquo;an, China, and to evaluate its hydrogeological drivers. Results reveal a persistent surface uplift zone south of the ancient city in Xi&amp;amp;rsquo;an, with rates up to 20 mm/yr. The uplift correlates closely with rising groundwater levels in the shallow confined aquifer, indicating a strong coupling between aquifer recharge and surface uplift. Calculated storage coefficients and hydraulic diffusivity values highlight marked spatial variations, constrained by some ground fissures that act as both mechanical discontinuities and hydrological barriers controlling pressure diffusion. Time-series analysis further identifies the eastward propagation of subsidence-to-uplift reversal in Yuhuazhai, an urban village with groundwater injection, which is used to quantify the diffusivity coefficients. Field investigations show that rapid groundwater rebound can lead to uplift-related hazards, such as basement seepage, underscoring that surface uplift must be considered alongside subsidence in urban water management.</p>
	]]></content:encoded>

	<dc:title>Long-Term Surface Uplift Driven by Groundwater Recovery in Xi&amp;rsquo;an, China: InSAR Constraints on Aquifer Storage and Hydraulic Diffusivity</dc:title>
			<dc:creator>Weilai Sun</dc:creator>
			<dc:creator>Rongrong Zhou</dc:creator>
			<dc:creator>Xiaojuan Wu</dc:creator>
			<dc:creator>Teng Wang</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091424</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-05-03</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-05-03</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1424</prism:startingPage>
		<prism:doi>10.3390/rs18091424</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1424</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1423">

	<title>Remote Sensing, Vol. 18, Pages 1423: A Hierarchical Multi-Scale Denoising Framework for UAV-Derived Digital Subsidence Models in Coal Mining Areas</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1423</link>
	<description>Mining-induced subsidence monitoring is essential for safe coal production and ecological protection in mining areas. UAV photogrammetry has become a widely adopted technique for constructing Digital Subsidence Models (DSuM); however, multi-scale composite noise significantly limits model accuracy and parameter extraction reliability. Taking the 2S201 working face of Wangjiata Coal Mine in a western arid&amp;amp;ndash;semi-arid region as the study area, this study systematically investigates DSuM noise characteristics and proposes a hierarchical multi-scale denoising framework. First, subsidence value interval stratification is employed to analyze the spatial distribution of noise. Based on this analysis, a two-stage strategy is developed. In the first stage, large-scale outliers are identified and removed using an improved DBSCAN algorithm with empirically calibrated and density-adaptive parameter computation. In the second stage, small-scale mixed noise is suppressed through a curvature-adaptive multi-stage denoising method. Validation using 20 ground monitoring points demonstrates that the RMSE decreases from 154 mm to 86 mm after large-scale denoising and further to 59 mm, achieving a 61.5% overall accuracy improvement. The denoised model exhibits enhanced surface continuity, smoother deformation profiles, and clearer subsidence boundaries while preserving overall deformation trends. The proposed framework effectively improves DSuM geometric accuracy and spatial consistency, providing reliable technical support for subsidence monitoring with improved accuracy in complex mining environments.</description>
	<pubDate>2026-05-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1423: A Hierarchical Multi-Scale Denoising Framework for UAV-Derived Digital Subsidence Models in Coal Mining Areas</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1423">doi: 10.3390/rs18091423</a></p>
	<p>Authors:
		Xi Zhang
		Jiazheng Han
		Zhanjie Feng
		Lingtong Meng
		Ruihao Cui
		Zhenqi Hu
		</p>
	<p>Mining-induced subsidence monitoring is essential for safe coal production and ecological protection in mining areas. UAV photogrammetry has become a widely adopted technique for constructing Digital Subsidence Models (DSuM); however, multi-scale composite noise significantly limits model accuracy and parameter extraction reliability. Taking the 2S201 working face of Wangjiata Coal Mine in a western arid&amp;amp;ndash;semi-arid region as the study area, this study systematically investigates DSuM noise characteristics and proposes a hierarchical multi-scale denoising framework. First, subsidence value interval stratification is employed to analyze the spatial distribution of noise. Based on this analysis, a two-stage strategy is developed. In the first stage, large-scale outliers are identified and removed using an improved DBSCAN algorithm with empirically calibrated and density-adaptive parameter computation. In the second stage, small-scale mixed noise is suppressed through a curvature-adaptive multi-stage denoising method. Validation using 20 ground monitoring points demonstrates that the RMSE decreases from 154 mm to 86 mm after large-scale denoising and further to 59 mm, achieving a 61.5% overall accuracy improvement. The denoised model exhibits enhanced surface continuity, smoother deformation profiles, and clearer subsidence boundaries while preserving overall deformation trends. The proposed framework effectively improves DSuM geometric accuracy and spatial consistency, providing reliable technical support for subsidence monitoring with improved accuracy in complex mining environments.</p>
	]]></content:encoded>

	<dc:title>A Hierarchical Multi-Scale Denoising Framework for UAV-Derived Digital Subsidence Models in Coal Mining Areas</dc:title>
			<dc:creator>Xi Zhang</dc:creator>
			<dc:creator>Jiazheng Han</dc:creator>
			<dc:creator>Zhanjie Feng</dc:creator>
			<dc:creator>Lingtong Meng</dc:creator>
			<dc:creator>Ruihao Cui</dc:creator>
			<dc:creator>Zhenqi Hu</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091423</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-05-03</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-05-03</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1423</prism:startingPage>
		<prism:doi>10.3390/rs18091423</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1423</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1422">

	<title>Remote Sensing, Vol. 18, Pages 1422: DFS-YOLO: A Dynamic Feature Collaboration and State Space Framework for UAV-Based Infrared Object Detection</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1422</link>
	<description>UAV-based infrared target detection presents inherent challenges, including low signal-to-noise ratios, texture degradation, and severe scale variations. To address these issues, we propose DFS-YOLO, an approach based on dynamic feature collaboration and efficient state-space modeling. We introduce a Dynamic Range-Calibrated Area Attention (DRCAA) module in the backbone to stabilize feature activations under strong thermal clutter. Within the neck architecture, an Efficient Attentional Scale-Sequence Fusion (EASF) strategy reduces cross-scale semantic misalignment and ensures precise spatial coherence. Additionally, an EfficientViM-based state-space module captures global contextual dependencies while maintaining linear computational complexity. Finally, the Content-Guided Triple-Attention Fusion (CGTAFusion) module maximizes feature discriminability by calibrating fusion representations across the channel, spatial, and pixel dimensions. Extensive experiments on the HIT-UAV and IRSTD-1k benchmarks validate the efficacy of the DFS-YOLO framework. Compared to the baseline YOLOv12, DFS-YOLO&amp;amp;rsquo;s performance has been significantly improved, increasing mAP@50 and mAP@50-95 by 10.16% and 7.55% on HIT-UAV, and by 1.84% and 3.18% on IRSTD-1k, respectively. These quantitative gains establish DFS-YOLO as a highly robust and state-of-the-art solution for complex infrared aerial surveillance.</description>
	<pubDate>2026-05-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1422: DFS-YOLO: A Dynamic Feature Collaboration and State Space Framework for UAV-Based Infrared Object Detection</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1422">doi: 10.3390/rs18091422</a></p>
	<p>Authors:
		Ziyan Wang
		Wangbin Li
		Kaimin Sun
		</p>
	<p>UAV-based infrared target detection presents inherent challenges, including low signal-to-noise ratios, texture degradation, and severe scale variations. To address these issues, we propose DFS-YOLO, an approach based on dynamic feature collaboration and efficient state-space modeling. We introduce a Dynamic Range-Calibrated Area Attention (DRCAA) module in the backbone to stabilize feature activations under strong thermal clutter. Within the neck architecture, an Efficient Attentional Scale-Sequence Fusion (EASF) strategy reduces cross-scale semantic misalignment and ensures precise spatial coherence. Additionally, an EfficientViM-based state-space module captures global contextual dependencies while maintaining linear computational complexity. Finally, the Content-Guided Triple-Attention Fusion (CGTAFusion) module maximizes feature discriminability by calibrating fusion representations across the channel, spatial, and pixel dimensions. Extensive experiments on the HIT-UAV and IRSTD-1k benchmarks validate the efficacy of the DFS-YOLO framework. Compared to the baseline YOLOv12, DFS-YOLO&amp;amp;rsquo;s performance has been significantly improved, increasing mAP@50 and mAP@50-95 by 10.16% and 7.55% on HIT-UAV, and by 1.84% and 3.18% on IRSTD-1k, respectively. These quantitative gains establish DFS-YOLO as a highly robust and state-of-the-art solution for complex infrared aerial surveillance.</p>
	]]></content:encoded>

	<dc:title>DFS-YOLO: A Dynamic Feature Collaboration and State Space Framework for UAV-Based Infrared Object Detection</dc:title>
			<dc:creator>Ziyan Wang</dc:creator>
			<dc:creator>Wangbin Li</dc:creator>
			<dc:creator>Kaimin Sun</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091422</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-05-03</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-05-03</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1422</prism:startingPage>
		<prism:doi>10.3390/rs18091422</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1422</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1419">

	<title>Remote Sensing, Vol. 18, Pages 1419: Assessing GAN Super-Resolution in Grasslands: The Role of Spatial Heterogeneity and Textural Complexity</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1419</link>
	<description>High-resolution imagery is essential for monitoring heterogeneous grassland ecosystems, yet the performance of generative adversarial network (GAN) super-resolution under varying landscape heterogeneity and operational application scenarios remains unclear. This study presents a landscape-aware evaluation of super-resolution methods in semi-arid savanna grasslands of the Edwards Plateau (Texas, USA) using paired multispectral imagery from PlanetScope (3 m) and unmanned aerial vehicle (UAV) platforms (0.03 m). Two GAN models, SRGAN and ESRGAN, were compared with a bicubic interpolation baseline. Image tiles were systematically stratified along ecologically relevant gradients of vegetation condition (NDVI quartiles), spatial structure (woody patch-based clusters), and textural complexity (GLCM entropy quartiles). Model performance was evaluated across three operational frameworks: intra-sensor downscaling, cross-sensor downscaling, and intra-to-cross generalization. Reconstruction fidelity was quantified using peak signal-to-noise ratio (PSNR) and structural similarity index (SSIM), complemented by variability analysis to assess performance stability. Landscape heterogeneity strongly influenced downscaling outcomes. SRGAN performance declined in areas with dense vegetation, aggregated woody structure, and high-entropy textures, with large variability under cross-sensor and generalization scenarios. In contrast, ESRGAN demonstrated consistently robust performance across landscape gradients, whereas bicubic interpolation performed well only under intra-sensor conditions and drastically degraded under sensor transfer. These results demonstrate that vegetation condition, structural heterogeneity, and sensor-transfer scenarios jointly constrain super-resolution performance. 
Rather than serving as a model comparison exercise, this study emphasizes a landscape-aware framework for understanding how ecological heterogeneity and operational domain shifts jointly shape super-resolution behavior in grassland ecosystems, providing guidance for more reliable applications of deep learning-based remote sensing methods.</description>
	<pubDate>2026-05-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1419: Assessing GAN Super-Resolution in Grasslands: The Role of Spatial Heterogeneity and Textural Complexity</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1419">doi: 10.3390/rs18091419</a></p>
	<p>Authors:
		Efrain Noa-Yarasca
		Javier Osorio Leyton
		Nada Jumaa
		Haoyu Niu
		Lonesome Malambo
		</p>
	<p>High-resolution imagery is essential for monitoring heterogeneous grassland ecosystems, yet the performance of generative adversarial network (GAN) super-resolution under varying landscape heterogeneity and operational application scenarios remains unclear. This study presents a landscape-aware evaluation of super-resolution methods in semi-arid savanna grasslands of the Edwards Plateau (Texas, USA) using paired multispectral imagery from PlanetScope (3 m) and unmanned aerial vehicle (UAV) platforms (0.03 m). Two GAN models, SRGAN and ESRGAN, were compared with a bicubic interpolation baseline. Image tiles were systematically stratified along ecologically relevant gradients of vegetation condition (NDVI quartiles), spatial structure (woody patch-based clusters), and textural complexity (GLCM entropy quartiles). Model performance was evaluated across three operational frameworks: intra-sensor downscaling, cross-sensor downscaling, and intra-to-cross generalization. Reconstruction fidelity was quantified using peak signal-to-noise ratio (PSNR) and structural similarity index (SSIM), complemented by variability analysis to assess performance stability. Landscape heterogeneity strongly influenced downscaling outcomes. SRGAN performance declined in areas with dense vegetation, aggregated woody structure, and high-entropy textures, with large variability under cross-sensor and generalization scenarios. In contrast, ESRGAN demonstrated consistently robust performance across landscape gradients, whereas bicubic interpolation performed well only under intra-sensor conditions and drastically degraded under sensor transfer. These results demonstrate that vegetation condition, structural heterogeneity, and sensor-transfer scenarios jointly constrain super-resolution performance. 
Rather than serving as a model comparison exercise, this study emphasizes a landscape-aware framework for understanding how ecological heterogeneity and operational domain shifts jointly shape super-resolution behavior in grassland ecosystems, providing guidance for more reliable applications of deep learning-based remote sensing methods.</p>
	]]></content:encoded>

	<dc:title>Assessing GAN Super-Resolution in Grasslands: The Role of Spatial Heterogeneity and Textural Complexity</dc:title>
			<dc:creator>Efrain Noa-Yarasca</dc:creator>
			<dc:creator>Javier Osorio Leyton</dc:creator>
			<dc:creator>Nada Jumaa</dc:creator>
			<dc:creator>Haoyu Niu</dc:creator>
			<dc:creator>Lonesome Malambo</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091419</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-05-03</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-05-03</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1419</prism:startingPage>
		<prism:doi>10.3390/rs18091419</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1419</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1420">

	<title>Remote Sensing, Vol. 18, Pages 1420: Role of Crop Salt Tolerance in Enhancing Remote Sensing-Based Soil Salinity Mapping Across Irrigated Agroecosystems: A Review</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1420</link>
	<description>Soil salinization poses a persistent threat to irrigated agroecosystems, yet remote sensing-based salinity assessment remains predominantly calibrated against bulk electrical conductivity without fully integrating crop physiological variability. This review examines the evolution of remote sensing approaches for soil salinity mapping (1994&amp;amp;ndash;2024), with particular emphasis on the role of crop salt tolerance in shaping spectral interpretation and mapping accuracy. A systematic synthesis of 58 peer-reviewed studies retrieved from the Scopus database was conducted using bibliometric analysis and structured full-text thematic classification to evaluate methodological trends and conceptual integration across soil, crops, and spectral domains. The results reveal substantial technological advancement, including multispectral and hyperspectral sensing, machine learning frameworks, and multi-source data integration. However, most approaches remain surface-oriented and statistically calibrated, with limited operationalization of crop-specific tolerance thresholds, root-zone salinity dynamics, and hydrochemical variability. The findings indicate that crop salt tolerance functions as a mediating factor within the soil&amp;amp;ndash;plant&amp;amp;ndash;spectral continuum, influencing the stability and transferability of spectral&amp;amp;ndash;salinity relationships. Integrating physiological tolerance parameters and subsurface processes into modeling frameworks is essential for improving agronomic interpretability and supporting more reliable salinity management in irrigated systems.</description>
	<pubDate>2026-05-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1420: Role of Crop Salt Tolerance in Enhancing Remote Sensing-Based Soil Salinity Mapping Across Irrigated Agroecosystems: A Review</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1420">doi: 10.3390/rs18091420</a></p>
	<p>Authors:
		Zhassulan Smanov
		Jilili Abuduwaili
		Alim Samat
		Kanat Samarkhanov
		Shakhislam Laiskhanov
		Kanat Kulymbet
		Azamat Yershibul
		Saken Duisekov
		Assiya Massakbayeva
		Zhanerke Sharapkhanova
		</p>
	<p>Soil salinization poses a persistent threat to irrigated agroecosystems, yet remote sensing-based salinity assessment remains predominantly calibrated against bulk electrical conductivity without fully integrating crop physiological variability. This review examines the evolution of remote sensing approaches for soil salinity mapping (1994&amp;amp;ndash;2024), with particular emphasis on the role of crop salt tolerance in shaping spectral interpretation and mapping accuracy. A systematic synthesis of 58 peer-reviewed studies retrieved from the Scopus database was conducted using bibliometric analysis and structured full-text thematic classification to evaluate methodological trends and conceptual integration across soil, crops, and spectral domains. The results reveal substantial technological advancement, including multispectral and hyperspectral sensing, machine learning frameworks, and multi-source data integration. However, most approaches remain surface-oriented and statistically calibrated, with limited operationalization of crop-specific tolerance thresholds, root-zone salinity dynamics, and hydrochemical variability. The findings indicate that crop salt tolerance functions as a mediating factor within the soil&amp;amp;ndash;plant&amp;amp;ndash;spectral continuum, influencing the stability and transferability of spectral&amp;amp;ndash;salinity relationships. Integrating physiological tolerance parameters and subsurface processes into modeling frameworks is essential for improving agronomic interpretability and supporting more reliable salinity management in irrigated systems.</p>
	]]></content:encoded>

	<dc:title>Role of Crop Salt Tolerance in Enhancing Remote Sensing-Based Soil Salinity Mapping Across Irrigated Agroecosystems: A Review</dc:title>
			<dc:creator>Zhassulan Smanov</dc:creator>
			<dc:creator>Jilili Abuduwaili</dc:creator>
			<dc:creator>Alim Samat</dc:creator>
			<dc:creator>Kanat Samarkhanov</dc:creator>
			<dc:creator>Shakhislam Laiskhanov</dc:creator>
			<dc:creator>Kanat Kulymbet</dc:creator>
			<dc:creator>Azamat Yershibul</dc:creator>
			<dc:creator>Saken Duisekov</dc:creator>
			<dc:creator>Assiya Massakbayeva</dc:creator>
			<dc:creator>Zhanerke Sharapkhanova</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091420</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-05-03</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-05-03</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>1420</prism:startingPage>
		<prism:doi>10.3390/rs18091420</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1420</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1421">

	<title>Remote Sensing, Vol. 18, Pages 1421: Evaluation of the Potential of Very-High-Resolution Satellite Imagery in Large-Scale Mapping</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1421</link>
	<description>With the rapid and ongoing expansion of urban areas, the need for accurate, reliable, and regularly updated topographic maps has become increasingly critical for planning and sustainable development. While traditional aerial photogrammetry&amp;amp;mdash;whether analog or digital&amp;amp;mdash;has long been the standard for such tasks, it remains costly, time-consuming, and logistically demanding, particularly when large or inaccessible regions are involved. This study proposes an alternative approach based on very-high-resolution satellite imagery, focusing specifically on data acquired from Morocco&amp;amp;rsquo;s Mohammed VI A and B satellites. The research evaluates the capacity of this satellite imagery to support large-scale topographic mapping, both in terms of geometric accuracy and the ability to identify essential urban features. To validate the results, we conducted a comparative analysis of satellite data with conventional photogrammetric imagery from analog cameras (RMK TOP) and digital sensors (ADS, DMC), using ground control points (GCPs) and differential GPS (DGPS) measurements for calibration and accuracy assessment. The outcomes demonstrate that planimetric accuracy from satellite imagery meets the required standards for mapping at 1:10,000 and 1:5000 scales. However, altimetric accuracy is closer to the upper permissible limits, especially in applications requiring finer detail. While major urban elements such as roads, buildings, and vegetation are well identified, smaller infrastructure components, such as power lines, remain challenging to detect. Despite these limitations, the study highlights the growing potential of satellite imagery as a cost-effective and operationally efficient alternative to traditional methods, particularly in rapidly evolving urban environments where frequent map updates are essential. 
Integration with GeoAI workflows is identified as a key direction for future research and is not part of the current methodology.</description>
	<pubDate>2026-05-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1421: Evaluation of the Potential of Very-High-Resolution Satellite Imagery in Large-Scale Mapping</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1421">doi: 10.3390/rs18091421</a></p>
	<p>Authors:
		Ilyas Afa
		Adnane Labbaci
		Laila El Ghazouani
		Hassan Radoine
		</p>
	<p>With the rapid and ongoing expansion of urban areas, the need for accurate, reliable, and regularly updated topographic maps has become increasingly critical for planning and sustainable development. While traditional aerial photogrammetry&mdash;whether analog or digital&mdash;has long been the standard for such tasks, it remains costly, time-consuming, and logistically demanding, particularly when large or inaccessible regions are involved. This study proposes an alternative approach based on very-high-resolution satellite imagery, focusing specifically on data acquired from Morocco&rsquo;s Mohammed VI A and B satellites. The research evaluates the capacity of this satellite imagery to support large-scale topographic mapping, both in terms of geometric accuracy and the ability to identify essential urban features. To validate the results, we conducted a comparative analysis of satellite data with conventional photogrammetric imagery from analog cameras (RMK TOP) and digital sensors (ADS, DMC), using ground control points (GCPs) and differential GPS (DGPS) measurements for calibration and accuracy assessment. The outcomes demonstrate that planimetric accuracy from satellite imagery meets the required standards for mapping at 1:10,000 and 1:5000 scales. However, altimetric accuracy is closer to the upper permissible limits, especially in applications requiring finer detail. While major urban elements such as roads, buildings, and vegetation are well identified, smaller infrastructure components, such as power lines, remain challenging to detect. Despite these limitations, the study highlights the growing potential of satellite imagery as a cost-effective and operationally efficient alternative to traditional methods, particularly in rapidly evolving urban environments where frequent map updates are essential. 
Integration with GeoAI workflows is identified as a key direction for future research and is not part of the current methodology.</p>
	]]></content:encoded>

	<dc:title>Evaluation of the Potential of Very-High-Resolution Satellite Imagery in Large-Scale Mapping</dc:title>
			<dc:creator>Ilyas Afa</dc:creator>
			<dc:creator>Adnane Labbaci</dc:creator>
			<dc:creator>Laila El Ghazouani</dc:creator>
			<dc:creator>Hassan Radoine</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091421</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-05-03</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-05-03</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1421</prism:startingPage>
		<prism:doi>10.3390/rs18091421</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1421</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1418">

	<title>Remote Sensing, Vol. 18, Pages 1418: Gated Lightweight CNN-Transformer Fusion for Real-Time Flood Segmentation on Satellite Internet Terminals Under Triple-Disruption Emergency Conditions</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1418</link>
	<description>During flood disasters, on-site operations often face the &amp;ldquo;triple disruption&amp;rdquo; of network outages, power cuts and blocked roads. This renders terrestrial cellular infrastructure inoperable and disrupts communication links. Satellite internet can partially restore emergency communications thanks to its wide-area coverage and resistance to ground damage. However, limited computing power, memory and unstable bandwidth at the terminal prevent cloud-based flood segmentation from providing near-real-time situational awareness. This paper therefore proposes a lightweight semantic flood segmentation framework for emergency terminals that uses satellite internet. This comprises a parallel dual-branch design with a lightweight U-Net-style convolutional neural network (CNN) branch for local boundary details and a compact Transformer branch for global context. A dynamic gated fusion mechanism (DGFM) balances local texture and global information adaptively. Experiments on the public synthetic aperture radar (SAR) dataset Sen1Floods11 demonstrate that the hybrid architecture strikes a balance between accuracy and inference efficiency. The proposed method combines gated fusion with quality-aware training. Compared to a lightweight CNN baseline and state-of-the-art segmentation models using the same protocol, the proposed configuration (Hybrid-Gated with Quality-Aware Training) achieves the highest mean intersection over union and F1 score among the compared fusion variants, while maintaining competitive false alarm and risk-sensitive performance under deployment constraints. This aligns with the preferences of emergency decision makers. The framework provides a deployable perception module for emergency systems supported by low-orbit satellites and terrestrial networks under triple-disruption conditions.</description>
	<pubDate>2026-05-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1418: Gated Lightweight CNN-Transformer Fusion for Real-Time Flood Segmentation on Satellite Internet Terminals Under Triple-Disruption Emergency Conditions</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1418">doi: 10.3390/rs18091418</a></p>
	<p>Authors:
		Yungui Nie
		Zhiguo Shi
		Jianing Li
		HuiLing Ge
		</p>
	<p>During flood disasters, on-site operations often face the &ldquo;triple disruption&rdquo; of network outages, power cuts and blocked roads. This renders terrestrial cellular infrastructure inoperable and disrupts communication links. Satellite internet can partially restore emergency communications thanks to its wide-area coverage and resistance to ground damage. However, limited computing power, memory and unstable bandwidth at the terminal prevent cloud-based flood segmentation from providing near-real-time situational awareness. This paper therefore proposes a lightweight semantic flood segmentation framework for emergency terminals that uses satellite internet. This comprises a parallel dual-branch design with a lightweight U-Net-style convolutional neural network (CNN) branch for local boundary details and a compact Transformer branch for global context. A dynamic gated fusion mechanism (DGFM) balances local texture and global information adaptively. Experiments on the public synthetic aperture radar (SAR) dataset Sen1Floods11 demonstrate that the hybrid architecture strikes a balance between accuracy and inference efficiency. The proposed method combines gated fusion with quality-aware training. Compared to a lightweight CNN baseline and state-of-the-art segmentation models using the same protocol, the proposed configuration (Hybrid-Gated with Quality-Aware Training) achieves the highest mean intersection over union and F1 score among the compared fusion variants, while maintaining competitive false alarm and risk-sensitive performance under deployment constraints. This aligns with the preferences of emergency decision makers. The framework provides a deployable perception module for emergency systems supported by low-orbit satellites and terrestrial networks under triple-disruption conditions.</p>
	]]></content:encoded>

	<dc:title>Gated Lightweight CNN-Transformer Fusion for Real-Time Flood Segmentation on Satellite Internet Terminals Under Triple-Disruption Emergency Conditions</dc:title>
			<dc:creator>Yungui Nie</dc:creator>
			<dc:creator>Zhiguo Shi</dc:creator>
			<dc:creator>Jianing Li</dc:creator>
			<dc:creator>HuiLing Ge</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091418</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-05-03</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-05-03</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1418</prism:startingPage>
		<prism:doi>10.3390/rs18091418</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1418</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1417">

	<title>Remote Sensing, Vol. 18, Pages 1417: Assessing Power System Reliability Using Anomaly Detection in Daily Nighttime Light Data</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1417</link>
	<description>Power-system reliability is crucial for sustainable development, but large-scale, long-term monitoring remains challenging. Existing nighttime light (NTL)-based outage detection methods often rely on fixed thresholds or prior information, limiting cross-regional application. To address this, we develop an adaptive thresholding framework using daily NASA Black Marble data. Observations are grouped by view angle to mitigate radiometric instability, and a per-pixel dynamic baseline is constructed from high-radiance statistics, enabling robust anomaly detection without prior outage timing. From the detected anomalies, we formulate a population-weighted NTL power reliability index (NTPRI) to quantify regional electricity service reliability. Validation across six diverse outage events yields an F1 score of 0.807. National-scale analysis shows NTPRI correlates significantly with the World Bank&amp;rsquo;s System Average Interruption Duration Index (SAIDI). The derived Light Anomaly Rate (LAR) further supports pixel-level frequency analysis. Together, this framework provides a transferable remote-sensing tool for large-scale power-reliability assessment in data-scarce regions, supporting disaster impact evaluation and energy vulnerability analysis.</description>
	<pubDate>2026-05-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1417: Assessing Power System Reliability Using Anomaly Detection in Daily Nighttime Light Data</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1417">doi: 10.3390/rs18091417</a></p>
	<p>Authors:
		Nuo Xu
		Xin Cao
		Miaoying Chen
		</p>
	<p>Power-system reliability is crucial for sustainable development, but large-scale, long-term monitoring remains challenging. Existing nighttime light (NTL)-based outage detection methods often rely on fixed thresholds or prior information, limiting cross-regional application. To address this, we develop an adaptive thresholding framework using daily NASA Black Marble data. Observations are grouped by view angle to mitigate radiometric instability, and a per-pixel dynamic baseline is constructed from high-radiance statistics, enabling robust anomaly detection without prior outage timing. From the detected anomalies, we formulate a population-weighted NTL power reliability index (NTPRI) to quantify regional electricity service reliability. Validation across six diverse outage events yields an F1 score of 0.807. National-scale analysis shows NTPRI correlates significantly with the World Bank&rsquo;s System Average Interruption Duration Index (SAIDI). The derived Light Anomaly Rate (LAR) further supports pixel-level frequency analysis. Together, this framework provides a transferable remote-sensing tool for large-scale power-reliability assessment in data-scarce regions, supporting disaster impact evaluation and energy vulnerability analysis.</p>
	]]></content:encoded>

	<dc:title>Assessing Power System Reliability Using Anomaly Detection in Daily Nighttime Light Data</dc:title>
			<dc:creator>Nuo Xu</dc:creator>
			<dc:creator>Xin Cao</dc:creator>
			<dc:creator>Miaoying Chen</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091417</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-05-03</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-05-03</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1417</prism:startingPage>
		<prism:doi>10.3390/rs18091417</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1417</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1416">

	<title>Remote Sensing, Vol. 18, Pages 1416: A Modality-Aware Ensemble-of-Experts Model for Wildfire Spread Prediction</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1416</link>
	<description>Climate change has exacerbated natural hazards, including wildfires. In recent years, wildfires have become stronger and more frequent, threatening not only human lives worldwide but also ecosystems and wildlife. The proliferation of remote sensing data and derived variables has enabled deep learning models to help authorities to understand, mitigate, and manage wildfires. In this context, our work presents FireEx, a modality-aware Ensemble-of-Experts model for next-day wildfire spread prediction using remote sensing data of wildfires in Canada and Alaska. The dataset contains multi-source remote sensing data to segment next-day incremental wildfire growth, using the previous 24 h data. FireEx is based on a U-Net-designed multi-kernel Convolutional Neural Network, and is a combination of three models: two experts based on fuel data and weather data, respectively, and a generalist model trained on all the input channels of the dataset. The two experts and the generalist are trained independently and fused together using averaging. FireEx demonstrates strong performance with an F1-score of 48.9%, and ablation studies demonstrate the robustness of the architecture design, showing degraded performance when one of the experts is removed, therefore highlighting the importance of each expert and the generalist model. To the best of our knowledge, FireEx is the first Ensemble-of-Experts model for wildfire spread prediction, offering a modality-aware design approach and laying the groundwork for similar architectures in the field.</description>
	<pubDate>2026-05-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1416: A Modality-Aware Ensemble-of-Experts Model for Wildfire Spread Prediction</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1416">doi: 10.3390/rs18091416</a></p>
	<p>Authors:
		Henintsoa S. Andrianarivony
		Moulay A. Akhloufi
		</p>
	<p>Climate change has exacerbated natural hazards, including wildfires. In recent years, wildfires have become stronger and more frequent, threatening not only human lives worldwide but also ecosystems and wildlife. The proliferation of remote sensing data and derived variables has enabled deep learning models to help authorities to understand, mitigate, and manage wildfires. In this context, our work presents FireEx, a modality-aware Ensemble-of-Experts model for next-day wildfire spread prediction using remote sensing data of wildfires in Canada and Alaska. The dataset contains multi-source remote sensing data to segment next-day incremental wildfire growth, using the previous 24 h data. FireEx is based on a U-Net-designed multi-kernel Convolutional Neural Network, and is a combination of three models: two experts based on fuel data and weather data, respectively, and a generalist model trained on all the input channels of the dataset. The two experts and the generalist are trained independently and fused together using averaging. FireEx demonstrates strong performance with an F1-score of 48.9%, and ablation studies demonstrate the robustness of the architecture design, showing degraded performance when one of the experts is removed, therefore highlighting the importance of each expert and the generalist model. To the best of our knowledge, FireEx is the first Ensemble-of-Experts model for wildfire spread prediction, offering a modality-aware design approach and laying the groundwork for similar architectures in the field.</p>
	]]></content:encoded>

	<dc:title>A Modality-Aware Ensemble-of-Experts Model for Wildfire Spread Prediction</dc:title>
			<dc:creator>Henintsoa S. Andrianarivony</dc:creator>
			<dc:creator>Moulay A. Akhloufi</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091416</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-05-03</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-05-03</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1416</prism:startingPage>
		<prism:doi>10.3390/rs18091416</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1416</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1415">

	<title>Remote Sensing, Vol. 18, Pages 1415: Remote Sensing Inversion of Chlorophyll-a in the East China Sea Based on ALA-BP Neural Network</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1415</link>
	<description>Under the combined impacts of climate change and intensified human activities, harmful algal blooms (HABs) have occurred with increasing frequency in China&amp;rsquo;s coastal waters, posing growing risks to marine ecosystems and regional sustainability. Chlorophyll-a concentration (Chl-a), a key indicator of phytoplankton biomass, plays a crucial role in HAB monitoring and early warning. This study integrates satellite remote sensing data from 2000 to 2004, 2011 to 2013, and 2023 to 2024 with in situ measurements and environmental variables (e.g., dissolved oxygen) to investigate Chl-a dynamics in the East China Sea. The results indicate pronounced spatiotemporal heterogeneity across the region. Spectral features were represented using band-ratio methods and the BRG model, followed by variable selection based on the Bayesian Information Criterion (BIC) to determine the optimal band combinations for model training. Six mainstream machine learning models were evaluated, and the Backpropagation Neural Network (BP) was selected as the baseline model due to its superior performance. To further improve model robustness and global optimization capability, the Artificial Lemming Algorithm (ALA) was employed to optimize the BP network, resulting in the ALA-BP inversion model. The optimized model achieved correlation coefficients of 0.933 on the test set and 0.940 on the independent validation set, outperforming the other models. The proposed model was further applied to the 2024 algal bloom event in the East China Sea, successfully capturing the spatiotemporal variations of Chl-a. This study provides an effective retrieval framework for Chl-a in optically complex coastal waters and demonstrates its applicability in HAB monitoring.</description>
	<pubDate>2026-05-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1415: Remote Sensing Inversion of Chlorophyll-a in the East China Sea Based on ALA-BP Neural Network</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1415">doi: 10.3390/rs18091415</a></p>
	<p>Authors:
		Lu Cao
		Ying Xiong
		Yuntao Wang
		Xiangbin Ran
		Jiayin Bian
		Qiang Fang
		Wentao Ma
		Huiyu Zheng
		</p>
	<p>Under the combined impacts of climate change and intensified human activities, harmful algal blooms (HABs) have occurred with increasing frequency in China&rsquo;s coastal waters, posing growing risks to marine ecosystems and regional sustainability. Chlorophyll-a concentration (Chl-a), a key indicator of phytoplankton biomass, plays a crucial role in HAB monitoring and early warning. This study integrates satellite remote sensing data from 2000 to 2004, 2011 to 2013, and 2023 to 2024 with in situ measurements and environmental variables (e.g., dissolved oxygen) to investigate Chl-a dynamics in the East China Sea. The results indicate pronounced spatiotemporal heterogeneity across the region. Spectral features were represented using band-ratio methods and the BRG model, followed by variable selection based on the Bayesian Information Criterion (BIC) to determine the optimal band combinations for model training. Six mainstream machine learning models were evaluated, and the Backpropagation Neural Network (BP) was selected as the baseline model due to its superior performance. To further improve model robustness and global optimization capability, the Artificial Lemming Algorithm (ALA) was employed to optimize the BP network, resulting in the ALA-BP inversion model. The optimized model achieved correlation coefficients of 0.933 on the test set and 0.940 on the independent validation set, outperforming the other models. The proposed model was further applied to the 2024 algal bloom event in the East China Sea, successfully capturing the spatiotemporal variations of Chl-a. This study provides an effective retrieval framework for Chl-a in optically complex coastal waters and demonstrates its applicability in HAB monitoring.</p>
	]]></content:encoded>

	<dc:title>Remote Sensing Inversion of Chlorophyll-a in the East China Sea Based on ALA-BP Neural Network</dc:title>
			<dc:creator>Lu Cao</dc:creator>
			<dc:creator>Ying Xiong</dc:creator>
			<dc:creator>Yuntao Wang</dc:creator>
			<dc:creator>Xiangbin Ran</dc:creator>
			<dc:creator>Jiayin Bian</dc:creator>
			<dc:creator>Qiang Fang</dc:creator>
			<dc:creator>Wentao Ma</dc:creator>
			<dc:creator>Huiyu Zheng</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091415</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-05-03</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-05-03</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1415</prism:startingPage>
		<prism:doi>10.3390/rs18091415</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1415</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1414">

	<title>Remote Sensing, Vol. 18, Pages 1414: A Methodological Framework for High-Latitude Coastal Classification Using ICESat-2 and Explainable Machine Learning</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1414</link>
	<description>High-latitude coastal regions are highly sensitive to climate change, yet their geomorphology is obscured by sea ice, landfast ice and seasonal snow, restricting the applicability of optical remote sensing for fine coastal classification. To address this limitation, we develop an interpretable coastal classification framework integrating ICESat-2 photon-counting LiDAR and explainable machine learning. Multi-dimensional morphometric features describing cross-shore geometry, vertical relief and local slope variability are extracted from ICESat-2 ATL03 along-track profiles to train a CatBoost classifier, with five-fold cross-validation and sample weighting to mitigate class imbalance. Introducing SHAP-based interpretability into ICESat-2-driven coastal geomorphic classification enables the identification of morphometric controls on coastal-type differentiation. Validated in the Bering Sea with 447 profiles and a 75%/25% stratified split, the framework achieved an overall accuracy of 86.6%, a macro-average recall of 89.4% and a Kappa coefficient of 0.84. SHAP analysis identifies that coastal width is the most influential feature for model-based classification of coastal geomorphic types, while slope and local steepness variability serve as important predictive indicators for distinguishing rocky and sedimentary coasts. This framework links data-driven classification to geomorphic processes and provides a potentially generalisable approach for fine-scale coastal mapping in high-latitude environments.</description>
	<pubDate>2026-05-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1414: A Methodological Framework for High-Latitude Coastal Classification Using ICESat-2 and Explainable Machine Learning</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1414">doi: 10.3390/rs18091414</a></p>
	<p>Authors:
		Kuifeng Luan
		Yuwei Li
		Youzhi Li
		Dandan Lin
		Weidong Zhu
		Changda Liu
		Lizhe Zhang
		</p>
	<p>High-latitude coastal regions are highly sensitive to climate change, yet their geomorphology is obscured by sea ice, landfast ice and seasonal snow, restricting the applicability of optical remote sensing for fine coastal classification. To address this limitation, we develop an interpretable coastal classification framework integrating ICESat-2 photon-counting LiDAR and explainable machine learning. Multi-dimensional morphometric features describing cross-shore geometry, vertical relief and local slope variability are extracted from ICESat-2 ATL03 along-track profiles to train a CatBoost classifier, with five-fold cross-validation and sample weighting to mitigate class imbalance. Introducing SHAP-based interpretability into ICESat-2-driven coastal geomorphic classification enables the identification of morphometric controls on coastal-type differentiation. Validated in the Bering Sea with 447 profiles and a 75%/25% stratified split, the framework achieved an overall accuracy of 86.6%, a macro-average recall of 89.4% and a Kappa coefficient of 0.84. SHAP analysis identifies that coastal width is the most influential feature for model-based classification of coastal geomorphic types, while slope and local steepness variability serve as important predictive indicators for distinguishing rocky and sedimentary coasts. This framework links data-driven classification to geomorphic processes and provides a potentially generalisable approach for fine-scale coastal mapping in high-latitude environments.</p>
	]]></content:encoded>

	<dc:title>A Methodological Framework for High-Latitude Coastal Classification Using ICESat-2 and Explainable Machine Learning</dc:title>
			<dc:creator>Kuifeng Luan</dc:creator>
			<dc:creator>Yuwei Li</dc:creator>
			<dc:creator>Youzhi Li</dc:creator>
			<dc:creator>Dandan Lin</dc:creator>
			<dc:creator>Weidong Zhu</dc:creator>
			<dc:creator>Changda Liu</dc:creator>
			<dc:creator>Lizhe Zhang</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091414</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-05-03</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-05-03</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1414</prism:startingPage>
		<prism:doi>10.3390/rs18091414</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1414</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1411">

	<title>Remote Sensing, Vol. 18, Pages 1411: Flood Susceptibility and Potential Flood Risk Assessment in Afghanistan Using Morphometric and Socioeconomic Indicators</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1411</link>
	<description>Afghanistan is highly vulnerable to climate-driven extremes because of its combination of rugged geography and socio-political instability. Frequent events of extreme precipitation, floods, and droughts pose severe socio-economic and environmental challenges. Floods are particularly destructive, yet national-scale potential flood risk in Afghanistan has not been systematically assessed, largely due to limited data and field access. This study addresses this gap by mapping flood susceptibility, vulnerability, and risk using remote sensing (RS) and geographic information systems (GIS) at both subbasin and provincial scales. We apply a hybrid approach that combines Principal Component Analysis (PCA) to identify key environmental, climatic, and socio-economic indicators with the Analytic Hierarchy Process (AHP) to derive consistent weights and reduce subjectivity in decision-making. The results show that the eastern and northeastern subbasins, especially within the Panj-Amu and Kabul River basins, have the highest flood susceptibility due to intense precipitation, steep terrain, and efficient drainage. Vulnerability increases in the densely populated northern and northeastern provinces, where land-use change and socio-economic constraints elevate flood-related impacts. Overall, 31% and 20% of study areas are classified as Very High and High vulnerability zones, respectively. The composite potential flood-risk index identifies that approximately 24% and 22% of Afghanistan fall within Very High and High flood risk zones, concentrated in the northern and eastern provinces. Model performance, evaluated using Receiver Operating Characteristic (ROC) curves and Area Under the Curve (AUC), indicates strong agreement between mapped Very High/High risk zones and frequently flooded provinces, with the upper-threshold scenario yielding an AUC of 0.913. 
These findings support targeted resource allocation, mitigation planning, and disaster-risk reduction in data-scarce and conflict-affected mountain regions.</description>
	<pubDate>2026-05-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1411: Flood Susceptibility and Potential Flood Risk Assessment in Afghanistan Using Morphometric and Socioeconomic Indicators</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1411">doi: 10.3390/rs18091411</a></p>
	<p>Authors:
		Qutbudin Ishanch
		Kanchan Mishra
		Christiane Zarfl
		Kathryn E. Fitzsimmons
		</p>
	<p>Afghanistan is highly vulnerable to climate-driven extremes because of its combination of rugged geography and socio-political instability. Frequent events of extreme precipitation, floods, and droughts pose severe socio-economic and environmental challenges. Floods are particularly destructive, yet national-scale potential flood risk in Afghanistan has not been systematically assessed, largely due to limited data and field access. This study addresses this gap by mapping flood susceptibility, vulnerability, and risk using remote sensing (RS) and geographic information systems (GIS) at both subbasin and provincial scales. We apply a hybrid approach that combines Principal Component Analysis (PCA) to identify key environmental, climatic, and socio-economic indicators with the Analytic Hierarchy Process (AHP) to derive consistent weights and reduce subjectivity in decision-making. The results show that the eastern and northeastern subbasins, especially within the Panj-Amu and Kabul River basins, have the highest flood susceptibility due to intense precipitation, steep terrain, and efficient drainage. Vulnerability increases in the densely populated northern and northeastern provinces, where land-use change and socio-economic constraints elevate flood-related impacts. Overall, 31% and 20% of study areas are classified as Very High and High vulnerability zones, respectively. The composite potential flood-risk index identifies that approximately 24% and 22% of Afghanistan fall within Very High and High flood risk zones, concentrated in the northern and eastern provinces. Model performance, evaluated using Receiver Operating Characteristic (ROC) curves and Area Under the Curve (AUC), indicates strong agreement between mapped Very High/High risk zones and frequently flooded provinces, with the upper-threshold scenario yielding an AUC of 0.913. 
These findings support targeted resource allocation, mitigation planning, and disaster-risk reduction in data-scarce and conflict-affected mountain regions.</p>
	]]></content:encoded>

	<dc:title>Flood Susceptibility and Potential Flood Risk Assessment in Afghanistan Using Morphometric and Socioeconomic Indicators</dc:title>
			<dc:creator>Qutbudin Ishanch</dc:creator>
			<dc:creator>Kanchan Mishra</dc:creator>
			<dc:creator>Christiane Zarfl</dc:creator>
			<dc:creator>Kathryn E. Fitzsimmons</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091411</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-05-02</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-05-02</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1411</prism:startingPage>
		<prism:doi>10.3390/rs18091411</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1411</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1413">

	<title>Remote Sensing, Vol. 18, Pages 1413: MMDFRNet: Dynamic Cross-Modal Decoupling and Alignment for Robust Rice Mapping</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1413</link>
	<description>Accurate rice mapping is critical for grain yield estimation and food security, yet traditional methods often struggle with asynchronous data quality and the inherent statistical gap between SAR and optical signals. To bridge this gap, we propose MMDFRNet, a novel multi-modal deep learning framework that synergistically integrates Sentinel-1 SAR and Sentinel-2 optical imagery. Unlike conventional static fusion approaches, MMDFRNet features a dual-stream modality-specific encoder architecture designed to decouple structural backscattering signals from spectral reflectance. Central to this framework is the multi-modal feature fusion (MMF) module, which employs an adaptive attention mechanism to dynamically align and recalibrate features based on their reliability, effectively mitigating noise from compromised modalities. Additionally, a multi-scale feature fusion (MSF) module is incorporated to coordinate hierarchical semantic information, enhancing boundary delineation in fragmented landscapes. Extensive experiments conducted across multiple study areas in China demonstrate the superiority of MMDFRNet. The model achieves a Precision of 0.9234, an IoU of 0.8612, and an F1-score of 0.9252. Notably, it consistently outperforms state-of-the-art benchmarks (e.g., UNetFormer, STMA, and CCRNet) by margins of up to 11.72% (Precision) and 7.39% (IoU) compared to classic baselines. Furthermore, rigorous ablation studies and degradation analyses confirm the model&amp;amp;rsquo;s robustness, verifying its ability to transform the degradation paradox into a performance booster through pixel-wise adaptive alignment. Consequently, MMDFRNet offers a promising solution for precise rice area statistics and long-term monitoring in complex agricultural landscapes.</description>
	<pubDate>2026-05-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1413: MMDFRNet: Dynamic Cross-Modal Decoupling and Alignment for Robust Rice Mapping</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1413">doi: 10.3390/rs18091413</a></p>
	<p>Authors:
		Tingyan Fu
		Jia Ge
		Shufang Tian
		</p>
	<p>Accurate rice mapping is critical for grain yield estimation and food security, yet traditional methods often struggle with asynchronous data quality and the inherent statistical gap between SAR and optical signals. To bridge this gap, we propose MMDFRNet, a novel multi-modal deep learning framework that synergistically integrates Sentinel-1 SAR and Sentinel-2 optical imagery. Unlike conventional static fusion approaches, MMDFRNet features a dual-stream modality-specific encoder architecture designed to decouple structural backscattering signals from spectral reflectance. Central to this framework is the multi-modal feature fusion (MMF) module, which employs an adaptive attention mechanism to dynamically align and recalibrate features based on their reliability, effectively mitigating noise from compromised modalities. Additionally, a multi-scale feature fusion (MSF) module is incorporated to coordinate hierarchical semantic information, enhancing boundary delineation in fragmented landscapes. Extensive experiments conducted across multiple study areas in China demonstrate the superiority of MMDFRNet. The model achieves a Precision of 0.9234, an IoU of 0.8612, and an F1-score of 0.9252. Notably, it consistently outperforms state-of-the-art benchmarks (e.g., UNetFormer, STMA, and CCRNet) by margins of up to 11.72% (Precision) and 7.39% (IoU) compared to classic baselines. Furthermore, rigorous ablation studies and degradation analyses confirm the model&amp;amp;rsquo;s robustness, verifying its ability to transform the degradation paradox into a performance booster through pixel-wise adaptive alignment. Consequently, MMDFRNet offers a promising solution for precise rice area statistics and long-term monitoring in complex agricultural landscapes.</p>
	]]></content:encoded>

	<dc:title>MMDFRNet: Dynamic Cross-Modal Decoupling and Alignment for Robust Rice Mapping</dc:title>
			<dc:creator>Tingyan Fu</dc:creator>
			<dc:creator>Jia Ge</dc:creator>
			<dc:creator>Shufang Tian</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091413</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-05-02</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-05-02</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1413</prism:startingPage>
		<prism:doi>10.3390/rs18091413</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1413</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1412">

	<title>Remote Sensing, Vol. 18, Pages 1412: Spatiotemporal Coupling and Driving Mechanisms Between Ecological Quality and Vegetation Carbon Sink&amp;ndash;Source Dynamics on the Loess Plateau, China</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1412</link>
	<description>Against the backdrop of global climate change and the &amp;amp;ldquo;carbon neutrality&amp;amp;rdquo; target, the ecological quality improvement of the Loess Plateau&amp;amp;mdash;a key region for ecological restoration in China&amp;amp;mdash;and its impact on vegetation carbon sources hold significant importance for regional carbon balance and ecological security. Based on MODIS and meteorological reanalysis data from 2002 to 2024, this study constructed the Remote Sensing Ecological Index (RSEI). Combined with a carbon source/sink model, it systematically assessed the spatiotemporal coupling evolution characteristics of ecological environment quality and vegetation carbon storage capacity in the Loess Plateau, and explored the synergistic driving mechanisms of major hydrothermal and surface factors. The results indicate the following: (1) From 2002 to 2024, the ecological environment of the Loess Plateau improved significantly, with the RSEI rising from moderate to good. This improvement was accompanied by a marked decrease in surface dryness, an increase in surface wetness, and notable growth in vegetation cover, revealing a positive coupling relationship characterized by &amp;amp;ldquo;reduced surface dryness&amp;amp;mdash;increased surface wetness&amp;amp;mdash;enhanced vegetation restoration.&amp;amp;rdquo; (2) Regional vegetation carbon storage capacity strengthened markedly. Gross Primary Productivity (GPP), Net Primary Productivity (NPP), and Net Ecosystem Productivity (NEP) all showed significant increasing trends, and the proportion of area classified as carbon sink increased substantially. (3) Spatially, carbon sink distribution exhibited a pattern of &amp;amp;ldquo;higher in the southeast, lower in the northwest.&amp;amp;rdquo; Sub-regions A and D were identified as core areas with higher ecological quality and carbon sink capacity, whereas sub-regions B and C were more ecologically fragile and served as primary carbon source areas. 
(4) The implementation of soil and water conservation measures on the Loess Plateau has effectively enhanced regional carbon storage capacity. Vegetation restoration, improved water conditions, and reduced surface dryness have jointly driven the transition of the Loess Plateau ecosystem from a &amp;amp;ldquo;vulnerable type&amp;amp;rdquo; to a &amp;amp;ldquo;recovering type&amp;amp;rdquo;, while ecological restoration projects have played a certain role in enhancing the carbon sink. This study provides a theoretical basis and scientific&amp;amp;ndash;technological support for ecological protection and high-quality development in the Yellow River Basin.</description>
	<pubDate>2026-05-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1412: Spatiotemporal Coupling and Driving Mechanisms Between Ecological Quality and Vegetation Carbon Sink&amp;ndash;Source Dynamics on the Loess Plateau, China</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1412">doi: 10.3390/rs18091412</a></p>
	<p>Authors:
		Yanyun Xiang
		Qifei Zhang
		Yang Lu
		Yunfang Li
		</p>
	<p>Against the backdrop of global climate change and the &amp;amp;ldquo;carbon neutrality&amp;amp;rdquo; target, the ecological quality improvement of the Loess Plateau&amp;amp;mdash;a key region for ecological restoration in China&amp;amp;mdash;and its impact on vegetation carbon sources hold significant importance for regional carbon balance and ecological security. Based on MODIS and meteorological reanalysis data from 2002 to 2024, this study constructed the Remote Sensing Ecological Index (RSEI). Combined with a carbon source/sink model, it systematically assessed the spatiotemporal coupling evolution characteristics of ecological environment quality and vegetation carbon storage capacity in the Loess Plateau, and explored the synergistic driving mechanisms of major hydrothermal and surface factors. The results indicate the following: (1) From 2002 to 2024, the ecological environment of the Loess Plateau improved significantly, with the RSEI rising from moderate to good. This improvement was accompanied by a marked decrease in surface dryness, an increase in surface wetness, and notable growth in vegetation cover, revealing a positive coupling relationship characterized by &amp;amp;ldquo;reduced surface dryness&amp;amp;mdash;increased surface wetness&amp;amp;mdash;enhanced vegetation restoration.&amp;amp;rdquo; (2) Regional vegetation carbon storage capacity strengthened markedly. Gross Primary Productivity (GPP), Net Primary Productivity (NPP), and Net Ecosystem Productivity (NEP) all showed significant increasing trends, and the proportion of area classified as carbon sink increased substantially. (3) Spatially, carbon sink distribution exhibited a pattern of &amp;amp;ldquo;higher in the southeast, lower in the northwest.&amp;amp;rdquo; Sub-regions A and D were identified as core areas with higher ecological quality and carbon sink capacity, whereas sub-regions B and C were more ecologically fragile and served as primary carbon source areas. 
(4) The implementation of soil and water conservation measures on the Loess Plateau has effectively enhanced regional carbon storage capacity. Vegetation restoration, improved water conditions, and reduced surface dryness have jointly driven the transition of the Loess Plateau ecosystem from a &amp;amp;ldquo;vulnerable type&amp;amp;rdquo; to a &amp;amp;ldquo;recovering type&amp;amp;rdquo;, while ecological restoration projects have played a certain role in enhancing the carbon sink. This study provides a theoretical basis and scientific&amp;amp;ndash;technological support for ecological protection and high-quality development in the Yellow River Basin.</p>
	]]></content:encoded>

	<dc:title>Spatiotemporal Coupling and Driving Mechanisms Between Ecological Quality and Vegetation Carbon Sink&amp;ndash;Source Dynamics on the Loess Plateau, China</dc:title>
			<dc:creator>Yanyun Xiang</dc:creator>
			<dc:creator>Qifei Zhang</dc:creator>
			<dc:creator>Yang Lu</dc:creator>
			<dc:creator>Yunfang Li</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091412</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-05-02</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-05-02</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1412</prism:startingPage>
		<prism:doi>10.3390/rs18091412</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1412</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1409">

	<title>Remote Sensing, Vol. 18, Pages 1409: Fast Spatial Denoising of InSAR Interferograms via Empirical Statistical Modeling</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1409</link>
	<description>SAR interferometry (InSAR) provides a framework for extracting high-resolution topographic information and detecting surface deformation. By analyzing the phase difference between radar acquisitions obtained at different times, one can characterize landscape geometry and surface changes. However, inherent phase noise often compromises the reliability of the resulting interferometric products. Consequently, there is a sustained need for spatial filtering techniques that suppress noise while preserving structural integrity and resolution. This work addresses the challenge of filtering the unwrapped phase, a process traditionally reliant on accurate coherence images to identify reliable pixels. We evaluate three statistically based spatial filters for phase noise reduction. The Enhanced Lee filter, which utilizes spatial adaptation and a physically grounded probability model, serves as the baseline for comparison. We examine the Gierull model, which improves computational efficiency by restricting the parameter space. To further reduce execution time, we propose and evaluate two empirical alternatives: the truncated wrapped normal (TcN) and the truncated wrapped Cauchy (TcC) distributions. Results indicate that these empirical models significantly reduce computational demand without degrading the quality of the filtered phase. We assess performance using a simulated dataset for objective validation alongside InSAR imagery of La Cumbre volcano, Los Alamos, and Robledo volcano. While the proposed models demonstrate significant gains in computational efficiency compared to current methods, we identify numerical integration as a primary bottleneck in the filtering process; this challenge warrants further investigation. Our results indicate that empirical statistical models provide a viable path for accelerated InSAR processing with accuracy equivalent to traditional, computationally intensive approaches.</description>
	<pubDate>2026-05-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1409: Fast Spatial Denoising of InSAR Interferograms via Empirical Statistical Modeling</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1409">doi: 10.3390/rs18091409</a></p>
	<p>Authors:
		Anderson A. De Borba
		Joselito E. De Araújo
		Alejandro C. Frery
		</p>
	<p>SAR interferometry (InSAR) provides a framework for extracting high-resolution topographic information and detecting surface deformation. By analyzing the phase difference between radar acquisitions obtained at different times, one can characterize landscape geometry and surface changes. However, inherent phase noise often compromises the reliability of the resulting interferometric products. Consequently, there is a sustained need for spatial filtering techniques that suppress noise while preserving structural integrity and resolution. This work addresses the challenge of filtering the unwrapped phase, a process traditionally reliant on accurate coherence images to identify reliable pixels. We evaluate three statistically based spatial filters for phase noise reduction. The Enhanced Lee filter, which utilizes spatial adaptation and a physically grounded probability model, serves as the baseline for comparison. We examine the Gierull model, which improves computational efficiency by restricting the parameter space. To further reduce execution time, we propose and evaluate two empirical alternatives: the truncated wrapped normal (TcN) and the truncated wrapped Cauchy (TcC) distributions. Results indicate that these empirical models significantly reduce computational demand without degrading the quality of the filtered phase. We assess performance using a simulated dataset for objective validation alongside InSAR imagery of La Cumbre volcano, Los Alamos, and Robledo volcano. While the proposed models demonstrate significant gains in computational efficiency compared to current methods, we identify numerical integration as a primary bottleneck in the filtering process; this challenge warrants further investigation. Our results indicate that empirical statistical models provide a viable path for accelerated InSAR processing with accuracy equivalent to traditional, computationally intensive approaches.</p>
	]]></content:encoded>

	<dc:title>Fast Spatial Denoising of InSAR Interferograms via Empirical Statistical Modeling</dc:title>
			<dc:creator>Anderson A. De Borba</dc:creator>
			<dc:creator>Joselito E. De Araújo</dc:creator>
			<dc:creator>Alejandro C. Frery</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091409</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-05-02</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-05-02</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1409</prism:startingPage>
		<prism:doi>10.3390/rs18091409</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1409</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1410">

	<title>Remote Sensing, Vol. 18, Pages 1410: Detecting the Response of Column Carbon Dioxide Concentration to Anthropogenic Emissions Using the OCO Series Satellites</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1410</link>
	<description>Quantifying anthropogenic CO2 increments is vital for assessing emission reductions. Using a seamless XCO2 dataset over China reconstructed from OCO-2/3 satellite retrievals and machine learning, combined with EOF decomposition and LISA analysis, this study investigates XCO2 anomalies and local anthropogenic increments (dXCO2) at national and urban agglomeration scales. Nationally, XCO2 anomalies exhibit a &amp;amp;ldquo;southeast positive, northwest negative&amp;amp;rdquo; spatial pattern aligning with human activities and a &amp;amp;ldquo;winter high, summer low&amp;amp;rdquo; seasonal cycle. EOF analysis reveals four dominant modes: anthropogenic&amp;amp;ndash;natural trade-offs, East Asian summer monsoon modulation, local emissions, and baseline context. At the regional scale, multi-year mean dXCO2 (2015&amp;amp;ndash;2019) in Beijing&amp;amp;ndash;Tianjin&amp;amp;ndash;Hebei (BTH), Yangtze River Delta (YRD), and Pearl River Delta (PRD) are 3.46 &amp;amp;plusmn; 0.45 ppm, 1.30 &amp;amp;plusmn; 0.36 ppm, and 0.08 &amp;amp;plusmn; 0.14 ppm, respectively, showing higher values in northern heavy industrial zones. During the 2020&amp;amp;ndash;2022 pandemic, dXCO2 decreased in BTH (2.28 &amp;amp;plusmn; 0.73 ppm) and YRD (1.16 &amp;amp;plusmn; 0.43 ppm) but increased in PRD (0.28 &amp;amp;plusmn; 0.27 ppm). Compared to pre-pandemic levels, lockdowns saw dXCO2 decrease slightly in YRD while increasing in BTH and PRD, reflecting differential responses of regional industrial structures. This study demonstrates the potential of seamless XCO2 data for monitoring anthropogenic enhancement signals, and the proposed LISA-based method offers new support for regionally differentiated emission reduction assessments.</description>
	<pubDate>2026-05-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1410: Detecting the Response of Column Carbon Dioxide Concentration to Anthropogenic Emissions Using the OCO Series Satellites</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1410">doi: 10.3390/rs18091410</a></p>
	<p>Authors:
		Wenkai Zhang
		Xi Chen
		Li Duan
		Xiuwei Xing
		Shiran Song
		Qian Zhou
		</p>
	<p>Quantifying anthropogenic CO2 increments is vital for assessing emission reductions. Using a seamless XCO2 dataset over China reconstructed from OCO-2/3 satellite retrievals and machine learning, combined with EOF decomposition and LISA analysis, this study investigates XCO2 anomalies and local anthropogenic increments (dXCO2) at national and urban agglomeration scales. Nationally, XCO2 anomalies exhibit a &amp;amp;ldquo;southeast positive, northwest negative&amp;amp;rdquo; spatial pattern aligning with human activities and a &amp;amp;ldquo;winter high, summer low&amp;amp;rdquo; seasonal cycle. EOF analysis reveals four dominant modes: anthropogenic&amp;amp;ndash;natural trade-offs, East Asian summer monsoon modulation, local emissions, and baseline context. At the regional scale, multi-year mean dXCO2 (2015&amp;amp;ndash;2019) in Beijing&amp;amp;ndash;Tianjin&amp;amp;ndash;Hebei (BTH), Yangtze River Delta (YRD), and Pearl River Delta (PRD) are 3.46 &amp;amp;plusmn; 0.45 ppm, 1.30 &amp;amp;plusmn; 0.36 ppm, and 0.08 &amp;amp;plusmn; 0.14 ppm, respectively, showing higher values in northern heavy industrial zones. During the 2020&amp;amp;ndash;2022 pandemic, dXCO2 decreased in BTH (2.28 &amp;amp;plusmn; 0.73 ppm) and YRD (1.16 &amp;amp;plusmn; 0.43 ppm) but increased in PRD (0.28 &amp;amp;plusmn; 0.27 ppm). Compared to pre-pandemic levels, lockdowns saw dXCO2 decrease slightly in YRD while increasing in BTH and PRD, reflecting differential responses of regional industrial structures. This study demonstrates the potential of seamless XCO2 data for monitoring anthropogenic enhancement signals, and the proposed LISA-based method offers new support for regionally differentiated emission reduction assessments.</p>
	]]></content:encoded>

	<dc:title>Detecting the Response of Column Carbon Dioxide Concentration to Anthropogenic Emissions Using the OCO Series Satellites</dc:title>
			<dc:creator>Wenkai Zhang</dc:creator>
			<dc:creator>Xi Chen</dc:creator>
			<dc:creator>Li Duan</dc:creator>
			<dc:creator>Xiuwei Xing</dc:creator>
			<dc:creator>Shiran Song</dc:creator>
			<dc:creator>Qian Zhou</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091410</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-05-02</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-05-02</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1410</prism:startingPage>
		<prism:doi>10.3390/rs18091410</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1410</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1408">

	<title>Remote Sensing, Vol. 18, Pages 1408: A Novel Dense Image Matching Point Cloud Filtering Algorithm Integrating Visible Light and Progressive Triangulated Irregular Network Densification for High-Accuracy Mining Subsidence Monitoring</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1408</link>
	<description>Effective monitoring of surface damage in mining areas is vital for ecological restoration. Unmanned aerial vehicles (UAVs) have been widely used to obtain ground subsidence data owing to their low cost and ease of operation. The images captured by UAVs can generate dense image matching (DIM) point clouds, which, after screening, can be used to create a digital elevation model (DEM) required for deformation analysis. Existing filtering algorithms mainly rely on the spatial geometric features of point clouds and rarely utilize color information, which limits their accuracy in areas with vegetation coverage. To address this issue, this study proposes a H-PTD method that combines visible light with progressive triangulated irregular network densification (PTD). First, initial ground seeds are selected based on the H value in the HSV space. Subsequently, a triangulated irregular network (TIN) is constructed, and iterative densification is performed by evaluating the relationship between the target point and adjacent triangular faces, thereby achieving an accurate distinction between ground and non-ground. Evaluated on three terrain datasets and against five classical methods, the results indicate that the Total error in the H-PTD cross-matrix is controlled between 2.9% and 7.8%, and remains below 8% overall. The standard deviation of the DEM difference is around 0.02 m. Compared to other methods, H-PTD shows higher filtering accuracy and better terrain adaptability, making it more promising for monitoring mining areas and providing a more reliable tool for subsidence detection based on UAVs.</description>
	<pubDate>2026-05-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1408: A Novel Dense Image Matching Point Cloud Filtering Algorithm Integrating Visible Light and Progressive Triangulated Irregular Network Densification for High-Accuracy Mining Subsidence Monitoring</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1408">doi: 10.3390/rs18091408</a></p>
	<p>Authors:
		Mingmei Zhang
		Yibo He
		Zhenqi Hu
		Rui Wang
		Dawei Zhou
		</p>
	<p>Effective monitoring of surface damage in mining areas is vital for ecological restoration. Unmanned aerial vehicles (UAVs) have been widely used to obtain ground subsidence data owing to their low cost and ease of operation. The images captured by UAVs can generate dense image matching (DIM) point clouds, which, after screening, can be used to create a digital elevation model (DEM) required for deformation analysis. Existing filtering algorithms mainly rely on the spatial geometric features of point clouds and rarely utilize color information, which limits their accuracy in areas with vegetation coverage. To address this issue, this study proposes a H-PTD method that combines visible light with progressive triangulated irregular network densification (PTD). First, initial ground seeds are selected based on the H value in the HSV space. Subsequently, a triangulated irregular network (TIN) is constructed, and iterative densification is performed by evaluating the relationship between the target point and adjacent triangular faces, thereby achieving an accurate distinction between ground and non-ground. Evaluated on three terrain datasets and against five classical methods, the results indicate that the Total error in the H-PTD cross-matrix is controlled between 2.9% and 7.8%, and remains below 8% overall. The standard deviation of the DEM difference is around 0.02 m. Compared to other methods, H-PTD shows higher filtering accuracy and better terrain adaptability, making it more promising for monitoring mining areas and providing a more reliable tool for subsidence detection based on UAVs.</p>
	]]></content:encoded>

	<dc:title>A Novel Dense Image Matching Point Cloud Filtering Algorithm Integrating Visible Light and Progressive Triangulated Irregular Network Densification for High-Accuracy Mining Subsidence Monitoring</dc:title>
			<dc:creator>Mingmei Zhang</dc:creator>
			<dc:creator>Yibo He</dc:creator>
			<dc:creator>Zhenqi Hu</dc:creator>
			<dc:creator>Rui Wang</dc:creator>
			<dc:creator>Dawei Zhou</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091408</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-05-02</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-05-02</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1408</prism:startingPage>
		<prism:doi>10.3390/rs18091408</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1408</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1406">

	<title>Remote Sensing, Vol. 18, Pages 1406: Towards Balanced Supervision: Cumulative Quality-Based Dynamic Assignment for Fine-Grained Remote Sensing Object Detection</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1406</link>
	<description>Fine-grained object detection (FGOD) is crucial for identifying visually similar sub-categories in remote sensing imagery. However, existing detectors suffer from severe supervision imbalance because static label assignment strategies assign a fixed number of positive samples to all sub-categories and targets. To address this challenge, this paper presents Cumulative Quality-based Dynamic Assignment (CQDA), a fine-grained aware label assignment algorithm that dynamically calculates the optimal positive budget for each instance based on its cumulative alignment quality. Moreover, to further resolve feature-space confusion, this paper introduces two modules: a frequency-decoupled enhancement algorithm to sharpen discriminative features, and an orthogonal classification head to maximize inter-class separability. Integrated into the KFIoU framework, extensive experiments demonstrate that the proposed method consistently achieves performance improvements of 4.2, 15.8, and 35.3 in mAP@0.5 on the fine-grained oriented object detection datasets FAIR1M-v2, MAR20, and ShipRSImageNet, respectively.</description>
	<pubDate>2026-05-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1406: Towards Balanced Supervision: Cumulative Quality-Based Dynamic Assignment for Fine-Grained Remote Sensing Object Detection</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1406">doi: 10.3390/rs18091406</a></p>
	<p>Authors:
		Yida Pan
		Haoran Zhu
		Zijuan Chen
		Guangyou Yang
		Wen Yang
		</p>
	<p>Fine-grained object detection (FGOD) is crucial for identifying visually similar sub-categories in remote sensing imagery. However, existing detectors suffer from severe supervision imbalance because static label assignment strategies assign a fixed number of positive samples to all sub-categories and targets. To address this challenge, this paper presents Cumulative Quality-based Dynamic Assignment (CQDA), a fine-grained aware label assignment algorithm that dynamically calculates the optimal positive budget for each instance based on its cumulative alignment quality. Moreover, to further resolve feature-space confusion, this paper introduces two modules: a frequency-decoupled enhancement algorithm to sharpen discriminative features, and an orthogonal classification head to maximize inter-class separability. Integrated into the KFIoU framework, extensive experiments demonstrate that the proposed method consistently achieves performance improvements of 4.2, 15.8, and 35.3 in mAP@0.5 on the fine-grained oriented object detection datasets FAIR1M-v2, MAR20, and ShipRSImageNet, respectively.</p>
	]]></content:encoded>

	<dc:title>Towards Balanced Supervision: Cumulative Quality-Based Dynamic Assignment for Fine-Grained Remote Sensing Object Detection</dc:title>
			<dc:creator>Yida Pan</dc:creator>
			<dc:creator>Haoran Zhu</dc:creator>
			<dc:creator>Zijuan Chen</dc:creator>
			<dc:creator>Guangyou Yang</dc:creator>
			<dc:creator>Wen Yang</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091406</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-05-02</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-05-02</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1406</prism:startingPage>
		<prism:doi>10.3390/rs18091406</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1406</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1407">

	<title>Remote Sensing, Vol. 18, Pages 1407: Indoor UAV Localization via Multi-Anchor One-Shot Calibration and Factor Graph Fusion</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1407</link>
	<description>Indoor localization for unmanned aerial vehicles (UAVs) remains challenging in GNSS-denied environments due to the difficulty of position calibration of multiple ultra-wideband (UWB) anchors and the asynchronous fusion of heterogeneous sensors. This paper proposes a multi-sensor fusion localization framework that integrates multi-anchor one-shot calibration with factor graph optimization (FGO). First, Landmark Multidimensional Scaling (LMDS) is used to reconstruct the relative geometry of the anchors and the onboard tag from ranging measurements. Then, rigid Procrustes alignment is performed using a small number of anchors with known coordinates in the East–North–Up (ENU) frame to recover the transformation to the ENU frame, thereby enabling efficient position calibration of multiple UWB anchors and UAV pose initialization. Subsequently, a tightly coupled factor graph is constructed by incorporating inertial measurement unit (IMU) pre-integration, UWB ranging, laser rangefinder height measurements, and visual–inertial odometry (VIO) pose constraints. The resulting nonlinear optimization problem is solved using incremental smoothing, which improves robustness against non-line-of-sight (NLOS) errors and long-term drift. Experimental results on anchor calibration, public datasets, and real-world indoor UAV flights demonstrate that the proposed method improves the accuracy and robustness of indoor UAV localization. In particular, on the real-world rectangle trajectory, FGO-TC reduces the RMSE by approximately 38.8% compared with FGO-LC.</description>
	<pubDate>2026-05-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1407: Indoor UAV Localization via Multi-Anchor One-Shot Calibration and Factor Graph Fusion</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1407">doi: 10.3390/rs18091407</a></p>
	<p>Authors:
		Jianmin Zhao
		Zhongliang Deng
		Wenju Su
		Boyang Lou
		Yanxu Liu
		</p>
	<p>Indoor localization for unmanned aerial vehicles (UAVs) remains challenging in GNSS-denied environments due to the difficulty of position calibration of multiple ultra-wideband (UWB) anchors and the asynchronous fusion of heterogeneous sensors. This paper proposes a multi-sensor fusion localization framework that integrates multi-anchor one-shot calibration with factor graph optimization (FGO). First, Landmark Multidimensional Scaling (LMDS) is used to reconstruct the relative geometry of the anchors and the onboard tag from ranging measurements. Then, rigid Procrustes alignment is performed using a small number of anchors with known coordinates in the East&ndash;North&ndash;Up (ENU) frame to recover the transformation to the ENU frame, thereby enabling efficient position calibration of multiple UWB anchors and UAV pose initialization. Subsequently, a tightly coupled factor graph is constructed by incorporating inertial measurement unit (IMU) pre-integration, UWB ranging, laser rangefinder height measurements, and visual&ndash;inertial odometry (VIO) pose constraints. The resulting nonlinear optimization problem is solved using incremental smoothing, which improves robustness against non-line-of-sight (NLOS) errors and long-term drift. Experimental results on anchor calibration, public datasets, and real-world indoor UAV flights demonstrate that the proposed method improves the accuracy and robustness of indoor UAV localization. In particular, on the real-world rectangle trajectory, FGO-TC reduces the RMSE by approximately 38.8% compared with FGO-LC.</p>
	]]></content:encoded>

	<dc:title>Indoor UAV Localization via Multi-Anchor One-Shot Calibration and Factor Graph Fusion</dc:title>
			<dc:creator>Jianmin Zhao</dc:creator>
			<dc:creator>Zhongliang Deng</dc:creator>
			<dc:creator>Wenju Su</dc:creator>
			<dc:creator>Boyang Lou</dc:creator>
			<dc:creator>Yanxu Liu</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091407</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-05-02</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-05-02</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1407</prism:startingPage>
		<prism:doi>10.3390/rs18091407</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1407</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1405">

	<title>Remote Sensing, Vol. 18, Pages 1405: Short-to-Medium Term Ocean Wind Speed Prediction via Sparse Grid Dynamic Spatial Modeling and DAI-LSTM-AT Hybrid Framework</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1405</link>
	<description>This study addresses the critical need for accurate sea wind speed predictions to support ocean wind farm operations, equipment maintenance, and maritime navigation safety. To enhance prediction accuracy for any location within target sea areas, we propose a short-to-medium-term wind speed prediction method that effectively explores spatiotemporal correlations in ocean reanalysis grid data. The method involves collecting and reanalyzing data, as well as spatial processing, to reconstruct the historical wind speed sequence at the target point. Finally, a future wind speed time series is generated using an LSTM network and a Transformer encoder. Test results validated against NOAA buoy data demonstrate the effectiveness of our spatiotemporal prediction model, achieving RMSE values of 1.161 m/s, 1.500 m/s, and 1.854 m/s for 1 h, 6 h, and 12 h predictions, respectively, outperforming comparative methods. The conclusions are threefold: (1) The proposed hybrid model effectively captures spatiotemporal dependencies and achieves more accurate spatiotemporal predictions compared to the benchmark model; (2) taking into account seasonal factors and forecasting time periods, the method proposed in this paper maintains good stability; (3) this framework provides a reliable technical approach for generating operational references in maritime navigation and wind power maintenance, with potential applications in wind farm siting and resource assessment.</description>
	<pubDate>2026-05-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1405: Short-to-Medium Term Ocean Wind Speed Prediction via Sparse Grid Dynamic Spatial Modeling and DAI-LSTM-AT Hybrid Framework</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1405">doi: 10.3390/rs18091405</a></p>
	<p>Authors:
		Qiaoying Guo
		Rengyu Chen
		Dibo Dong
		Feiyu Feng
		Qian Sun
		Liqiao Ning
		Xiaojie Xie
		Jinlin Li
		</p>
	<p>This study addresses the critical need for accurate sea wind speed predictions to support ocean wind farm operations, equipment maintenance, and maritime navigation safety. To enhance prediction accuracy for any location within target sea areas, we propose a short-to-medium-term wind speed prediction method that effectively explores spatiotemporal correlations in ocean reanalysis grid data. The method involves collecting and reanalyzing data, as well as spatial processing, to reconstruct the historical wind speed sequence at the target point. Finally, a future wind speed time series is generated using an LSTM network and a Transformer encoder. Test results validated against NOAA buoy data demonstrate the effectiveness of our spatiotemporal prediction model, achieving RMSE values of 1.161 m/s, 1.500 m/s, and 1.854 m/s for 1 h, 6 h, and 12 h predictions, respectively, outperforming comparative methods. The conclusions are threefold: (1) The proposed hybrid model effectively captures spatiotemporal dependencies and achieves more accurate spatiotemporal predictions compared to the benchmark model; (2) taking into account seasonal factors and forecasting time periods, the method proposed in this paper maintains good stability; (3) this framework provides a reliable technical approach for generating operational references in maritime navigation and wind power maintenance, with potential applications in wind farm siting and resource assessment.</p>
	]]></content:encoded>

	<dc:title>Short-to-Medium Term Ocean Wind Speed Prediction via Sparse Grid Dynamic Spatial Modeling and DAI-LSTM-AT Hybrid Framework</dc:title>
			<dc:creator>Qiaoying Guo</dc:creator>
			<dc:creator>Rengyu Chen</dc:creator>
			<dc:creator>Dibo Dong</dc:creator>
			<dc:creator>Feiyu Feng</dc:creator>
			<dc:creator>Qian Sun</dc:creator>
			<dc:creator>Liqiao Ning</dc:creator>
			<dc:creator>Xiaojie Xie</dc:creator>
			<dc:creator>Jinlin Li</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091405</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-05-02</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-05-02</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1405</prism:startingPage>
		<prism:doi>10.3390/rs18091405</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1405</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1403">

	<title>Remote Sensing, Vol. 18, Pages 1403: Fully Automated Wind Site Assessment in Complex Terrain Using Satellite Data and Global Circulation Models</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1403</link>
	<description>A globally applicable and fully automated simulation method based on satellite-derived Earth Observation (EO) data and global circulation models was developed and validated. Inputs to the simulation are DSM/DTM layers, surface roughness layer, forest canopy layer, and single-level point data from the European Centre for Medium-Range Weather Forecasts fifth-generation ECMWF reanalysis (ECMWF ERA5, a global circulation model produced by the Copernicus Climate Change Service (C3S)). High-resolution roughness length maps are produced by deep learning from optical satellite data. Velocity fields are predicted by fluid dynamics simulations in OpenFOAM using the IDDES turbulence model, a 3D resolved tree canopy implemented as isotropic momentum sinks, and a corrector step based on sub-grid-scale dynamic downscaling of ERA5 data. No calibration data from wind measurements close to the target are necessary to achieve results accurate enough for site assessments and wind park planning. The presented method is suitable for the prediction of average wind speeds and average power densities in complex terrain with high ruggedness indices for WEC (wind energy converter) installations closer to the ground and at hub heights of typical large-scale WECs.</description>
	<pubDate>2026-05-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1403: Fully Automated Wind Site Assessment in Complex Terrain Using Satellite Data and Global Circulation Models</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1403">doi: 10.3390/rs18091403</a></p>
	<p>Authors:
		Andras Horvath
		Karlheinz Gutjahr
		Christian Kuttner
		Katharina Hofer-Schmitz
		Roland Perko
		</p>
	<p>A globally applicable and fully automated simulation method based on satellite-derived Earth Observation (EO) data and global circulation models was developed and validated. Inputs to the simulation are DSM/DTM layers, surface roughness layer, forest canopy layer, and single-level point data from the European Centre for Medium-Range Weather Forecasts fifth-generation ECMWF reanalysis (ECMWF ERA5, a global circulation model produced by the Copernicus Climate Change Service (C3S)). High-resolution roughness length maps are produced by deep learning from optical satellite data. Velocity fields are predicted by fluid dynamics simulations in OpenFOAM using the IDDES turbulence model, a 3D resolved tree canopy implemented as isotropic momentum sinks, and a corrector step based on sub-grid-scale dynamic downscaling of ERA5 data. No calibration data from wind measurements close to the target are necessary to achieve results accurate enough for site assessments and wind park planning. The presented method is suitable for the prediction of average wind speeds and average power densities in complex terrain with high ruggedness indices for WEC (wind energy converter) installations closer to the ground and at hub heights of typical large-scale WECs.</p>
	]]></content:encoded>

	<dc:title>Fully Automated Wind Site Assessment in Complex Terrain Using Satellite Data and Global Circulation Models</dc:title>
			<dc:creator>Andras Horvath</dc:creator>
			<dc:creator>Karlheinz Gutjahr</dc:creator>
			<dc:creator>Christian Kuttner</dc:creator>
			<dc:creator>Katharina Hofer-Schmitz</dc:creator>
			<dc:creator>Roland Perko</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091403</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-05-01</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-05-01</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1403</prism:startingPage>
		<prism:doi>10.3390/rs18091403</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1403</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1400">

	<title>Remote Sensing, Vol. 18, Pages 1400: A 3D Gaussian Splatting Method with Deterministic Structure-Sensitive Adaptive Density Control for UAV Orthophoto Generation</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1400</link>
	<description>Unmanned Aerial Vehicle (UAV) orthophoto generation in complex environments remains challenging because weak textures, reflective surfaces, occlusions, and large scene extents can cause incomplete reconstruction, ghosting, and seam artifacts. Although 3D Gaussian Splatting (3DGS) offers an efficient explicit scene representation, its use in large-scale UAV orthophoto generation is limited by high memory consumption, unstable densification, and insufficient support for mapping-oriented orthographic rendering. This paper proposes a single-GPU 3DGS framework for UAV orthophoto generation by integrating adaptive spatial block partitioning, deterministic structure-sensitive adaptive density control, and core–buffer tiled orthographic rendering with weighted blending. The proposed framework decomposes large scenes into resource-bounded subregions, guides Gaussian densification using fixed multi-view neighborhoods and edge-enhanced dynamic consistency, and generates large-format orthophotos with reduced boundary and seam artifacts. Experiments on MatrixCity-S and multiple UAV photogrammetric datasets show that the method achieves competitive reconstruction quality and improved resource efficiency. On MatrixCity-S, it reaches 29.01 dB PSNR and 0.901 SSIM, while completing training in 1 h 49 min on a single NVIDIA RTX 3090 GPU. Compared with BlockGS, peak VRAM consumption is reduced by more than 38% across datasets. Under geo-aligned comparison conditions, line-measurement comparisons with MetaShape and Pix4DMapper yield RMSE values of 0.099 m and 0.087 m, respectively. These results demonstrate the potential of the proposed framework for memory-efficient 3DGS-based UAV orthophoto generation under constrained hardware resources, while further control-point-based validation is still needed for rigorous surveying-grade applications.</description>
	<pubDate>2026-05-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1400: A 3D Gaussian Splatting Method with Deterministic Structure-Sensitive Adaptive Density Control for UAV Orthophoto Generation</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1400">doi: 10.3390/rs18091400</a></p>
	<p>Authors:
		Ke Yan
		Hui Wang
		Zhuxin Li
		Yuting Wang
		Shuo Li
		Liyong Wang
		</p>
	<p>Unmanned Aerial Vehicle (UAV) orthophoto generation in complex environments remains challenging because weak textures, reflective surfaces, occlusions, and large scene extents can cause incomplete reconstruction, ghosting, and seam artifacts. Although 3D Gaussian Splatting (3DGS) offers an efficient explicit scene representation, its use in large-scale UAV orthophoto generation is limited by high memory consumption, unstable densification, and insufficient support for mapping-oriented orthographic rendering. This paper proposes a single-GPU 3DGS framework for UAV orthophoto generation by integrating adaptive spatial block partitioning, deterministic structure-sensitive adaptive density control, and core&ndash;buffer tiled orthographic rendering with weighted blending. The proposed framework decomposes large scenes into resource-bounded subregions, guides Gaussian densification using fixed multi-view neighborhoods and edge-enhanced dynamic consistency, and generates large-format orthophotos with reduced boundary and seam artifacts. Experiments on MatrixCity-S and multiple UAV photogrammetric datasets show that the method achieves competitive reconstruction quality and improved resource efficiency. On MatrixCity-S, it reaches 29.01 dB PSNR and 0.901 SSIM, while completing training in 1 h 49 min on a single NVIDIA RTX 3090 GPU. Compared with BlockGS, peak VRAM consumption is reduced by more than 38% across datasets. Under geo-aligned comparison conditions, line-measurement comparisons with MetaShape and Pix4DMapper yield RMSE values of 0.099 m and 0.087 m, respectively. These results demonstrate the potential of the proposed framework for memory-efficient 3DGS-based UAV orthophoto generation under constrained hardware resources, while further control-point-based validation is still needed for rigorous surveying-grade applications.</p>
	]]></content:encoded>

	<dc:title>A 3D Gaussian Splatting Method with Deterministic Structure-Sensitive Adaptive Density Control for UAV Orthophoto Generation</dc:title>
			<dc:creator>Ke Yan</dc:creator>
			<dc:creator>Hui Wang</dc:creator>
			<dc:creator>Zhuxin Li</dc:creator>
			<dc:creator>Yuting Wang</dc:creator>
			<dc:creator>Shuo Li</dc:creator>
			<dc:creator>Liyong Wang</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091400</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-05-01</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-05-01</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1400</prism:startingPage>
		<prism:doi>10.3390/rs18091400</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1400</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1404">

	<title>Remote Sensing, Vol. 18, Pages 1404: A Sequential Cooperative Inversion Framework of DC Resistivity and Frequency-Domain Electromagnetic Data to Enhance Subsurface Imaging in Geoscience and Engineering</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1404</link>
	<description>The characterisation of subsurface electrical resistivity is a fundamental requirement for geoscientific and engineering applications, including groundwater exploration and structural assessments. This study examines the sequential cooperative inversion of direct current resistivity and frequency-domain electromagnetic data and compares the results to the inverse models obtained from separate (individual) inversions of the datasets. The proposed cooperative framework is applied to both synthetic datasets generated through forward modelling and field data acquired at the Morgenzon Farm site, South Africa, to delineate a dolerite dyke of hydrogeological significance. Individual inversions identified distinct features but exhibit limitations: direct current resistivity highlights a two-layered medium with minor anomalies, while frequency-domain electromagnetic data identify a resistive anomaly. In contrast, the sequential cooperative inversion approach, which uses the output of one dataset to constrain the other, provides improved subsurface imaging results, reduces ambiguity, and enables the integration of complementary information from both methods. The results indicate that resistivity models constrained by inverse frequency-domain electromagnetic data provide improved representation of subsurface geometry and amplitude compared to individual approaches. These findings support the use of a non-destructive testing approach for improved subsurface imaging, facilitating better-informed decision-making in infrastructure projects and resource management.</description>
	<pubDate>2026-05-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1404: A Sequential Cooperative Inversion Framework of DC Resistivity and Frequency-Domain Electromagnetic Data to Enhance Subsurface Imaging in Geoscience and Engineering</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1404">doi: 10.3390/rs18091404</a></p>
	<p>Authors:
		Ramin Varfinezhad
		Saeed Parnow
		Francois Daniel Fourie
		Fabio Tosti
		</p>
	<p>The characterisation of subsurface electrical resistivity is a fundamental requirement for geoscientific and engineering applications, including groundwater exploration and structural assessments. This study examines the sequential cooperative inversion of direct current resistivity and frequency-domain electromagnetic data and compares the results to the inverse models obtained from separate (individual) inversions of the datasets. The proposed cooperative framework is applied to both synthetic datasets generated through forward modelling and field data acquired at the Morgenzon Farm site, South Africa, to delineate a dolerite dyke of hydrogeological significance. Individual inversions identified distinct features but exhibit limitations: direct current resistivity highlights a two-layered medium with minor anomalies, while frequency-domain electromagnetic data identify a resistive anomaly. In contrast, the sequential cooperative inversion approach, which uses the output of one dataset to constrain the other, provides improved subsurface imaging results, reduces ambiguity, and enables the integration of complementary information from both methods. The results indicate that resistivity models constrained by inverse frequency-domain electromagnetic data provide improved representation of subsurface geometry and amplitude compared to individual approaches. These findings support the use of a non-destructive testing approach for improved subsurface imaging, facilitating better-informed decision-making in infrastructure projects and resource management.</p>
	]]></content:encoded>

	<dc:title>A Sequential Cooperative Inversion Framework of DC Resistivity and Frequency-Domain Electromagnetic Data to Enhance Subsurface Imaging in Geoscience and Engineering</dc:title>
			<dc:creator>Ramin Varfinezhad</dc:creator>
			<dc:creator>Saeed Parnow</dc:creator>
			<dc:creator>Francois Daniel Fourie</dc:creator>
			<dc:creator>Fabio Tosti</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091404</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-05-01</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-05-01</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1404</prism:startingPage>
		<prism:doi>10.3390/rs18091404</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1404</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1402">

	<title>Remote Sensing, Vol. 18, Pages 1402: From Single-Look to Multi-Temporal SAR Despeckling: A Latent-Space Guided Transfer Learning Approach</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1402</link>
	<description>Synthetic Aperture Radar (SAR) images are affected by speckle noise, which limits their application in fine object interpretation and quantitative analysis. Recent deep learning-based single-image SAR despeckling methods have made significant progress in spatial structure modeling but struggle to exploit temporal redundancy in multi-temporal data. Existing multi-temporal despeckling methods usually rely on complex spatiotemporal network structures, which are prone to overfitting or excessive smoothing of details when training samples are limited. To address these challenges, this paper proposes a latent-space-guided multi-temporal SAR despeckling method from the perspective of transfer learning and representation alignment, achieving effective knowledge transfer from single-image SAR despeckling to multi-temporal despeckling tasks. The method treats the single-image SAR despeckling task as a knowledge source domain, using stable latent space representations learned from the pre-trained single-image despeckling model as prior constraints. A latent space regularization mechanism is introduced during the training of the multi-temporal despeckling model, thereby establishing an explicit representation bridge between the 2D spatial model and the 3D spatiotemporal model. With this strategy, the multi-temporal model inherits the structural perception capability of the single-image model under limited training samples, improving speckle suppression while effectively maintaining image detail and structural consistency. Additionally, a pure convolutional network architecture is employed to support variable-length multi-temporal sequence input, enhancing the method’s adaptability under different temporal sampling conditions.</description>
	<pubDate>2026-05-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1402: From Single-Look to Multi-Temporal SAR Despeckling: A Latent-Space Guided Transfer Learning Approach</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1402">doi: 10.3390/rs18091402</a></p>
	<p>Authors:
		Baojing Pan
		Ze Yu
		Xianxun Yao
		Zhiqiang Tian
		Wei Ren
		</p>
	<p>Synthetic Aperture Radar (SAR) images are affected by speckle noise, which limits their application in fine object interpretation and quantitative analysis. Recent deep learning-based single-image SAR despeckling methods have made significant progress in spatial structure modeling but struggle to exploit temporal redundancy in multi-temporal data. Existing multi-temporal despeckling methods usually rely on complex spatiotemporal network structures, which are prone to overfitting or excessive smoothing of details when training samples are limited. To address these challenges, this paper proposes a latent-space-guided multi-temporal SAR despeckling method from the perspective of transfer learning and representation alignment, achieving effective knowledge transfer from single-image SAR despeckling to multi-temporal despeckling tasks. The method treats the single-image SAR despeckling task as a knowledge source domain, using stable latent space representations learned from the pre-trained single-image despeckling model as prior constraints. A latent space regularization mechanism is introduced during the training of the multi-temporal despeckling model, thereby establishing an explicit representation bridge between the 2D spatial model and the 3D spatiotemporal model. With this strategy, the multi-temporal model inherits the structural perception capability of the single-image model under limited training samples, improving speckle suppression while effectively maintaining image detail and structural consistency. Additionally, a pure convolutional network architecture is employed to support variable-length multi-temporal sequence input, enhancing the method&rsquo;s adaptability under different temporal sampling conditions.</p>
	]]></content:encoded>

	<dc:title>From Single-Look to Multi-Temporal SAR Despeckling: A Latent-Space Guided Transfer Learning Approach</dc:title>
			<dc:creator>Baojing Pan</dc:creator>
			<dc:creator>Ze Yu</dc:creator>
			<dc:creator>Xianxun Yao</dc:creator>
			<dc:creator>Zhiqiang Tian</dc:creator>
			<dc:creator>Wei Ren</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091402</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-05-01</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-05-01</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1402</prism:startingPage>
		<prism:doi>10.3390/rs18091402</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1402</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1401">

	<title>Remote Sensing, Vol. 18, Pages 1401: Cross-Sections and Dimensions: A LiDAR-Based GIS Tool for Bankfull Channel Mapping</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1401</link>
	<description>Accurate and reproducible delineation of stream bankfull geometry remains a persistent challenge in environmental planning. To address this gap, we developed the Cross-Sections and Dimensions Tool, a semi-automated, slope-based method for extracting stream cross-sections and estimating bankfull width, elevation and depth using high-resolution elevation data. The tool applies a configurable slope threshold to identify bank edges, generates perpendicular cross-sections from a stream centreline, and stores all outputs in a structured geodatabase to ensure transparency and reproducibility. Validation against manually delineated bankfull polygons across 191 km of stream length in Greater Melbourne, Australia, demonstrated strong spatial agreement, with an average F1 score (a measure of prediction-observation overlap) of 74% and a mean absolute error of 0.64 m in bankfull elevation. The tool was most reliable in larger streams (Strahler order 5 and above) with low to moderate vegetation canopy cover (&lt;80%). We also investigated the practical visibility limits of small or indistinct channels typically encountered by human mappers and verified that the tool did not produce unrealistic channel delineations. This approach advances geomorphic feature extraction by grounding bankfull delineation in deterministic geometry rather than hydrological recurrence or data-driven modelling. In practice, it enables scalable, transparent, and repeatable analysis of stream morphology for ecological assessment, infrastructure planning, and waterway management.</description>
	<pubDate>2026-05-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1401: Cross-Sections and Dimensions: A LiDAR-Based GIS Tool for Bankfull Channel Mapping</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1401">doi: 10.3390/rs18091401</a></p>
	<p>Authors:
		Joshphar Kunapo
		Kathryn Russell
		</p>
	<p>Accurate and reproducible delineation of stream bankfull geometry remains a persistent challenge in environmental planning. To address this gap, we developed the Cross-Sections and Dimensions Tool, a semi-automated, slope-based method for extracting stream cross-sections and estimating bankfull width, elevation and depth using high-resolution elevation data. The tool applies a configurable slope threshold to identify bank edges, generates perpendicular cross-sections from a stream centreline, and stores all outputs in a structured geodatabase to ensure transparency and reproducibility. Validation against manually delineated bankfull polygons across 191 km of stream length in Greater Melbourne, Australia, demonstrated strong spatial agreement, with an average F1 score (a measure of prediction-observation overlap) of 74% and a mean absolute error of 0.64 m in bankfull elevation. The tool was most reliable in larger streams (Strahler order 5 and above) with low to moderate vegetation canopy cover (&amp;amp;lt;80%). We also investigated the practical visibility limits of small or indistinct channels typically encountered by human mappers and verified that the tool did not produce unrealistic channel delineations. This approach advances geomorphic feature extraction by grounding bankfull delineation in deterministic geometry rather than hydrological recurrence or data-driven modelling. In practice, it enables scalable, transparent, and repeatable analysis of stream morphology for ecological assessment, infrastructure planning, and waterway management.</p>
	]]></content:encoded>

	<dc:title>Cross-Sections and Dimensions: A LiDAR-Based GIS Tool for Bankfull Channel Mapping</dc:title>
			<dc:creator>Joshphar Kunapo</dc:creator>
			<dc:creator>Kathryn Russell</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091401</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-05-01</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-05-01</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1401</prism:startingPage>
		<prism:doi>10.3390/rs18091401</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1401</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1399">

	<title>Remote Sensing, Vol. 18, Pages 1399: Simulating Net Ecosystem Exchange of China&amp;rsquo;s Three Staple Food Crops and Their Responses to Heatwaves</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1399</link>
	<description>Agricultural carbon sequestration is increasingly threatened by heatwaves, yet accurately simulating crop-explicit net ecosystem exchange (NEE) under heat stress remains challenging. In this study, we used a WRF&amp;amp;ndash;VPRM&amp;amp;ndash;CROP model to simulate the spatiotemporal dynamics of NEE for rice, wheat, and maize in China, quantifying the impacts of the record-breaking 2022 heatwave. Model validation against multi-source observations confirmed its reliability, with correlation coefficients (r) reaching 0.49&amp;amp;ndash;0.85 (p &amp;amp;lt; 0.001). Results show that the cumulative summer NEE of the study region reaches 620.32 Tg C, with contributions of 274.94 Tg C from rice and 345.09 Tg C from maize, while wheat contributes 157.83 Tg C during spring. The 2022 heatwave led to substantial reductions in crop NEE, with decreases of 79.70 Tg C for rice, 33.13 Tg C for wheat, and 100.74 Tg C for maize. Total summer NEE decreased by 171.46 Tg C, with an annual reduction of 213.57 Tg C. Spatially, the most pronounced declines in NEE are concentrated in East China and North China, whereas slight increases are observed in western Heilongjiang (maize-growing areas) and parts of eastern coastal wheat-growing regions. At the provincial scale, the most severe yield losses occur in Henan (29.65 Mt) and Shandong (14.50 Mt). This study quantifies the impacts of extreme heatwaves on carbon exchange in China&amp;amp;rsquo;s major staple crop systems, providing a scientific basis for regional agricultural climate adaptation and disaster risk mitigation.</description>
	<pubDate>2026-05-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1399: Simulating Net Ecosystem Exchange of China&amp;rsquo;s Three Staple Food Crops and Their Responses to Heatwaves</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1399">doi: 10.3390/rs18091399</a></p>
	<p>Authors:
		Yanzi Sun
		Shuyu Zhao
		Jiayao Yu
		Mengkun Zhu
		Weiwei Liu
		Lihua Wang
		Gang Yang
		Tian Feng
		</p>
	<p>Agricultural carbon sequestration is increasingly threatened by heatwaves, yet accurately simulating crop-explicit net ecosystem exchange (NEE) under heat stress remains challenging. In this study, we used a WRF&amp;amp;ndash;VPRM&amp;amp;ndash;CROP model to simulate the spatiotemporal dynamics of NEE for rice, wheat, and maize in China, quantifying the impacts of the record-breaking 2022 heatwave. Model validation against multi-source observations confirmed its reliability, with correlation coefficients (r) reaching 0.49&amp;amp;ndash;0.85 (p &amp;amp;lt; 0.001). Results show that the cumulative summer NEE of the study region reaches 620.32 Tg C, with contributions of 274.94 Tg C from rice and 345.09 Tg C from maize, while wheat contributes 157.83 Tg C during spring. The 2022 heatwave led to substantial reductions in crop NEE, with decreases of 79.70 Tg C for rice, 33.13 Tg C for wheat, and 100.74 Tg C for maize. Total summer NEE decreased by 171.46 Tg C, with an annual reduction of 213.57 Tg C. Spatially, the most pronounced declines in NEE are concentrated in East China and North China, whereas slight increases are observed in western Heilongjiang (maize-growing areas) and parts of eastern coastal wheat-growing regions. At the provincial scale, the most severe yield losses occur in Henan (29.65 Mt) and Shandong (14.50 Mt). This study quantifies the impacts of extreme heatwaves on carbon exchange in China&amp;amp;rsquo;s major staple crop systems, providing a scientific basis for regional agricultural climate adaptation and disaster risk mitigation.</p>
	]]></content:encoded>

	<dc:title>Simulating Net Ecosystem Exchange of China&amp;rsquo;s Three Staple Food Crops and Their Responses to Heatwaves</dc:title>
			<dc:creator>Yanzi Sun</dc:creator>
			<dc:creator>Shuyu Zhao</dc:creator>
			<dc:creator>Jiayao Yu</dc:creator>
			<dc:creator>Mengkun Zhu</dc:creator>
			<dc:creator>Weiwei Liu</dc:creator>
			<dc:creator>Lihua Wang</dc:creator>
			<dc:creator>Gang Yang</dc:creator>
			<dc:creator>Tian Feng</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091399</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-05-01</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-05-01</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1399</prism:startingPage>
		<prism:doi>10.3390/rs18091399</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1399</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1398">

	<title>Remote Sensing, Vol. 18, Pages 1398: A Spaceborne Tomographic SAR Reconstruction Method Based on Building Structural Characteristics</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1398</link>
	<description>The acquisition of spaceborne tomographic data usually requires a longer period of time due to the satellite&amp;amp;rsquo;s long revisit period. To address this issue, it is possible to leverage the similarity of neighboring pixels in order to perform tomographic reconstruction of building targets in urban areas. As a result, the insufficient number of samples can be approximately substituted by pixels with similar scattering characteristics. However, the current utilization of building structures is often limited to horizontal characteristics such as contour lines (CL); in addition, extraction methods either rely on external data as prior information or are constrained by the need to fit operations, which limits the shape of the contour lines. This paper proposes a spaceborne tomographic reconstruction method based on building characteristics, starting from data and fully utilizing the horizontal and vertical characteristics of buildings for reconstruction. First, interferometric information is used to assist in tomographic processing and a strategy combining multi-point growth with multi-level fusion is employed to extract contour lines. Additionally, the vertical characteristics of buildings are established to provide constraints on the solution space for tomographic processing. The three-dimensional reconstruction of isolated and vertical buildings is then achieved by combining signal elimination techniques. By more fully exploiting the structural characteristics of buildings, the proposed method is capable of recovering building structures even with a limited number of samples. The effectiveness of the proposed method is validated through simulated data and TerraSAR-X data.</description>
	<pubDate>2026-05-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1398: A Spaceborne Tomographic SAR Reconstruction Method Based on Building Structural Characteristics</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1398">doi: 10.3390/rs18091398</a></p>
	<p>Authors:
		Sisi Dong
		Weidong Yu
		Jili Wang
		Yulun Wu
		Zhichao Wang
		</p>
	<p>The acquisition of spaceborne tomographic data usually requires a longer period of time due to the satellite&amp;amp;rsquo;s long revisit period. To address this issue, it is possible to leverage the similarity of neighboring pixels in order to perform tomographic reconstruction of building targets in urban areas. As a result, the insufficient number of samples can be approximately substituted by pixels with similar scattering characteristics. However, the current utilization of building structures is often limited to horizontal characteristics such as contour lines (CL); in addition, extraction methods either rely on external data as prior information or are constrained by the need to fit operations, which limits the shape of the contour lines. This paper proposes a spaceborne tomographic reconstruction method based on building characteristics, starting from data and fully utilizing the horizontal and vertical characteristics of buildings for reconstruction. First, interferometric information is used to assist in tomographic processing and a strategy combining multi-point growth with multi-level fusion is employed to extract contour lines. Additionally, the vertical characteristics of buildings are established to provide constraints on the solution space for tomographic processing. The three-dimensional reconstruction of isolated and vertical buildings is then achieved by combining signal elimination techniques. By more fully exploiting the structural characteristics of buildings, the proposed method is capable of recovering building structures even with a limited number of samples. The effectiveness of the proposed method is validated through simulated data and TerraSAR-X data.</p>
	]]></content:encoded>

	<dc:title>A Spaceborne Tomographic SAR Reconstruction Method Based on Building Structural Characteristics</dc:title>
			<dc:creator>Sisi Dong</dc:creator>
			<dc:creator>Weidong Yu</dc:creator>
			<dc:creator>Jili Wang</dc:creator>
			<dc:creator>Yulun Wu</dc:creator>
			<dc:creator>Zhichao Wang</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091398</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-05-01</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-05-01</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1398</prism:startingPage>
		<prism:doi>10.3390/rs18091398</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1398</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1397">

	<title>Remote Sensing, Vol. 18, Pages 1397: Semantic Density-Guided ResNet for Dense Infrared Small Target Detection</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1397</link>
	<description>Dense infrared small target detection (ISTD) in long-range remote sensing is critical for multi-target surveillance, yet existing benchmarks mostly contain only sparsely distributed targets and rarely reflect dense scenes. To address this limitation, we construct a new dense satellite ISTD dataset, IR-SatDense, by compositing small targets onto real satellite infrared backgrounds and partitioning it into subsets using the Average Minimum Inter-Target Distance (AMID) to explicitly control target density. By visualizing multi-stage backbone features, we observe that in dense scenes the deepest stage naturally forms compact, high-response target clusters in the semantic feature maps, while low- and middle-level features remain heavily cluttered. This motivates us to treat high-level semantic density as a global prior to guide low-level feature enhancement. Therefore, we propose Semantic Density-Guided ResNet (SDG-ResNet), a plug-in backbone that attaches a lightweight semantic density head to the deepest stage and injects the predicted density map into intermediate layers through Semantic Density-Guided Refine (SDGR) blocks with residual spatial gating. Integrated into representative transformer-based detectors, including Deformable DETR, DETA, and DINO, SDG-ResNet consistently improves the probability of detection (PD) at comparable false alarm (FA) levels on IR-SatDense while maintaining competitive performance on the sparse dataset IRSTD-1K.</description>
	<pubDate>2026-05-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1397: Semantic Density-Guided ResNet for Dense Infrared Small Target Detection</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1397">doi: 10.3390/rs18091397</a></p>
	<p>Authors:
		Xin Zhang
		Wei An
		Xinyi Ying
		Ruojing Li
		Nuo Chen
		Boyang Li
		Chao Xiao
		Miao Li
		</p>
	<p>Dense infrared small target detection (ISTD) in long-range remote sensing is critical for multi-target surveillance, yet existing benchmarks mostly contain only sparsely distributed targets and rarely reflect dense scenes. To address this limitation, we construct a new dense satellite ISTD dataset, IR-SatDense, by compositing small targets onto real satellite infrared backgrounds and partitioning it into subsets using the Average Minimum Inter-Target Distance (AMID) to explicitly control target density. By visualizing multi-stage backbone features, we observe that in dense scenes the deepest stage naturally forms compact, high-response target clusters in the semantic feature maps, while low- and middle-level features remain heavily cluttered. This motivates us to treat high-level semantic density as a global prior to guide low-level feature enhancement. Therefore, we propose Semantic Density-Guided ResNet (SDG-ResNet), a plug-in backbone that attaches a lightweight semantic density head to the deepest stage and injects the predicted density map into intermediate layers through Semantic Density-Guided Refine (SDGR) blocks with residual spatial gating. Integrated into representative transformer-based detectors, including Deformable DETR, DETA, and DINO, SDG-ResNet consistently improves the probability of detection (PD) at comparable false alarm (FA) levels on IR-SatDense while maintaining competitive performance on the sparse dataset IRSTD-1K.</p>
	]]></content:encoded>

	<dc:title>Semantic Density-Guided ResNet for Dense Infrared Small Target Detection</dc:title>
			<dc:creator>Xin Zhang</dc:creator>
			<dc:creator>Wei An</dc:creator>
			<dc:creator>Xinyi Ying</dc:creator>
			<dc:creator>Ruojing Li</dc:creator>
			<dc:creator>Nuo Chen</dc:creator>
			<dc:creator>Boyang Li</dc:creator>
			<dc:creator>Chao Xiao</dc:creator>
			<dc:creator>Miao Li</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091397</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-05-01</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-05-01</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1397</prism:startingPage>
		<prism:doi>10.3390/rs18091397</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1397</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1396">

	<title>Remote Sensing, Vol. 18, Pages 1396: Multi-Frequency GNSS-IR Water-Level Estimation Using NMEA Observations from Low-Cost GNSS Receivers</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1396</link>
	<description>The high-precision, continuous monitoring of the surface water level is of great importance for water resource management and the conservation of ecological systems. This study proposes a GNSS-IR-based water-level estimation method using NMEA observations collected from low-cost GNSS receivers. First, the NMEA-recorded satellite elevation angle, azimuth angle, and signal-to-noise ratio (SNR) are processed using time-series characteristics for improving the resolution and applicability of these GNSS observations. Then, the multi-frequency GNSS signal-based reflector height inversion models are developed by making use of the Lomb&amp;amp;ndash;Scargle periodogram method. Finally, the Velocity Pausing Particle Swarm Optimization (VPPSO) algorithm is employed to calculate the reflector height estimation and thus the water level. Two experimental data sets collected in two different environments were used to test the proposed method. The experimental results show that the root mean square error (RMSE) of the water-level estimation error is less than 6 cm for the proposed method when the in situ ones are in the range of 196.4 cm to 296.1 cm. This study provides a theoretical and technical foundation for the development of the low-cost GNSS-IR water-level measuring instrument.</description>
	<pubDate>2026-04-30</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1396: Multi-Frequency GNSS-IR Water-Level Estimation Using NMEA Observations from Low-Cost GNSS Receivers</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1396">doi: 10.3390/rs18091396</a></p>
	<p>Authors:
		Yangkai Gao
		Tianhe Xu
		Yunwei Li
		Hai Guo
		</p>
	<p>The high-precision, continuous monitoring of the surface water level is of great importance for water resource management and the conservation of ecological systems. This study proposes a GNSS-IR-based water-level estimation method using NMEA observations collected from low-cost GNSS receivers. First, the NMEA-recorded satellite elevation angle, azimuth angle, and signal-to-noise ratio (SNR) are processed using time-series characteristics for improving the resolution and applicability of these GNSS observations. Then, the multi-frequency GNSS signal-based reflector height inversion models are developed by making use of the Lomb&amp;amp;ndash;Scargle periodogram method. Finally, the Velocity Pausing Particle Swarm Optimization (VPPSO) algorithm is employed to calculate the reflector height estimation and thus the water level. Two experimental data sets collected in two different environments were used to test the proposed method. The experimental results show that the root mean square error (RMSE) of the water-level estimation error is less than 6 cm for the proposed method when the in situ ones are in the range of 196.4 cm to 296.1 cm. This study provides a theoretical and technical foundation for the development of the low-cost GNSS-IR water-level measuring instrument.</p>
	]]></content:encoded>

	<dc:title>Multi-Frequency GNSS-IR Water-Level Estimation Using NMEA Observations from Low-Cost GNSS Receivers</dc:title>
			<dc:creator>Yangkai Gao</dc:creator>
			<dc:creator>Tianhe Xu</dc:creator>
			<dc:creator>Yunwei Li</dc:creator>
			<dc:creator>Hai Guo</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091396</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-04-30</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-04-30</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1396</prism:startingPage>
		<prism:doi>10.3390/rs18091396</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1396</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1395">

	<title>Remote Sensing, Vol. 18, Pages 1395: Deciphering the Seasonal Thermal Environments in Kunming&amp;rsquo;s Central Urban Area Using LST and Interpretable Geo-Machine Learning</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1395</link>
	<description>Rapid urbanization and complex topography complicate Urban Heat Island (UHI) spatio-temporal dynamics. Traditional models and coarse-resolution imagery often fail to capture fine-scale, spatially non-stationary seasonal driving mechanisms. This study investigates the multi-dimensional drivers of surface thermal dynamics in Kunming, a typical low-latitude plateau city, using seasonal median LST composite (2018&amp;amp;ndash;2025). Integrating eXtreme Gradient Boosting (XGBoost) with eXplainable Artificial Intelligence (XAI) models decoupled the nonlinear impacts of these drivers. Results reveal a seasonal thermal dichotomy: Summer exhibits the most intense UHI effect with extreme peak temperatures, while Spring presents an anomaly where natural and vegetated Local Climate Zones (LCZs) show pronounced warming. SHapley Additive exPlanations (SHAP) analysis identified a seasonal rotation: anthropogenic and structural factors dominate Summer and Autumn warming, whereas natural and topographic regulators govern Spring and Winter. GeoShapley deconstruction demonstrated strong spatial non-stationarity. Building-density warming is amplified in poorly ventilated urban cores, and fragmented vegetation&amp;amp;rsquo;s cooling is offset by anthropogenic heat during peak summer. This study provides new insights into the seasonal drivers of urban thermal environments in plateau cities.</description>
	<pubDate>2026-04-30</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1395: Deciphering the Seasonal Thermal Environments in Kunming&amp;rsquo;s Central Urban Area Using LST and Interpretable Geo-Machine Learning</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1395">doi: 10.3390/rs18091395</a></p>
	<p>Authors:
		Jiangqin Chao
		Yingyun Li
		Jianyu Liu
		Jing Fan
		Yinghui Zhou
		Maofen Li
		Shiguang Xu
		</p>
	<p>Rapid urbanization and complex topography complicate Urban Heat Island (UHI) spatio-temporal dynamics. Traditional models and coarse-resolution imagery often fail to capture fine-scale, spatially non-stationary seasonal driving mechanisms. This study investigates the multi-dimensional drivers of surface thermal dynamics in Kunming, a typical low-latitude plateau city, using seasonal median LST composite (2018&amp;amp;ndash;2025). Integrating eXtreme Gradient Boosting (XGBoost) with eXplainable Artificial Intelligence (XAI) models decoupled the nonlinear impacts of these drivers. Results reveal a seasonal thermal dichotomy: Summer exhibits the most intense UHI effect with extreme peak temperatures, while Spring presents an anomaly where natural and vegetated Local Climate Zones (LCZs) show pronounced warming. SHapley Additive exPlanations (SHAP) analysis identified a seasonal rotation: anthropogenic and structural factors dominate Summer and Autumn warming, whereas natural and topographic regulators govern Spring and Winter. GeoShapley deconstruction demonstrated strong spatial non-stationarity. Building-density warming is amplified in poorly ventilated urban cores, and fragmented vegetation&amp;amp;rsquo;s cooling is offset by anthropogenic heat during peak summer. This study provides new insights into the seasonal drivers of urban thermal environments in plateau cities.</p>
	]]></content:encoded>

	<dc:title>Deciphering the Seasonal Thermal Environments in Kunming&amp;rsquo;s Central Urban Area Using LST and Interpretable Geo-Machine Learning</dc:title>
			<dc:creator>Jiangqin Chao</dc:creator>
			<dc:creator>Yingyun Li</dc:creator>
			<dc:creator>Jianyu Liu</dc:creator>
			<dc:creator>Jing Fan</dc:creator>
			<dc:creator>Yinghui Zhou</dc:creator>
			<dc:creator>Maofen Li</dc:creator>
			<dc:creator>Shiguang Xu</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091395</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-04-30</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-04-30</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1395</prism:startingPage>
		<prism:doi>10.3390/rs18091395</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1395</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1394">

	<title>Remote Sensing, Vol. 18, Pages 1394: A Small Object Detection Transformer for UAV Remote Sensing Imagery via Multi-Scale Perception and Cross-Spatial-Frequency Domain Fusion</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1394</link>
	<description>Small object detection in UAV remote sensing imagery has long faced significant challenges. Existing Transformer-based detectors still suffer from feature degradation and insufficient multi-scale information fusion when handling small objects with sparse pixels and complex backgrounds. To address this, we propose MSF-DETR, a Transformer-based detector with multi-scale perception and cross-spatial-frequency domain fusion. Specifically, we design a multi-scale perception attention feature extraction network that integrates a Poly Kernel Inception module with a bidirectional contextual anchor attention mechanism via a dual-pathway fusion block, enabling simultaneous capture of multi-granularity features and long-range semantic dependencies. We further develop a feature alignment and cross-spatial-frequency enhancement pyramid that enriches shallow-layer spatial details through feature reorganization and leverages a spatial-frequency dual-domain collaborative strategy to capture both local textures and global spectral dependencies. Cross-scale dynamic intensity modulation combined with decoupled lightweight downsampling further effectively suppresses semantic noise, corrects feature misalignment, and preserves critical edge details. Finally, a Shape-NWD loss is devised to incorporate geometric and scale constraints, effectively alleviating the positional sensitivity of IoU for small targets. Extensive experiments on three public benchmarks demonstrate the superior performance of MSF-DETR; notably, on the VisDrone dataset, it achieves improvements of 7.45% and 8.71% in mAP50 and mAP50:95 over the baseline.</description>
	<pubDate>2026-04-30</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1394: A Small Object Detection Transformer for UAV Remote Sensing Imagery via Multi-Scale Perception and Cross-Spatial-Frequency Domain Fusion</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1394">doi: 10.3390/rs18091394</a></p>
	<p>Authors:
		Chenglong Shi
		Hui Wang
		Xiaolin Fu
		Pingping Liu
		Hongchang Ke
		</p>
	<p>Small object detection in UAV remote sensing imagery has long faced significant challenges. Existing Transformer-based detectors still suffer from feature degradation and insufficient multi-scale information fusion when handling small objects with sparse pixels and complex backgrounds. To address this, we propose MSF-DETR, a Transformer-based detector with multi-scale perception and cross-spatial-frequency domain fusion. Specifically, we design a multi-scale perception attention feature extraction network that integrates a Poly Kernel Inception module with a bidirectional contextual anchor attention mechanism via a dual-pathway fusion block, enabling simultaneous capture of multi-granularity features and long-range semantic dependencies. We further develop a feature alignment and cross-spatial-frequency enhancement pyramid that enriches shallow-layer spatial details through feature reorganization and leverages a spatial-frequency dual-domain collaborative strategy to capture both local textures and global spectral dependencies. Cross-scale dynamic intensity modulation combined with decoupled lightweight downsampling further effectively suppresses semantic noise, corrects feature misalignment, and preserves critical edge details. Finally, a Shape-NWD loss is devised to incorporate geometric and scale constraints, effectively alleviating the positional sensitivity of IoU for small targets. Extensive experiments on three public benchmarks demonstrate the superior performance of MSF-DETR; notably, on the VisDrone dataset, it achieves improvements of 7.45% and 8.71% in mAP50 and mAP50:95 over the baseline.</p>
	]]></content:encoded>

	<dc:title>A Small Object Detection Transformer for UAV Remote Sensing Imagery via Multi-Scale Perception and Cross-Spatial-Frequency Domain Fusion</dc:title>
			<dc:creator>Chenglong Shi</dc:creator>
			<dc:creator>Hui Wang</dc:creator>
			<dc:creator>Xiaolin Fu</dc:creator>
			<dc:creator>Pingping Liu</dc:creator>
			<dc:creator>Hongchang Ke</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091394</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-04-30</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-04-30</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1394</prism:startingPage>
		<prism:doi>10.3390/rs18091394</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1394</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1393">

	<title>Remote Sensing, Vol. 18, Pages 1393: Research on the Application of Time-Frequency Characteristics of GPR in Railway Mud Pumping Intelligent Detection</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1393</link>
	<description>Ground penetrating radar (GPR), as an efficient non-destructive testing technique, plays a crucial role in the structural condition assessment and defect identification of railway ballast. Typical defects such as mud pumping generally exhibit characteristics in B-scan images including weak reflections, blurred boundaries, and irregular structures, which pose significant challenges for stable detection and precise localization using existing methods that rely primarily on spatial feature modeling. Most current deep learning approaches focus on modeling spatial or temporal information, while lacking effective utilization of frequency-domain features, thereby limiting their discriminative capability under complex electromagnetic environments. To address these issues, this paper proposes a single-stage object detection framework, termed YOLO-DGW, based on time-frequency collaborative modeling. Built upon YOLOv8, the proposed method introduces a structure-aware spatial enhancement module to improve the representation of continuous GPR echo structures. Meanwhile, frequency-domain information is incorporated as a modulation prior to guide spatial feature learning, enhancing the model&amp;amp;rsquo;s sensitivity to weak reflections and complex-shaped targets. In addition, A-CIoU loss function is designed to improve localization accuracy and stability for defect regions of varying scales. Experimental results demonstrate that YOLO-DGW achieves an F1-score of 63.06% and an AP@0.50 of 62.07%, representing improvements of approximately 7.41% and 2.8%, respectively, over the strongest baseline method. Compared with several mainstream object detection models, the proposed approach exhibits superior performance in both detection accuracy and cross-region generalization capability. 
These findings indicate that integrating frequency-domain information into spatial feature learning through a modulation mechanism can effectively enhance the model&amp;amp;rsquo;s ability to discriminate weak-reflection anomalies, providing a novel time-frequency collaborative modeling paradigm for railway GPR defect detection.</description>
	<pubDate>2026-04-30</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1393: Research on the Application of Time-Frequency Characteristics of GPR in Railway Mud Pumping Intelligent Detection</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1393">doi: 10.3390/rs18091393</a></p>
	<p>Authors:
		Wenxing Shi
		Shilei Wang
		Feng Yang
		Chi Zhang
		Fanruo Li
		Suping Peng
		</p>
	<p>Ground penetrating radar (GPR), as an efficient non-destructive testing technique, plays a crucial role in the structural condition assessment and defect identification of railway ballast. Typical defects such as mud pumping generally exhibit characteristics in B-scan images including weak reflections, blurred boundaries, and irregular structures, which pose significant challenges for stable detection and precise localization using existing methods that rely primarily on spatial feature modeling. Most current deep learning approaches focus on modeling spatial or temporal information, while lacking effective utilization of frequency-domain features, thereby limiting their discriminative capability under complex electromagnetic environments. To address these issues, this paper proposes a single-stage object detection framework, termed YOLO-DGW, based on time-frequency collaborative modeling. Built upon YOLOv8, the proposed method introduces a structure-aware spatial enhancement module to improve the representation of continuous GPR echo structures. Meanwhile, frequency-domain information is incorporated as a modulation prior to guide spatial feature learning, enhancing the model&amp;amp;rsquo;s sensitivity to weak reflections and complex-shaped targets. In addition, A-CIoU loss function is designed to improve localization accuracy and stability for defect regions of varying scales. Experimental results demonstrate that YOLO-DGW achieves an F1-score of 63.06% and an AP@0.50 of 62.07%, representing improvements of approximately 7.41% and 2.8%, respectively, over the strongest baseline method. Compared with several mainstream object detection models, the proposed approach exhibits superior performance in both detection accuracy and cross-region generalization capability. 
These findings indicate that integrating frequency-domain information into spatial feature learning through a modulation mechanism can effectively enhance the model&amp;amp;rsquo;s ability to discriminate weak-reflection anomalies, providing a novel time-frequency collaborative modeling paradigm for railway GPR defect detection.</p>
	]]></content:encoded>

	<dc:title>Research on the Application of Time-Frequency Characteristics of GPR in Railway Mud Pumping Intelligent Detection</dc:title>
			<dc:creator>Wenxing Shi</dc:creator>
			<dc:creator>Shilei Wang</dc:creator>
			<dc:creator>Feng Yang</dc:creator>
			<dc:creator>Chi Zhang</dc:creator>
			<dc:creator>Fanruo Li</dc:creator>
			<dc:creator>Suping Peng</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091393</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-04-30</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-04-30</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1393</prism:startingPage>
		<prism:doi>10.3390/rs18091393</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1393</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1392">

	<title>Remote Sensing, Vol. 18, Pages 1392: Satellite-Based Chlorophyll-a Prediction Reveals Salinity-Dominated Regime Shifts in the East China Sea: A 22-Year Multi-Sensor Analysis with Explainable AI</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1392</link>
	<description>We developed an explainable machine learning framework combining 22 years (2003&amp;amp;ndash;2024) of multi-sensor satellite data (MODIS Aqua, CMEMS, C3S) with zone-specific SHAP attribution to quantify chlorophyll-a (Chl-a) mechanisms in the East China Sea. A geography-free XGBoost model achieved R2=0.802 on 1.4 million pixel-month observations, and counterfactual experiments confirmed its superior environmental sensitivity over location-dependent models. Multi-strategy threshold detection identified two critical salinity boundaries&amp;amp;mdash;11.62 psu marking the turbidity-to-productivity transition (Cohen&amp;amp;rsquo;s d=&amp;amp;minus;2.92) and 34.03 psu at the Kuroshio Front (d=&amp;amp;minus;1.04)&amp;amp;mdash;neither of which coincides with traditional physical definitions. Zone-specific SHAP analysis revealed that sea surface salinity (SSS) dominates Chl-a attribution across all zones but through fundamentally different mechanisms. We propose an &amp;amp;ldquo;SSS Triple-Role Framework&amp;amp;rdquo; in which salinity serves as turbidity proxy in estuarine waters, nutrient proxy in transitional waters, and dilution signal offshore, resolving the apparent contradiction of simultaneous positive and negative salinity effects. Non-additive interactions&amp;amp;mdash;including SSS &amp;amp;times; SST coupling (61% modulation) and SST &amp;amp;times; sea level amplification during Kuroshio intrusions&amp;amp;mdash;further demonstrate hierarchical controls missed by additive models. These findings provide quantitative benchmarks for ecosystem monitoring in river-dominated marginal seas.</description>
	<pubDate>2026-04-30</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1392: Satellite-Based Chlorophyll-a Prediction Reveals Salinity-Dominated Regime Shifts in the East China Sea: A 22-Year Multi-Sensor Analysis with Explainable AI</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1392">doi: 10.3390/rs18091392</a></p>
	<p>Authors:
		Shuyao Liu
		Zhen Han
		</p>
	<p>We developed an explainable machine learning framework combining 22 years (2003&amp;amp;ndash;2024) of multi-sensor satellite data (MODIS Aqua, CMEMS, C3S) with zone-specific SHAP attribution to quantify chlorophyll-a (Chl-a) mechanisms in the East China Sea. A geography-free XGBoost model achieved R2=0.802 on 1.4 million pixel-month observations, and counterfactual experiments confirmed its superior environmental sensitivity over location-dependent models. Multi-strategy threshold detection identified two critical salinity boundaries&amp;amp;mdash;11.62 psu marking the turbidity-to-productivity transition (Cohen&amp;amp;rsquo;s d=&amp;amp;minus;2.92) and 34.03 psu at the Kuroshio Front (d=&amp;amp;minus;1.04)&amp;amp;mdash;neither of which coincides with traditional physical definitions. Zone-specific SHAP analysis revealed that sea surface salinity (SSS) dominates Chl-a attribution across all zones but through fundamentally different mechanisms. We propose an &amp;amp;ldquo;SSS Triple-Role Framework&amp;amp;rdquo; in which salinity serves as turbidity proxy in estuarine waters, nutrient proxy in transitional waters, and dilution signal offshore, resolving the apparent contradiction of simultaneous positive and negative salinity effects. Non-additive interactions&amp;amp;mdash;including SSS &amp;amp;times; SST coupling (61% modulation) and SST &amp;amp;times; sea level amplification during Kuroshio intrusions&amp;amp;mdash;further demonstrate hierarchical controls missed by additive models. These findings provide quantitative benchmarks for ecosystem monitoring in river-dominated marginal seas.</p>
	]]></content:encoded>

	<dc:title>Satellite-Based Chlorophyll-a Prediction Reveals Salinity-Dominated Regime Shifts in the East China Sea: A 22-Year Multi-Sensor Analysis with Explainable AI</dc:title>
			<dc:creator>Shuyao Liu</dc:creator>
			<dc:creator>Zhen Han</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091392</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-04-30</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-04-30</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1392</prism:startingPage>
		<prism:doi>10.3390/rs18091392</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1392</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1390">

	<title>Remote Sensing, Vol. 18, Pages 1390: Global Inversion of Terrestrial Net Ecosystem Exchange: Integrating Explicit Multi-Source Predictors and High-Dimensional Remote-Sensing Embeddings</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1390</link>
	<description>Terrestrial ecosystems play a critical role in regulating atmospheric CO2 through land&amp;amp;ndash;atmosphere carbon exchange. While Net Ecosystem Exchange (NEE) serves as a key integrative metric for carbon dynamics, its robust global estimation remains challenging due to profound environmental heterogeneity and nonlinear ecosystem responses. In this study, we propose a dual-track experimental framework to invert annual global terrestrial NEE at a 0.1&amp;amp;deg; spatial resolution for 2000&amp;amp;ndash;2024. Initially, a long-term historical baseline inversion (2000&amp;amp;ndash;2024) was developed using explicit multi-source environmental predictors. Subsequently, to overcome the representational limitations of conventional spectral indices over complex terrains, we integrated high-dimensional remote-sensing embeddings from the AlphaEarth framework for the 2017&amp;amp;ndash;2024 overlapping period. This approach was designed to explicitly quantify the added value of these advanced features. Our results demonstrate that embedding features substantially enhance inversion performance, reducing prediction errors and improving spatial coherence. Adopting the standard meteorological sign convention, global terrestrial NEE remained consistently negative. Based on the 2000&amp;amp;ndash;2024 baseline inversion, our predicted global NEE fluctuated between &amp;amp;minus;3.50 and &amp;amp;minus;4.38 Pg C yr&amp;amp;minus;1. To validate these long-term estimates, we systematically cross-validated our results against an independent, recently published multi-network fusion dataset, which reported a comparable range of &amp;amp;minus;3.11 to &amp;amp;minus;3.75 Pg C yr&amp;amp;minus;1. This comparison demonstrates consistent interannual dynamics and corroborates the magnitude of the global terrestrial carbon sink. Spatial patterns exhibit a stable latitudinal structure, with stronger net carbon uptake in low latitudes. 
Interannual variability is expressed mainly as magnitude fluctuations rather than systematic spatial reorganization. Overall, this study highlights that high-dimensional Earth observation embeddings provide significant, measurable information gains for global NEE inversion without introducing new process-based assumptions, thereby offering a robust and internally consistent basis for evaluating long-term carbon dynamics.</description>
	<pubDate>2026-04-30</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1390: Global Inversion of Terrestrial Net Ecosystem Exchange: Integrating Explicit Multi-Source Predictors and High-Dimensional Remote-Sensing Embeddings</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1390">doi: 10.3390/rs18091390</a></p>
	<p>Authors:
		Peng Du
		Lei Cui
		Yi Lian
		Haixiao Li
		Jiaxu Fan
		Xinrui Zhou
		Yanyan Chen
		</p>
	<p>Terrestrial ecosystems play a critical role in regulating atmospheric CO2 through land&amp;amp;ndash;atmosphere carbon exchange. While Net Ecosystem Exchange (NEE) serves as a key integrative metric for carbon dynamics, its robust global estimation remains challenging due to profound environmental heterogeneity and nonlinear ecosystem responses. In this study, we propose a dual-track experimental framework to invert annual global terrestrial NEE at a 0.1&amp;amp;deg; spatial resolution for 2000&amp;amp;ndash;2024. Initially, a long-term historical baseline inversion (2000&amp;amp;ndash;2024) was developed using explicit multi-source environmental predictors. Subsequently, to overcome the representational limitations of conventional spectral indices over complex terrains, we integrated high-dimensional remote-sensing embeddings from the AlphaEarth framework for the 2017&amp;amp;ndash;2024 overlapping period. This approach was designed to explicitly quantify the added value of these advanced features. Our results demonstrate that embedding features substantially enhance inversion performance, reducing prediction errors and improving spatial coherence. Adopting the standard meteorological sign convention, global terrestrial NEE remained consistently negative. Based on the 2000&amp;amp;ndash;2024 baseline inversion, our predicted global NEE fluctuated between &amp;amp;minus;3.50 and &amp;amp;minus;4.38 Pg C yr&amp;amp;minus;1. To validate these long-term estimates, we systematically cross-validated our results against an independent, recently published multi-network fusion dataset, which reported a comparable range of &amp;amp;minus;3.11 to &amp;amp;minus;3.75 Pg C yr&amp;amp;minus;1. This comparison demonstrates consistent interannual dynamics and corroborates the magnitude of the global terrestrial carbon sink. Spatial patterns exhibit a stable latitudinal structure, with stronger net carbon uptake in low latitudes. 
Interannual variability is expressed mainly as magnitude fluctuations rather than systematic spatial reorganization. Overall, this study highlights that high-dimensional Earth observation embeddings provide significant, measurable information gains for global NEE inversion without introducing new process-based assumptions, thereby offering a robust and internally consistent basis for evaluating long-term carbon dynamics.</p>
	]]></content:encoded>

	<dc:title>Global Inversion of Terrestrial Net Ecosystem Exchange: Integrating Explicit Multi-Source Predictors and High-Dimensional Remote-Sensing Embeddings</dc:title>
			<dc:creator>Peng Du</dc:creator>
			<dc:creator>Lei Cui</dc:creator>
			<dc:creator>Yi Lian</dc:creator>
			<dc:creator>Haixiao Li</dc:creator>
			<dc:creator>Jiaxu Fan</dc:creator>
			<dc:creator>Xinrui Zhou</dc:creator>
			<dc:creator>Yanyan Chen</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091390</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-04-30</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-04-30</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1390</prism:startingPage>
		<prism:doi>10.3390/rs18091390</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1390</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1391">

	<title>Remote Sensing, Vol. 18, Pages 1391: A Multi-Dimensional Feature-Driven Method for Remote Sensing-Based Identification of Cereal and Oil Crops in the Tibetan Plateau</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1391</link>
	<description>Fragmented farmland and persistent cloud&amp;amp;ndash;snow interference in the high-altitude cold regions of the Qinghai&amp;amp;ndash;Tibet Plateau, coupled with unstable crop phenology, pose significant challenges for accurate cereal and oil crop identification using single-date imagery or low-dimensional features. This study focused on the agricultural areas of the Shigatse River Valley in the Qinghai&amp;amp;ndash;Tibet Plateau. Leveraging the Google Earth Engine (GEE) cloud computing platform, we integrated Sentinel-2 remote sensing data with field survey sampling data to extract the planting structures, distribution patterns, and cultivated areas of cereal and oil crops. Three machine-learning classifiers&amp;amp;mdash;Random Forest (RF), Support Vector Machine (SVM), and Gradient Boosted Trees (GBT)&amp;amp;mdash;were evaluated to investigate the influence of different feature sets and classifier combinations on mapping accuracy. The results indicated that when all feature bands were utilized, the RF classifier achieved the highest performance, with an overall accuracy of 84.77% and a kappa coefficient of 0.64, outperforming both the SVM and GBT models. The incorporation of phenological and topographic features further enhanced classification accuracy, providing a robust framework for identifying cereal and oil crops in high-altitude environments. Based on the optimal model estimation, the cultivated areas in 2021 were 581.52 km2 for highland barley, 295.39 km2 for wheat, and 386.81 km2 for rapeseed. Their spatial patterns closely aligned with the valley-terrace topography and local irrigation conditions. These findings offer novel insights and a reliable methodology for the rapid extraction of crop spatial information in regions with complex planting structures.</description>
	<pubDate>2026-04-30</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1391: A Multi-Dimensional Feature-Driven Method for Remote Sensing-Based Identification of Cereal and Oil Crops in the Tibetan Plateau</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1391">doi: 10.3390/rs18091391</a></p>
	<p>Authors:
		Aoxue Li
		Haijing Shi
		Yangyang Liu
		Zhongming Wen
		Alfredo R. Huete
		Hongming Zhang
		Gang Zhao
		Ye Wang
		Guang Yang
		Xihua Yang
		</p>
	<p>Fragmented farmland and persistent cloud&amp;amp;ndash;snow interference in the high-altitude cold regions of the Qinghai&amp;amp;ndash;Tibet Plateau, coupled with unstable crop phenology, pose significant challenges for accurate cereal and oil crop identification using single-date imagery or low-dimensional features. This study focused on the agricultural areas of the Shigatse River Valley in the Qinghai&amp;amp;ndash;Tibet Plateau. Leveraging the Google Earth Engine (GEE) cloud computing platform, we integrated Sentinel-2 remote sensing data with field survey sampling data to extract the planting structures, distribution patterns, and cultivated areas of cereal and oil crops. Three machine-learning classifiers&amp;amp;mdash;Random Forest (RF), Support Vector Machine (SVM), and Gradient Boosted Trees (GBT)&amp;amp;mdash;were evaluated to investigate the influence of different feature sets and classifier combinations on mapping accuracy. The results indicated that when all feature bands were utilized, the RF classifier achieved the highest performance, with an overall accuracy of 84.77% and a kappa coefficient of 0.64, outperforming both the SVM and GBT models. The incorporation of phenological and topographic features further enhanced classification accuracy, providing a robust framework for identifying cereal and oil crops in high-altitude environments. Based on the optimal model estimation, the cultivated areas in 2021 were 581.52 km2 for highland barley, 295.39 km2 for wheat, and 386.81 km2 for rapeseed. Their spatial patterns closely aligned with the valley-terrace topography and local irrigation conditions. These findings offer novel insights and a reliable methodology for the rapid extraction of crop spatial information in regions with complex planting structures.</p>
	]]></content:encoded>

	<dc:title>A Multi-Dimensional Feature-Driven Method for Remote Sensing-Based Identification of Cereal and Oil Crops in the Tibetan Plateau</dc:title>
			<dc:creator>Aoxue Li</dc:creator>
			<dc:creator>Haijing Shi</dc:creator>
			<dc:creator>Yangyang Liu</dc:creator>
			<dc:creator>Zhongming Wen</dc:creator>
			<dc:creator>Alfredo R. Huete</dc:creator>
			<dc:creator>Hongming Zhang</dc:creator>
			<dc:creator>Gang Zhao</dc:creator>
			<dc:creator>Ye Wang</dc:creator>
			<dc:creator>Guang Yang</dc:creator>
			<dc:creator>Xihua Yang</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091391</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-04-30</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-04-30</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1391</prism:startingPage>
		<prism:doi>10.3390/rs18091391</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1391</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1388">

	<title>Remote Sensing, Vol. 18, Pages 1388: Development of a Spatiotemporal Estimation Method for Rice Plant Height Using Pattern Matching Based on Time-Series Satellite-Derived Vegetation Indices and In Situ Measurements</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1388</link>
	<description>Rice plant height is a key indicator of crop growth and phenology, yet continuous daily estimation remains challenging under limited field observations. This study proposes an interpretable Bayesian LUT-based framework to estimate rice plant height from time-series, satellite-derived GCVI, and sparse in situ measurements. Daily plant height was estimated as a posterior-weighted ensemble of multiple LUT-derived heights, together with uncertainty reflecting ambiguity among plausible growth trajectories. Applied to rice paddies in Ryugasaki City, Japan, using Harmonized Landsat&amp;ndash;Sentinel-2 data from the 2025 growing season, the method achieved an RMSE of 7.08 cm on the validation dataset, outperforming simple baseline approaches. The estimated daily height time series also enabled evaluation of the timing at which plant height reached 70 cm, revealing clear spatial variability among fields and an associated uncertainty of approximately 10 days. Although this threshold was discussed with reference to previous studies on L-band SAR sensitivity, the present study relied solely on optical observations. Overall, the proposed framework provides a data-efficient and explainable approach for daily, spatially explicit rice growth monitoring, while current limitations include the single-region, single-year LUT construction and the simplified statistical assumptions used in the Bayesian weighting framework.</description>
	<pubDate>2026-04-30</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1388: Development of a Spatiotemporal Estimation Method for Rice Plant Height Using Pattern Matching Based on Time-Series Satellite-Derived Vegetation Indices and In Situ Measurements</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1388">doi: 10.3390/rs18091388</a></p>
	<p>Authors:
		Shoki Shimda
		Go Segami
		Kei Oyoshi
		</p>
	<p>Rice plant height is a key indicator of crop growth and phenology, yet continuous daily estimation remains challenging under limited field observations. This study proposes an interpretable Bayesian LUT-based framework to estimate rice plant height from time-series, satellite-derived GCVI, and sparse in situ measurements. Daily plant height was estimated as a posterior-weighted ensemble of multiple LUT-derived heights, together with uncertainty reflecting ambiguity among plausible growth trajectories. Applied to rice paddies in Ryugasaki City, Japan, using Harmonized Landsat&ndash;Sentinel-2 data from the 2025 growing season, the method achieved an RMSE of 7.08 cm on the validation dataset, outperforming simple baseline approaches. The estimated daily height time series also enabled evaluation of the timing at which plant height reached 70 cm, revealing clear spatial variability among fields and an associated uncertainty of approximately 10 days. Although this threshold was discussed with reference to previous studies on L-band SAR sensitivity, the present study relied solely on optical observations. Overall, the proposed framework provides a data-efficient and explainable approach for daily, spatially explicit rice growth monitoring, while current limitations include the single-region, single-year LUT construction and the simplified statistical assumptions used in the Bayesian weighting framework.</p>
	]]></content:encoded>

	<dc:title>Development of a Spatiotemporal Estimation Method for Rice Plant Height Using Pattern Matching Based on Time-Series Satellite-Derived Vegetation Indices and In Situ Measurements</dc:title>
			<dc:creator>Shoki Shimda</dc:creator>
			<dc:creator>Go Segami</dc:creator>
			<dc:creator>Kei Oyoshi</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091388</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-04-30</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-04-30</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1388</prism:startingPage>
		<prism:doi>10.3390/rs18091388</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1388</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1389">

	<title>Remote Sensing, Vol. 18, Pages 1389: Test-Time Candidate-Aware Dual Refinement for Remote Sensing Image&amp;ndash;Text Retrieval</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1389</link>
	<description>Remote sensing image&amp;ndash;text retrieval (RSITR) is a pivotal task aimed at achieving efficient bidirectional matching between visual content and textual descriptions in large-scale remote sensing databases. Nevertheless, it faces a fundamental challenge: the severe information asymmetry between sparse, abstract captions and dense, multi-scale overhead imagery. Prior works predominantly focus on learning static cross-modal representations during training; however, this frozen inference process is fundamentally limited in bridging the asymmetry due to its inability to dynamically compensate for missing details or resolve visual ambiguities in heterogeneous scenes. To overcome this limitation, we propose CADRE (Test-Time Candidate-Aware Dual Refinement), a retrieval-backbone-agnostic framework exploiting retrieved candidates as feedback for bidirectional alignment. Operating on a novel Inject-and-Suppress paradigm, CADRE comprises two complementary modules. First, the Visual-Context Injection (VCI) module addresses textual sparsity by incorporating an adaptive filtering mechanism to efficiently mine hierarchical visual evidence from high-confidence candidates and inject it into the query via a domain-adapted Multimodal Large Language Model (MLLM). Second, the Query-Guided Disambiguation (QGD) module targets visual ambiguity by generating multi-view visual hypotheses and utilizing the query as a semantic probe to suppress background noise. Extensive experiments on three standard benchmarks (RSICD, RSITMD, and UCM) demonstrate good transferability across several strong RSITR backbones.</description>
	<pubDate>2026-04-30</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1389: Test-Time Candidate-Aware Dual Refinement for Remote Sensing Image&ndash;Text Retrieval</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1389">doi: 10.3390/rs18091389</a></p>
	<p>Authors:
		Bofan Zhang
		Hao Wu
		</p>
	<p>Remote sensing image&amp;amp;ndash;text retrieval (RSITR) is a pivotal task aimed at achieving efficient bidirectional matching between visual content and textual descriptions in large-scale remote sensing databases. Nevertheless, it faces a fundamental challenge: the severe information asymmetry between sparse, abstract captions and dense, multi-scale overhead imagery. Prior works predominantly focus on learning static cross-modal representations during training; however, this frozen inference process is fundamentally limited in bridging the asymmetry due to its inability to dynamically compensate for missing details or resolve visual ambiguities in heterogeneous scenes. To overcome this limitation, we propose CADRE (Test-Time Candidate-Aware Dual Refinement), a retrieval-backbone-agnostic framework exploiting retrieved candidates as feedback for bidirectional alignment. Operating on a novel Inject-and-Suppress paradigm, CADRE comprises two complementary modules. First, the Visual-Context Injection (VCI) module addresses textual sparsity by incorporating an adaptive filtering mechanism to efficiently mine hierarchical visual evidence from high-confidence candidates and inject it into the query via a domain-adapted Multimodal Large Language Model (MLLM). Second, the Query-Guided Disambiguation (QGD) module targets visual ambiguity by generating multi-view visual hypotheses and utilizing the query as a semantic probe to suppress background noise. Extensive experiments on three standard benchmarks (RSICD, RSITMD, and UCM) demonstrate good transferability across several strong RSITR backbones.</p>
	]]></content:encoded>

	<dc:title>Test-Time Candidate-Aware Dual Refinement for Remote Sensing Image&amp;ndash;Text Retrieval</dc:title>
			<dc:creator>Bofan Zhang</dc:creator>
			<dc:creator>Hao Wu</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091389</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-04-30</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-04-30</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1389</prism:startingPage>
		<prism:doi>10.3390/rs18091389</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1389</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1387">

	<title>Remote Sensing, Vol. 18, Pages 1387: Copernicus Sentinel-2C Radiometric Calibration and Validation Status</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1387</link>
	<description>The optical high spatial resolution component of the ESA Copernicus Earth Observation program is relying on the Sentinel-2 satellites. To secure the mission continuity, the Sentinel-2C unit was launched and has recently joined the Sentinel-2A and Sentinel-2B operational plan. The objective of the paper is to provide a status and a quantified assessment of the radiometric inter-operability of the latest unit with the constellation. The analyses reported here were performed using different vicarious methods during the commissioning phase of Sentinel-2C. Two of the methods were used for the first time with a Sentinel-2 satellite: lunar calibration and tandem inter-comparisons on selected surfaces. The results of the different methods are compared and the vicarious radiometric adjustment strategy is described. Finally, we discuss the impact of the different sources of uncertainty impacting the radiometric assessment.</description>
	<pubDate>2026-04-30</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1387: Copernicus Sentinel-2C Radiometric Calibration and Validation Status</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1387">doi: 10.3390/rs18091387</a></p>
	<p>Authors:
		Sébastien Clerc
		Damien Rodat
		Bruno Lafrance
		Bahjat Alhammoud
		Silvia Enache
		Alexis Deru
		Louis Rivoire
		Stefan Adriaensen
		Emmanuel Hillairet
		Rosalinda Morrone
		Rosario Iannone
		Valentina Boccia
		</p>
	<p>The optical high spatial resolution component of the ESA Copernicus Earth Observation program is relying on the Sentinel-2 satellites. To secure the mission continuity, the Sentinel-2C unit was launched and has recently joined the Sentinel-2A and Sentinel-2B operational plan. The objective of the paper is to provide a status and a quantified assessment of the radiometric inter-operability of the latest unit with the constellation. The analyses reported here were performed using different vicarious methods during the commissioning phase of Sentinel-2C. Two of the methods were used for the first time with a Sentinel-2 satellite: lunar calibration and tandem inter-comparisons on selected surfaces. The results of the different methods are compared and the vicarious radiometric adjustment strategy is described. Finally, we discuss the impact of the different sources of uncertainty impacting the radiometric assessment.</p>
	]]></content:encoded>

	<dc:title>Copernicus Sentinel-2C Radiometric Calibration and Validation Status</dc:title>
			<dc:creator>Sébastien Clerc</dc:creator>
			<dc:creator>Damien Rodat</dc:creator>
			<dc:creator>Bruno Lafrance</dc:creator>
			<dc:creator>Bahjat Alhammoud</dc:creator>
			<dc:creator>Silvia Enache</dc:creator>
			<dc:creator>Alexis Deru</dc:creator>
			<dc:creator>Louis Rivoire</dc:creator>
			<dc:creator>Stefan Adriaensen</dc:creator>
			<dc:creator>Emmanuel Hillairet</dc:creator>
			<dc:creator>Rosalinda Morrone</dc:creator>
			<dc:creator>Rosario Iannone</dc:creator>
			<dc:creator>Valentina Boccia</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091387</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-04-30</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-04-30</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1387</prism:startingPage>
		<prism:doi>10.3390/rs18091387</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1387</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1384">

	<title>Remote Sensing, Vol. 18, Pages 1384: A Multisensor Framework for Satellite Data Simulation: Generating Representative Datasets for Future ESA Missions&#8212;CHIME and LSTM</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1384</link>
	<description>The preparation for next-generation Earth Observation missions, such as the European Space Agency’s (ESA) Copernicus Hyperspectral Imaging Mission for the Environment (CHIME) and Land Surface Temperature Monitoring (LSTM), requires robust pre-launch proxy datasets. Because current simulation methodologies frequently rely on isolated, platform-specific approaches, this study proposes a comprehensive, unified multisensor framework capable of dynamically generating operationally realistic CHIME and LSTM datasets from diverse airborne and satellite sources. Three distinct processing pipelines were established. For hyperspectral data simulation, precursor satellite imagery (PRISMA and EnMAP) and high-resolution airborne measurements (HySpex) were harmonized to CHIME’s 30 m specifications utilizing Spectral Response Function (SRF) adjustments, Point Spread Function (PSF) spatial resampling, and 6S atmospheric radiative transfer modeling. For thermal data simulation, archive Landsat 8/9 and ASTER imagery were transformed into LSTM’s target 50 m, 5-band configuration using a synergistic two-step approach: a physics-based Spectral Super-Resolution (SSR) module followed by an AI-driven Spatial Super-Resolution (SpSR) transformer network. Evaluated across highly diverse inland, coastal, and riverine testbeds in Italy, the simulated products demonstrated high spectral, spatial, and radiometric fidelity. While inherently constrained by the native spectral ranges of the input sensors and by the current lack of absolute on-orbit mission data for validation, the downscaled images closely reproduced complex thermal patterns and water-quality gradients. 
Ultimately, this scalable framework provides the remote sensing community with early access to representative datasets and mission performance assessments, while accelerating pre-launch algorithm development and testing for environmental monitoring applications—particularly those focused on water discharges.</description>
	<pubDate>2026-04-30</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1384: A Multisensor Framework for Satellite Data Simulation: Generating Representative Datasets for Future ESA Missions—CHIME and LSTM</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1384">doi: 10.3390/rs18091384</a></p>
	<p>Authors:
		Pelagia Koutsantoni
		Maria Kremezi
		Vassilia Karathanassi
		Paola Di Lauro
		José Andrés Vargas-Solano
		Giulio Ceriola
		Antonello Aiello
		Elisabetta Lamboglia
		</p>
	<p>The preparation for next-generation Earth Observation missions, such as the European Space Agency’s (ESA) Copernicus Hyperspectral Imaging Mission for the Environment (CHIME) and Land Surface Temperature Monitoring (LSTM), requires robust pre-launch proxy datasets. Because current simulation methodologies frequently rely on isolated, platform-specific approaches, this study proposes a comprehensive, unified multisensor framework capable of dynamically generating operationally realistic CHIME and LSTM datasets from diverse airborne and satellite sources. Three distinct processing pipelines were established. For hyperspectral data simulation, precursor satellite imagery (PRISMA and EnMAP) and high-resolution airborne measurements (HySpex) were harmonized to CHIME’s 30 m specifications utilizing Spectral Response Function (SRF) adjustments, Point Spread Function (PSF) spatial resampling, and 6S atmospheric radiative transfer modeling. For thermal data simulation, archive Landsat 8/9 and ASTER imagery were transformed into LSTM’s target 50 m, 5-band configuration using a synergistic two-step approach: a physics-based Spectral Super-Resolution (SSR) module followed by an AI-driven Spatial Super-Resolution (SpSR) transformer network. Evaluated across highly diverse inland, coastal, and riverine testbeds in Italy, the simulated products demonstrated high spectral, spatial, and radiometric fidelity. While inherently constrained by the native spectral ranges of the input sensors and by the current lack of absolute on-orbit mission data for validation, the downscaled images closely reproduced complex thermal patterns and water-quality gradients. 
Ultimately, this scalable framework provides the remote sensing community with early access to representative datasets and mission performance assessments, while accelerating pre-launch algorithm development and testing for environmental monitoring applications—particularly those focused on water discharges.</p>
	]]></content:encoded>

	<dc:title>A Multisensor Framework for Satellite Data Simulation: Generating Representative Datasets for Future ESA Missions&#8212;CHIME and LSTM</dc:title>
			<dc:creator>Pelagia Koutsantoni</dc:creator>
			<dc:creator>Maria Kremezi</dc:creator>
			<dc:creator>Vassilia Karathanassi</dc:creator>
			<dc:creator>Paola Di Lauro</dc:creator>
			<dc:creator>José Andrés Vargas-Solano</dc:creator>
			<dc:creator>Giulio Ceriola</dc:creator>
			<dc:creator>Antonello Aiello</dc:creator>
			<dc:creator>Elisabetta Lamboglia</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091384</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-04-30</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-04-30</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1384</prism:startingPage>
		<prism:doi>10.3390/rs18091384</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1384</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1386">

	<title>Remote Sensing, Vol. 18, Pages 1386: LiDAR-Guided 3D Gaussian Splatting with Differentiable UDF-Based Regularization for Mine Tunnel Reconstruction</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1386</link>
	<description>Underground mine tunnels are often characterized by extremely uneven illumination, weak surface textures, and frequent dynamic interference, which severely undermine multi-view photometric consistency and easily induce floating artifacts and spatial divergence in conventional vision-based 3D Gaussian Splatting (3DGS). To address these issues, we propose a LiDAR-guided 3DGS framework for underground tunnel reconstruction based on dynamic-object removal and differentiable unsigned distance field (UDF) regularization. First, a dynamic foreground removal strategy with background restoration is introduced to remove transient foreground disturbances and restore static supervision consistency. Second, LiDAR point clouds are leveraged to initialize Gaussian primitives with a reliable geometric skeleton in weak-texture regions. More importantly, LiDAR priors are further converted into a differentiable UDF field and serve as a persistent geometric constraint. A dual-track mechanism is designed, where continuous geometric attraction pulls mildly deviated Gaussians back toward the physical surface and periodic out-of-bound culling removes severely drifting primitives. Experiments on real underground tunnel and chamber scenes show a clear scene-dependent behavior of the proposed method. In the tunnel scene, the method achieves the best SSIM together with competitive PSNR and LPIPS, while also reducing redundant out-of-bound primitives and improving geometric cleanliness. In the chamber scene, however, its advantages under global full-reference metrics are less evident. These results suggest that the proposed LiDAR-guided and differentiable UDF-regularized framework is particularly beneficial for weak-texture tunnel environments, while further improvement is still needed for chamber scenes with more complex appearance variations.</description>
	<pubDate>2026-04-30</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1386: LiDAR-Guided 3D Gaussian Splatting with Differentiable UDF-Based Regularization for Mine Tunnel Reconstruction</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1386">doi: 10.3390/rs18091386</a></p>
	<p>Authors:
		Xinyu Wu
		Yajing Liu
		Mei Li
		Huimin Guo
		Yuanpei Gou
		</p>
	<p>Underground mine tunnels are often characterized by extremely uneven illumination, weak surface textures, and frequent dynamic interference, which severely undermine multi-view photometric consistency and easily induce floating artifacts and spatial divergence in conventional vision-based 3D Gaussian Splatting (3DGS). To address these issues, we propose a LiDAR-guided 3DGS framework for underground tunnel reconstruction based on dynamic-object removal and differentiable unsigned distance field (UDF) regularization. First, a dynamic foreground removal strategy with background restoration is introduced to remove transient foreground disturbances and restore static supervision consistency. Second, LiDAR point clouds are leveraged to initialize Gaussian primitives with a reliable geometric skeleton in weak-texture regions. More importantly, LiDAR priors are further converted into a differentiable UDF field and serve as a persistent geometric constraint. A dual-track mechanism is designed, where continuous geometric attraction pulls mildly deviated Gaussians back toward the physical surface and periodic out-of-bound culling removes severely drifting primitives. Experiments on real underground tunnel and chamber scenes show a clear scene-dependent behavior of the proposed method. In the tunnel scene, the method achieves the best SSIM together with competitive PSNR and LPIPS, while also reducing redundant out-of-bound primitives and improving geometric cleanliness. In the chamber scene, however, its advantages under global full-reference metrics are less evident. These results suggest that the proposed LiDAR-guided and differentiable UDF-regularized framework is particularly beneficial for weak-texture tunnel environments, while further improvement is still needed for chamber scenes with more complex appearance variations.</p>
	]]></content:encoded>

	<dc:title>LiDAR-Guided 3D Gaussian Splatting with Differentiable UDF-Based Regularization for Mine Tunnel Reconstruction</dc:title>
			<dc:creator>Xinyu Wu</dc:creator>
			<dc:creator>Yajing Liu</dc:creator>
			<dc:creator>Mei Li</dc:creator>
			<dc:creator>Huimin Guo</dc:creator>
			<dc:creator>Yuanpei Gou</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091386</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-04-30</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-04-30</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1386</prism:startingPage>
		<prism:doi>10.3390/rs18091386</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1386</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1385">

	<title>Remote Sensing, Vol. 18, Pages 1385: Deep Learning-Based Waterline Detection Applied to Wave Period Measurement in the Nearshore Swash Zone</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1385</link>
	<description>This study proposes an integrated framework combining aerial photography of unmanned aerial vehicle (UAV), AI-based waterline detection, and a rigorous quality control (QC) scheme for estimating wave periods in the swash zone. The proposed approach automatically extracts instantaneous waterlines from high-resolution UAV videos and converts them into wave series using timestack analysis. The DeepUNet model achieved a pixel-level recognition score of 75.0% for both F1-score and Dice, demonstrating reliable performance in detecting thin waterline features. The integration of spatial and temporal QC further improves the robustness of waterline tracking and reduces false detections. Wave periods derived from wave series across different cross-sections in the swash zone exhibit spatial and qualitative consistency when contextually compared with offshore data buoy observations, while the quantitative differences reflect variation in nearshore wave dynamics. These results confirm the feasibility and effectiveness of the proposed framework for high-resolution nearshore wave monitoring.</description>
	<pubDate>2026-04-30</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1385: Deep Learning-Based Waterline Detection Applied to Wave Period Measurement in the Nearshore Swash Zone</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1385">doi: 10.3390/rs18091385</a></p>
	<p>Authors:
		Laurence Zsu-Hsin Chuang
		Po-An Tsai
		Mei-Huei Chen
		</p>
	<p>This study proposes an integrated framework combining aerial photography of unmanned aerial vehicle (UAV), AI-based waterline detection, and a rigorous quality control (QC) scheme for estimating wave periods in the swash zone. The proposed approach automatically extracts instantaneous waterlines from high-resolution UAV videos and converts them into wave series using timestack analysis. The DeepUNet model achieved a pixel-level recognition score of 75.0% for both F1-score and Dice, demonstrating reliable performance in detecting thin waterline features. The integration of spatial and temporal QC further improves the robustness of waterline tracking and reduces false detections. Wave periods derived from wave series across different cross-sections in the swash zone exhibit spatial and qualitative consistency when contextually compared with offshore data buoy observations, while the quantitative differences reflect variation in nearshore wave dynamics. These results confirm the feasibility and effectiveness of the proposed framework for high-resolution nearshore wave monitoring.</p>
	]]></content:encoded>

	<dc:title>Deep Learning-Based Waterline Detection Applied to Wave Period Measurement in the Nearshore Swash Zone</dc:title>
			<dc:creator>Laurence Zsu-Hsin Chuang</dc:creator>
			<dc:creator>Po-An Tsai</dc:creator>
			<dc:creator>Mei-Huei Chen</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091385</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-04-30</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-04-30</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1385</prism:startingPage>
		<prism:doi>10.3390/rs18091385</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1385</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1383">

	<title>Remote Sensing, Vol. 18, Pages 1383: 3D Aeromagnetic Inversion Using Unsupervised Deep Learning: Imaging Deep Magnetic Structures in the Panxi Region, SW China</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1383</link>
	<description>Panzhihua-type V–Ti magnetite deposits in the Panxi region are hosted in mafic–ultramafic intrusions, and their exploration potential depends strongly on the deep distribution of ore-bearing intrusions. High-resolution 3D magnetic inversion is an effective tool to image the geometry of these intrusions. Using 1:50,000 aeromagnetic data, we applied an unsupervised deep learning inversion to obtain the 3D magnetic susceptibility structure of related intrusions. The results show that magnetic anomalies are mainly NS and NEE trending, with minor NNW-trending features. NS-trending sources occur in the Baima–Miyi–Hongge zone between the Xigeda–Yuanmou and Anninghe faults, while NEE-trending anomalies lie west of the Xigeda–Yuanmou fault and east of the Chenghai fault. Integrated geological analysis reveals two Late Variscan rift systems: the Anninghe rift and the Panzhihua rift. Deep fault-controlled magma ascent and emplacement, forming the Emeishan large igneous province, are associated with strongly magnetic intrusions. Mantle plume-derived magmas, differentiated in shallow and deep magma chambers, generate well-differentiated layered complexes at depths &lt; 10 km with magnetic intensities of 5–10 A/m. Shear structures within paleorifts provide favorable emplacement conditions and controlled ore localization. We propose a three-in-one ore-controlling mechanism involving rift systems, intrusive rocks, and shear structures for Panzhihua-type V–Ti magnetite mineralization.</description>
	<pubDate>2026-04-30</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1383: 3D Aeromagnetic Inversion Using Unsupervised Deep Learning: Imaging Deep Magnetic Structures in the Panxi Region, SW China</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1383">doi: 10.3390/rs18091383</a></p>
	<p>Authors:
		Yu Zhang
		Chu Jian
		Zhipeng Cheng
		Jun Li
		Zhengwei Xu
		Chao Sui
		</p>
	<p>Panzhihua-type V–Ti magnetite deposits in the Panxi region are hosted in mafic–ultramafic intrusions, and their exploration potential depends strongly on the deep distribution of ore-bearing intrusions. High-resolution 3D magnetic inversion is an effective tool to image the geometry of these intrusions. Using 1:50,000 aeromagnetic data, we applied an unsupervised deep learning inversion to obtain the 3D magnetic susceptibility structure of related intrusions. The results show that magnetic anomalies are mainly NS and NEE trending, with minor NNW-trending features. NS-trending sources occur in the Baima–Miyi–Hongge zone between the Xigeda–Yuanmou and Anninghe faults, while NEE-trending anomalies lie west of the Xigeda–Yuanmou fault and east of the Chenghai fault. Integrated geological analysis reveals two Late Variscan rift systems: the Anninghe rift and the Panzhihua rift. Deep fault-controlled magma ascent and emplacement, forming the Emeishan large igneous province, are associated with strongly magnetic intrusions. Mantle plume-derived magmas, differentiated in shallow and deep magma chambers, generate well-differentiated layered complexes at depths &lt; 10 km with magnetic intensities of 5–10 A/m. Shear structures within paleorifts provide favorable emplacement conditions and controlled ore localization. We propose a three-in-one ore-controlling mechanism involving rift systems, intrusive rocks, and shear structures for Panzhihua-type V–Ti magnetite mineralization.</p>
	]]></content:encoded>

	<dc:title>3D Aeromagnetic Inversion Using Unsupervised Deep Learning: Imaging Deep Magnetic Structures in the Panxi Region, SW China</dc:title>
			<dc:creator>Yu Zhang</dc:creator>
			<dc:creator>Chu Jian</dc:creator>
			<dc:creator>Zhipeng Cheng</dc:creator>
			<dc:creator>Jun Li</dc:creator>
			<dc:creator>Zhengwei Xu</dc:creator>
			<dc:creator>Chao Sui</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091383</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-04-30</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-04-30</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1383</prism:startingPage>
		<prism:doi>10.3390/rs18091383</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1383</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1382">

	<title>Remote Sensing, Vol. 18, Pages 1382: Assessing the Application of Mobile Light Detection and Ranging in Complex Mixed-Species Forest Inventory</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1382</link>
	<description>Understanding forest dynamics requires reliable inventories that assess tree- and stand-level characteristics. Traditionally, this has relied on field measurements such as diameter at breast height (DBH), height, and crown attributes, but these methods are labor-intensive and spatially limited. Remote sensing, particularly Light Detection and Ranging (LiDAR), has expanded forest inventory capacity by generating three-dimensional structural information. Mobile laser scanning (MLS), a recent adaptation, offers flexible, high-resolution data collection, though its performance across complex forests is still being evaluated. This study assessed the effectiveness of MLS in detecting individual trees and estimating DBH in mixed-species forests of the Northeastern United States. We also evaluated the influence of tree- and plot-level characteristics on detection accuracy and DBH estimation. Results showed an 85.2% tree detection rate, a 23.5% commission rate, and a DBH root mean square error (RMSE) of 1.98 cm (9.65%). Among the variables tested, tree DBH was the only significant predictor of detection probability; tree density and relative density had minimal effect. These findings demonstrate that MLS can achieve precise DBH estimation when trees are correctly identified, but false detections remain a limitation. Further methodological improvements are needed to enhance accuracy in structurally complex forests and advance MLS for operational forest monitoring.</description>
	<pubDate>2026-04-30</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1382: Assessing the Application of Mobile Light Detection and Ranging in Complex Mixed-Species Forest Inventory</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1382">doi: 10.3390/rs18091382</a></p>
	<p>Authors:
		Hunter Moore
		Mark J. Ducey
		Benjamin T. Fraser
		Olivia Fraser
		</p>
	<p>Understanding forest dynamics requires reliable inventories that assess tree- and stand-level characteristics. Traditionally, this has relied on field measurements such as diameter at breast height (DBH), height, and crown attributes, but these methods are labor-intensive and spatially limited. Remote sensing, particularly Light Detection and Ranging (LiDAR), has expanded forest inventory capacity by generating three-dimensional structural information. Mobile laser scanning (MLS), a recent adaptation, offers flexible, high-resolution data collection, though its performance across complex forests is still being evaluated. This study assessed the effectiveness of MLS in detecting individual trees and estimating DBH in mixed-species forests of the Northeastern United States. We also evaluated the influence of tree- and plot-level characteristics on detection accuracy and DBH estimation. Results showed an 85.2% tree detection rate, a 23.5% commission rate, and a DBH root mean square error (RMSE) of 1.98 cm (9.65%). Among the variables tested, tree DBH was the only significant predictor of detection probability; tree density and relative density had minimal effect. These findings demonstrate that MLS can achieve precise DBH estimation when trees are correctly identified, but false detections remain a limitation. Further methodological improvements are needed to enhance accuracy in structurally complex forests and advance MLS for operational forest monitoring.</p>
	]]></content:encoded>

	<dc:title>Assessing the Application of Mobile Light Detection and Ranging in Complex Mixed-Species Forest Inventory</dc:title>
			<dc:creator>Hunter Moore</dc:creator>
			<dc:creator>Mark J. Ducey</dc:creator>
			<dc:creator>Benjamin T. Fraser</dc:creator>
			<dc:creator>Olivia Fraser</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091382</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-04-30</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-04-30</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1382</prism:startingPage>
		<prism:doi>10.3390/rs18091382</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1382</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1381">

	<title>Remote Sensing, Vol. 18, Pages 1381: A Refined Prediction Model for Regional Zenith Troposphere Combining ICEEMDAN and BiLSTM-XGBoost</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1381</link>
	<description>To address the degradation of zenith tropospheric delay (ZTD) prediction accuracy caused by time-varying noise and error accumulation in multi-step forecasting, this study proposes an integrated prediction model, named IBX, which combines improved complete ensemble empirical mode decomposition with adaptive noise (ICEEMDAN), bidirectional long short-term memory (BiLSTM), and extreme gradient boosting (XGBoost). In the proposed framework, ICEEMDAN is first used to decompose the original ZTD series into components at different temporal scales. A three-criterion reconstruction strategy based on the Pearson correlation coefficient, dominant period, and sample entropy is then applied to obtain high-, medium-, and low-frequency subsequences with clearer physical meanings. BiLSTM and XGBoost are used to predict the reconstructed components, and their outputs are fused through a root mean square error (RMS)-based weighting strategy to improve forecasting robustness. Hourly ZTD data from 27 global navigation satellite system (GNSS) stations in China from 2011 to 2020 were used for model validation under 1–12 h rolling forecasting horizons. The results show that IBX achieves the best overall performance among the tested models. Its mean RMS and mean absolute error (MAE) over the 1–12 h horizons are 14.17 mm and 10.24 mm, respectively, which are 22.5% and 21.4% lower than those of the baseline BiLSTM model. Spatial and climate-region-based analyses further indicate that ZTD prediction accuracy is strongly affected by altitude, regional moisture conditions, and climate type. The proposed IBX model shows stable error suppression across heterogeneous station environments, especially in the temperate monsoon region and low-altitude regions with complex water vapor variability. 
These results demonstrate that IBX provides a reliable and physically interpretable approach for short- to medium-term ZTD forecasting and real-time atmospheric delay correction.</description>
	<pubDate>2026-04-30</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1381: A Refined Prediction Model for Regional Zenith Troposphere Combining ICEEMDAN and BiLSTM-XGBoost</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1381">doi: 10.3390/rs18091381</a></p>
	<p>Authors:
		Chao Chen
		Yinghao Zhao
		Wenyuan Zhang
		Yulong Ge
		Jiajia Yuan
		Chao Hu
		</p>
	<p>To address the degradation of zenith tropospheric delay (ZTD) prediction accuracy caused by time-varying noise and error accumulation in multi-step forecasting, this study proposes an integrated prediction model, named IBX, which combines improved complete ensemble empirical mode decomposition with adaptive noise (ICEEMDAN), bidirectional long short-term memory (BiLSTM), and extreme gradient boosting (XGBoost). In the proposed framework, ICEEMDAN is first used to decompose the original ZTD series into components at different temporal scales. A three-criterion reconstruction strategy based on the Pearson correlation coefficient, dominant period, and sample entropy is then applied to obtain high-, medium-, and low-frequency subsequences with clearer physical meanings. BiLSTM and XGBoost are used to predict the reconstructed components, and their outputs are fused through a root mean square error (RMS)-based weighting strategy to improve forecasting robustness. Hourly ZTD data from 27 global navigation satellite system (GNSS) stations in China from 2011 to 2020 were used for model validation under 1–12 h rolling forecasting horizons. The results show that IBX achieves the best overall performance among the tested models. Its mean RMS and mean absolute error (MAE) over the 1–12 h horizons are 14.17 mm and 10.24 mm, respectively, which are 22.5% and 21.4% lower than those of the baseline BiLSTM model. Spatial and climate-region-based analyses further indicate that ZTD prediction accuracy is strongly affected by altitude, regional moisture conditions, and climate type. The proposed IBX model shows stable error suppression across heterogeneous station environments, especially in the temperate monsoon region and low-altitude regions with complex water vapor variability. 
These results demonstrate that IBX provides a reliable and physically interpretable approach for short- to medium-term ZTD forecasting and real-time atmospheric delay correction.</p>
	]]></content:encoded>

	<dc:title>A Refined Prediction Model for Regional Zenith Troposphere Combining ICEEMDAN and BiLSTM-XGBoost</dc:title>
			<dc:creator>Chao Chen</dc:creator>
			<dc:creator>Yinghao Zhao</dc:creator>
			<dc:creator>Wenyuan Zhang</dc:creator>
			<dc:creator>Yulong Ge</dc:creator>
			<dc:creator>Jiajia Yuan</dc:creator>
			<dc:creator>Chao Hu</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091381</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-04-30</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-04-30</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1381</prism:startingPage>
		<prism:doi>10.3390/rs18091381</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1381</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1379">

	<title>Remote Sensing, Vol. 18, Pages 1379: A Hybrid Drone SINS/GNSS Information Fusion Method Based on Attention-Augmented TCN in GNSS-Denied Environments</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1379</link>
	<description>In the field of drone navigation systems, a high-precision positioning solution can be provided by an integrated strapdown inertial navigation system (SINS)/global navigation satellite system (GNSS). But when satellite signals are interfered with or blocked by tall buildings, the errors of SINS will disperse rapidly due to the complex air and mechanical vibrations, leading to a serious degradation of navigation accuracy. To enhance the positioning performance in this situation, this paper proposes a hybrid information fusion method based on attention-augmented temporal convolutional network (TCN) for drone SINS/GNSS navigation system. A feature integration and prediction model is constructed to provide a pseudo-positioning reference for the integrated navigation filter during GNSS-denied periods, in which TCN is used to establish a predictive positioning error correction model based on inertial measurements and SINS data, while a self-attention model is incorporated to extract complex global drone motion features. The performance of the proposed method has been experimentally verified using Global Positioning System (GPS) and SINS data collected from real drone flight test. Comparison results among the proposed model, SINS with TCN, SINS with convergent Kalman filter (KF) prediction section and SINS-only indicate that the proposed method can effectively improve the drone positioning accuracy in specific GNSS-denied environments.</description>
	<pubDate>2026-04-29</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1379: A Hybrid Drone SINS/GNSS Information Fusion Method Based on Attention-Augmented TCN in GNSS-Denied Environments</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1379">doi: 10.3390/rs18091379</a></p>
	<p>Authors:
		Chuan Xu
		Shuai Chen
		Daxiang Zhao
		Zhikuan Hou
		Changhui Jiang
		</p>
	<p>In the field of drone navigation systems, a high-precision positioning solution can be provided by an integrated strapdown inertial navigation system (SINS)/global navigation satellite system (GNSS). But when satellite signals are interfered with or blocked by tall buildings, the errors of SINS will disperse rapidly due to the complex air and mechanical vibrations, leading to a serious degradation of navigation accuracy. To enhance the positioning performance in this situation, this paper proposes a hybrid information fusion method based on attention-augmented temporal convolutional network (TCN) for drone SINS/GNSS navigation system. A feature integration and prediction model is constructed to provide a pseudo-positioning reference for the integrated navigation filter during GNSS-denied periods, in which TCN is used to establish a predictive positioning error correction model based on inertial measurements and SINS data, while a self-attention model is incorporated to extract complex global drone motion features. The performance of the proposed method has been experimentally verified using Global Positioning System (GPS) and SINS data collected from real drone flight test. Comparison results among the proposed model, SINS with TCN, SINS with convergent Kalman filter (KF) prediction section and SINS-only indicate that the proposed method can effectively improve the drone positioning accuracy in specific GNSS-denied environments.</p>
	]]></content:encoded>

	<dc:title>A Hybrid Drone SINS/GNSS Information Fusion Method Based on Attention-Augmented TCN in GNSS-Denied Environments</dc:title>
			<dc:creator>Chuan Xu</dc:creator>
			<dc:creator>Shuai Chen</dc:creator>
			<dc:creator>Daxiang Zhao</dc:creator>
			<dc:creator>Zhikuan Hou</dc:creator>
			<dc:creator>Changhui Jiang</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091379</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-04-29</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-04-29</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1379</prism:startingPage>
		<prism:doi>10.3390/rs18091379</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1379</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1380">

	<title>Remote Sensing, Vol. 18, Pages 1380: DAFE-Net: Direction-Aware Feature Enhancement Network for SAR Ship Detection</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1380</link>
	<description>Synthetic Aperture Radar (SAR) ship detection is important for maritime surveillance and maritime security. However, existing methods still suffer from insufficient backbone representation, inadequate directional structure modeling, and limited cross-scale interaction under complex backgrounds. To address these issues, we propose a Direction-Aware Feature Enhancement Network (DAFE-Net). First, a Multi-Branch Feature Interaction Module (MBFIM) is designed to improve the collaborative representation of global structures and local details. Second, a Direction-Aware Contrast Enhancement Module (DACEM) is introduced to explicitly model the directional bright&amp;amp;ndash;dark coupled structures of SAR ships, thereby improving target&amp;amp;ndash;background discrimination under complex clutter. Finally, a Feature-Focused Diffusion Pyramid Network (FFDPN) is constructed to strengthen cross-scale feature interaction and improve the detection of multi-scale ship targets. Experimental results show that the proposed method outperforms several competitive detectors on the merged SSDD and HRSID dataset. Compared with DEIM-D-FINE, our method improves AP by 3.1% and APL by 5.0%. These results demonstrate that the proposed method provides an effective direction-aware modeling approach for SAR ship detection.</description>
	<pubDate>2026-04-29</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1380: DAFE-Net: Direction-Aware Feature Enhancement Network for SAR Ship Detection</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1380">doi: 10.3390/rs18091380</a></p>
	<p>Authors:
		Junjie Zeng
		Xinxin Tang
		Shuang Li
		</p>
	<p>Synthetic Aperture Radar (SAR) ship detection is important for maritime surveillance and maritime security. However, existing methods still suffer from insufficient backbone representation, inadequate directional structure modeling, and limited cross-scale interaction under complex backgrounds. To address these issues, we propose a Direction-Aware Feature Enhancement Network (DAFE-Net). First, a Multi-Branch Feature Interaction Module (MBFIM) is designed to improve the collaborative representation of global structures and local details. Second, a Direction-Aware Contrast Enhancement Module (DACEM) is introduced to explicitly model the directional bright&amp;amp;ndash;dark coupled structures of SAR ships, thereby improving target&amp;amp;ndash;background discrimination under complex clutter. Finally, a Feature-Focused Diffusion Pyramid Network (FFDPN) is constructed to strengthen cross-scale feature interaction and improve the detection of multi-scale ship targets. Experimental results show that the proposed method outperforms several competitive detectors on the merged SSDD and HRSID dataset. Compared with DEIM-D-FINE, our method improves AP by 3.1% and APL by 5.0%. These results demonstrate that the proposed method provides an effective direction-aware modeling approach for SAR ship detection.</p>
	]]></content:encoded>

	<dc:title>DAFE-Net: Direction-Aware Feature Enhancement Network for SAR Ship Detection</dc:title>
			<dc:creator>Junjie Zeng</dc:creator>
			<dc:creator>Xinxin Tang</dc:creator>
			<dc:creator>Shuang Li</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091380</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-04-29</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-04-29</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1380</prism:startingPage>
		<prism:doi>10.3390/rs18091380</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1380</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1376">

	<title>Remote Sensing, Vol. 18, Pages 1376: A Bidirectional Spatiotemporal Deep Learning Model with Integrated Vegetation&amp;ndash;Thermal Features for Wildfire Detection</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1376</link>
	<description>Quicker identifying abilities are required due to the rising frequency and severity of wildfires. Although polar-orbiting satellites with medium and high resolution can accurately identify wildfires, the majority of available fire detection images originate from such platforms. However, their low temporal revisit rates restrict the potential for early warning. Geostationary satellites provide minute-level, continuous monitoring that corresponds with the quick onset of wildfires; however, their dependence on conventional threshold methods and coarse spatial resolution result in notable detection errors. This study developed an integrated deep learning framework for accurate wildfire detection in low-resolution geostationary imagery in order to get over these restrictions. A novel dynamic index, the Dynamic Normalized Burn Ratio&amp;amp;mdash;Thermal (DNBRT), was proposed to characterize wildfire progression by integrating instantaneous thermal anomalies with dynamic vegetation signals. Based on this, a Fire Spatiotemporal Network (FST-Net) was designed, with an efficient residual backbone, a Convolutional Block Attention Module (CBAM) for feature refinement, and a Bidirectional Long Short-Term Memory (BiLSTM) network to capture temporal evolution. Trained and evaluated on an FY-4B-based fire/non-fire dataset, the proposed framework demonstrated superior performance. FST-Net outperformed benchmark models, improving accuracy and recall by averages of 10.30% and 9.32% respectively while achieving faster inference speed. An ablation experiment confirmed the critical role of fusing thermal and vegetation features in DNBRT, with 92.7% accuracy and 94.9% recall. Compared to the FY-4B fire product, the proposed framework enables earlier detection, maintains more complete tracking of fire progression, and exhibits greater robustness under complex burning conditions while achieving sub-hectare (0.36 ha) detection sensitivity at the 2 km resolution. 
By synergizing a discriminative dynamic index with an efficient spatiotemporal architecture, this work provides an effective solution for operational, real-time monitoring of small and early-stage wildfires from geostationary satellites.</description>
	<pubDate>2026-04-29</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1376: A Bidirectional Spatiotemporal Deep Learning Model with Integrated Vegetation&amp;ndash;Thermal Features for Wildfire Detection</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1376">doi: 10.3390/rs18091376</a></p>
	<p>Authors:
		Han Luo
		Ming Wang
		Lei He
		Bin Liu
		Yuxia Li
		Dan Tang
		</p>
	<p>Quicker identifying abilities are required due to the rising frequency and severity of wildfires. Although polar-orbiting satellites with medium and high resolution can accurately identify wildfires, the majority of available fire detection images originate from such platforms. However, their low temporal revisit rates restrict the potential for early warning. Geostationary satellites provide minute-level, continuous monitoring that corresponds with the quick onset of wildfires; however, their dependence on conventional threshold methods and coarse spatial resolution result in notable detection errors. This study developed an integrated deep learning framework for accurate wildfire detection in low-resolution geostationary imagery in order to get over these restrictions. A novel dynamic index, the Dynamic Normalized Burn Ratio&amp;amp;mdash;Thermal (DNBRT), was proposed to characterize wildfire progression by integrating instantaneous thermal anomalies with dynamic vegetation signals. Based on this, a Fire Spatiotemporal Network (FST-Net) was designed, with an efficient residual backbone, a Convolutional Block Attention Module (CBAM) for feature refinement, and a Bidirectional Long Short-Term Memory (BiLSTM) network to capture temporal evolution. Trained and evaluated on an FY-4B-based fire/non-fire dataset, the proposed framework demonstrated superior performance. FST-Net outperformed benchmark models, improving accuracy and recall by averages of 10.30% and 9.32% respectively while achieving faster inference speed. An ablation experiment confirmed the critical role of fusing thermal and vegetation features in DNBRT, with 92.7% accuracy and 94.9% recall. Compared to the FY-4B fire product, the proposed framework enables earlier detection, maintains more complete tracking of fire progression, and exhibits greater robustness under complex burning conditions while achieving sub-hectare (0.36 ha) detection sensitivity at the 2 km resolution. 
By synergizing a discriminative dynamic index with an efficient spatiotemporal architecture, this work provides an effective solution for operational, real-time monitoring of small and early-stage wildfires from geostationary satellites.</p>
	]]></content:encoded>

	<dc:title>A Bidirectional Spatiotemporal Deep Learning Model with Integrated Vegetation&amp;ndash;Thermal Features for Wildfire Detection</dc:title>
			<dc:creator>Han Luo</dc:creator>
			<dc:creator>Ming Wang</dc:creator>
			<dc:creator>Lei He</dc:creator>
			<dc:creator>Bin Liu</dc:creator>
			<dc:creator>Yuxia Li</dc:creator>
			<dc:creator>Dan Tang</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091376</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-04-29</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-04-29</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1376</prism:startingPage>
		<prism:doi>10.3390/rs18091376</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1376</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1378">

	<title>Remote Sensing, Vol. 18, Pages 1378: Groundwater Level Response Processes in Arid Northwest China Based on Remote Sensing and Causal Inference: From Influential Variables to Transmission Pathways</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1378</link>
	<description>Groundwater level (GWL) variations in the arid regions of Northwest China are driven by both natural processes and human activities. Identifying causal links between hydrological variables is fundamental to understanding groundwater evolution and conducting dynamic simulations. This study integrates the Mann&amp;amp;ndash;Kendall test, Seasonal-Trend decomposition using Loess, and the Peter and Clark Momentum-threshold and Momentary Conditional Independence (PCMCI) causal inference to analyze GWL variation characteristics and causal response processes across seven sub-basins in the Tarim Basin using multi-source remote sensing data. Results show an overall decline in GWL, primarily in the north-central part of the basin, with the Kaidu&amp;amp;ndash;Konqi River Basin reaching a maximum rate of 0.51 m/year. The trend components reveal localized depletion alongside broad stability, while seasonal components exhibit three types of temporal shifts in fluctuations. A mismatch exists between the prevalence of environmental influences and their causal strength. Daytime land surface temperature (LSTD), surface runoff (RO), and evapotranspiration (ET) show the highest detection frequencies, yet volumetric soil water in layers 2 (SWVL2) and RO exhibit the largest ranges in strength and drive variations at specific sites. Response times are asymmetric. Negative effects from ET on GWL transmit quickly, while positive recovery is slow. Conversely, positive recharge from volumetric soil water in layer 1 (SWVL1) is faster than its negative lag. At the basin scale, surface processes recharge GWL while mediating indirect influences from other variables. Climate and agricultural irrigation act as direct sinks. Depending on local conditions, three regional patterns emerge: direct climate-driven depletion, obstructed shallow water retention, and indirect compensation from agricultural water use. 
Causal networks indicate that RO and SWVL1 have the highest centrality and dominate water output, whereas SWVL2 acts as a passive receiver. Pathways from the surface to GWL are also asymmetric. The most frequent path involves step-by-step infiltration along RO &amp;amp;rarr; ET &amp;amp;rarr; SWVL1 &amp;amp;rarr; SWVL2 &amp;amp;rarr; GWL. In contrast, the paths with the highest cumulative strength are shorter and faster, specifically RO &amp;amp;rarr; ET &amp;amp;rarr; GWL and RO &amp;amp;rarr; SWVL1 &amp;amp;rarr; GWL. The identified pathways and lag parameters provide a direct basis for groundwater dynamic modeling and water resource management in the basin.</description>
	<pubDate>2026-04-29</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1378: Groundwater Level Response Processes in Arid Northwest China Based on Remote Sensing and Causal Inference: From Influential Variables to Transmission Pathways</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1378">doi: 10.3390/rs18091378</a></p>
	<p>Authors:
		Liang Zeng
		Shaohui Chen
		</p>
	<p>Groundwater level (GWL) variations in the arid regions of Northwest China are driven by both natural processes and human activities. Identifying causal links between hydrological variables is fundamental to understanding groundwater evolution and conducting dynamic simulations. This study integrates the Mann&amp;amp;ndash;Kendall test, Seasonal-Trend decomposition using Loess, and the Peter and Clark Momentum-threshold and Momentary Conditional Independence (PCMCI) causal inference to analyze GWL variation characteristics and causal response processes across seven sub-basins in the Tarim Basin using multi-source remote sensing data. Results show an overall decline in GWL, primarily in the north-central part of the basin, with the Kaidu&amp;amp;ndash;Konqi River Basin reaching a maximum rate of 0.51 m/year. The trend components reveal localized depletion alongside broad stability, while seasonal components exhibit three types of temporal shifts in fluctuations. A mismatch exists between the prevalence of environmental influences and their causal strength. Daytime land surface temperature (LSTD), surface runoff (RO), and evapotranspiration (ET) show the highest detection frequencies, yet volumetric soil water in layers 2 (SWVL2) and RO exhibit the largest ranges in strength and drive variations at specific sites. Response times are asymmetric. Negative effects from ET on GWL transmit quickly, while positive recovery is slow. Conversely, positive recharge from volumetric soil water in layer 1 (SWVL1) is faster than its negative lag. At the basin scale, surface processes recharge GWL while mediating indirect influences from other variables. Climate and agricultural irrigation act as direct sinks. Depending on local conditions, three regional patterns emerge: direct climate-driven depletion, obstructed shallow water retention, and indirect compensation from agricultural water use. 
Causal networks indicate that RO and SWVL1 have the highest centrality and dominate water output, whereas SWVL2 acts as a passive receiver. Pathways from the surface to GWL are also asymmetric. The most frequent path involves step-by-step infiltration along RO &amp;amp;rarr; ET &amp;amp;rarr; SWVL1 &amp;amp;rarr; SWVL2 &amp;amp;rarr; GWL. In contrast, the paths with the highest cumulative strength are shorter and faster, specifically RO &amp;amp;rarr; ET &amp;amp;rarr; GWL and RO &amp;amp;rarr; SWVL1 &amp;amp;rarr; GWL. The identified pathways and lag parameters provide a direct basis for groundwater dynamic modeling and water resource management in the basin.</p>
	]]></content:encoded>

	<dc:title>Groundwater Level Response Processes in Arid Northwest China Based on Remote Sensing and Causal Inference: From Influential Variables to Transmission Pathways</dc:title>
			<dc:creator>Liang Zeng</dc:creator>
			<dc:creator>Shaohui Chen</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091378</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-04-29</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-04-29</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1378</prism:startingPage>
		<prism:doi>10.3390/rs18091378</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1378</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1377">

	<title>Remote Sensing, Vol. 18, Pages 1377: Time-Lapse Absolute Gravity Measurements Unveil Subsurface Water Content Variations in Central Italy</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1377</link>
	<description>We present and discuss time-lapse gravity variations recorded by a large-scale absolute gravity network operating in Central Italy. The network comprises four stations distributed across the Lazio, Umbria, and Abruzzo regions, areas affected by the significant seismic activity of 2009 and 2016&amp;amp;ndash;2017. From 2018 to 2023, six campaigns were carefully conducted using an FG5 absolute gravimeter. We detected significant gravity decreases around 2020 reaching between &amp;amp;minus;15 and &amp;amp;minus;20 &amp;amp;mu;Gal in three sites and approximately &amp;amp;minus;37 &amp;amp;mu;Gal at the fourth. The Sentinel-1 time series of permanent scatterers (PS) allowed us to exclude significant contribution from vertical deformations to the observed gravity changes. We analyzed both ground-based data (rainfall gauges and well water levels) and satellite-based observations (the Gravity Recovery and Climate Experiment-Follow-On, GRACE-FO, mission) together with the Global Land Data Assimilation System (GLDAS) and precipitation models. The results reveal a significant decrease in the regional groundwater content from 2018 to the end of 2020, which coincides temporally with the observed gravity decrease. We show that the absolute gravity variation trends observed at all stations are consistent with regional-scale hydrological processes, pointing to a significant decrease in terrestrial water storage (TWS) during the same time interval. At L&amp;amp;rsquo;Aquila (AQUI), the gravity anomaly is larger than expected from regional hydrological products alone, suggesting an additional local component possibly related to the hydrogeological response of the fractured karst system undergoing significant post-seismic activity.</description>
	<pubDate>2026-04-29</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1377: Time-Lapse Absolute Gravity Measurements Unveil Subsurface Water Content Variations in Central Italy</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1377">doi: 10.3390/rs18091377</a></p>
	<p>Authors:
		Federica Riguzzi
		Francesco Pintori
		Filippo Greco
		Giovanna Berrino
		</p>
	<p>We present and discuss time-lapse gravity variations recorded by a large-scale absolute gravity network operating in Central Italy. The network comprises four stations distributed across the Lazio, Umbria, and Abruzzo regions, areas affected by the significant seismic activity of 2009 and 2016&amp;amp;ndash;2017. From 2018 to 2023, six campaigns were carefully conducted using an FG5 absolute gravimeter. We detected significant gravity decreases around 2020 reaching between &amp;amp;minus;15 and &amp;amp;minus;20 &amp;amp;mu;Gal in three sites and approximately &amp;amp;minus;37 &amp;amp;mu;Gal at the fourth. The Sentinel-1 time series of permanent scatterers (PS) allowed us to exclude significant contribution from vertical deformations to the observed gravity changes. We analyzed both ground-based data (rainfall gauges and well water levels) and satellite-based observations (the Gravity Recovery and Climate Experiment-Follow-On, GRACE-FO, mission) together with the Global Land Data Assimilation System (GLDAS) and precipitation models. The results reveal a significant decrease in the regional groundwater content from 2018 to the end of 2020, which coincides temporally with the observed gravity decrease. We show that the absolute gravity variation trends observed at all stations are consistent with regional-scale hydrological processes, pointing to a significant decrease in terrestrial water storage (TWS) during the same time interval. At L&amp;amp;rsquo;Aquila (AQUI), the gravity anomaly is larger than expected from regional hydrological products alone, suggesting an additional local component possibly related to the hydrogeological response of the fractured karst system undergoing significant post-seismic activity.</p>
	]]></content:encoded>

	<dc:title>Time-Lapse Absolute Gravity Measurements Unveil Subsurface Water Content Variations in Central Italy</dc:title>
			<dc:creator>Federica Riguzzi</dc:creator>
			<dc:creator>Francesco Pintori</dc:creator>
			<dc:creator>Filippo Greco</dc:creator>
			<dc:creator>Giovanna Berrino</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091377</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-04-29</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-04-29</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1377</prism:startingPage>
		<prism:doi>10.3390/rs18091377</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1377</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1375">

	<title>Remote Sensing, Vol. 18, Pages 1375: Asymmetric Responses of Spring and Autumn Phenology to Permafrost Degradation in the Source Region of the Yangtze River</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1375</link>
	<description>The Source Region of the Yangtze River is a high-altitude area with extensive permafrost on the Tibetan Plateau. While temperature, precipitation, and radiation significantly affect vegetation phenology, the influence of permafrost changes remains unclear. Using the daily Long-term Seamless NOAA AVHRR NDVI Dataset of China (2003&amp;amp;ndash;2022), we extracted the start (SOS) and end (EOS) of the growing season in the Source Region of the Yangtze River (SRYR). Soil thawing date (SOT) was obtained from freeze&amp;amp;ndash;thaw state products, while active layer thickness (ALT) was estimated using the Stefan model based on MODIS land surface temperature (LST). Partial least squares regression and mediation analysis quantified the direct and indirect effects of permafrost degradation. Results show: (1) The end of the growing season (EOS) became significantly earlier in 64.33% of the region, while the start of the growing season (SOS) showed little change. (2) The effect of SOT on SOS depends on moisture conditions. Earlier SOT leads to earlier SOS in wetter areas by supplying meltwater, but delays SOS in cold&amp;amp;ndash;dry areas by increasing soil water loss. (3) Thicker ALT strongly promotes earlier EOS, accounting for up to 42.61% of EOS variation in cold&amp;amp;ndash;dry zones, because a deeper active layer potentially promotes downward movement of water, which may further lead to the potential leaching of nutrients from the shallow root zone, limiting resources for shallow-rooted plants. (4) Alpine meadows respond more strongly to permafrost changes than alpine grasslands. Overall, water loss caused by permafrost degradation may reduce the potential lengthening of the growing season under climate warming, highlighting the key role of soil water in linking permafrost and vegetation dynamics.</description>
	<pubDate>2026-04-29</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1375: Asymmetric Responses of Spring and Autumn Phenology to Permafrost Degradation in the Source Region of the Yangtze River</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1375">doi: 10.3390/rs18091375</a></p>
	<p>Authors:
		Minghan Xu
		Shufang Tian
		Qian Li
		Tianqi Li
		Xiaoqing Zhao
		Ruiyao Fan
		</p>
	<p>The Source Region of the Yangtze River is a high-altitude area with extensive permafrost on the Tibetan Plateau. While temperature, precipitation, and radiation significantly affect vegetation phenology, the influence of permafrost changes remains unclear. Using the daily Long-term Seamless NOAA AVHRR NDVI Dataset of China (2003&amp;amp;ndash;2022), we extracted the start (SOS) and end (EOS) of the growing season in the Source Region of the Yangtze River (SRYR). Soil thawing date (SOT) was obtained from freeze&amp;amp;ndash;thaw state products, while active layer thickness (ALT) was estimated using the Stefan model based on MODIS land surface temperature (LST). Partial least squares regression and mediation analysis quantified the direct and indirect effects of permafrost degradation. Results show: (1) The end of the growing season (EOS) became significantly earlier in 64.33% of the region, while the start of the growing season (SOS) showed little change. (2) The effect of SOT on SOS depends on moisture conditions. Earlier SOT leads to earlier SOS in wetter areas by supplying meltwater, but delays SOS in cold&amp;amp;ndash;dry areas by increasing soil water loss. (3) Thicker ALT strongly promotes earlier EOS, accounting for up to 42.61% of EOS variation in cold&amp;amp;ndash;dry zones, because a deeper active layer potentially promotes downward movement of water, which may further lead to the potential leaching of nutrients from the shallow root zone, limiting resources for shallow-rooted plants. (4) Alpine meadows respond more strongly to permafrost changes than alpine grasslands. Overall, water loss caused by permafrost degradation may reduce the potential lengthening of the growing season under climate warming, highlighting the key role of soil water in linking permafrost and vegetation dynamics.</p>
	]]></content:encoded>

	<dc:title>Asymmetric Responses of Spring and Autumn Phenology to Permafrost Degradation in the Source Region of the Yangtze River</dc:title>
			<dc:creator>Minghan Xu</dc:creator>
			<dc:creator>Shufang Tian</dc:creator>
			<dc:creator>Qian Li</dc:creator>
			<dc:creator>Tianqi Li</dc:creator>
			<dc:creator>Xiaoqing Zhao</dc:creator>
			<dc:creator>Ruiyao Fan</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091375</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-04-29</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-04-29</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1375</prism:startingPage>
		<prism:doi>10.3390/rs18091375</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1375</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1374">

	<title>Remote Sensing, Vol. 18, Pages 1374: MPES-YOLO: A Multi-Scale Lightweight Framework with Selective Edge Enhancement for Loess Landslide Detection</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1374</link>
	<description>Loess landslides in northwestern China are highly unstable and difficult to distinguish due to sparse vegetation and their spectral and morphological similarity to the surrounding terrain. These landslides demonstrate considerable diversity in manifestation, encompassing shallow translational slides, small-scale features, partially obscured formations, and instances with irregular or poorly defined boundaries. To address the above issues, we propose MPES-YOLO, a multi-scale lightweight YOLO-based framework with selective edge enhancement to detect loess landslides. This model is based on the YOLOv8 architecture and incorporates a multi-scale partial convolution and exponential moving average (MPCE) module to improve multi-scale feature representation while reducing computational cost and enhancing small-target sensitivity. Additionally, to address ambiguous boundaries, a selective edge enhancement (SEE) module is introduced to extract authentic object edges from original images and inject them into key training layers, improving boundary perception. Finally, SIoU is adopted to improve geometric consistency for irregular landslide boundary localization. This paper first verified the basic detection performance of MPES-YOLO on the publicly available Bijie landslide dataset. Then, an experimental study was conducted in the loess landslides of Yan&amp;amp;rsquo;an City, Shaanxi Province. The mAP@0.5 was 91.9%, and the parameter quantity was reduced by 23.3% compared with the baseline model. A generalization experiment was also carried out on the landslides in the Ningxia region, with the mAP@0.5 being 97.4%. The results show that MPES-YOLO achieves a strong balance between detection accuracy and computational efficiency, providing an effective and scalable solution for automated loess landslide detection and geological disaster early warning.</description>
	<pubDate>Wed, 29 Apr 2026 00:00:00 +0000</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1374: MPES-YOLO: A Multi-Scale Lightweight Framework with Selective Edge Enhancement for Loess Landslide Detection</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1374">doi: 10.3390/rs18091374</a></p>
	<p>Authors:
		Hanyu Cheng
		Jiali Su
		Jiangbo Xi
		Haixing Shang
		Zhen Zhang
		Bingkun Wang
		Pan Li
		</p>
	<p>Loess landslides in northwestern China are highly unstable and difficult to distinguish due to sparse vegetation and their spectral and morphological similarity to the surrounding terrain. These landslides demonstrate considerable diversity in manifestation, encompassing shallow translational slides, small-scale features, partially obscured formations, and instances with irregular or poorly defined boundaries. To address the above issues, we propose MPES-YOLO, a multi-scale lightweight YOLO-based framework with selective edge enhancement to detect loess landslides. This model is based on the YOLOv8 architecture and incorporates a multi-scale partial convolution and exponential moving average (MPCE) module to improve multi-scale feature representation while reducing computational cost and enhancing small-target sensitivity. Additionally, to address ambiguous boundaries, a selective edge enhancement (SEE) module is introduced to extract authentic object edges from original images and inject them into key training layers, improving boundary perception. Finally, SIoU is adopted to improve geometric consistency for irregular landslide boundary localization. This paper first verified the basic detection performance of MPES-YOLO on the publicly available Bijie landslide dataset. Then, an experimental study was conducted in the loess landslides of Yan&amp;rsquo;an City, Shaanxi Province. The mAP@0.5 was 91.9%, and the parameter quantity was reduced by 23.3% compared with the baseline model. A generalization experiment was also carried out on the landslides in the Ningxia region, with the mAP@0.5 being 97.4%. The results show that MPES-YOLO achieves a strong balance between detection accuracy and computational efficiency, providing an effective and scalable solution for automated loess landslide detection and geological disaster early warning.</p>
	]]></content:encoded>

	<dc:title>MPES-YOLO: A Multi-Scale Lightweight Framework with Selective Edge Enhancement for Loess Landslide Detection</dc:title>
			<dc:creator>Hanyu Cheng</dc:creator>
			<dc:creator>Jiali Su</dc:creator>
			<dc:creator>Jiangbo Xi</dc:creator>
			<dc:creator>Haixing Shang</dc:creator>
			<dc:creator>Zhen Zhang</dc:creator>
			<dc:creator>Bingkun Wang</dc:creator>
			<dc:creator>Pan Li</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091374</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-04-29</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-04-29</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1374</prism:startingPage>
		<prism:doi>10.3390/rs18091374</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1374</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1373">

	<title>Remote Sensing, Vol. 18, Pages 1373: Vegetation Mapping in Heterogeneous Forest&amp;ndash;Shrub&amp;ndash;Grass Ecosystems Using Fused High-Resolution Optical and SAR Data</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1373</link>
	<description>Forest, shrubland, and grassland exhibit highly overlapping characteristics, and single-modal remote sensing data cannot simultaneously capture both spectral and structural information. Moreover, multimodal fusion learning of optical and SAR data faces challenges such as the lack of high-quality samples and difficulties in effective cross-modal feature fusion. Therefore, a high-resolution multimodal remote sensing feature dataset (GF23FSG) is constructed for the fine classification of forest, shrubland, and grassland, and a Cross-modal Adaptive Structure Fusion Network (CASFNet) is proposed. In response to the feature heterogeneity of optical and SAR, a cross-modal adaptive fusion module based on spatial alignment and a dynamic weight allocation strategy is proposed, which effectively enhances the learning of spectral&amp;ndash;spectrum heterogeneous features. In addition, a multi-level auxiliary supervision mechanism is introduced to strengthen feature representation learning. Gradient constraints are further imposed on deep-level features to improve the model&amp;rsquo;s ability to capture and learn deep cross-modal representations, thereby effectively mitigating representation degradation during the feature fusion process. Experiments on the self-constructed GF23FSG dataset and the publicly available SEN12MS dataset achieve OA of 77.38% and 71.84%, respectively, demonstrating superior classification performance compared with SOTA methods. In addition, comparative analysis with public land cover products and field samples further confirm the reliability and generalization performance of the proposed dataset and model for the fine classification of forest, shrubland, and grassland. This study provides a new solution for the fine classification of forest, shrubland, and grassland from multimodal remote sensing images from the perspectives of dataset construction and methodological design.</description>
	<pubDate>Wed, 29 Apr 2026 00:00:00 +0000</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1373: Vegetation Mapping in Heterogeneous Forest&amp;ndash;Shrub&amp;ndash;Grass Ecosystems Using Fused High-Resolution Optical and SAR Data</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1373">doi: 10.3390/rs18091373</a></p>
	<p>Authors:
		Qingshuang Pang
		Zhanliang Yuan
		Xiaofei Mi
		Jian Yang
		Weibing Du
		Jian Zhang
		Jilong Zhang
		Kang Du
		Zheng Guo
		</p>
	<p>Forest, shrubland, and grassland exhibit highly overlapping characteristics, and single-modal remote sensing data cannot simultaneously capture both spectral and structural information. Moreover, multimodal fusion learning of optical and SAR data faces challenges such as the lack of high-quality samples and difficulties in effective cross-modal feature fusion. Therefore, a high-resolution multimodal remote sensing feature dataset (GF23FSG) is constructed for the fine classification of forest, shrubland, and grassland, and a Cross-modal Adaptive Structure Fusion Network (CASFNet) is proposed. In response to the feature heterogeneity of optical and SAR, a cross-modal adaptive fusion module based on spatial alignment and a dynamic weight allocation strategy is proposed, which effectively enhances the learning of spectral&amp;ndash;spectrum heterogeneous features. In addition, a multi-level auxiliary supervision mechanism is introduced to strengthen feature representation learning. Gradient constraints are further imposed on deep-level features to improve the model&amp;rsquo;s ability to capture and learn deep cross-modal representations, thereby effectively mitigating representation degradation during the feature fusion process. Experiments on the self-constructed GF23FSG dataset and the publicly available SEN12MS dataset achieve OA of 77.38% and 71.84%, respectively, demonstrating superior classification performance compared with SOTA methods. In addition, comparative analysis with public land cover products and field samples further confirm the reliability and generalization performance of the proposed dataset and model for the fine classification of forest, shrubland, and grassland. This study provides a new solution for the fine classification of forest, shrubland, and grassland from multimodal remote sensing images from the perspectives of dataset construction and methodological design.</p>
	]]></content:encoded>

	<dc:title>Vegetation Mapping in Heterogeneous Forest&amp;ndash;Shrub&amp;ndash;Grass Ecosystems Using Fused High-Resolution Optical and SAR Data</dc:title>
			<dc:creator>Qingshuang Pang</dc:creator>
			<dc:creator>Zhanliang Yuan</dc:creator>
			<dc:creator>Xiaofei Mi</dc:creator>
			<dc:creator>Jian Yang</dc:creator>
			<dc:creator>Weibing Du</dc:creator>
			<dc:creator>Jian Zhang</dc:creator>
			<dc:creator>Jilong Zhang</dc:creator>
			<dc:creator>Kang Du</dc:creator>
			<dc:creator>Zheng Guo</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091373</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-04-29</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-04-29</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1373</prism:startingPage>
		<prism:doi>10.3390/rs18091373</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1373</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1372">

	<title>Remote Sensing, Vol. 18, Pages 1372: Mainlobe Coherent Source 3D Imaging via Monopulse Ratio-Based Spatial Steering Vector and Polarization Diversity</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1372</link>
	<description>Traditional angle estimation for sum-and-difference monopulse radar systems is predominantly designed for non-coherent sources or relies on fixed closed-form solutions. However, in the presence of coherent sources, these methods often suffer from performance degradation due to data rank deficiency or unavoidable suppression of target power. To address these limitations, this paper presents a single-snapshot angle estimation method for coherent sources by leveraging the angular super-resolution and ranging capabilities of monopulse radar to achieve 3D imaging in the range-angle domain. The approach utilizes the monopulse ratio spatial steering vector as a search vector and projects the received data onto its orthogonal subspace. By exploiting the coupling characteristics between signal polarization and angle, a cost function is constructed to validate the feedback of the search vector. Theoretical analysis demonstrates that for dual-target scenarios, the cost function reaches its minimum precisely when the search vector aligns with a target&amp;rsquo;s steering vector, enabling the accurate estimation of both targets&amp;rsquo; angles. Furthermore, the polarization-angle coupling constraint reduces the 2D angular search space to a 1D line, significantly lowering computational complexity. Simulation results indicate that the method effectively resolves dual targets under single-snapshot conditions and maintains robust performance even with significant energy disparities. Finally, 3D localization of multiple airborne point targets is achieved by integrating 2D angular information with range data, validating the potential of the method for advanced radar imaging and positioning.</description>
	<pubDate>Wed, 29 Apr 2026 00:00:00 +0000</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1372: Mainlobe Coherent Source 3D Imaging via Monopulse Ratio-Based Spatial Steering Vector and Polarization Diversity</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1372">doi: 10.3390/rs18091372</a></p>
	<p>Authors:
		Jiahao Tian
		Jianxiong Zhou
		Zhanling Wang
		Xiangting Wang
		Fulai Wang
		Zhiyong Song
		Ping Wang
		</p>
	<p>Traditional angle estimation for sum-and-difference monopulse radar systems is predominantly designed for non-coherent sources or relies on fixed closed-form solutions. However, in the presence of coherent sources, these methods often suffer from performance degradation due to data rank deficiency or unavoidable suppression of target power. To address these limitations, this paper presents a single-snapshot angle estimation method for coherent sources by leveraging the angular super-resolution and ranging capabilities of monopulse radar to achieve 3D imaging in the range-angle domain. The approach utilizes the monopulse ratio spatial steering vector as a search vector and projects the received data onto its orthogonal subspace. By exploiting the coupling characteristics between signal polarization and angle, a cost function is constructed to validate the feedback of the search vector. Theoretical analysis demonstrates that for dual-target scenarios, the cost function reaches its minimum precisely when the search vector aligns with a target&amp;rsquo;s steering vector, enabling the accurate estimation of both targets&amp;rsquo; angles. Furthermore, the polarization-angle coupling constraint reduces the 2D angular search space to a 1D line, significantly lowering computational complexity. Simulation results indicate that the method effectively resolves dual targets under single-snapshot conditions and maintains robust performance even with significant energy disparities. Finally, 3D localization of multiple airborne point targets is achieved by integrating 2D angular information with range data, validating the potential of the method for advanced radar imaging and positioning.</p>
	]]></content:encoded>

	<dc:title>Mainlobe Coherent Source 3D Imaging via Monopulse Ratio-Based Spatial Steering Vector and Polarization Diversity</dc:title>
			<dc:creator>Jiahao Tian</dc:creator>
			<dc:creator>Jianxiong Zhou</dc:creator>
			<dc:creator>Zhanling Wang</dc:creator>
			<dc:creator>Xiangting Wang</dc:creator>
			<dc:creator>Fulai Wang</dc:creator>
			<dc:creator>Zhiyong Song</dc:creator>
			<dc:creator>Ping Wang</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091372</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-04-29</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-04-29</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1372</prism:startingPage>
		<prism:doi>10.3390/rs18091372</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1372</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1370">

	<title>Remote Sensing, Vol. 18, Pages 1370: SAR-Based Submesoscale Oceanic Eddy Detection Using Deep Fusion Feature Pyramid Network with Scale-Aware Learning</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1370</link>
	<description>Submesoscale oceanic eddies play a crucial role in ocean dynamics and climate systems, while Synthetic Aperture Radar (SAR) offers distinct advantages for observing these fine-scale phenomena; the advancement of automated detection algorithms is currently hindered by the lack of publicly available, high-quality benchmark datasets. To address this gap, this paper constructs a universal benchmark dataset for submesoscale eddies and presents an improved anchor-free object detection framework based on Fully Convolutional One-Stage (FCOS). We propose two key innovations: (1) a Deep Fusion Feature Pyramid Network (DF-FPN) that integrates adaptive multi-scale feature fusion directly into the pyramid construction process through deep fusion Adaptive Spatial Feature Fusion (ASFF) modules, enabling bidirectional feature enhancement and global context-aware fusion and (2) a Pixel-level Statistical Description Learning (PSDL) module that enhances feature representation by learning statistical descriptors across multiple scales. The DF-FPN replaces traditional staged optimization with an intrinsic deep fusion paradigm, significantly improving feature quality. Extensive experiments on our constructed dataset demonstrate that our method achieves 66.6% mAP, 91.3% AP50, and 80.5% AP75. These results represent a substantial improvement over the FCOS baseline and outperform other state-of-the-art detectors, providing a robust and efficient solution for operational submesoscale eddy monitoring in SAR imagery. Enhanced detection capacity of this kind offers a critical observational foundation for advancing research on upper-ocean nutrient transport, carbon cycle dynamics, and the dispersion of marine pollutants, thereby supporting broader environmental monitoring and climate-related objectives.</description>
	<pubDate>Wed, 29 Apr 2026 00:00:00 +0000</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1370: SAR-Based Submesoscale Oceanic Eddy Detection Using Deep Fusion Feature Pyramid Network with Scale-Aware Learning</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1370">doi: 10.3390/rs18091370</a></p>
	<p>Authors:
		Songhao Peng
		Yongqiang Chen
		Chunle Wang
		</p>
	<p>Submesoscale oceanic eddies play a crucial role in ocean dynamics and climate systems, while Synthetic Aperture Radar (SAR) offers distinct advantages for observing these fine-scale phenomena; the advancement of automated detection algorithms is currently hindered by the lack of publicly available, high-quality benchmark datasets. To address this gap, this paper constructs a universal benchmark dataset for submesoscale eddies and presents an improved anchor-free object detection framework based on Fully Convolutional One-Stage (FCOS). We propose two key innovations: (1) a Deep Fusion Feature Pyramid Network (DF-FPN) that integrates adaptive multi-scale feature fusion directly into the pyramid construction process through deep fusion Adaptive Spatial Feature Fusion (ASFF) modules, enabling bidirectional feature enhancement and global context-aware fusion and (2) a Pixel-level Statistical Description Learning (PSDL) module that enhances feature representation by learning statistical descriptors across multiple scales. The DF-FPN replaces traditional staged optimization with an intrinsic deep fusion paradigm, significantly improving feature quality. Extensive experiments on our constructed dataset demonstrate that our method achieves 66.6% mAP, 91.3% AP50, and 80.5% AP75. These results represent a substantial improvement over the FCOS baseline and outperform other state-of-the-art detectors, providing a robust and efficient solution for operational submesoscale eddy monitoring in SAR imagery. Enhanced detection capacity of this kind offers a critical observational foundation for advancing research on upper-ocean nutrient transport, carbon cycle dynamics, and the dispersion of marine pollutants, thereby supporting broader environmental monitoring and climate-related objectives.</p>
	]]></content:encoded>

	<dc:title>SAR-Based Submesoscale Oceanic Eddy Detection Using Deep Fusion Feature Pyramid Network with Scale-Aware Learning</dc:title>
			<dc:creator>Songhao Peng</dc:creator>
			<dc:creator>Yongqiang Chen</dc:creator>
			<dc:creator>Chunle Wang</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091370</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-04-29</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-04-29</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1370</prism:startingPage>
		<prism:doi>10.3390/rs18091370</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1370</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1371">

	<title>Remote Sensing, Vol. 18, Pages 1371: Daily Nighttime Lights for Rapid Post-Earthquake Damage Assessment: Multi-Scale and Azimuthal Differences from the Mw 7.7 Myanmar Earthquake</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1371</link>
	<description>On 28 March 2025, a Mw 7.7 earthquake struck central Myanmar, where rapid mapping of early impacts is crucial for post-earthquake assessment and emergency response. Existing nighttime light studies often emphasize single-scale brightness loss, with limited characterization of azimuthal differences within intensity zones and their coupling with population/building exposure, although these factors are essential for explaining spatially uneven earthquake impacts and for improving the interpretation of nighttime light loss patterns. This study integrates daily VIIRS nighttime lights (500 m) with USGS intensity and population/building density to build an intensity&amp;ndash;azimuth framework with six directional sectors, quantify pre-/post-earthquake changes at county, patch, and pixel scales, apply bivariate LISA to detect local coupling patterns, and validate against CEMS Rapid Mapping. The results show clear scale complementarity: county aggregation robustly delineates the macro impact extent but smooths internal contrasts; pixel analysis captures fragmented disturbances yet is noise-sensitive; patch-based mapping best aligns with built-up areas at 500 m resolution and shows higher agreement with CEMS in well-lit urban areas. Azimuth&amp;ndash;intensity patterns indicate more concentrated NTL reduction in north&amp;ndash;south high-intensity zones (NTL = &amp;minus;0.53&amp;ndash;&amp;minus;15.67 nW&amp;middot;cm&amp;minus;2&amp;middot;sr&amp;minus;1), with local rebounds in some east&amp;ndash;west sectors. The framework provides interpretable support for rapid loss assessment and priority-based resource allocation.</description>
	<pubDate>Wed, 29 Apr 2026 00:00:00 +0000</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1371: Daily Nighttime Lights for Rapid Post-Earthquake Damage Assessment: Multi-Scale and Azimuthal Differences from the Mw 7.7 Myanmar Earthquake</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1371">doi: 10.3390/rs18091371</a></p>
	<p>Authors:
		Zihao Wu
		Xue Li
		Xiaoyi Hu
		Yani Huang
		</p>
	<p>On 28 March 2025, a Mw 7.7 earthquake struck central Myanmar, where rapid mapping of early impacts is crucial for post-earthquake assessment and emergency response. Existing nighttime light studies often emphasize single-scale brightness loss, with limited characterization of azimuthal differences within intensity zones and their coupling with population/building exposure, although these factors are essential for explaining spatially uneven earthquake impacts and for improving the interpretation of nighttime light loss patterns. This study integrates daily VIIRS nighttime lights (500 m) with USGS intensity and population/building density to build an intensity&amp;ndash;azimuth framework with six directional sectors, quantify pre-/post-earthquake changes at county, patch, and pixel scales, apply bivariate LISA to detect local coupling patterns, and validate against CEMS Rapid Mapping. The results show clear scale complementarity: county aggregation robustly delineates the macro impact extent but smooths internal contrasts; pixel analysis captures fragmented disturbances yet is noise-sensitive; patch-based mapping best aligns with built-up areas at 500 m resolution and shows higher agreement with CEMS in well-lit urban areas. Azimuth&amp;ndash;intensity patterns indicate more concentrated NTL reduction in north&amp;ndash;south high-intensity zones (NTL = &amp;minus;0.53&amp;ndash;&amp;minus;15.67 nW&amp;middot;cm&amp;minus;2&amp;middot;sr&amp;minus;1), with local rebounds in some east&amp;ndash;west sectors. The framework provides interpretable support for rapid loss assessment and priority-based resource allocation.</p>
	]]></content:encoded>

	<dc:title>Daily Nighttime Lights for Rapid Post-Earthquake Damage Assessment: Multi-Scale and Azimuthal Differences from the Mw 7.7 Myanmar Earthquake</dc:title>
			<dc:creator>Zihao Wu</dc:creator>
			<dc:creator>Xue Li</dc:creator>
			<dc:creator>Xiaoyi Hu</dc:creator>
			<dc:creator>Yani Huang</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091371</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-04-29</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-04-29</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1371</prism:startingPage>
		<prism:doi>10.3390/rs18091371</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1371</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1368">

	<title>Remote Sensing, Vol. 18, Pages 1368: Daily-Scale Meteorological Normalization of Surface Solar Radiation in Varying Pollution Levels: A Statistical Case Study in Beijing (2015&amp;ndash;2019)</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1368</link>
	<description>Surface solar radiation at the ground is affected by aerosols, clouds, and atmospheric moisture, as well as by circulation-related conditions that influence cloud formation and pollutant transport. In daily observations, these influences are mixed, which makes pollution-related variability difficult to interpret. We analyzed data from Beijing station 54511 (2015&amp;ndash;2019), including daily integrated radiation components and collocated meteorological and pollution variables. We used wavelet coherence, pollution-stratified association analysis, and gray relational analysis, and compared two meteorological normalization methods: multiple linear regression (MLR) and random forest (RF). The results show that meteorological&amp;ndash;radiation relationships vary systematically across pollution levels, indicating substantial meteorological confounding in daily radiation analyses. Among the radiation components, DR shows the clearest pollution-dependent shift in its relationship with RH, while several direct components become less sensitive to cloud cover under heavier pollution. RF reproduced daily radiation components with strong predictive performance (R2 = 0.83&amp;ndash;0.88), and the meteorologically adjusted anomalies from RF were consistent with those from MLR (r = 0.63&amp;ndash;0.78 across components). These findings suggest that both MLR and RF can be effectively used to normalize meteorological effects in daily station records. The analysis supports routine interpretation of day-to-day surface radiation variability and can be extended to multi-site studies and finer temporal resolution.</description>
	<pubDate>Wed, 29 Apr 2026 00:00:00 +0000</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1368: Daily-Scale Meteorological Normalization of Surface Solar Radiation in Varying Pollution Levels: A Statistical Case Study in Beijing (2015&amp;ndash;2019)</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1368">doi: 10.3390/rs18091368</a></p>
	<p>Authors:
		Tong Wu
		Zhigang Li
		Xueying Zhou
		</p>
	<p>Surface solar radiation at the ground is affected by aerosols, clouds, and atmospheric moisture, as well as by circulation-related conditions that influence cloud formation and pollutant transport. In daily observations, these influences are mixed, which makes pollution-related variability difficult to interpret. We analyzed data from Beijing station 54511 (2015&amp;ndash;2019), including daily integrated radiation components and collocated meteorological and pollution variables. We used wavelet coherence, pollution-stratified association analysis, and gray relational analysis, and compared two meteorological normalization methods: multiple linear regression (MLR) and random forest (RF). The results show that meteorological&amp;ndash;radiation relationships vary systematically across pollution levels, indicating substantial meteorological confounding in daily radiation analyses. Among the radiation components, DR shows the clearest pollution-dependent shift in its relationship with RH, while several direct components become less sensitive to cloud cover under heavier pollution. RF reproduced daily radiation components with strong predictive performance (R2 = 0.83&amp;ndash;0.88), and the meteorologically adjusted anomalies from RF were consistent with those from MLR (r = 0.63&amp;ndash;0.78 across components). These findings suggest that both MLR and RF can be effectively used to normalize meteorological effects in daily station records. The analysis supports routine interpretation of day-to-day surface radiation variability and can be extended to multi-site studies and finer temporal resolution.</p>
	]]></content:encoded>

	<dc:title>Daily-Scale Meteorological Normalization of Surface Solar Radiation in Varying Pollution Levels: A Statistical Case Study in Beijing (2015&amp;ndash;2019)</dc:title>
			<dc:creator>Tong Wu</dc:creator>
			<dc:creator>Zhigang Li</dc:creator>
			<dc:creator>Xueying Zhou</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091368</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-04-29</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-04-29</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1368</prism:startingPage>
		<prism:doi>10.3390/rs18091368</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1368</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1369">

	<title>Remote Sensing, Vol. 18, Pages 1369: Efficient Compressed Sensing-Based Backprojection Approach for Small Drone-Borne W-Band SAR Imaging</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1369</link>
	<description>Small drone-borne W-band synthetic aperture radar (SAR) systems are highly susceptible to motion errors that conventional navigation sensors and phase-based autofocus algorithms cannot effectively resolve due to phase wrapping. This paper presents a sensor-independent imaging framework to robustly suppress these errors. First, joint time-frequency analysis is employed to identify and discard motion-corrupted pulses. Subsequently, a compressed sensing-based backprojection algorithm reconstructs high-resolution images from the remaining sparse dataset. To alleviate the substantial memory burden of matrix-based compressed sensing, the reconstruction is reformulated iteratively. Experimental results confirm that the proposed method maintains structural integrity even when up to 60% of the received pulses are corrupted and demonstrates robust focusing down to an SNR of &amp;minus;25 dB. This approach provides a practical, memory-efficient, and cost-effective solution for SAR platforms.</description>
	<pubDate>Wed, 29 Apr 2026 00:00:00 +0000</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1369: Efficient Compressed Sensing-Based Backprojection Approach for Small Drone-Borne W-Band SAR Imaging</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1369">doi: 10.3390/rs18091369</a></p>
	<p>Authors:
		In-Hyeok Lee
		Min-Gon Cho
		Hyun-Dong Kim
		Kyung-Tae Kim
		</p>
	<p>Small drone-borne W-band synthetic aperture radar (SAR) systems are highly susceptible to motion errors that conventional navigation sensors and phase-based autofocus algorithms cannot effectively resolve due to phase wrapping. This paper presents a sensor-independent imaging framework to robustly suppress these errors. First, joint time-frequency analysis is employed to identify and discard motion-corrupted pulses. Subsequently, a compressed sensing-based backprojection algorithm reconstructs high-resolution images from the remaining sparse dataset. To alleviate the substantial memory burden of matrix-based compressed sensing, the reconstruction is reformulated iteratively. Experimental results confirm that the proposed method maintains structural integrity even when up to 60% of the received pulses are corrupted and demonstrates robust focusing down to an SNR of &amp;minus;25 dB. This approach provides a practical, memory-efficient, and cost-effective solution for SAR platforms.</p>
	]]></content:encoded>

	<dc:title>Efficient Compressed Sensing-Based Backprojection Approach for Small Drone-Borne W-Band SAR Imaging</dc:title>
			<dc:creator>In-Hyeok Lee</dc:creator>
			<dc:creator>Min-Gon Cho</dc:creator>
			<dc:creator>Hyun-Dong Kim</dc:creator>
			<dc:creator>Kyung-Tae Kim</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091369</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-04-29</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-04-29</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1369</prism:startingPage>
		<prism:doi>10.3390/rs18091369</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1369</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1366">

	<title>Remote Sensing, Vol. 18, Pages 1366: Deep Learning Insights into Seamless Reconstruction of XCO2 in China: Spatiotemporal Patterns and Driving Mechanisms</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1366</link>
	<description>Accurate quantification of atmospheric column-averaged dry-air CO2 mole fractions (XCO2) is pivotal for quantifying carbon sources and supporting China&amp;rsquo;s dual carbon goals. However, existing satellite observations are limited by spatiotemporal gaps due to orbital constraints and atmospheric conditions. To bridge these gaps, we utilized a deep learning framework featuring a dual self-attention mechanism, Air-Transformer, to capture complex long-range spatiotemporal dependencies and non-linear interactions among variables. Utilizing OCO-2 retrievals and multi-source data, this approach generated a spatiotemporally consistent, daily 0.1&amp;deg; XCO2 dataset over China during 2015&amp;ndash;2020. Cross-validation demonstrates superior accuracy (R2 = 0.98), with robust performance confirmed by spatial and temporal validation and ground-based TCCON benchmarks. The estimated full-coverage outputs reveal a national mean annual increase of 2.68 ppm, characterized by a distinct east-high/west-low pattern. Interpretable analysis based on Shapley Additive Explanations (SHAP) elucidates the non-linear interactions between XCO2 and environmental drivers and exhibits significant regional heterogeneity. This spatiotemporally consistent and interpretable XCO2 dataset offers vital data support for regional carbon monitoring and differentiated policy-making.</description>
	<pubDate>2026-04-29</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1366: Deep Learning Insights into Seamless Reconstruction of XCO2 in China: Spatiotemporal Patterns and Driving Mechanisms</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1366">doi: 10.3390/rs18091366</a></p>
	<p>Authors:
		Weiqing Wang
		Danyang Li
		Chu Ren
		Xiaoyan Dai
		Liguo Zhou
		</p>
	<p>Accurate quantification of atmospheric column-averaged dry-air CO2 mole fractions (XCO2) is pivotal for quantifying carbon sources and supporting China&rsquo;s dual carbon goals. However, existing satellite observations are limited by spatiotemporal gaps due to orbital constraints and atmospheric conditions. To bridge these gaps, we utilized a deep learning framework featuring a dual self-attention mechanism, Air-Transformer, to capture complex long-range spatiotemporal dependencies and non-linear interactions among variables. Utilizing OCO-2 retrievals and multi-source data, this approach generated a spatiotemporally consistent, daily 0.1&deg; XCO2 dataset over China during 2015&ndash;2020. Cross-validation demonstrates superior accuracy (R2 = 0.98), with robust performance confirmed by spatial and temporal validation and ground-based TCCON benchmarks. The estimated full-coverage outputs reveal a national mean annual increase of 2.68 ppm, characterized by a distinct east-high/west-low pattern. Interpretable analysis based on Shapley Additive Explanations (SHAP) elucidates the non-linear interactions between XCO2 and environmental drivers and exhibits significant regional heterogeneity. This spatiotemporally consistent and interpretable XCO2 dataset offers vital data support for regional carbon monitoring and differentiated policy-making.</p>
	]]></content:encoded>

	<dc:title>Deep Learning Insights into Seamless Reconstruction of XCO2 in China: Spatiotemporal Patterns and Driving Mechanisms</dc:title>
			<dc:creator>Weiqing Wang</dc:creator>
			<dc:creator>Danyang Li</dc:creator>
			<dc:creator>Chu Ren</dc:creator>
			<dc:creator>Xiaoyan Dai</dc:creator>
			<dc:creator>Liguo Zhou</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091366</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-04-29</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-04-29</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1366</prism:startingPage>
		<prism:doi>10.3390/rs18091366</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1366</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1367">

	<title>Remote Sensing, Vol. 18, Pages 1367: Automated Mapping of Patched Cropland Parcels Using Bi-Temporal UAS Elevation and Spectral Features at Cadastral Level</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1367</link>
	<description>Cropland parcels are fundamental units in agricultural production, and their precise delineation is critical for cadastral management and precision agriculture. However, heterogeneous agricultural landscapes with fragmented patches, complex land cover, and indistinct boundaries pose significant challenges for automated parcel delineation. Unmanned aerial systems (UASs) offer flexible, high-resolution multi-temporal spectral and elevation data, providing potential opportunities for mapping patched parcels. This study proposed an automated method for mapping patched cropland parcels using centimeter-level digital surface models (DSMs) and digital orthophoto maps (DOMs), validated at three typical sites in the Sichuan Basin. The method integrates (1) threshold segmentation of topographic relief to distinguish field surfaces from borders; (2) vegetation removal using a visible-band difference vegetation index (VDVI) mask; and (3) morphological refinement to produce high-precision vectorized field polygons. Results show that integrating bi-temporal UAS elevation and spectral data enables accurate, automated field extraction. Area-based mapping accuracy reached 98.1%, with an overall accuracy (OA) of 96.1% and a Kappa coefficient (KC) of 0.92. Field-count correctness was 93.3%, and the relative error of boundary length was 4.55%. Notably, parcels with regular shapes achieved even higher accuracy, with OA of 99.1% and KC of 0.98. By leveraging UAS-based elevation and spectral data, the proposed method can offer an alternative way to precise delineation of patched field boundary and provides reliable technical support for cadastral mapping and cropland surveys in agricultural regions.</description>
	<pubDate>2026-04-29</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1367: Automated Mapping of Patched Cropland Parcels Using Bi-Temporal UAS Elevation and Spectral Features at Cadastral Level</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1367">doi: 10.3390/rs18091367</a></p>
	<p>Authors:
		Xiaoshan Yong
		Jianyong Zhang
		Yu Zhao
		Qian Cui
		Shijie Qiao
		Yanjie Liu
		Yugang Cao
		Wu Xiao
		</p>
	<p>Cropland parcels are fundamental units in agricultural production, and their precise delineation is critical for cadastral management and precision agriculture. However, heterogeneous agricultural landscapes with fragmented patches, complex land cover, and indistinct boundaries pose significant challenges for automated parcel delineation. Unmanned aerial systems (UASs) offer flexible, high-resolution multi-temporal spectral and elevation data, providing potential opportunities for mapping patched parcels. This study proposed an automated method for mapping patched cropland parcels using centimeter-level digital surface models (DSMs) and digital orthophoto maps (DOMs), validated at three typical sites in the Sichuan Basin. The method integrates (1) threshold segmentation of topographic relief to distinguish field surfaces from borders; (2) vegetation removal using a visible-band difference vegetation index (VDVI) mask; and (3) morphological refinement to produce high-precision vectorized field polygons. Results show that integrating bi-temporal UAS elevation and spectral data enables accurate, automated field extraction. Area-based mapping accuracy reached 98.1%, with an overall accuracy (OA) of 96.1% and a Kappa coefficient (KC) of 0.92. Field-count correctness was 93.3%, and the relative error of boundary length was 4.55%. Notably, parcels with regular shapes achieved even higher accuracy, with OA of 99.1% and KC of 0.98. By leveraging UAS-based elevation and spectral data, the proposed method can offer an alternative way to precise delineation of patched field boundary and provides reliable technical support for cadastral mapping and cropland surveys in agricultural regions.</p>
	]]></content:encoded>

	<dc:title>Automated Mapping of Patched Cropland Parcels Using Bi-Temporal UAS Elevation and Spectral Features at Cadastral Level</dc:title>
			<dc:creator>Xiaoshan Yong</dc:creator>
			<dc:creator>Jianyong Zhang</dc:creator>
			<dc:creator>Yu Zhao</dc:creator>
			<dc:creator>Qian Cui</dc:creator>
			<dc:creator>Shijie Qiao</dc:creator>
			<dc:creator>Yanjie Liu</dc:creator>
			<dc:creator>Yugang Cao</dc:creator>
			<dc:creator>Wu Xiao</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091367</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-04-29</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-04-29</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1367</prism:startingPage>
		<prism:doi>10.3390/rs18091367</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1367</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1365">

	<title>Remote Sensing, Vol. 18, Pages 1365: Unraveling the Spectral&amp;ndash;Spatial Mechanisms of Mineral Identification: A Case Study on CASI Data Using SpectralFormer and Traditional Classifiers</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1365</link>
	<description>Traditional diagnostic spectroscopy provides a physically interpretable basis for mineral identification. However, how modern classifiers balance spectral and spatial information remains insufficiently understood. This study investigates this issue using CASI airborne hyperspectral data from the Liuyuan area, China. A geologically constrained ground-truth dataset was constructed based on expert knowledge and a semi-automatic Spectral Hourglass workflow. We evaluated representative shallow machine learning methods and deep learning models, including a three-dimensional convolutional neural network (3D-CNN), Vision Transformer (ViT), and SpectralFormer. The Support Vector Machine (SVM) achieved the highest overall accuracy but showed a strong bias toward dominant background classes and failed to reliably detect rare minerals such as jarosite. Deep learning models improved class balance by incorporating broader spectral features. However, excessive spatial aggregation reduced their sensitivity to small and fragmented alteration zones. SpectralFormer models hyperspectral data as ordered spectral sequences and showed more stable performance for spectrally similar and rare minerals. Multi-scale experiments reveal a spectral-dominant discrimination mechanism. Increasing the spectral receptive field improves classification up to an optimal level. In contrast, overly large spatial patches introduce background interference and obscure diagnostic absorption features. These findings highlight the fundamental role of spectral continuity in airborne hyperspectral alteration mineral mapping and clarify the trade-offs involved in integrating spatial context.</description>
	<pubDate>2026-04-29</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1365: Unraveling the Spectral&amp;ndash;Spatial Mechanisms of Mineral Identification: A Case Study on CASI Data Using SpectralFormer and Traditional Classifiers</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1365">doi: 10.3390/rs18091365</a></p>
	<p>Authors:
		Huilin Yang
		Kai Qin
		Yuxi Hao
		Ming Li
		Ling Zhu
		Yuechao Yang
		Yingjun Zhao
		</p>
	<p>Traditional diagnostic spectroscopy provides a physically interpretable basis for mineral identification. However, how modern classifiers balance spectral and spatial information remains insufficiently understood. This study investigates this issue using CASI airborne hyperspectral data from the Liuyuan area, China. A geologically constrained ground-truth dataset was constructed based on expert knowledge and a semi-automatic Spectral Hourglass workflow. We evaluated representative shallow machine learning methods and deep learning models, including a three-dimensional convolutional neural network (3D-CNN), Vision Transformer (ViT), and SpectralFormer. The Support Vector Machine (SVM) achieved the highest overall accuracy but showed a strong bias toward dominant background classes and failed to reliably detect rare minerals such as jarosite. Deep learning models improved class balance by incorporating broader spectral features. However, excessive spatial aggregation reduced their sensitivity to small and fragmented alteration zones. SpectralFormer models hyperspectral data as ordered spectral sequences and showed more stable performance for spectrally similar and rare minerals. Multi-scale experiments reveal a spectral-dominant discrimination mechanism. Increasing the spectral receptive field improves classification up to an optimal level. In contrast, overly large spatial patches introduce background interference and obscure diagnostic absorption features. These findings highlight the fundamental role of spectral continuity in airborne hyperspectral alteration mineral mapping and clarify the trade-offs involved in integrating spatial context.</p>
	]]></content:encoded>

	<dc:title>Unraveling the Spectral&amp;ndash;Spatial Mechanisms of Mineral Identification: A Case Study on CASI Data Using SpectralFormer and Traditional Classifiers</dc:title>
			<dc:creator>Huilin Yang</dc:creator>
			<dc:creator>Kai Qin</dc:creator>
			<dc:creator>Yuxi Hao</dc:creator>
			<dc:creator>Ming Li</dc:creator>
			<dc:creator>Ling Zhu</dc:creator>
			<dc:creator>Yuechao Yang</dc:creator>
			<dc:creator>Yingjun Zhao</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091365</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-04-29</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-04-29</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1365</prism:startingPage>
		<prism:doi>10.3390/rs18091365</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1365</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1364">

	<title>Remote Sensing, Vol. 18, Pages 1364: The Role of Solar-Induced Chlorophyll Fluorescence (SIF) in the Mechanistic Simulation of Eco-Hydrological Processes</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1364</link>
	<description>Accurate quantification of ecohydrological processes is essential for effective water and carbon management in terrestrial ecosystems. Traditional simulations mainly rely on mechanistic models, yet their accuracy is often limited by inconsistencies in representing physical processes and uncertainties in parameterization. Integrating remote sensing signals offers a promising way to reduce these uncertainties and enhance model applicability. In this study, in-situ observations from a wheat cropland in the Guanzhong Plain were used to simulate gross primary productivity (GPP) and latent heat flux (LE) by comparing a forward model (STEMMUS-SCOPE) with a remote sensing-driven inverse model (STEMMUS-MLR). We further examined the role of solar-induced chlorophyll fluorescence (SIF), an emerging proxy for photosynthesis, as an input to improve mechanistic modeling of GPP and LE. Results show that STEMMUS-MLR outperformed STEMMUS-SCOPE in estimating water and carbon fluxes, demonstrating that incorporating SIF effectively reduces bias associated with uncertainties in parameters and forcing data. The contribution of SIF was quantified using Random Forest regression and Shapley additive explanations (SHAP), revealing that SIF markedly reduced the dependence of GPP and LE simulations on shortwave radiation (SW), air temperature (Ta), and leaf area index (LAI). These findings highlight the critical role of SIF in ecohydrological modeling of semi-arid cropland ecosystems and provide a scientific basis for advancing process understanding and improving the precision management of water and carbon budgets in terrestrial ecosystems.</description>
	<pubDate>2026-04-28</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1364: The Role of Solar-Induced Chlorophyll Fluorescence (SIF) in the Mechanistic Simulation of Eco-Hydrological Processes</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1364">doi: 10.3390/rs18091364</a></p>
	<p>Authors:
		Aofan Cui
		Yunfei Wang
		Qiting Zuo
		Xinyu Mao
		Linlin Li
		Jingjing Yang
		Xiongbiao Peng
		Zhunqiao Liu
		Xiaoliang Lu
		Qiang Yu
		Huanjie Cai
		Yijian Zeng
		Zhongbo Su
		</p>
	<p>Accurate quantification of ecohydrological processes is essential for effective water and carbon management in terrestrial ecosystems. Traditional simulations mainly rely on mechanistic models, yet their accuracy is often limited by inconsistencies in representing physical processes and uncertainties in parameterization. Integrating remote sensing signals offers a promising way to reduce these uncertainties and enhance model applicability. In this study, in-situ observations from a wheat cropland in the Guanzhong Plain were used to simulate gross primary productivity (GPP) and latent heat flux (LE) by comparing a forward model (STEMMUS-SCOPE) with a remote sensing-driven inverse model (STEMMUS-MLR). We further examined the role of solar-induced chlorophyll fluorescence (SIF), an emerging proxy for photosynthesis, as an input to improve mechanistic modeling of GPP and LE. Results show that STEMMUS-MLR outperformed STEMMUS-SCOPE in estimating water and carbon fluxes, demonstrating that incorporating SIF effectively reduces bias associated with uncertainties in parameters and forcing data. The contribution of SIF was quantified using Random Forest regression and Shapley additive explanations (SHAP), revealing that SIF markedly reduced the dependence of GPP and LE simulations on shortwave radiation (SW), air temperature (Ta), and leaf area index (LAI). These findings highlight the critical role of SIF in ecohydrological modeling of semi-arid cropland ecosystems and provide a scientific basis for advancing process understanding and improving the precision management of water and carbon budgets in terrestrial ecosystems.</p>
	]]></content:encoded>

	<dc:title>The Role of Solar-Induced Chlorophyll Fluorescence (SIF) in the Mechanistic Simulation of Eco-Hydrological Processes</dc:title>
			<dc:creator>Aofan Cui</dc:creator>
			<dc:creator>Yunfei Wang</dc:creator>
			<dc:creator>Qiting Zuo</dc:creator>
			<dc:creator>Xinyu Mao</dc:creator>
			<dc:creator>Linlin Li</dc:creator>
			<dc:creator>Jingjing Yang</dc:creator>
			<dc:creator>Xiongbiao Peng</dc:creator>
			<dc:creator>Zhunqiao Liu</dc:creator>
			<dc:creator>Xiaoliang Lu</dc:creator>
			<dc:creator>Qiang Yu</dc:creator>
			<dc:creator>Huanjie Cai</dc:creator>
			<dc:creator>Yijian Zeng</dc:creator>
			<dc:creator>Zhongbo Su</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091364</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-04-28</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-04-28</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1364</prism:startingPage>
		<prism:doi>10.3390/rs18091364</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1364</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1361">

	<title>Remote Sensing, Vol. 18, Pages 1361: Individual Tree Species Classification in a Mining Area of the Yellow River Basin Using UAV-Based LiDAR, Hyperspectral, and RGB Data</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1361</link>
	<description>The Yellow River Basin contains abundant coal resources; however, its ecological environment is inherently fragile, and vegetation degradation has been further intensified by extensive mining activities. Accurate classification of individual tree species in mining-affected areas is therefore essential for assessing ecological conditions and establishing a scientific foundation for targeted restoration and sustainable management. To address this need, an evaluated machine learning framework was developed and evaluated for individual tree species classification in a coal mining area of the Yellow River Basin using integrated unmanned aerial vehicle (UAV) data. A comprehensive feature set was constructed by extracting 278 attributes per tree. These attributes included 224 spectral bands and 29 hyperspectral indices derived from hyperspectral imagery, 24 textural metrics obtained from RGB orthophotos, and one canopy height feature generated from a LiDAR-derived model. Based on ground-truth data from 1095 individual trees, seven machine learning algorithms were trained and systematically compared: Random Forest (RF), Support Vector Machine (SVM), K-Nearest Neighbors (KNN), Decision Tree (DT), Gradient Boosting (GB), Logistic Regression (LR), and XGBoost. Statistical significance testing using 5 &amp;times; 5 repeated cross-validation, together with the Friedman test and post hoc Nemenyi test, and additional model stability analysis consistently identified XGBoost as the optimal classifier. On an independent test set, XGBoost achieved high accuracy (Overall Accuracy = 0.897, Kappa = 0.811) with an efficient training time of 2.36 s. Further analysis demonstrated the critical and complementary roles of hyperspectral and structural features in species discrimination. The optimized model was subsequently applied to generate a detailed wall-to-wall tree species map across the entire mining area. 
Overall, this study presents a statistically informed comparison of classifiers for multi-source feature-based species discrimination and delivers an evaluated and practical pipeline for effective vegetation monitoring. The proposed framework provides a scientific tool for assessing and managing ecological recovery in complex mining environments, particularly within ecologically sensitive regions such as the Yellow River Basin.</description>
	<pubDate>2026-04-28</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1361: Individual Tree Species Classification in a Mining Area of the Yellow River Basin Using UAV-Based LiDAR, Hyperspectral, and RGB Data</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1361">doi: 10.3390/rs18091361</a></p>
	<p>Authors:
		Guo Wang
		Sheng Nie
		Xiaohuan Xi
		Cheng Wang
		Hongtao Wang
		</p>
	<p>The Yellow River Basin contains abundant coal resources; however, its ecological environment is inherently fragile, and vegetation degradation has been further intensified by extensive mining activities. Accurate classification of individual tree species in mining-affected areas is therefore essential for assessing ecological conditions and establishing a scientific foundation for targeted restoration and sustainable management. To address this need, an evaluated machine learning framework was developed and evaluated for individual tree species classification in a coal mining area of the Yellow River Basin using integrated unmanned aerial vehicle (UAV) data. A comprehensive feature set was constructed by extracting 278 attributes per tree. These attributes included 224 spectral bands and 29 hyperspectral indices derived from hyperspectral imagery, 24 textural metrics obtained from RGB orthophotos, and one canopy height feature generated from a LiDAR-derived model. Based on ground-truth data from 1095 individual trees, seven machine learning algorithms were trained and systematically compared: Random Forest (RF), Support Vector Machine (SVM), K-Nearest Neighbors (KNN), Decision Tree (DT), Gradient Boosting (GB), Logistic Regression (LR), and XGBoost. Statistical significance testing using 5 &times; 5 repeated cross-validation, together with the Friedman test and post hoc Nemenyi test, and additional model stability analysis consistently identified XGBoost as the optimal classifier. On an independent test set, XGBoost achieved high accuracy (Overall Accuracy = 0.897, Kappa = 0.811) with an efficient training time of 2.36 s. Further analysis demonstrated the critical and complementary roles of hyperspectral and structural features in species discrimination. The optimized model was subsequently applied to generate a detailed wall-to-wall tree species map across the entire mining area. 
Overall, this study presents a statistically informed comparison of classifiers for multi-source feature-based species discrimination and delivers an evaluated and practical pipeline for effective vegetation monitoring. The proposed framework provides a scientific tool for assessing and managing ecological recovery in complex mining environments, particularly within ecologically sensitive regions such as the Yellow River Basin.</p>
	]]></content:encoded>

	<dc:title>Individual Tree Species Classification in a Mining Area of the Yellow River Basin Using UAV-Based LiDAR, Hyperspectral, and RGB Data</dc:title>
			<dc:creator>Guo Wang</dc:creator>
			<dc:creator>Sheng Nie</dc:creator>
			<dc:creator>Xiaohuan Xi</dc:creator>
			<dc:creator>Cheng Wang</dc:creator>
			<dc:creator>Hongtao Wang</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091361</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-04-28</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-04-28</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1361</prism:startingPage>
		<prism:doi>10.3390/rs18091361</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1361</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1363">

	<title>Remote Sensing, Vol. 18, Pages 1363: Explainable Artificial Intelligence for Estimating Surface Deformation in Landslide Areas with Incomplete SAR Data</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1363</link>
	<description>In landslide-prone areas, spatial gaps in InSAR-derived deformation maps caused by incomplete SAR coverage hinder continuous surface deformation assessment and limit reliable landslide analysis. To address this problem, we propose an explainable AI (XAI) framework that integrates SBAS-InSAR, ensemble machine learning, and Shapley Additive exPlanations (SHAP) to estimate surface deformation in SAR-scarce regions. Geological and engineering factors, including protective measures, distance to roads, and land use, were combined with remote sensing and field data to build a comprehensive dataset. Four ensemble models (LightGBM, XGBoost, Random Forest, and CatBoost) were trained and evaluated, with XGBoost achieving the best performance (R2 = 0.816, RMSE = 6.85 mm, MAE = 4.27 mm). Validation against two GNSS benchmarks confirmed sub-millimeter accuracy (0.6 mm and 0.3 mm). Both XGBoost and CatBoost delineated continuous deformation patterns consistent with field-observed damage. SHAP analysis provided model interpretability, highlighting elevation and human-engineering factors as key drivers: areas farther from roads and under cultivation were more prone to downslope movement, while damaged protective works exhibited greater deformation. By coupling InSAR with XAI, this study achieves accurate and interpretable surface deformation estimation in data-scarce regions, advancing landslide assessment and early warning applications.</description>
	<pubDate>2026-04-28</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1363: Explainable Artificial Intelligence for Estimating Surface Deformation in Landslide Areas with Incomplete SAR Data</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1363">doi: 10.3390/rs18091363</a></p>
	<p>Authors:
		Xiao Feng
		Yang Wang
		Juan Du
		Bo Chai
		Zijie Hu
		Chao Zhou
		</p>
	<p>In landslide-prone areas, spatial gaps in InSAR-derived deformation maps caused by incomplete SAR coverage hinder continuous surface deformation assessment and limit reliable landslide analysis. To address this problem, we propose an explainable AI (XAI) framework that integrates SBAS-InSAR, ensemble machine learning, and Shapley Additive exPlanations (SHAP) to estimate surface deformation in SAR-scarce regions. Geological and engineering factors, including protective measures, distance to roads, and land use, were combined with remote sensing and field data to build a comprehensive dataset. Four ensemble models (LightGBM, XGBoost, Random Forest, and CatBoost) were trained and evaluated, with XGBoost achieving the best performance (R2 = 0.816, RMSE = 6.85 mm, MAE = 4.27 mm). Validation against two GNSS benchmarks confirmed sub-millimeter accuracy (0.6 mm and 0.3 mm). Both XGBoost and CatBoost delineated continuous deformation patterns consistent with field-observed damage. SHAP analysis provided model interpretability, highlighting elevation and human-engineering factors as key drivers: areas farther from roads and under cultivation were more prone to downslope movement, while damaged protective works exhibited greater deformation. By coupling InSAR with XAI, this study achieves accurate and interpretable surface deformation estimation in data-scarce regions, advancing landslide assessment and early warning applications.</p>
	]]></content:encoded>

	<dc:title>Explainable Artificial Intelligence for Estimating Surface Deformation in Landslide Areas with Incomplete SAR Data</dc:title>
			<dc:creator>Xiao Feng</dc:creator>
			<dc:creator>Yang Wang</dc:creator>
			<dc:creator>Juan Du</dc:creator>
			<dc:creator>Bo Chai</dc:creator>
			<dc:creator>Zijie Hu</dc:creator>
			<dc:creator>Chao Zhou</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091363</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-04-28</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-04-28</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1363</prism:startingPage>
		<prism:doi>10.3390/rs18091363</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1363</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1360">

	<title>Remote Sensing, Vol. 18, Pages 1360: Deriving Deflection of the Vertical and Gravity Anomaly from SWOT/KaRIn Data Using an Optimized Discretization Method</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1360</link>
	<description>The Surface Water and Ocean Topography (SWOT) mission carries a Ka-band interferometric radar altimeter (KaRIn), which enables high-resolution wide-swath measurements of sea surface height, providing new opportunities for deriving high-precision marine gravity fields. The discretization method used by the Scripps Institution of Oceanography (SIO) is one of the simplest methods for deriving deflections of the vertical (DOV), as it avoids parameter estimation and complex mathematical procedures. However, this method only uses adjacent observations for first-order differentiation and ignores diagonal directions, resulting in relatively low data utilization for SWOT/KaRIn data. The optimized discretization method is proposed to take advantage of the two-dimensional characteristics of KaRIn data. Multi-directional data is introduced to estimate the DOV (SWOT_DOV), and the numerical differentiation strategy is extended to higher orders. These significantly improve the solution quality. The standard deviation (STD) of the differences between SWOT_DOV and north_32.1 is 1.60 &amp;amp;mu;rad, and that with east_32.1 is 2.02 &amp;amp;mu;rad. Gravity anomalies are further derived using the inverse Vening-Meinesz formula. Validation using NCEI shipborne gravity data indicates an STD of 3.85 mGal. Further analyses considering seafloor topography gradient, depth, and offshore distance demonstrate that SWOT/KaRIn data have a stable capability to restore high-precision marine gravity field features.</description>
	<pubDate>2026-04-28</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1360: Deriving Deflection of the Vertical and Gravity Anomaly from SWOT/KaRIn Data Using an Optimized Discretization Method</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1360">doi: 10.3390/rs18091360</a></p>
	<p>Authors:
		Hengyang Guo
		Xiaoyun Wan
		Xing Wu
		</p>
	<p>The Surface Water and Ocean Topography (SWOT) mission carries a Ka-band interferometric radar altimeter (KaRIn), which enables high-resolution wide-swath measurements of sea surface height, providing new opportunities for deriving high-precision marine gravity fields. The discretization method used by the Scripps Institution of Oceanography (SIO) is one of the simplest methods for deriving deflections of the vertical (DOV), as it avoids parameter estimation and complex mathematical procedures. However, this method only uses adjacent observations for first-order differentiation and ignores diagonal directions, resulting in relatively low data utilization for SWOT/KaRIn data. The optimized discretization method is proposed to take advantage of the two-dimensional characteristics of KaRIn data. Multi-directional data is introduced to estimate the DOV (SWOT_DOV), and the numerical differentiation strategy is extended to higher orders. These significantly improve the solution quality. The standard deviation (STD) of the differences between SWOT_DOV and north_32.1 is 1.60 &amp;amp;mu;rad, and that with east_32.1 is 2.02 &amp;amp;mu;rad. Gravity anomalies are further derived using the inverse Vening-Meinesz formula. Validation using NCEI shipborne gravity data indicates an STD of 3.85 mGal. Further analyses considering seafloor topography gradient, depth, and offshore distance demonstrate that SWOT/KaRIn data have a stable capability to restore high-precision marine gravity field features.</p>
	]]></content:encoded>

	<dc:title>Deriving Deflection of the Vertical and Gravity Anomaly from SWOT/KaRIn Data Using an Optimized Discretization Method</dc:title>
			<dc:creator>Hengyang Guo</dc:creator>
			<dc:creator>Xiaoyun Wan</dc:creator>
			<dc:creator>Xing Wu</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091360</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-04-28</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-04-28</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1360</prism:startingPage>
		<prism:doi>10.3390/rs18091360</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1360</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1362">

	<title>Remote Sensing, Vol. 18, Pages 1362: Geothermal Resource Exploration Using Multi-Temporal Infrared Remote Sensing Data Based on Annual Temperature Variation Model</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1362</link>
	<description>Thermal infrared remote sensing offers a cost-effective means of regional geothermal reconnaissance, yet a fundamental challenge remains: isolating the weak geothermal surface signal (typically 1&amp;amp;ndash;3 &amp;amp;deg;C) from dominant surface noise introduced by seasonal temperature cycles (annual amplitude &amp;amp;gt; 20 &amp;amp;deg;C), topographic variability, land cover heterogeneity, and irregular cloud-affected satellite sampling. Conventional single-scene or arithmetic-mean approaches are highly susceptible to these confounding factors and frequently produce pseudo-anomalies that obscure genuine geothermal targets. To overcome this limitation, we propose a physics-based time-series framework in which a nonlinear annual temperature variation model, T(t) = T0 + A&amp;amp;middot;sin(2&amp;amp;pi;t/&amp;amp;tau; + &amp;amp;phi;), is fitted to multi-temporal Landsat 8 thermal infrared data via the Levenberg&amp;amp;ndash;Marquardt algorithm. Applied to ~50 cloud-free scenes (2021&amp;amp;ndash;2022) processed on the Google Earth Engine over the Shanxi Graben System, northern China, the model simultaneously retrieves the background temperature parameter T0 and seasonal amplitude A&amp;amp;mdash;two physically interpretable quantities that encode distinct geothermal signatures more robustly than simple temporal statistics. Sub-regional corrections for the elevation (&amp;amp;minus;4 &amp;amp;deg;C/100 m above 800 m), aspect (R2 &amp;amp;gt; 0.95 in piecewise linear segments), and slope further suppress topographic pseudo-anomalies prior to anomaly extraction. 
Over known high-temperature geothermal fields (Tianzhen and Yanggao; &amp;amp;gt;100 &amp;amp;deg;C at 100 m depth), the method reveals clear T0 offsets of +1&amp;amp;ndash;2 &amp;amp;deg;C (3&amp;amp;ndash;5% relative) and amplitude deficits of ~2 K (5&amp;amp;ndash;10% relative) relative to the background, with model-fitted T0 values averaging ~2 &amp;amp;deg;C higher than arithmetic means due to the correction for seasonal sampling bias. Combined with 5 km fault-proximity buffers, extracted anomaly zones align well spatially with known geothermal sites and major structural corridors of the graben system. However, deeper low-temperature systems (45&amp;amp;ndash;50 &amp;amp;deg;C at 300&amp;amp;ndash;500 m depth) produce ambiguous signals below the ~1.5 K detection threshold, indicating inherent limitations for deeply buried resources. The fully reproducible, training-data-free workflow is implementable via open satellite archives and cloud computing platforms, making it a transferable low-cost tool for structurally controlled geothermal reconnaissance across extensional basins worldwide.</description>
	<pubDate>2026-04-28</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1362: Geothermal Resource Exploration Using Multi-Temporal Infrared Remote Sensing Data Based on Annual Temperature Variation Model</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1362">doi: 10.3390/rs18091362</a></p>
	<p>Authors:
		Meihua Wei
		Guangzheng Jiang
		Luyu Zou
		Xiaoyi Wen
		Zhenyu Li
		</p>
	<p>Thermal infrared remote sensing offers a cost-effective means of regional geothermal reconnaissance, yet a fundamental challenge remains: isolating the weak geothermal surface signal (typically 1&amp;amp;ndash;3 &amp;amp;deg;C) from dominant surface noise introduced by seasonal temperature cycles (annual amplitude &amp;amp;gt; 20 &amp;amp;deg;C), topographic variability, land cover heterogeneity, and irregular cloud-affected satellite sampling. Conventional single-scene or arithmetic-mean approaches are highly susceptible to these confounding factors and frequently produce pseudo-anomalies that obscure genuine geothermal targets. To overcome this limitation, we propose a physics-based time-series framework in which a nonlinear annual temperature variation model, T(t) = T0 + A&amp;amp;middot;sin(2&amp;amp;pi;t/&amp;amp;tau; + &amp;amp;phi;), is fitted to multi-temporal Landsat 8 thermal infrared data via the Levenberg&amp;amp;ndash;Marquardt algorithm. Applied to ~50 cloud-free scenes (2021&amp;amp;ndash;2022) processed on the Google Earth Engine over the Shanxi Graben System, northern China, the model simultaneously retrieves the background temperature parameter T0 and seasonal amplitude A&amp;amp;mdash;two physically interpretable quantities that encode distinct geothermal signatures more robustly than simple temporal statistics. Sub-regional corrections for the elevation (&amp;amp;minus;4 &amp;amp;deg;C/100 m above 800 m), aspect (R2 &amp;amp;gt; 0.95 in piecewise linear segments), and slope further suppress topographic pseudo-anomalies prior to anomaly extraction. 
Over known high-temperature geothermal fields (Tianzhen and Yanggao; &amp;amp;gt;100 &amp;amp;deg;C at 100 m depth), the method reveals clear T0 offsets of +1&amp;amp;ndash;2 &amp;amp;deg;C (3&amp;amp;ndash;5% relative) and amplitude deficits of ~2 K (5&amp;amp;ndash;10% relative) relative to the background, with model-fitted T0 values averaging ~2 &amp;amp;deg;C higher than arithmetic means due to the correction for seasonal sampling bias. Combined with 5 km fault-proximity buffers, extracted anomaly zones align well spatially with known geothermal sites and major structural corridors of the graben system. However, deeper low-temperature systems (45&amp;amp;ndash;50 &amp;amp;deg;C at 300&amp;amp;ndash;500 m depth) produce ambiguous signals below the ~1.5 K detection threshold, indicating inherent limitations for deeply buried resources. The fully reproducible, training-data-free workflow is implementable via open satellite archives and cloud computing platforms, making it a transferable low-cost tool for structurally controlled geothermal reconnaissance across extensional basins worldwide.</p>
	]]></content:encoded>

	<dc:title>Geothermal Resource Exploration Using Multi-Temporal Infrared Remote Sensing Data Based on Annual Temperature Variation Model</dc:title>
			<dc:creator>Meihua Wei</dc:creator>
			<dc:creator>Guangzheng Jiang</dc:creator>
			<dc:creator>Luyu Zou</dc:creator>
			<dc:creator>Xiaoyi Wen</dc:creator>
			<dc:creator>Zhenyu Li</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091362</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-04-28</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-04-28</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1362</prism:startingPage>
		<prism:doi>10.3390/rs18091362</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1362</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1359">

	<title>Remote Sensing, Vol. 18, Pages 1359: Spatiotemporal Dynamics and Driving Patterns of Forest Fires in Yunnan Province, China: An Empirical Study Based on Event-Level Reconstruction from Multi-Source Remote Sensing (2012&amp;ndash;2024)</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1359</link>
	<description>Pixel-based Active Fire Spot (AFS) statistics alone are insufficient for characterizing forest fire activity in fragmented mountainous agroforestry regions because cross-sensor differences, geometric distortion, and discontinuous satellite overpasses can fragment physically continuous fires into multiple detections. To address this problem, we developed a reconstruction framework that combines optical&amp;amp;ndash;thermal cross-validation with multi-level spatio-temporal clustering to identify physically independent fires in Yunnan Province, China. Starting from 497,834 raw AFSs detected during 2012&amp;amp;ndash;2024, the framework removed unusable detections, aggregated the retained AFSs, and identified 41,215 validated Forest Fire Events (FFEs). The reconstructed database revealed clear temporal, spatial, and topographic heterogeneity. Fire activity was strongly concentrated in the late dry season, with 32.8% of all FFEs occurring during the main spring fire window. Daytime FFEs accounted for 82.8% of all FFEs, but nocturnal activity increased substantially in some years, reaching 20.7% in 2023. Persistence showed a long-tailed structure under both observation frameworks, although the operational thresholds differed between 2012&amp;amp;ndash;2017 (105 min) and 2018&amp;amp;ndash;2024 (75 min). Regionally, Southeast and Southwest Yunnan concentrated most reconstructed FFEs, whereas Northwest and Central Yunnan showed much higher CFRP per event. Topographically, fire energy was concentrated mainly on gentle-to-moderate slopes, and nighttime fires were centered 215.03 m higher than daytime fires. The typology analysis further showed that event frequency and physical fire impact were not distributed proportionally across fire types. Random Forest validation indicated high reproducibility of the rule-based typology system (Macro-F1 = 0.9935; Weighted-F1 = 0.9964), whereas the first two principal components explained 42.65% of the total variance. 
These results show that event-level reconstruction provides a stronger basis than AFS counts alone for understanding fire heterogeneity and supporting zone-specific fire management in Yunnan.</description>
	<pubDate>2026-04-28</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1359: Spatiotemporal Dynamics and Driving Patterns of Forest Fires in Yunnan Province, China: An Empirical Study Based on Event-Level Reconstruction from Multi-Source Remote Sensing (2012&amp;ndash;2024)</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1359">doi: 10.3390/rs18091359</a></p>
	<p>Authors:
		Hang Deng
		Junfan Zhao
		Lan Wang
		Fan Zhao
		</p>
	<p>Pixel-based Active Fire Spot (AFS) statistics alone are insufficient for characterizing forest fire activity in fragmented mountainous agroforestry regions because cross-sensor differences, geometric distortion, and discontinuous satellite overpasses can fragment physically continuous fires into multiple detections. To address this problem, we developed a reconstruction framework that combines optical&amp;amp;ndash;thermal cross-validation with multi-level spatio-temporal clustering to identify physically independent fires in Yunnan Province, China. Starting from 497,834 raw AFSs detected during 2012&amp;amp;ndash;2024, the framework removed unusable detections, aggregated the retained AFSs, and identified 41,215 validated Forest Fire Events (FFEs). The reconstructed database revealed clear temporal, spatial, and topographic heterogeneity. Fire activity was strongly concentrated in the late dry season, with 32.8% of all FFEs occurring during the main spring fire window. Daytime FFEs accounted for 82.8% of all FFEs, but nocturnal activity increased substantially in some years, reaching 20.7% in 2023. Persistence showed a long-tailed structure under both observation frameworks, although the operational thresholds differed between 2012&amp;amp;ndash;2017 (105 min) and 2018&amp;amp;ndash;2024 (75 min). Regionally, Southeast and Southwest Yunnan concentrated most reconstructed FFEs, whereas Northwest and Central Yunnan showed much higher CFRP per event. Topographically, fire energy was concentrated mainly on gentle-to-moderate slopes, and nighttime fires were centered 215.03 m higher than daytime fires. The typology analysis further showed that event frequency and physical fire impact were not distributed proportionally across fire types. Random Forest validation indicated high reproducibility of the rule-based typology system (Macro-F1 = 0.9935; Weighted-F1 = 0.9964), whereas the first two principal components explained 42.65% of the total variance. 
These results show that event-level reconstruction provides a stronger basis than AFS counts alone for understanding fire heterogeneity and supporting zone-specific fire management in Yunnan.</p>
	]]></content:encoded>

	<dc:title>Spatiotemporal Dynamics and Driving Patterns of Forest Fires in Yunnan Province, China: An Empirical Study Based on Event-Level Reconstruction from Multi-Source Remote Sensing (2012&amp;ndash;2024)</dc:title>
			<dc:creator>Hang Deng</dc:creator>
			<dc:creator>Junfan Zhao</dc:creator>
			<dc:creator>Lan Wang</dc:creator>
			<dc:creator>Fan Zhao</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091359</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-04-28</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-04-28</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1359</prism:startingPage>
		<prism:doi>10.3390/rs18091359</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1359</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1358">

	<title>Remote Sensing, Vol. 18, Pages 1358: DEMC: A Diffusion-Enhanced Mutual Consistency Framework for Cross-Domain Object Detection in Optical and SAR Imagery</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1358</link>
	<description>Cross-domain object detection from optical to Synthetic Aperture Radar (SAR) imagery addresses the challenges of SAR data scarcity and high annotation costs, enabling crucial capabilities for persistent maritime surveillance and reconnaissance. However, the substantial modality gap resulting from distinct imaging mechanisms and severe coherent speckle noise significantly hampers knowledge transfer. Existing Unsupervised Domain Adaptation (UDA) methods, which primarily rely on adversarial feature alignment or static pseudo-labeling, struggle to replicate the physical backscattering properties of SAR data and often fall prey to confirmation bias due to intense background clutter. To overcome these limitations, this paper introduces the Diffusion-Enhanced Mutual Consistency (DEMC) framework. DEMC introduces a novel two-stage adaptation paradigm. The first stage, the Diffusion-Based Domain Alignment (DBDA) module, generates a physics-aware intermediate domain. By integrating step-efficient diffusion generation with physical refinement, this module effectively reduces the cross-modal visual discrepancy while preserving the semantic structure of the optical source. In the second stage, this paper tackles the pervasive issue of pseudo-label noise with the Dual-Student Mutual Verification (DSMV) mechanism. Guided by Cross-Agent Spatial Consensus (CASC) and Adaptive Thresholding (AIT), this mechanism dynamically refines pseudo-labels through geometric overlap validation, effectively recovering faint, low-contrast targets that would typically be discarded by standard thresholds. Extensive evaluations across four benchmark tasks (HRSC2016/ShipRSImageNet to SSDD/HRSID) demonstrate that DEMC establishes a new state-of-the-art. Notably, the framework significantly enhances detection recall and reduces omission errors in complex coastal environments, offering a robust solution for zero-tolerance, all-weather surveillance tasks.</description>
	<pubDate>2026-04-28</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1358: DEMC: A Diffusion-Enhanced Mutual Consistency Framework for Cross-Domain Object Detection in Optical and SAR Imagery</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1358">doi: 10.3390/rs18091358</a></p>
	<p>Authors:
		Cheng Luo
		Yueting Zhang
		Jiayi Guo
		Guangyao Zhou
		Hongjian You
		Peifeng Li
		Xia Ning
		</p>
	<p>Cross-domain object detection from optical to Synthetic Aperture Radar (SAR) imagery addresses the challenges of SAR data scarcity and high annotation costs, enabling crucial capabilities for persistent maritime surveillance and reconnaissance. However, the substantial modality gap resulting from distinct imaging mechanisms and severe coherent speckle noise significantly hampers knowledge transfer. Existing Unsupervised Domain Adaptation (UDA) methods, which primarily rely on adversarial feature alignment or static pseudo-labeling, struggle to replicate the physical backscattering properties of SAR data and often fall prey to confirmation bias due to intense background clutter. To overcome these limitations, this paper introduces the Diffusion-Enhanced Mutual Consistency (DEMC) framework. DEMC introduces a novel two-stage adaptation paradigm. The first stage, the Diffusion-Based Domain Alignment (DBDA) module, generates a physics-aware intermediate domain. By integrating step-efficient diffusion generation with physical refinement, this module effectively reduces the cross-modal visual discrepancy while preserving the semantic structure of the optical source. In the second stage, this paper tackles the pervasive issue of pseudo-label noise with the Dual-Student Mutual Verification (DSMV) mechanism. Guided by Cross-Agent Spatial Consensus (CASC) and Adaptive Thresholding (AIT), this mechanism dynamically refines pseudo-labels through geometric overlap validation, effectively recovering faint, low-contrast targets that would typically be discarded by standard thresholds. Extensive evaluations across four benchmark tasks (HRSC2016/ShipRSImageNet to SSDD/HRSID) demonstrate that DEMC establishes a new state-of-the-art. Notably, the framework significantly enhances detection recall and reduces omission errors in complex coastal environments, offering a robust solution for zero-tolerance, all-weather surveillance tasks.</p>
	]]></content:encoded>

	<dc:title>DEMC: A Diffusion-Enhanced Mutual Consistency Framework for Cross-Domain Object Detection in Optical and SAR Imagery</dc:title>
			<dc:creator>Cheng Luo</dc:creator>
			<dc:creator>Yueting Zhang</dc:creator>
			<dc:creator>Jiayi Guo</dc:creator>
			<dc:creator>Guangyao Zhou</dc:creator>
			<dc:creator>Hongjian You</dc:creator>
			<dc:creator>Peifeng Li</dc:creator>
			<dc:creator>Xia Ning</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091358</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-04-28</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-04-28</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1358</prism:startingPage>
		<prism:doi>10.3390/rs18091358</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1358</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1357">

	<title>Remote Sensing, Vol. 18, Pages 1357: Topside Ionospheric Models Revealed by Automatically Detected Relative and Absolute Swarm-A/C Perturbations</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1357</link>
	<description>Differing from previous work on ionospheric models only using a relative method, in this paper, stereoscopic ionospheric models are innovatively constructed utilizing both relative and absolute automatic plasma perturbations. Firstly, ionospheric perturbations are globally searched from electron density data measured for 10 years by Swarm-A/C satellites via automatic detection software. In total, 621,999 Swarm-A perturbations and 630,668 Swarm-C ones are obtained, respectively. Then, the variation for each perturbation is calculated in two ways: via the relative method and absolute method. To check possible discrepancy between ionospheric models under these two different calculations, seasonal ionospheric models have been globally established using relative and absolute perturbations for both satellites. The results show that both kinds of models for each satellite can comprehensively reveal the main ionospheric structures, like EIA, WSA/MSNA, the mid-latitude trough and the auroral anomaly zone. Relatively, the EIA always shows its significance in equinox under calculation methods due to strong ionospheric irregularities caused by seasonal variation, but it is more obvious under the absolute method than relative one because of its higher background density. Comparatively, the auroral anomaly zone is predominantly filled with relatively large perturbations and is particularly conspicuous, especially in winter, due to its low background density. By contrast, mid-latitude structures, such as WSA/MSNA and mid-latitude trough, are comparatively affected less under these dual methods. At the same time, the interhemispheric asymmetry of EIA phenomena, as well as latitudinal WN4/3, is also significantly distinguished by seasonal ionospheric models. 
The occurrence probabilities of perturbations as a function of various variation magnitudes are also examined and the results demonstrate that the percentages of all variation segments vary widely with seasonal changes but this uneven fluctuation is more pronounced in summer under relative calculation and in winter under absolute calculation. Small fluctuations with relative variation &amp;amp;Delta;Vr &amp;amp;lt; 10% or absolute &amp;amp;Delta;Va &amp;amp;lt; 104 m&amp;amp;minus;3 always demonstrate significance in each group of seasonal perturbations while their percentage changes in different ways, decreasing in the order of summer, equinox and winter under the relative method and increasing under the absolute method. The measurements performed by Swarm-A/C demonstrate excellent consistency during the period considered.</description>
	<pubDate>2026-04-28</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1357: Topside Ionospheric Models Revealed by Automatically Detected Relative and Absolute Swarm-A/C Perturbations</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1357">doi: 10.3390/rs18091357</a></p>
	<p>Authors:
		Tianyu Liu
		Mei Li
		Hongzhu Yan
		Feng Gao
		Xiliang Liu
		Yajing Gao
		</p>
	<p>Differing from previous work on ionospheric models only using a relative method, in this paper, stereoscopic ionospheric models are innovatively constructed utilizing both relative and absolute automatic plasma perturbations. Firstly, ionospheric perturbations are globally searched from electron density data measured for 10 years by Swarm-A/C satellites via automatic detection software. In total, 621,999 Swarm-A perturbations and 630,668 Swarm-C ones are obtained, respectively. Then, the variation for each perturbation is calculated in two ways: via the relative method and absolute method. To check possible discrepancy between ionospheric models under these two different calculations, seasonal ionospheric models have been globally established using relative and absolute perturbations for both satellites. The results show that both kinds of models for each satellite can comprehensively reveal the main ionospheric structures, like EIA, WSA/MSNA, the mid-latitude trough and the auroral anomaly zone. Relatively, the EIA always shows its significance in equinox under calculation methods due to strong ionospheric irregularities caused by seasonal variation, but it is more obvious under the absolute method than relative one because of its higher background density. Comparatively, the auroral anomaly zone is predominantly filled with relatively large perturbations and is particularly conspicuous, especially in winter, due to its low background density. By contrast, mid-latitude structures, such as WSA/MSNA and mid-latitude trough, are comparatively affected less under these dual methods. At the same time, the interhemispheric asymmetry of EIA phenomena, as well as latitudinal WN4/3, is also significantly distinguished by seasonal ionospheric models. 
The occurrence probabilities of perturbations as a function of various variation magnitudes are also examined and the results demonstrate that the percentages of all variation segments vary widely with seasonal changes but this uneven fluctuation is more pronounced in summer under relative calculation and in winter under absolute calculation. Small fluctuations with relative variation &amp;amp;Delta;Vr &amp;amp;lt; 10% or absolute &amp;amp;Delta;Va &amp;amp;lt; 104 m&amp;amp;minus;3 always demonstrate significance in each group of seasonal perturbations while their percentage changes in different ways, decreasing in the order of summer, equinox and winter under the relative method and increasing under the absolute method. The measurements performed by Swarm-A/C demonstrate excellent consistency during the period considered.</p>
	]]></content:encoded>

	<dc:title>Topside Ionospheric Models Revealed by Automatically Detected Relative and Absolute Swarm-A/C Perturbations</dc:title>
			<dc:creator>Tianyu Liu</dc:creator>
			<dc:creator>Mei Li</dc:creator>
			<dc:creator>Hongzhu Yan</dc:creator>
			<dc:creator>Feng Gao</dc:creator>
			<dc:creator>Xiliang Liu</dc:creator>
			<dc:creator>Yajing Gao</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091357</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-04-28</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-04-28</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1357</prism:startingPage>
		<prism:doi>10.3390/rs18091357</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1357</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1355">

	<title>Remote Sensing, Vol. 18, Pages 1355: Distributed Latent Representation Clustering for Efficient Multi-Satellite Image Compression</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1355</link>
	<description>With the increasing number and enhanced sensing capabilities of satellites, the volume of satellite imagery has substantially surpassed the available bandwidth of satellite-to-ground links. Recently, with the adoption of commercial on-board GPUs, Learned Image Compression (LIC) offers the potential to mitigate this bottleneck by virtue of its superior rate&amp;amp;ndash;distortion performance over traditional codecs. However, existing LIC solutions operate in isolation on single satellites and underutilize the overlapping observations, which limits further gains in compression performance. In this paper, we propose Distributed Latent Representation Clustering (DLRC), which represents the first attempt to integrate real-time multi-satellite observation redundancy elimination into LIC. DLRC first introduces a local latent representation clustering mechanism. It discretizes the latent representation of LIC into compact cluster signatures on each satellite with lightweight computational overhead. Subsequently, DLRC presents a global cluster signature synchronization strategy. By exchanging signatures with negligible communication overhead, it enables multiple satellites to identify globally redundant local observations on a per-signature basis. By coding and downlinking only the latent representation corresponding to globally unique signatures, DLRC achieves non-redundant downlink in a training-free paradigm while remaining compatible with existing LIC architectures. Through extensive experiments, we demonstrate that DLRC achieves efficient bits per pixel reduction compared to independent LIC solutions while maintaining comparable reconstruction quality.</description>
	<pubDate>2026-04-28</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1355: Distributed Latent Representation Clustering for Efficient Multi-Satellite Image Compression</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1355">doi: 10.3390/rs18091355</a></p>
	<p>Authors:
		Xiandong Lu
		Xingyu Guan
		Pengcheng Wang
		Zhiming Cai
		Yonghe Zhang
		</p>
	<p>With the increasing number and enhanced sensing capabilities of satellites, the volume of satellite imagery has substantially surpassed the available bandwidth of satellite-to-ground links. Recently, with the adoption of commercial on-board GPUs, Learned Image Compression (LIC) offers the potential to mitigate this bottleneck by virtue of its superior rate&ndash;distortion performance over traditional codecs. However, existing LIC solutions operate in isolation on single satellites and underutilize the overlapping observations, which limits further gains in compression performance. In this paper, we propose Distributed Latent Representation Clustering (DLRC), which represents the first attempt to integrate real-time multi-satellite observation redundancy elimination into LIC. DLRC first introduces a local latent representation clustering mechanism. It discretizes the latent representation of LIC into compact cluster signatures on each satellite with lightweight computational overhead. Subsequently, DLRC presents a global cluster signature synchronization strategy. By exchanging signatures with negligible communication overhead, it enables multiple satellites to identify globally redundant local observations on a per-signature basis. By coding and downlinking only the latent representation corresponding to globally unique signatures, DLRC achieves non-redundant downlink in a training-free paradigm while remaining compatible with existing LIC architectures. Through extensive experiments, we demonstrate that DLRC achieves efficient bits per pixel reduction compared to independent LIC solutions while maintaining comparable reconstruction quality.</p>
	]]></content:encoded>

	<dc:title>Distributed Latent Representation Clustering for Efficient Multi-Satellite Image Compression</dc:title>
			<dc:creator>Xiandong Lu</dc:creator>
			<dc:creator>Xingyu Guan</dc:creator>
			<dc:creator>Pengcheng Wang</dc:creator>
			<dc:creator>Zhiming Cai</dc:creator>
			<dc:creator>Yonghe Zhang</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091355</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-04-28</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-04-28</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1355</prism:startingPage>
		<prism:doi>10.3390/rs18091355</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1355</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1356">

	<title>Remote Sensing, Vol. 18, Pages 1356: Multi-Temporal InSAR and Machine Learning for Geohazard Monitoring: A Systematic Review with Emphasis on Noise Mitigation and Model Transferability</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1356</link>
	<description>Interferometric Synthetic Aperture Radar (InSAR) enables regional monitoring of ground deformation, but operational geohazard analysis remains challenged by atmospheric artefacts, temporal decorrelation, and the need for scalable interpretation of multi-temporal products. A systematic review was conducted through searches in Scopus and Web of Science, resulting in 135 peer-reviewed scientific articles on the integration of Machine Learning (ML) and Deep Learning (DL) with multi-temporal InSAR (MT-InSAR). The literature is dominated by applications to landslides and land subsidence, with additional studies addressing volcanic unrest and other deformation-related hazards. Persistent Scatterer (PS) and Small-Baseline Subset (SBAS) approaches are frequently used to derive deformation time series, which are then coupled with ML/DL for the detection and mapping of active phenomena and for short-horizon forecasting. Convolutional architectures, such as Convolutional Neural Networks (CNNs), are commonly reported for spatial recognition tasks, while recurrent models like Long Short-Term Memory (LSTM) networks are often applied to time-series prediction. Reported benefits include improved automation and predictive performance, although sensitivity to noise sources remains a challenge. Overall, the evidence supports AI-enabled InSAR workflows for scalable geohazard monitoring, while highlighting the need for standardized benchmarks and systematic transferability assessment. This review provides a roadmap for transitioning from research prototypes to operational early-warning systems.</description>
	<pubDate>2026-04-28</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1356: Multi-Temporal InSAR and Machine Learning for Geohazard Monitoring: A Systematic Review with Emphasis on Noise Mitigation and Model Transferability</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1356">doi: 10.3390/rs18091356</a></p>
	<p>Authors:
		Alex Alonso-Díaz
		Miguel Fontes
		Ana Cláudia Teixeira
		Shimon Wdowinski
		Joaquim J. Sousa
		</p>
	<p>Interferometric Synthetic Aperture Radar (InSAR) enables regional monitoring of ground deformation, but operational geohazard analysis remains challenged by atmospheric artefacts, temporal decorrelation, and the need for scalable interpretation of multi-temporal products. A systematic review was conducted through searches in Scopus and Web of Science, resulting in 135 peer-reviewed scientific articles on the integration of Machine Learning (ML) and Deep Learning (DL) with multi-temporal InSAR (MT-InSAR). The literature is dominated by applications to landslides and land subsidence, with additional studies addressing volcanic unrest and other deformation-related hazards. Persistent Scatterer (PS) and Small-Baseline Subset (SBAS) approaches are frequently used to derive deformation time series, which are then coupled with ML/DL for the detection and mapping of active phenomena and for short-horizon forecasting. Convolutional architectures, such as Convolutional Neural Networks (CNNs), are commonly reported for spatial recognition tasks, while recurrent models like Long Short-Term Memory (LSTM) networks are often applied to time-series prediction. Reported benefits include improved automation and predictive performance, although sensitivity to noise sources remains a challenge. Overall, the evidence supports AI-enabled InSAR workflows for scalable geohazard monitoring, while highlighting the need for standardized benchmarks and systematic transferability assessment. This review provides a roadmap for transitioning from research prototypes to operational early-warning systems.</p>
	]]></content:encoded>

	<dc:title>Multi-Temporal InSAR and Machine Learning for Geohazard Monitoring: A Systematic Review with Emphasis on Noise Mitigation and Model Transferability</dc:title>
			<dc:creator>Alex Alonso-Díaz</dc:creator>
			<dc:creator>Miguel Fontes</dc:creator>
			<dc:creator>Ana Cláudia Teixeira</dc:creator>
			<dc:creator>Shimon Wdowinski</dc:creator>
			<dc:creator>Joaquim J. Sousa</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091356</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-04-28</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-04-28</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>1356</prism:startingPage>
		<prism:doi>10.3390/rs18091356</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1356</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1354">

	<title>Remote Sensing, Vol. 18, Pages 1354: PFMGAN: A Generative Adversarial Network with Physics Fusion for DTM Generation from Martian Monocular Images</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1354</link>
	<description>High-resolution digital terrain models (DTMs) are essential for Martian scientific exploration and engineering missions. However, traditional methods for Martian DTM generation are constrained by limitations in coverage and production efficiency, while the implicit treatment of illumination in existing deep learning methods leads to reconstructions lacking physical interpretability. To address this, we propose a Physics Fusion generative adversarial network based on the Mix Transformer architecture (PFMGAN) that incorporates physical prior knowledge to reconstruct high-precision DTMs from monocular images. This design significantly improves the accuracy and robustness of DTMs by enabling the network to effectively interpret grayscale variations caused by surface albedo and terrain undulations. Experimental results across various Martian landforms demonstrate that, by explicitly inputting solar angles and leveraging an Albedo-Aware Attention (AAA) module, PFMGAN achieves superior accuracy and robustness compared to other baseline models, with up to a 50% improvement in reconstruction accuracy in complex terrains. Furthermore, multi-scale (0.25–6 m/pixel) experimental results indicate that the proposed model is highly adaptable to reconstruction tasks across different spatial scales, consistently delivering high-quality topographic products. The results demonstrate the immense potential of PFMGAN for large-scale, high-precision Martian terrain reconstruction by leveraging the vast archive of monocular imagery.</description>
	<pubDate>2026-04-28</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1354: PFMGAN: A Generative Adversarial Network with Physics Fusion for DTM Generation from Martian Monocular Images</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1354">doi: 10.3390/rs18091354</a></p>
	<p>Authors:
		Ziyang Zou
		Yi Zhou
		Chao Li
		Zhibao Dong
		</p>
	<p>High-resolution digital terrain models (DTMs) are essential for Martian scientific exploration and engineering missions. However, traditional methods for Martian DTM generation are constrained by limitations in coverage and production efficiency, while the implicit treatment of illumination in existing deep learning methods leads to reconstructions lacking physical interpretability. To address this, we propose a Physics Fusion generative adversarial network based on the Mix Transformer architecture (PFMGAN) that incorporates physical prior knowledge to reconstruct high-precision DTMs from monocular images. This design significantly improves the accuracy and robustness of DTMs by enabling the network to effectively interpret grayscale variations caused by surface albedo and terrain undulations. Experimental results across various Martian landforms demonstrate that, by explicitly inputting solar angles and leveraging an Albedo-Aware Attention (AAA) module, PFMGAN achieves superior accuracy and robustness compared to other baseline models, with up to a 50% improvement in reconstruction accuracy in complex terrains. Furthermore, multi-scale (0.25&ndash;6 m/pixel) experimental results indicate that the proposed model is highly adaptable to reconstruction tasks across different spatial scales, consistently delivering high-quality topographic products. The results demonstrate the immense potential of PFMGAN for large-scale, high-precision Martian terrain reconstruction by leveraging the vast archive of monocular imagery.</p>
	]]></content:encoded>

	<dc:title>PFMGAN: A Generative Adversarial Network with Physics Fusion for DTM Generation from Martian Monocular Images</dc:title>
			<dc:creator>Ziyang Zou</dc:creator>
			<dc:creator>Yi Zhou</dc:creator>
			<dc:creator>Chao Li</dc:creator>
			<dc:creator>Zhibao Dong</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091354</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-04-28</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-04-28</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1354</prism:startingPage>
		<prism:doi>10.3390/rs18091354</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1354</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1351">

	<title>Remote Sensing, Vol. 18, Pages 1351: MultTransNet: A Novel Multimodal Transformer Network for Retrieving Significant Wave Height Using GNSS-R Data</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1351</link>
	<description>Significant Wave Height (SWH) is a critical parameter for ocean observation. SWH retrieval using GNSS-R data faces challenges including difficult feature selection, insufficient temporal dependency modeling, and limitations due to single-modality data. This paper proposes a novel Multimodal Transformer Network (MultTransNet) to enhance the accuracy of GNSS-R SWH retrieval. To optimize the feature set, we designed an XGBoost-based iterative feature selection module that effectively eliminates redundant features. To capture complex temporal dependencies and global context, the model employs a Transformer encoder utilizing its self-attention mechanism. Furthermore, to overcome the constraints of single-modality data, we innovatively fused 2D DDM image data with 1D auxiliary parameters, enabling multi-source information integration. Simulation results show that the Transformer architecture reduces Root Mean Square Error (RMSE) by 8.91% and increases Correlation Coefficient (CC) by 4.05% compared to a conventional Deep Neural Network (DNN) model. More significantly, the proposed multimodal algorithm further improves retrieval accuracy by 27.05% (RMSE reduction) and 7.21% (CC increase) compared to its single-modality Transformer counterpart, demonstrating superior performance, especially in complex sea-state conditions.</description>
	<pubDate>2026-04-28</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1351: MultTransNet: A Novel Multimodal Transformer Network for Retrieving Significant Wave Height Using GNSS-R Data</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1351">doi: 10.3390/rs18091351</a></p>
	<p>Authors:
		Yinghua Cui
		Min Cai
		Yuxuan Du
		Shanbao He
		</p>
	<p>Significant Wave Height (SWH) is a critical parameter for ocean observation. SWH retrieval using GNSS-R data faces challenges including difficult feature selection, insufficient temporal dependency modeling, and limitations due to single-modality data. This paper proposes a novel Multimodal Transformer Network (MultTransNet) to enhance the accuracy of GNSS-R SWH retrieval. To optimize the feature set, we designed an XGBoost-based iterative feature selection module that effectively eliminates redundant features. To capture complex temporal dependencies and global context, the model employs a Transformer encoder utilizing its self-attention mechanism. Furthermore, to overcome the constraints of single-modality data, we innovatively fused 2D DDM image data with 1D auxiliary parameters, enabling multi-source information integration. Simulation results show that the Transformer architecture reduces Root Mean Square Error (RMSE) by 8.91% and increases Correlation Coefficient (CC) by 4.05% compared to a conventional Deep Neural Network (DNN) model. More significantly, the proposed multimodal algorithm further improves retrieval accuracy by 27.05% (RMSE reduction) and 7.21% (CC increase) compared to its single-modality Transformer counterpart, demonstrating superior performance, especially in complex sea-state conditions.</p>
	]]></content:encoded>

	<dc:title>MultTransNet: A Novel Multimodal Transformer Network for Retrieving Significant Wave Height Using GNSS-R Data</dc:title>
			<dc:creator>Yinghua Cui</dc:creator>
			<dc:creator>Min Cai</dc:creator>
			<dc:creator>Yuxuan Du</dc:creator>
			<dc:creator>Shanbao He</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091351</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-04-28</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-04-28</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1351</prism:startingPage>
		<prism:doi>10.3390/rs18091351</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1351</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1353">

	<title>Remote Sensing, Vol. 18, Pages 1353: Updates to the CYGNSS Ocean Surface Heat Flux Product</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1353</link>
	<description>The initial development of the Cyclone Global Navigation Satellite System (CYGNSS) Ocean Surface Heat Flux Product, shortly after the satellite mission began, quickly became a valuable tool for analyzing and monitoring latent and sensible heat fluxes over tropical and subtropical oceans. It helps improve understanding of their influence on tropical and extratropical cyclones, tropical convection, atmospheric rivers, and more. Since its first release, the product has been updated with new ancillary input data (such as temperature and humidity), algorithm adjustments to incorporate equivalent neutral winds from CYGNSS, and the addition of local solar time to support diurnal analysis. As a mature mission and data product, CYGNSS provides important climatological and long-term insights into the tropical and subtropical oceans, filling gaps where in situ observations and data from other remote sensing instruments are limited. This paper outlines the updates and changes made to the CYGNSS Fluxes since its inception, compares the current dataset with in situ data, and discusses CYGNSS’s long-term observations of ocean surface heat fluxes in the tropical and subtropical regions.</description>
	<pubDate>2026-04-28</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1353: Updates to the CYGNSS Ocean Surface Heat Flux Product</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1353">doi: 10.3390/rs18091353</a></p>
	<p>Authors:
		Juan A. Crespo
		Shakeel Asharaf
		Anthony Russel
		Dorina Twigg
		Derek J. Posselt
		</p>
	<p>The initial development of the Cyclone Global Navigation Satellite System (CYGNSS) Ocean Surface Heat Flux Product, shortly after the satellite mission began, quickly became a valuable tool for analyzing and monitoring latent and sensible heat fluxes over tropical and subtropical oceans. It helps improve understanding of their influence on tropical and extratropical cyclones, tropical convection, atmospheric rivers, and more. Since its first release, the product has been updated with new ancillary input data (such as temperature and humidity), algorithm adjustments to incorporate equivalent neutral winds from CYGNSS, and the addition of local solar time to support diurnal analysis. As a mature mission and data product, CYGNSS provides important climatological and long-term insights into the tropical and subtropical oceans, filling gaps where in situ observations and data from other remote sensing instruments are limited. This paper outlines the updates and changes made to the CYGNSS Fluxes since its inception, compares the current dataset with in situ data, and discusses CYGNSS&rsquo;s long-term observations of ocean surface heat fluxes in the tropical and subtropical regions.</p>
	]]></content:encoded>

	<dc:title>Updates to the CYGNSS Ocean Surface Heat Flux Product</dc:title>
			<dc:creator>Juan A. Crespo</dc:creator>
			<dc:creator>Shakeel Asharaf</dc:creator>
			<dc:creator>Anthony Russel</dc:creator>
			<dc:creator>Dorina Twigg</dc:creator>
			<dc:creator>Derek J. Posselt</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091353</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-04-28</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-04-28</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Technical Note</prism:section>
	<prism:startingPage>1353</prism:startingPage>
		<prism:doi>10.3390/rs18091353</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1353</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1350">

	<title>Remote Sensing, Vol. 18, Pages 1350: Stability Evaluation of Vegetation-Covered Highway Slopes Employing Integrated CR-InSAR and Finite Element Simulation</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1350</link>
	<description>Highway slopes susceptible to landslides are typically reinforced by vegetation cover and the application of concrete frame beams, but vegetation cover may degrade the accuracy of InSAR deformation monitoring. We installed artificial corner reflectors (CRs) on the frame beams and assessed the stability of the vegetated slope using finite element simulation constrained by InSAR deformation data. A study was conducted on a typical landslide-risk slope within the K87 + 391.5–K87 + 565 section of the Guihuang highway, which is reinforced with cast-in-place and prefabricated concrete beams. Experimental results demonstrate that two adjacent corner reflectors (CRs) on the two types of frame beams of the slope can be successfully identified, with deformation rates ranging from 0.1 to 0.4 mm/y, and the root mean square error (RMSE) of discrepancies between CR-InSAR measurements and slope displacement monitoring sensors is less than 0.3 mm. Meanwhile, the current strength reduction factor values for slopes reinforced with cast-in-place and prefabricated concrete beams, as constrained by InSAR multi-dimensional deformation, are 0.11 and 0.12, respectively, which are much lower than the critical strength reduction factors of 1.28 and 1.22 corresponding to full coalescence of plastic strain from the slope toe to the slope crest, which indicates that the cast-in-place and prefabricated frame beams exhibit comparable support performance.</description>
	<pubDate>2026-04-28</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1350: Stability Evaluation of Vegetation-Covered Highway Slopes Employing Integrated CR-InSAR and Finite Element Simulation</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1350">doi: 10.3390/rs18091350</a></p>
	<p>Authors:
		Wei Peng
		Jiachen Zhou
		Junhui Zhang
		Jun Zhu
		Xuemin Xing
		Shiping Zhang
		</p>
	<p>Highway slopes susceptible to landslides are typically reinforced by vegetation cover and the application of concrete frame beams, but vegetation cover may degrade the accuracy of InSAR deformation monitoring. We installed artificial corner reflectors (CRs) on the frame beams and assessed the stability of the vegetated slope using finite element simulation constrained by InSAR deformation data. A study was conducted on a typical landslide-risk slope within the K87 + 391.5&ndash;K87 + 565 section of the Guihuang highway, which is reinforced with cast-in-place and prefabricated concrete beams. Experimental results demonstrate that two adjacent corner reflectors (CRs) on the two types of frame beams of the slope can be successfully identified, with deformation rates ranging from 0.1 to 0.4 mm/y, and the root mean square error (RMSE) of discrepancies between CR-InSAR measurements and slope displacement monitoring sensors is less than 0.3 mm. Meanwhile, the current strength reduction factor values for slopes reinforced with cast-in-place and prefabricated concrete beams, as constrained by InSAR multi-dimensional deformation, are 0.11 and 0.12, respectively, which are much lower than the critical strength reduction factors of 1.28 and 1.22 corresponding to full coalescence of plastic strain from the slope toe to the slope crest, which indicates that the cast-in-place and prefabricated frame beams exhibit comparable support performance.</p>
	]]></content:encoded>

	<dc:title>Stability Evaluation of Vegetation-Covered Highway Slopes Employing Integrated CR-InSAR and Finite Element Simulation</dc:title>
			<dc:creator>Wei Peng</dc:creator>
			<dc:creator>Jiachen Zhou</dc:creator>
			<dc:creator>Junhui Zhang</dc:creator>
			<dc:creator>Jun Zhu</dc:creator>
			<dc:creator>Xuemin Xing</dc:creator>
			<dc:creator>Shiping Zhang</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091350</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-04-28</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-04-28</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1350</prism:startingPage>
		<prism:doi>10.3390/rs18091350</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1350</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1352">

	<title>Remote Sensing, Vol. 18, Pages 1352: Effect of Baseline Definition on Post-Fire Resilience Metrics Derived from Landsat Time Series in Pinus halepensis</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1352</link>
	<description>Wildfires have historically shaped Mediterranean ecosystems, fostering the adaptation of fire-resilient species such as Pinus halepensis Mill. Assessing post-fire resilience is essential to understand landscape recovery and guide forest management. This requires evaluating the speed, intensity, and trajectory of vegetation recovery relative to a defined baseline, although the influence of control point selection and baseline configuration remains unclear, despite its critical role in shaping the interpretation of recovery dynamics. This study proposes a methodological framework to assess the resilience of P. halepensis using 14-year Landsat time series following wildfire events, combined with image segmentation algorithms and Object-Based Image Analysis (GEOBIA). The analysis integrates two complementary vectors: (i) temporal evolution of NDVI and (ii) spectral probability of assignment to P. halepensis. Results indicate that NDVI suggests an average vegetation recovery time of seven years; however, spectral probability remains below 40% during this period, indicating slower tree cover recovery. Field inventories confirm that full recovery requires more than 15 years, with early stages dominated by shrublands, mainly Quercus coccifera. These findings show that NDVI alone overestimates resilience and that control selection and baseline configuration strongly influence assessments. GEOBIA enhances the ecological precision of resilience evaluation.</description>
	<pubDate>2026-04-28</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1352: Effect of Baseline Definition on Post-Fire Resilience Metrics Derived from Landsat Time Series in Pinus halepensis</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1352">doi: 10.3390/rs18091352</a></p>
	<p>Authors:
		Pedro Martín-Ortiz
		Cristian Iranzo
		Daniel Borini Alves
		Raquel Montorio
		Fernando Pérez-Cabello
		</p>
	<p>Wildfires have historically shaped Mediterranean ecosystems, fostering the adaptation of fire-resilient species such as Pinus halepensis Mill. Assessing post-fire resilience is essential to understand landscape recovery and guide forest management. This requires evaluating the speed, intensity, and trajectory of vegetation recovery relative to a defined baseline, although the influence of control point selection and baseline configuration remains unclear, despite its critical role in shaping the interpretation of recovery dynamics. This study proposes a methodological framework to assess the resilience of P. halepensis using 14-year Landsat time series following wildfire events, combined with image segmentation algorithms and Object-Based Image Analysis (GEOBIA). The analysis integrates two complementary vectors: (i) temporal evolution of NDVI and (ii) spectral probability of assignment to P. halepensis. Results indicate that NDVI suggests an average vegetation recovery time of seven years; however, spectral probability remains below 40% during this period, indicating slower tree cover recovery. Field inventories confirm that full recovery requires more than 15 years, with early stages dominated by shrublands, mainly Quercus coccifera. These findings show that NDVI alone overestimates resilience and that control selection and baseline configuration strongly influence assessments. GEOBIA enhances the ecological precision of resilience evaluation.</p>
	]]></content:encoded>

	<dc:title>Effect of Baseline Definition on Post-Fire Resilience Metrics Derived from Landsat Time Series in Pinus halepensis</dc:title>
			<dc:creator>Pedro Martín-Ortiz</dc:creator>
			<dc:creator>Cristian Iranzo</dc:creator>
			<dc:creator>Daniel Borini Alves</dc:creator>
			<dc:creator>Raquel Montorio</dc:creator>
			<dc:creator>Fernando Pérez-Cabello</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091352</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-04-28</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-04-28</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1352</prism:startingPage>
		<prism:doi>10.3390/rs18091352</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1352</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1349">

	<title>Remote Sensing, Vol. 18, Pages 1349: UGDMoE: An Uncertainty-Guided Mixture-of-Experts Decoder for Open-Vocabulary Remote Sensing Segmentation</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1349</link>
	<description>Rapid urbanization and the rapid accumulation of multi-source and multi-temporal Earth observation data are creating an increasing demand for remote sensing models that can flexibly support fine-grained monitoring beyond fixed label taxonomies. Open-vocabulary remote sensing image semantic segmentation (OVRSIS) aims to segment text-specified categories beyond a fixed label space with vision–language foundation models. However, dense remote sensing scenes make pixel–text matching highly vulnerable to semantic confusion and misalignment, owing to extreme scale variation, thin structures, repetitive textures, and prompt sensitivity. To address these challenges, we propose UGDMoE, an uncertainty-guided mixture-of-experts framework for OVRSIS. First, we design a domain-specific MoE decoder with three geometrically specialized experts—for slender structures, mid-scale objects, and large-region context—routed by the alignment-risk cue U0. Second, we introduce a lightweight prompt–response estimation strategy that quantifies prediction dispersion across semantically equivalent prompts to derive U0 in an annotation-free manner. Third, we develop prompt ensemble-based likelihood calibration (PELC), which takes the shared alignment-risk cue U0 as input to calibrate prompt-specific logits before refinement. Finally, we design a lightweight uncertainty-aware structure refinement module that, guided by U0, selectively fuses early visual features with segmentation logits to restore boundary continuity and connectivity of thin structures. We conduct extensive experiments on eight OVRSIS benchmarks under cross-dataset evaluation protocols. Trained on DLRSD, it achieves 46.97 m-mIoU and 63.31 m-mACC, surpassing the strongest baseline by 0.76 and 0.62 points; trained on iSAID, it reaches 37.47 m-mIoU and 58.52 m-mACC, improving over the strongest competitor by 0.71 and 0.61 points. 
UGDMoE consistently achieves state-of-the-art performance and remains robust under training-source changes.</description>
	<pubDate>2026-04-28</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1349: UGDMoE: An Uncertainty-Guided Mixture-of-Experts Decoder for Open-Vocabulary Remote Sensing Segmentation</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1349">doi: 10.3390/rs18091349</a></p>
	<p>Authors:
		Wenqiu Qu
		Guifei Jing
		Qiang Yuan
		Zhushenyu Guo
		Jianfeng Zhang
		</p>
	<p>Rapid urbanization and the rapid accumulation of multi-source and multi-temporal Earth observation data are creating an increasing demand for remote sensing models that can flexibly support fine-grained monitoring beyond fixed label taxonomies. Open-vocabulary remote sensing image semantic segmentation (OVRSIS) aims to segment text-specified categories beyond a fixed label space with vision–language foundation models. However, dense remote sensing scenes make pixel–text matching highly vulnerable to semantic confusion and misalignment, owing to extreme scale variation, thin structures, repetitive textures, and prompt sensitivity. To address these challenges, we propose UGDMoE, an uncertainty-guided mixture-of-experts framework for OVRSIS. First, we design a domain-specific MoE decoder with three geometrically specialized experts—for slender structures, mid-scale objects, and large-region context—routed by the alignment-risk cue U0. Second, we introduce a lightweight prompt–response estimation strategy that quantifies prediction dispersion across semantically equivalent prompts to derive U0 in an annotation-free manner. Third, we develop prompt ensemble-based likelihood calibration (PELC), which takes the shared alignment-risk cue U0 as input to calibrate prompt-specific logits before refinement. Finally, we design a lightweight uncertainty-aware structure refinement module that, guided by U0, selectively fuses early visual features with segmentation logits to restore boundary continuity and connectivity of thin structures. We conduct extensive experiments on eight OVRSIS benchmarks under cross-dataset evaluation protocols. Trained on DLRSD, it achieves 46.97 m-mIoU and 63.31 m-mACC, surpassing the strongest baseline by 0.76 and 0.62 points; trained on iSAID, it reaches 37.47 m-mIoU and 58.52 m-mACC, improving over the strongest competitor by 0.71 and 0.61 points. UGDMoE consistently achieves state-of-the-art performance and remains robust under training-source changes.</p>
	]]></content:encoded>

	<dc:title>UGDMoE: An Uncertainty-Guided Mixture-of-Experts Decoder for Open-Vocabulary Remote Sensing Segmentation</dc:title>
			<dc:creator>Wenqiu Qu</dc:creator>
			<dc:creator>Guifei Jing</dc:creator>
			<dc:creator>Qiang Yuan</dc:creator>
			<dc:creator>Zhushenyu Guo</dc:creator>
			<dc:creator>Jianfeng Zhang</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091349</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-04-28</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-04-28</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1349</prism:startingPage>
		<prism:doi>10.3390/rs18091349</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1349</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2072-4292/18/9/1348">

	<title>Remote Sensing, Vol. 18, Pages 1348: Integrating Thermodynamic Priors and Spatiotemporal Features into a Physics-Guided Deep Learning Framework for Cloud Radar Clear-Air Echo Identification</title>
	<link>https://www.mdpi.com/2072-4292/18/9/1348</link>
	<description>Accurate echo classification is crucial for Millimeter-wave Cloud Radar (MMCR) data quality control. Existing approaches, however, often struggle to generalize across complex scenes or lack physical interpretability. Here we propose PhySNet, a physics-guided network that combines thermodynamic priors with spatiotemporal radar features, embedding physical information across the full pipeline from feature extraction to final outputs. Based on the coupling between the lifting condensation level (LCL) and daytime clear-air echo heights, and the lagged correlation between nocturnal clear-air echo heights and their daytime counterparts, we design a physics-constrained gating block (PCGB). The PCGB extracts thermodynamic states and evolution trends from collocated surface observations, generating a clear-air echo probability map that weights the initial radar features. Building on this, we add a parallel regression branch of effective-clutter-height (ECH). This branch fuses thermodynamic features with radar spatiotemporal features, enabling the model to learn to predict the clear-air echo boundary. Finally, we apply an adaptive height filter using the predicted ECH sequence to refine the classification results. Evaluated on a multi-region, multi-season dataset from China, PhySNet achieves a probability of detection (POD) of 98.28% for meteorological echoes and 95.87% for clear-air echoes, outperforming conventional methods. By coupling data-driven learning with physical rules, our approach provides a high-accuracy, interpretable solution for cloud radar clear-air echo identification.</description>
	<pubDate>2026-04-28</pubDate>

	<content:encoded><![CDATA[
	<p><b>Remote Sensing, Vol. 18, Pages 1348: Integrating Thermodynamic Priors and Spatiotemporal Features into a Physics-Guided Deep Learning Framework for Cloud Radar Clear-Air Echo Identification</b></p>
	<p>Remote Sensing <a href="https://www.mdpi.com/2072-4292/18/9/1348">doi: 10.3390/rs18091348</a></p>
	<p>Authors:
		Jiapeng Wang
		Shuzhen Hu
		Jie Huang
		Jiakun Yuan
		Ruotong Yan
		Qinglei Zhang
		Aoli Yang
		</p>
	<p>Accurate echo classification is crucial for Millimeter-wave Cloud Radar (MMCR) data quality control. Existing approaches, however, often struggle to generalize across complex scenes or lack physical interpretability. Here we propose PhySNet, a physics-guided network that combines thermodynamic priors with spatiotemporal radar features, embedding physical information across the full pipeline from feature extraction to final outputs. Based on the coupling between the lifting condensation level (LCL) and daytime clear-air echo heights, and the lagged correlation between nocturnal clear-air echo heights and their daytime counterparts, we design a physics-constrained gating block (PCGB). The PCGB extracts thermodynamic states and evolution trends from collocated surface observations, generating a clear-air echo probability map that weights the initial radar features. Building on this, we add a parallel regression branch of effective-clutter-height (ECH). This branch fuses thermodynamic features with radar spatiotemporal features, enabling the model to learn to predict the clear-air echo boundary. Finally, we apply an adaptive height filter using the predicted ECH sequence to refine the classification results. Evaluated on a multi-region, multi-season dataset from China, PhySNet achieves a probability of detection (POD) of 98.28% for meteorological echoes and 95.87% for clear-air echoes, outperforming conventional methods. By coupling data-driven learning with physical rules, our approach provides a high-accuracy, interpretable solution for cloud radar clear-air echo identification.</p>
	]]></content:encoded>

	<dc:title>Integrating Thermodynamic Priors and Spatiotemporal Features into a Physics-Guided Deep Learning Framework for Cloud Radar Clear-Air Echo Identification</dc:title>
			<dc:creator>Jiapeng Wang</dc:creator>
			<dc:creator>Shuzhen Hu</dc:creator>
			<dc:creator>Jie Huang</dc:creator>
			<dc:creator>Jiakun Yuan</dc:creator>
			<dc:creator>Ruotong Yan</dc:creator>
			<dc:creator>Qinglei Zhang</dc:creator>
			<dc:creator>Aoli Yang</dc:creator>
		<dc:identifier>doi: 10.3390/rs18091348</dc:identifier>
	<dc:source>Remote Sensing</dc:source>
	<dc:date>2026-04-28</dc:date>

	<prism:publicationName>Remote Sensing</prism:publicationName>
	<prism:publicationDate>2026-04-28</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1348</prism:startingPage>
		<prism:doi>10.3390/rs18091348</prism:doi>
	<prism:url>https://www.mdpi.com/2072-4292/18/9/1348</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
    
<cc:License rdf:about="https://creativecommons.org/licenses/by/4.0/">
	<cc:permits rdf:resource="https://creativecommons.org/ns#Reproduction" />
	<cc:permits rdf:resource="https://creativecommons.org/ns#Distribution" />
	<cc:permits rdf:resource="https://creativecommons.org/ns#DerivativeWorks" />
</cc:License>

</rdf:RDF>
