Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Projects compared: lpaillet/tmp_processing and arouxel/tmp_processing
Commits on Source (5)
@@ -99,6 +99,106 @@ def SA_regularization_new(panchro,list_comp_img,r_step, c_step, list_of_filt_cub
return recons_img
def SA_regularization_newwave(panchro, list_comp_img, r_step, c_step, list_of_filt_cubes=None, list_of_masks=None,
                              w0=None, noise=0):
    """
    Inputs:
    - panchro, 2D array: panchromatic image of the scene
    - list_comp_img, list of 2D arrays: list of compressed measurements
    - r_step, int: number of rows of each rectangular region
    - c_step, int: number of columns of each rectangular region
    - list_of_filt_cubes, list of 3D arrays of size n_row x n_col x n_band: filtering DMD cubes used
    - list_of_masks, list of 2D arrays of size n_row x (n_col+n_band): mask of each acquisition, used only if list_of_filt_cubes is not given
    - w0, int: index of the central band that is not shifted when creating a filtering cube
    - noise, float: standard deviation of the Gaussian noise added to the input image (unused here, kept for interface consistency)
    Output:
    - recons_img, 3D array, size n_row x n_col x n_band: reconstructed hyperspectral cube
    """
    n_row, n_col, n_band = list_of_filt_cubes[0].shape  # Dimensions are read from the provided filtering cubes
    if list_of_filt_cubes is None:
        list_of_filt_cubes = [create_filt_cube(mask, [n_row, n_col, n_band], w0) for mask in list_of_masks]
    temp_image = np.zeros((n_row, n_col, n_band))
    # Reconstruct every region
    for r in range(0, n_row, r_step):
        for c in range(0, n_col, c_step):
            cur_region_row = list(range(r, r + r_step))
            cur_region_col = list(range(c, c + c_step))
            if r + r_step > n_row and c + c_step > n_col:
                # Incomplete corner region: left empty
                temp_image[np.ix_(list(range(r, n_row)), list(range(c, n_col)))] = np.zeros(n_band)
            elif r + r_step > n_row and c + c_step <= n_col:
                # Incomplete bottom region: left empty
                temp_image[np.ix_(list(range(r, n_row)), cur_region_col)] = np.zeros(n_band)
            elif r + r_step <= n_row and c + c_step > n_col:
                # Incomplete right region: left empty
                temp_image[np.ix_(cur_region_row, list(range(c, n_col)))] = np.zeros(n_band)
            else:
                panchro_region = panchro[np.ix_(cur_region_row, cur_region_col)]
                s = get_spectrum_region_newwave(list_comp_img, panchro_region, cur_region_row, cur_region_col, list_of_filt_cubes)
                temp_image[np.ix_(cur_region_row, cur_region_col)] = np.dot(
                    panchro_region.reshape((len(cur_region_row) * len(cur_region_col), 1)),
                    s.reshape((1, n_band))
                ).reshape((len(cur_region_row), len(cur_region_col), n_band))
    recons_img = temp_image
    return recons_img
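# Model note: within each r_step x c_step region the scene is assumed separable,
# I(r, c, b) ~ P(r, c) * s(b), so every reconstructed block is the outer (rank-1) product of
# the panchromatic patch and the single spectrum s estimated for that region.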
def get_spectrum_region_newwave(list_comp_imgs, panchro_region, region_row, region_col, list_of_filt_cubes, mu_lambda=1.):
    """
    Get the spectrum of the region. Works only for square regions for now.
    Inputs:
    - list_comp_imgs, list of 2D arrays: list of compressed measurements
    - panchro_region, 2D array: panchromatic image of the considered region
    - region_row, 1D array: list of the region rows
    - region_col, 1D array: list of the region columns
    - list_of_filt_cubes, list of 3D arrays: filtering DMD cubes used
    - mu_lambda, float: weight of the Tikhonov regularization
    Output:
    - s_hat, 1D array of length n_band: spectrum estimated for the region
    """
    n_band = list_of_filt_cubes[0].shape[2]
    # Filtering cubes restricted to the region, stacked over the acquisitions
    DMD_cube_K = np.vstack([cube[np.ix_(region_row, region_col)].copy() for cube in list_of_filt_cubes])
    # G matrix, based on the filtering cubes and panchromatic intensities, size KR x n_band
    G_mat = DMD_cube_K.reshape((len(list_of_filt_cubes) * len(region_col) * len(region_row), n_band)) \
        * np.tile(panchro_region.flatten(), len(list_of_filt_cubes))[:, np.newaxis]
    # Stack the measured values of the region for every acquisition, size KR
    m_concat = np.zeros(len(list_comp_imgs) * panchro_region.size)
    for idx, comp_img in enumerate(list_comp_imgs):
        compressed_img = comp_img[np.ix_(region_row, region_col)]
        flat_comp_img = compressed_img.flatten()
        starting_idx = idx * len(region_col) * len(region_row)
        m_concat[starting_idx:starting_idx + len(region_col) * len(region_row)] = flat_comp_img
    np.save("m_concat.npy", m_concat)  # Save the measurement vector to disk
    # Inverse covariance of the noise model; entries where m_concat is zero or NaN are neutralized
    gamma_inv = np.nan_to_num(np.diag(1 / m_concat), copy=False, nan=0.0, posinf=0.0)
    finite_diff = np.diag(-1 * np.ones(n_band), 0) + np.diag(np.ones(n_band - 1), 1)  # Finite difference along the bands
    mat_to_inv = np.nan_to_num(np.transpose(G_mat) @ gamma_inv @ G_mat + mu_lambda * np.transpose(finite_diff) @ finite_diff,
                               copy=False, nan=0.0)
    s_hat = np.linalg.solve(mat_to_inv, np.transpose(G_mat) @ gamma_inv @ m_concat)  # Reconstruction of the region's spectrum
    return s_hat
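# The solve above is the closed form of a Tikhonov-regularized weighted least-squares
# estimate of the region spectrum s (K acquisitions, R pixels per region, m the stacked
# measurements):
#
#     s_hat = argmin_s (G s - m)^T Gamma^-1 (G s - m) + mu_lambda * ||D s||^2
#           = (G^T Gamma^-1 G + mu_lambda * D^T D)^-1 G^T Gamma^-1 m
#
# with Gamma^-1 = diag(1 / m) (a shot-noise-like weighting) and D the first-order finite
# difference operator along the bands, which acts as a spectral smoothness prior.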
def get_spectrum_region_init(img, panchro_img, region_row, region_col, list_of_filt_cubes, mu_lambda=1.):
    """
    Get the spectrum of the region. Works only for square regions for now.
@@ -223,7 +323,7 @@ if __name__ == '__main__':
    ## PARAMS ##
    ##############################
-    hyp_file = 'paviaU' # Hyperspectral cube to consider
+    hyp_file = 'datasets/PaviaU' # Hyperspectral cube to consider
    if hyp_file == 'paviaU':
        meta_data = scipy.io.loadmat('./datasets/PaviaU/PaviaU.mat')
......
import matplotlib.pyplot as plt
import scipy.io
import numpy as np
import h5py
def SA_regularization_leo_corrected(img, r_step, c_step, list_of_filt_cubes=None, list_of_masks=None, w0=None, noise=0):
    """
    Inputs:
    - img, 3D array, size n_row x n_col x n_band: hyperspectral image we want to reconstruct
    - r_step, int: number of rows of each rectangular region
    - c_step, int: number of columns of each rectangular region
    - list_of_filt_cubes, list of 3D arrays of size n_row x n_col x n_band: filtering DMD cubes used
    - list_of_masks, list of 2D arrays of size n_row x (n_col+n_band): mask of each acquisition, used only if list_of_filt_cubes is not given
    - w0, int: index of the central band that is not shifted when creating a filtering cube
    - noise, float: standard deviation, relative to the image mean, of the zero-mean Gaussian noise added to the input image
    Output:
    - recons_img, 3D array, size n_row x n_col x n_band: reconstructed hyperspectral cube
    """
    n_row, n_col, n_band = img.shape
    recons_img = np.zeros((n_row, n_col, n_band))
    noisy_image = img + np.mean(img) * np.random.normal(0, noise, (n_row, n_col, n_band))  # Add noise
    panchro_img = np.sum(noisy_image, axis=2)  # Panchromatic image obtained by summing the bands
    if list_of_filt_cubes is None:
        list_of_filt_cubes = [create_filt_cube(mask, [n_row, n_col, n_band], w0) for mask in list_of_masks]
    np.save("filt_cubes.npy", list_of_filt_cubes)  # Save the filtering cubes to disk
    temp_image = np.zeros((n_row, n_col, n_band))
    # Reconstruct every region
    for r in range(0, n_row, r_step):
        for c in range(0, n_col, c_step):
            cur_region_row = list(range(r, r + r_step))
            cur_region_col = list(range(c, c + c_step))
            if r + r_step > n_row and c + c_step > n_col:
                # Incomplete corner region: left empty
                temp_image[np.ix_(list(range(r, n_row)), list(range(c, n_col)))] = np.zeros(n_band)
            elif r + r_step > n_row and c + c_step <= n_col:
                # Incomplete bottom region: left empty
                temp_image[np.ix_(list(range(r, n_row)), cur_region_col)] = np.zeros(n_band)
            elif r + r_step <= n_row and c + c_step > n_col:
                # Incomplete right region: left empty
                temp_image[np.ix_(cur_region_row, list(range(c, n_col)))] = np.zeros(n_band)
            else:
                panchro_region = panchro_img[np.ix_(cur_region_row, cur_region_col)]
                s = get_spectrum_region_init(noisy_image, panchro_region, cur_region_row, cur_region_col, list_of_filt_cubes)
                temp_image[np.ix_(cur_region_row, cur_region_col)] = np.dot(
                    panchro_region.reshape((len(cur_region_row) * len(cur_region_col), 1)),
                    s.reshape((1, n_band))
                ).reshape((len(cur_region_row), len(cur_region_col), n_band))
    recons_img = temp_image
    return recons_img
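# Minimal usage sketch of SA_regularization_leo_corrected on synthetic data. The cube size,
# number of masks and mask density below are illustrative choices only, not values used in the
# original experiments.
def _example_leo_corrected_synthetic():
    rng = np.random.default_rng(0)
    n_row, n_col, n_band = 10, 10, 8
    img = rng.random((n_row, n_col, n_band))  # Synthetic hyperspectral cube
    masks = [rng.choice([0, 1], size=(n_row, n_col + n_band), p=[0.8, 0.2]) for _ in range(3)]
    return SA_regularization_leo_corrected(img, r_step=5, c_step=5, list_of_masks=masks)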
def SA_regularization_new(panchro, list_comp_img, r_step, c_step, list_of_filt_cubes=None, list_of_masks=None, w0=None, noise=0):
    """
    Inputs:
    - panchro, 2D array: panchromatic image of the scene
    - list_comp_img, list of 2D arrays: list of compressed measurements
    - r_step, int: number of rows of each rectangular region
    - c_step, int: number of columns of each rectangular region
    - list_of_filt_cubes, list of 3D arrays of size n_row x n_col x n_band: filtering DMD cubes used
    - list_of_masks, list of 2D arrays of size n_row x (n_col+n_band): mask of each acquisition, used only if list_of_filt_cubes is not given
    - w0, int: index of the central band that is not shifted when creating a filtering cube
    - noise, float: standard deviation of the Gaussian noise added to the input image (unused here, kept for interface consistency)
    Output:
    - recons_img, 3D array, size n_row x n_col x n_band: reconstructed hyperspectral cube
    """
    n_row, n_col, n_band = list_of_filt_cubes[0].shape  # Dimensions are read from the provided filtering cubes
    if list_of_filt_cubes is None:
        list_of_filt_cubes = [create_filt_cube(mask, [n_row, n_col, n_band], w0) for mask in list_of_masks]
    temp_image = np.zeros((n_row, n_col, n_band))
    # Reconstruct every region
    for r in range(0, n_row, r_step):
        for c in range(0, n_col, c_step):
            cur_region_row = list(range(r, r + r_step))
            cur_region_col = list(range(c, c + c_step))
            if r + r_step > n_row and c + c_step > n_col:
                # Incomplete corner region: left empty
                temp_image[np.ix_(list(range(r, n_row)), list(range(c, n_col)))] = np.zeros(n_band)
            elif r + r_step > n_row and c + c_step <= n_col:
                # Incomplete bottom region: left empty
                temp_image[np.ix_(list(range(r, n_row)), cur_region_col)] = np.zeros(n_band)
            elif r + r_step <= n_row and c + c_step > n_col:
                # Incomplete right region: left empty
                temp_image[np.ix_(cur_region_row, list(range(c, n_col)))] = np.zeros(n_band)
            else:
                panchro_region = panchro[np.ix_(cur_region_row, cur_region_col)]
                # The compressed measurements go through get_spectrum_region_new
                # (get_spectrum_region_init expects the full groundtruth cube instead)
                s = get_spectrum_region_new(list_comp_img, panchro_region, cur_region_row, cur_region_col, list_of_filt_cubes)
                temp_image[np.ix_(cur_region_row, cur_region_col)] = np.dot(
                    panchro_region.reshape((len(cur_region_row) * len(cur_region_col), 1)),
                    s.reshape((1, n_band))
                ).reshape((len(cur_region_row), len(cur_region_col), n_band))
    recons_img = temp_image
    return recons_img
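# SA_regularization_leo_corrected simulates the acquisitions from a groundtruth cube, whereas
# SA_regularization_new and SA_regularization_newwave reconstruct directly from measured data:
# a panchromatic image plus the list of compressed acquisitions (e.g. produced with SIMCA).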
def get_spectrum_region_init(img, panchro_region, region_row, region_col, list_of_filt_cubes, mu_lambda=1.):
    """
    Get the spectrum of the region. Works only for square regions for now.
    Inputs:
    - img, 3D array: groundtruth hyperspectral image we want to filter
    - panchro_region, 2D array: panchromatic image of the considered region
    - region_row, 1D array: list of the region rows
    - region_col, 1D array: list of the region columns
    - list_of_filt_cubes, list of 3D arrays: filtering DMD cubes used
    - mu_lambda, float: weight of the Tikhonov regularization
    Output:
    - s_hat, 1D array of length n_band: spectrum estimated for the region
    """
    n_band = img.shape[2]
    # Filtering cubes restricted to the region, stacked over the acquisitions
    DMD_cube_K = np.vstack([cube[np.ix_(region_row, region_col)].copy() for cube in list_of_filt_cubes])
    # G matrix, based on the filtering cubes and panchromatic intensities, size KR x n_band
    G_mat = DMD_cube_K.reshape((len(list_of_filt_cubes) * len(region_col) * len(region_row), n_band)) \
        * np.tile(panchro_region.flatten(), len(list_of_filt_cubes))[:, np.newaxis]
    # Simulated measurements of the spectral intensity of each pixel in the region, size KR
    m = np.sum(DMD_cube_K.reshape((len(list_of_filt_cubes) * len(region_col) * len(region_row), n_band)) *
               np.tile(img[np.ix_(region_row, region_col)].reshape(len(region_row) * len(region_col), n_band),
                       (len(list_of_filt_cubes), 1)), axis=1)
    np.save("m.npy", m)  # Save the measurement vector to disk
    # Inverse covariance of the noise model; entries where m is zero or NaN are neutralized
    gamma_inv = np.nan_to_num(np.diag(1 / m), copy=False, nan=0.0, posinf=0.0)
    finite_diff = np.diag(-1 * np.ones(n_band), 0) + np.diag(np.ones(n_band - 1), 1)  # Finite difference along the bands
    mat_to_inv = np.nan_to_num(np.transpose(G_mat) @ gamma_inv @ G_mat + mu_lambda * np.transpose(finite_diff) @ finite_diff,
                               copy=False, nan=0.0)
    s_hat = np.linalg.solve(mat_to_inv, np.transpose(G_mat) @ gamma_inv @ m)  # Reconstruction of the region's spectrum
    return s_hat
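# Here the measurement vector m is simulated from the groundtruth: for acquisition k and pixel
# (r, c), the entry is sum_b T_k(r, c, b) * I(r, c, b), i.e. the compressed value that DMD
# pattern would have produced. get_spectrum_region_new below uses real measured values instead.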
def get_spectrum_region_new(list_comp_imgs, panchro_region, region_row, region_col, list_of_filt_cubes, mu_lambda=1.):
    """
    Get the spectrum of the region. Works only for square regions for now.
    Inputs:
    - list_comp_imgs, list of 2D arrays: list of compressed measurements
    - panchro_region, 2D array: panchromatic image of the considered region
    - region_row, 1D array: list of the region rows
    - region_col, 1D array: list of the region columns
    - list_of_filt_cubes, list of 3D arrays: filtering DMD cubes used
    - mu_lambda, float: weight of the Tikhonov regularization
    Output:
    - s_hat, 1D array of length n_band: spectrum estimated for the region
    """
    n_band = list_of_filt_cubes[0].shape[2]
    # Filtering cubes restricted to the region, stacked over the acquisitions
    DMD_cube_K = np.vstack([cube[np.ix_(region_row, region_col)].copy() for cube in list_of_filt_cubes])
    # G matrix, based on the filtering cubes and panchromatic intensities, size KR x n_band
    G_mat = DMD_cube_K.reshape((len(list_of_filt_cubes) * len(region_col) * len(region_row), n_band)) \
        * np.tile(panchro_region.flatten(), len(list_of_filt_cubes))[:, np.newaxis]
    # Stack the measured values of the region for every acquisition, size KR
    m_concat = np.zeros(len(list_comp_imgs) * panchro_region.size)
    for idx, comp_img in enumerate(list_comp_imgs):
        compressed_img = comp_img[np.ix_(region_row, region_col)]
        flat_comp_img = compressed_img.flatten()
        starting_idx = idx * len(region_col) * len(region_row)
        m_concat[starting_idx:starting_idx + len(region_col) * len(region_row)] = flat_comp_img
    # Inverse covariance of the noise model; entries where m_concat is zero or NaN are neutralized
    gamma_inv = np.nan_to_num(np.diag(1 / m_concat), copy=False, nan=0.0, posinf=0.0)
    finite_diff = np.diag(-1 * np.ones(n_band), 0) + np.diag(np.ones(n_band - 1), 1)  # Finite difference along the bands
    mat_to_inv = np.nan_to_num(np.transpose(G_mat) @ gamma_inv @ G_mat + mu_lambda * np.transpose(finite_diff) @ finite_diff,
                               copy=False, nan=0.0)
    s_hat = np.linalg.solve(mat_to_inv, np.transpose(G_mat) @ gamma_inv @ m_concat)  # Reconstruction of the region's spectrum
    return s_hat
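# mu_lambda trades data fidelity against spectral smoothness: larger values yield smoother
# estimated spectra, while mu_lambda -> 0 tends towards the unregularized weighted
# least-squares solution.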
def create_filt_cube(DMD_mask, shape, w0=None):
    """
    Compute a DMD filtering cube based on a mask design
    Inputs:
    - DMD_mask, 2D array of size n_row x (n_col + n_band): mask design of the cube
    - shape, array of size 3 [number of rows, number of columns, number of spectral bands]: shape of the resulting filtering cube
    - w0, int: index of the band that is not shifted (defaults to n_band // 2)
    Output:
    - DMD_cube, 3D array, size n_row x n_col x n_band: filtering cube
    """
    [n_row, n_col, n_band] = shape
    DMD_cube = np.zeros((n_row, n_col, n_band))
    if w0 is None:
        w0 = n_band // 2  # Band that isn't shifted
    for c in range(n_col):
        for b in range(n_band):
            # Band b of column c reads the mask column shifted by w0 - b
            if -1 < c - b + w0 < n_col + n_band:
                DMD_cube[:, c, b] = DMD_mask[:, c - b + w0]
    return DMD_cube
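# Illustrative check of the band-to-column mapping (shapes and w0 chosen only for the example):
# with w0 = 1, band b of column c reads mask column c - b + 1, i.e. successive bands sample the
# mask shifted by one column.
def _example_create_filt_cube():
    mask = np.arange(6).reshape(1, 6)  # 1 row, n_col + n_band = 4 + 2 columns
    cube = create_filt_cube(mask, [1, 4, 2], w0=1)
    # cube[0, :, 0] equals mask[0, 1:5] and cube[0, :, 1] equals mask[0, 0:4]
    return cube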
if __name__ == '__main__':
    ##############################
    ## PARAMS ##
    ##############################
    hyp_file = 'PaviaU'  # Hyperspectral cube to consider
    if hyp_file == 'PaviaU':
        meta_data = scipy.io.loadmat('./datasets/PaviaU/PaviaU.mat')
        img = meta_data['paviaU']
    elif hyp_file == 'lego_wall_3d':
        meta_data = scipy.io.loadmat('/home/lpaillet/Documents/Codes/data/Hyperspectral Data/3D Lego Bricks/GroundTruth.mat')
        img = meta_data['GroundTruth']
    elif hyp_file == 'lego_wall':
        meta_data = scipy.io.loadmat('/home/lpaillet/Documents/Codes/data/Hyperspectral Data/LEGO Bricks Wall/GroundTruth.mat')
        img = meta_data['GroundTruth']
    else:
        print('Wrong file name')
    n_row, n_col, n_band = img.shape
    file_to_save = 'SA_recons_cube.h5'  # File where the reconstructed hyperspectral image will be saved
    r_step = 5  # Height (number of rows) of the rectangular regions used for the reconstruction
    c_step = 5  # Width (number of columns) of the rectangular regions used for the reconstruction
    # Create masks
    DMD_mask_1 = np.random.choice([0, 1], size=(n_row, n_col + n_band), p=[0.8, 0.2])
    DMD_mask_2 = np.random.choice([0, 1], size=(n_row, n_col + n_band), p=[0.8, 0.2])
    DMD_mask_3 = np.random.choice([0, 1], size=(n_row, n_col + n_band), p=[0.8, 0.2])
    DMD_mask_4 = np.random.choice([0, 1], size=(n_row, n_col + n_band), p=[0.8, 0.2])
    DMD_mask_5 = np.random.choice([0, 1], size=(n_row, n_col + n_band), p=[0.8, 0.2])
    list_of_masks = [DMD_mask_1, DMD_mask_2, DMD_mask_3, DMD_mask_4, DMD_mask_5]
    list_of_filt_cubes = None
    noise = 0.  # Standard deviation of the noise added to the image
    w0 = None  # Central wavelength not shifted when constructing filtering cubes
    ############################
    ## EXEC ##
    ############################
    recons_img = SA_regularization_leo_corrected(img, r_step, c_step, list_of_filt_cubes, list_of_masks, w0, noise)
    with h5py.File(file_to_save, 'w') as f:
        dset = f.create_dataset('reconstructed_cube', (n_row, n_col, n_band))
        dset[...] = recons_img
    plt.imshow(recons_img[:, :, 50])
    plt.show()
from SAregularization import *
import matplotlib.pyplot as plt
def generate_spectral_angular_map_optimized(recons_scene, interpolated_scene):
    # Norms of each pair of corresponding spectra in recons_scene and interpolated_scene
    recons_norms = np.linalg.norm(recons_scene, axis=-1)
    interpolated_norms = np.linalg.norm(interpolated_scene, axis=-1)
    # Dot product of each pair of corresponding spectra
    dot_products = np.einsum('ijk,ijk->ij', recons_scene, interpolated_scene)
    # Cosine of the angle between corresponding spectra
    cosines = dot_products / (recons_norms * interpolated_norms)
    # Spectral Angle Mapper (SAM), normalized to [0, 1]
    sam = (2 / np.pi) * np.abs(np.arccos(cosines))
    return sam
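# SAM(x, y) = arccos(<x, y> / (||x|| ||y||)) per pixel; the 2/pi factor maps the angle between
# non-negative spectra (at most pi/2) onto [0, 1], 0 meaning identical spectral shape and 1
# orthogonal spectra. Pixels where either norm is zero are left as NaN in the map.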
# scene_nm = "sam_test_2_paviau"
directory = f"./F_fluocompact_SAM_ROM_01/"
scene_name = directory + "interpolated_scene.h5"
with h5py.File(scene_name, 'r') as f:
    scene = f['interpolated_scene'][...]
wavelengths_file = directory + "wavelengths.h5"
with h5py.File(wavelengths_file, 'r') as f:
    wavelengths = f['wavelengths'][...]
file_to_save = 'SA_recons_cube_with_SIMCA.h5'  # File where the reconstructed hyperspectral image will be saved
r_step = 5  # Height (number of rows) of the rectangular regions used for the reconstruction
c_step = 5  # Width (number of columns) of the rectangular regions used for the reconstruction
noise = 0.00  # Standard deviation of the noise added to the image
w0 = None  # Central wavelength not shifted when constructing filtering cubes
-nb_of_acq = 1
+nb_of_acq = 10
list_of_filt_cubes = []
@@ -19,9 +46,9 @@ list_of_masks = []
list_comp_img = []
for i in range(nb_of_acq):
mask_name = f"./data/mask_{i}.h5"
filtering_cube_name = f"./data/filtering_cube_{i}.h5"
comp_img_name = f"./data/measurement_{i}.h5"
mask_name = directory + f"mask_{i}.h5"
filtering_cube_name = directory + f"filtering_cube_{i}.h5"
comp_img_name = directory + f"measurement_{i}.h5"
    with h5py.File(filtering_cube_name, 'r') as f:
        list_of_filt_cubes.append(f['filtering_cube'][...])
@@ -32,17 +59,55 @@ for i in range(nb_of_acq):
    with h5py.File(comp_img_name, 'r') as img:
        list_comp_img.append(img['measurement'][...])
list_of_filt_cubes = [np.nan_to_num(cube) for cube in list_of_filt_cubes]  # Replace NaNs in the filtering cubes by 0
panchro_name = f"./data/panchro.h5"
panchro_name = directory + "panchro.h5"
with h5py.File(panchro_name, 'r') as img:
    panchro = img['panchromatic_image'][...]
recons_scene_1 = SA_regularization_new(panchro=panchro,list_comp_img=list_comp_img, r_step=r_step, c_step=c_step, list_of_filt_cubes=list_of_filt_cubes, w0=w0, noise=noise)
recons_scene_2 = SA_regularization_init(scene, r_step, c_step, list_of_filt_cubes, w0, noise)
n_row, n_col, n_band = scene.shape
recons_scene_3 = SA_regularization_newwave(panchro=panchro,list_comp_img=list_comp_img, r_step=r_step, c_step=c_step, list_of_filt_cubes=list_of_filt_cubes, w0=w0, noise=noise)
nb_row, nb_col, nb_wavelength = scene.shape
point_1_idx = [24, 52]
point_2_idx = [150, 132]
point_3_idx = [101, 103]
point_4_idx = [163, 165]
fig, ax = plt.subplots(1, 2, figsize=(15, 8))
ax[0].imshow(np.sum(scene,axis=2))
ax[0].scatter(point_1_idx[1],point_1_idx[0],label="pixel 1",c="r")
ax[0].scatter(point_2_idx[1],point_2_idx[0],label="pixel 2",c="b")
ax[0].scatter(point_3_idx[1],point_3_idx[0],label="pixel 3",c="g")
ax[0].scatter(point_4_idx[1],point_4_idx[0],label="pixel 4",c="grey")
ax[1].scatter(wavelengths,recons_scene_3[point_1_idx[0], point_1_idx[1], :],label="pixel 1",c="r")
ax[1].plot(wavelengths,scene[point_1_idx[0], point_1_idx[1], :],label="pixel 1 scene",c="r")
ax[1].scatter(wavelengths,recons_scene_3[point_2_idx[0], point_2_idx[1], :],label="pixel 2",c="b")
ax[1].plot(wavelengths,scene[point_2_idx[0], point_2_idx[1], :],label="pixel 2 scene",c="b")
ax[1].scatter(wavelengths,recons_scene_3[point_3_idx[0], point_3_idx[1], :],label="pixel 3", c="g")
ax[1].plot(wavelengths,scene[point_3_idx[0], point_3_idx[1], :],label="pixel 3 scene",c="g")
-fig , ax = plt.subplots(1,2)
-ax[0].imshow(recons_scene_1[:,:,20])
-ax[1].imshow(recons_scene_2[:,:,20])
ax[1].scatter(wavelengths,recons_scene_3[point_4_idx[0], point_4_idx[1], :],label="pixel 4",c="grey")
ax[1].plot(wavelengths,scene[point_4_idx[0], point_4_idx[1], :],label="pixel 4 scene", c="grey")
plt.legend()
plt.savefig("reconstructed_spectra.svg")
plt.show()
sam = generate_spectral_angular_map_optimized(recons_scene_3, scene)
plt.imshow(sam,vmin=0,vmax=1)
plt.colorbar()
plt.savefig("sam.svg")
plt.show()
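# The vmin=0 / vmax=1 limits of the SAM display match its normalized range, so the colorbar can
# be read directly as a fraction of the maximal (orthogonal) spectral angle.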
\ No newline at end of file