-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathbilateral_filter.h
106 lines (87 loc) · 4.09 KB
/
bilateral_filter.h
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
#pragma once
#include "helpers.h"

#include <cmath>
#include <map>
#include <vector>
// Standard deviation of the depth-similarity term (see gaussian_kernel()).
constexpr float DEPTH_SIGMA = 0.1f;
// Standard deviation of the spatial Gaussian (see getGaussianKernel()).
constexpr float SPACE_SIGMA = 2.0f;
// Maps 2D pixel coordinates (x, y) to the flat index used by Image<T>::data,
// assuming row-major storage with rows of image.width elements.
template<typename T>
int getImageOffset(const Image<T>& image, int x, int y)
{
    return y * image.width + x;
}
// Builds a normalized 2D Gaussian kernel with (2*aperture_size + 1)^2 entries,
// laid out x-major: index = (x + a) * (2a + 1) + (y + a).
// The weights sum to 1 after normalization.
//
// aperture_size: kernel radius in pixels (kept as float for backward
//                compatibility with existing callers; truncated to int).
// sigma:         standard deviation of the Gaussian falloff.
//
// Marked inline: this function is defined in a header, so a non-inline
// definition would violate the ODR when included from multiple TUs.
inline std::vector<float> getGaussianKernel(float aperture_size, float sigma = SPACE_SIGMA)
{
    const int radius = static_cast<int>(aperture_size);
    const int kernel_size = 2 * radius + 1;

    std::vector<float> kernel;
    kernel.reserve(static_cast<std::size_t>(kernel_size) * kernel_size);

    float sum = 0.0f;
    for (int x = -radius; x <= radius; ++x)
    {
        for (int y = -radius; y <= radius; ++y)
        {
            const float value = std::exp(-(x * x + y * y) / (2.0f * sigma * sigma));
            kernel.push_back(value);
            sum += value;
        }
    }
    // Normalize so the kernel weights sum to 1 (range-for avoids the
    // signed/unsigned comparison of the original index loop).
    for (float& weight : kernel)
    {
        weight /= sum;
    }
    return kernel;
}
// Unnormalized 1D Gaussian weight of x around mean: returns 1 when x == mean
// and falls off with |x - mean|; sigma controls the falloff width (defaults
// to the depth-similarity sigma).
//
// Marked inline: defined in a header, so it must be inline to avoid ODR
// violations when this file is included from multiple TUs.
inline float gaussian_kernel(float mean, float x, float sigma = DEPTH_SIGMA)
{
    const float d = x - mean;
    return std::exp(-d * d / (2.0f * sigma * sigma));
}
// Unnormalized 2D Gaussian weight at offset (x, y) from the origin.
// Marked inline: defined in a header, so it must be inline to avoid ODR
// violations when this file is included from multiple TUs.
inline float gaussian_kernel_2d(float x, float y, float sigma)
{
    return std::exp(-(x * x + y * y) / (2.0f * sigma * sigma));
}
// Depth-of-field style cross bilateral filter.
//
// Blurs input_rgb_image guided by depth_map: a pixel whose depth matches the
// depth at (focus_x, focus_y) keeps a strong "sharp" self-weight, while
// out-of-focus pixels are averaged over a (2*aperture_size + 1)^2 window
// weighted by a spatial Gaussian and a depth-difference term.
//
// input_rgb_image: image to blur.
// depth_map:       per-pixel depth; assumed same dimensions as the RGB image
//                  (indexed with the same offsets) — TODO confirm with callers.
// focus_x/focus_y: pixel whose depth defines the focal plane.
// aperture_size:   blur radius in pixels.
// tolerance:       currently unused; kept for interface compatibility.
//
// Marked inline: defined in a header, so it must be inline to avoid ODR
// violations when included from multiple TUs.
inline ImageRGB cross_bilateral_filter(const ImageRGB& input_rgb_image, const ImageFloat& depth_map, int focus_x, int focus_y, int aperture_size, float tolerance = 0.0f)
{
    auto output_image = ImageRGB(input_rgb_image.width, input_rgb_image.height);
    // The spatial kernel is identical for every pixel, so build it once.
    auto spatial_kernel = getGaussianKernel(aperture_size);
    const int kernel_stride = 2 * aperture_size + 1;
    // Depth of the focal plane; constant over the whole image, so hoist it
    // out of the per-pixel loop instead of re-reading it per neighbour.
    const float focus_depth = depth_map.data[getImageOffset(depth_map, focus_x, focus_y)];
    // Parallelize only this outer per-pixel loop. The original also put
    // "parallel for" on the two aperture loops; nested parallel regions
    // inside an already-parallel loop oversubscribe threads for no benefit.
#pragma omp parallel for shared(output_image)
    for (int xy = 0; xy < output_image.width * output_image.height; ++xy)
    {
        const int x = xy % output_image.width;
        const int y = xy / output_image.width;
        const int center = getImageOffset(input_rgb_image, x, y);
        // "Sharp" self-weight: ~1 when this pixel lies on the focal plane,
        // falling off as its depth diverges from the focus depth.
        float normalization_factor = gaussian_kernel(focus_depth, depth_map.data[getImageOffset(depth_map, x, y)]);
        // Accumulate the weighted sum locally; write the pixel once at the end.
        auto accum = normalization_factor * input_rgb_image.data[center];
        // Full symmetric window: the original used "< aperture_size", which
        // silently dropped the last row/column of the (2a+1)^2 kernel built
        // by getGaussianKernel.
        for (int x_a = -aperture_size; x_a <= aperture_size; ++x_a)
        {
            for (int y_a = -aperture_size; y_a <= aperture_size; ++y_a)
            {
                const int x_i = x + x_a;
                const int y_i = y + y_a;
                // Skip neighbours outside the image.
                if (x_i < 0 || x_i >= output_image.width || y_i < 0 || y_i >= output_image.height)
                {
                    continue;
                }
                // Spatial weight from the precomputed x-major kernel.
                const float spatial_weight = spatial_kernel[(x_a + aperture_size) * kernel_stride + (y_a + aperture_size)];
                // Depth term: (1 - g)^2 is ~0 near the focal plane (so blur
                // does not leak into in-focus pixels) and ~1 far from it.
                // Squared directly instead of the original pow(..., 2).
                const float g = gaussian_kernel(focus_depth, depth_map.data[getImageOffset(depth_map, x_i, y_i)]);
                const float depth_weight = (1.0f - g) * (1.0f - g);
                const float weight = spatial_weight * depth_weight;
                normalization_factor += weight;
                const auto& neighbour = input_rgb_image.data[getImageOffset(input_rgb_image, x_i, y_i)];
                accum.r += weight * neighbour.r;
                accum.g += weight * neighbour.g;
                accum.b += weight * neighbour.b;
            }
        }
        // Normalize by the total weight so overall brightness is preserved.
        accum.r /= normalization_factor;
        accum.g /= normalization_factor;
        accum.b /= normalization_factor;
        output_image.data[center] = accum;
    }
    return output_image;
}