高斯模糊的算法能夠看阮一峯寫的這篇文章。高斯模糊之所以叫高斯模糊,是由於它運用了高斯的正態分佈的密度函數:
一維形式:
二維形式:
相關計算步驟:
咱們在 shader 中還原一個拼多多版簡易版的高斯模糊算法:
void main() {
    // NOTE(review): gl_FragCoord is a vec4 — dividing it by a vec2 is a type
    // error in GLSL; take the .xy pixel coordinate before normalizing.
    // (`iResulution` is presumably a misspelled `iResolution` uniform —
    // confirm against its declaration before renaming.)
    vec2 st = gl_FragCoord.xy / iResulution;
    vec4 color = vec4(0.0);
    const int coreSize = 3; // sample a 3x3 pixel grid
    vec2 texelOffset = vec2(1.) / vec2(375., 667.); // spacing of one texel
    // Convolution kernel: the per-texel weights of an (unnormalized)
    // 3x3 Gaussian approximation.
    float kernel[9];
    kernel[0] = 1.; kernel[1] = 2.; kernel[2] = 1.;
    kernel[3] = 2.; kernel[4] = 4.; kernel[5] = 2.;
    kernel[6] = 1.; kernel[7] = 2.; kernel[8] = 1.;
    for (int y = 0; y < coreSize; y++)
    {
        for (int x = 0; x < coreSize; x++)
        {
            // Fetch each of the 9 neighboring texels in turn.
            vec4 currentColor = texture2D(inputImageTexture, st + vec2(float(-1 + x) * texelOffset.x, float(-1 + y) * texelOffset.y));
            // Weight-and-accumulate — the convolution itself. Loop indices
            // are constant-index-expressions in GLSL ES, so the kernel can
            // be indexed directly; the original 9-way if/else chain over a
            // running `index` counter is unnecessary.
            color += currentColor * kernel[y * coreSize + x];
        }
    }
    // Divide by the sum of the weights (1+2+1+2+4+2+1+2+1 = 16).
    color /= 16.0;
    gl_FragColor = color;
}
複製代碼
效果不明顯,由於咱們的權重沒有很準確,並且咱們的圖片尺寸比較大(750x1334),若是只對 3x3 的網格進行模糊,效果不夠明顯,下面優化下:
void main() {
    // NOTE(review): gl_FragCoord is a vec4 — use .xy before dividing by the
    // resolution. (`iResulution` is presumably a misspelled `iResolution`
    // uniform — confirm against its declaration before renaming.)
    vec2 st = gl_FragCoord.xy / iResulution;
    vec4 color = vec4(0.0);
    const int coreSize = 3;
    // Double the texel spacing to exaggerate the blur. This is crude; the
    // cleaner approach is to enlarge the kernel itself (3x3 -> 9x9 or more).
    vec2 texelOffset = vec2(2.) / vec2(375., 667.);
    // Weights from the 2-D Gaussian density function; they already sum to
    // (approximately) 1, so no final normalization division is needed.
    float kernel[9];
    kernel[0] = .0947416; kernel[1] = .118318; kernel[2] = .0947416;
    kernel[3] = .118318;  kernel[4] = .147761; kernel[5] = .118318;
    kernel[6] = .0947416; kernel[7] = .118318; kernel[8] = .0947416;
    for (int y = 0; y < coreSize; y++)
    {
        for (int x = 0; x < coreSize; x++)
        {
            vec4 currentColor = texture2D(inputImageTexture, st + vec2(float(-1 + x) * texelOffset.x, float(-1 + y) * texelOffset.y));
            // Loop indices are constant-index-expressions, so the kernel is
            // indexed directly instead of via the original if/else chain.
            color += currentColor * kernel[y * coreSize + x];
        }
    }
    // Weights are pre-normalized, so no `color /= 16.0` step here.
    gl_FragColor = color;
}
複製代碼
只要咱們把 3x3 的網格放大,如 9x9 / 16x16,或者直接放大像素間距,均可以增長模糊效果。
然而上面的實現方式性能是比較差的,由於遍歷的成本太高了。一般拆成兩個一維向量,這樣時間複雜度就由 N×N×W×H 降低爲 2×N×W×H(W 爲圖像的寬,H 爲圖像的高)。
咱們以 5x5 的卷積核,垂直方向取樣爲例:
void main() {
    vec2 iResolution = vec2(375., 667.);
    // BUG FIX: the original declared offset[6]/weight[6] but filled only
    // indices 1..5, leaving weight[0] — used for the center tap below —
    // uninitialized, while offset[1] = 0 made the loop re-sample the center
    // texel twice more. The canonical 1-D 9-tap Gaussian uses indices 0..4,
    // with offset[0] = 0 reserved for the single center tap.
    float offset[5];
    offset[0] = 0.; offset[1] = 1.; offset[2] = 2.; offset[3] = 3.; offset[4] = 4.;
    float weight[5];
    weight[0] = 0.2270270270; weight[1] = 0.1945945946; weight[2] = 0.1216216216;
    weight[3] = 0.0540540541; weight[4] = 0.0162162162;
    // Center tap.
    vec4 color = texture2D(inputImageTexture, vec2(gl_FragCoord) / iResolution) * weight[0];
    // Four taps above and four taps below the current pixel (9 fetches total).
    for (int i = 1; i < 5; i++) {
        color +=
            texture2D(inputImageTexture, (vec2(gl_FragCoord) + vec2(0.0, offset[i])) / iResolution)
            * weight[i];
        color +=
            texture2D(inputImageTexture, (vec2(gl_FragCoord) - vec2(0.0, offset[i])) / iResolution)
            * weight[i];
    }
    gl_FragColor = color;
}
複製代碼
水平方向也是同理的,不過上述方式依舊須要進行 9 次的texture2D()
紋理採樣操做,這篇文章中介紹了一種線性採樣的方式,經過對權重和間距的處理,把 9 次紋理採樣操做減小到 5 次:spa
void main() {
    vec2 iResolution = vec2(375., 667.);
    // BUG FIX: the original filled indices 1..3 (with offset[1] = 0) and
    // left weight[0] — used for the center tap — uninitialized; the center
    // texel was also re-sampled twice inside each loop. The linear-sampling
    // offsets and merged weights belong at indices 0..2, with offset[0] = 0
    // for the single center tap — this is what reduces 9 fetches to 5 per
    // direction.
    float offset[3];
    offset[0] = 0.; offset[1] = 1.3846153846; offset[2] = 3.2307692308;
    float weight[3];
    weight[0] = 0.2270270270; weight[1] = 0.3162162162; weight[2] = 0.0702702703;
    vec4 color = texture2D(inputImageTexture, vec2(gl_FragCoord) / iResolution) * weight[0];
    // Vertical pass: two linear taps on each side of the center.
    for (int i = 1; i < 3; i++) {
        color +=
            texture2D(inputImageTexture, (vec2(gl_FragCoord) + vec2(0.0, offset[i])) / iResolution)
            * weight[i];
        color +=
            texture2D(inputImageTexture, (vec2(gl_FragCoord) - vec2(0.0, offset[i])) / iResolution)
            * weight[i];
    }
    vec4 color2 = texture2D(inputImageTexture, vec2(gl_FragCoord) / iResolution) * weight[0];
    // Horizontal pass.
    for (int i = 1; i < 3; i++) {
        color2 +=
            texture2D(inputImageTexture, (vec2(gl_FragCoord) + vec2(offset[i], 0.0)) / iResolution)
            * weight[i];
        color2 +=
            texture2D(inputImageTexture, (vec2(gl_FragCoord) - vec2(offset[i], 0.0)) / iResolution)
            * weight[i];
    }
    // NOTE(review): averaging the two 1-D passes only approximates a 2-D
    // blur; a true separable blur renders one pass into a framebuffer and
    // feeds that result into the second pass.
    gl_FragColor = mix(color, color2, .5);
}
複製代碼
肉眼是沒法區分出差別的,但性能會提高:
讓圖片更加模糊的另一種方式是經過對 framebuffer 屢次應用模糊函數來增強模糊效果。
// 來自 https://github.com/Jam3/glsl-fast-gaussian-blur
// 只包含一個方向,須要本身疊加
// 3x3
// Fast single-direction Gaussian blur: one center fetch plus two linear
// taps. Pass direction = (1,0) or (0,1) and stack the two passes yourself.
vec4 blur5(sampler2D image, vec2 uv, vec2 resolution, vec2 direction) {
  vec2 step1 = direction * vec2(1.3333333333333333);
  vec4 sum = texture2D(image, uv) * 0.29411764705882354;
  sum += texture2D(image, uv + (step1 / resolution)) * 0.35294117647058826;
  sum += texture2D(image, uv - (step1 / resolution)) * 0.35294117647058826;
  return sum;
}
// 5x5
// Single-direction Gaussian blur with two linear taps per side of the
// center fetch (5 fetches standing in for 9 discrete taps).
vec4 blur9(sampler2D image, vec2 uv, vec2 resolution, vec2 direction) {
  vec2 step1 = direction * vec2(1.3846153846);
  vec2 step2 = direction * vec2(3.2307692308);
  vec4 sum = texture2D(image, uv) * 0.2270270270;
  sum += texture2D(image, uv + (step1 / resolution)) * 0.3162162162;
  sum += texture2D(image, uv - (step1 / resolution)) * 0.3162162162;
  sum += texture2D(image, uv + (step2 / resolution)) * 0.0702702703;
  sum += texture2D(image, uv - (step2 / resolution)) * 0.0702702703;
  return sum;
}
// 7x7
// Single-direction Gaussian blur with three linear taps per side of the
// center fetch (7 fetches standing in for 13 discrete taps).
vec4 blur13(sampler2D image, vec2 uv, vec2 resolution, vec2 direction) {
  vec2 step1 = direction * vec2(1.411764705882353);
  vec2 step2 = direction * vec2(3.2941176470588234);
  vec2 step3 = direction * vec2(5.176470588235294);
  vec4 sum = texture2D(image, uv) * 0.1964825501511404;
  sum += texture2D(image, uv + (step1 / resolution)) * 0.2969069646728344;
  sum += texture2D(image, uv - (step1 / resolution)) * 0.2969069646728344;
  sum += texture2D(image, uv + (step2 / resolution)) * 0.09447039785044732;
  sum += texture2D(image, uv - (step2 / resolution)) * 0.09447039785044732;
  sum += texture2D(image, uv + (step3 / resolution)) * 0.010381362401148057;
  sum += texture2D(image, uv - (step3 / resolution)) * 0.010381362401148057;
  return sum;
}
複製代碼
// 來自 https://www.shadertoy.com/view/XdfGDH
// 正態分佈機率密度函數
// Probability density of the normal distribution N(0, sigma) at x.
// 0.39894 approximates 1/sqrt(2*pi).
float normpdf(in float x, in float sigma) {
return 0.39894*exp(-0.5*x*x/(sigma*sigma))/sigma;
}
// Gaussian blur of a (size x size) neighborhood around gl_FragCoord, built
// from a separable 1-D kernel with sigma = 7. `size` must be odd;
// `resolution` is the render-target size in pixels. Returns the blurred RGB.
// BUG FIX: GLSL forbids `const int mSize = size;` (a const must be a
// compile-time constant) and `float kernel[mSize]` (array sizes must be
// constant expressions), so the original body did not compile. The kernel
// array is dropped and normpdf() is evaluated inline instead — the math is
// identical.
// NOTE(review): loops bounded by the runtime `size` need a GLSL profile
// that allows non-constant loop bounds (desktop GL / WebGL2) — confirm the
// target before shipping.
vec3 gaussianblur(int size, sampler2D texture, vec2 resolution) {
    int kSize = (size - 1) / 2; // taps on each side of the center
    float sigma = 7.0;
    vec3 final_colour = vec3(0.0);
    // Normalization factor: the 1-D kernel is clamped to 2*kSize+1 taps, so
    // its actual sum Z replaces the analytic total of 1.
    float Z = 0.0;
    for (int j = -kSize; j <= kSize; ++j)
    {
        Z += normpdf(float(j), sigma);
    }
    // Accumulate the weighted texels of the (2*kSize+1)^2 neighborhood.
    for (int i = -kSize; i <= kSize; ++i)
    {
        for (int j = -kSize; j <= kSize; ++j)
        {
            float w = normpdf(float(i), sigma) * normpdf(float(j), sigma);
            final_colour += w * texture2D(texture, (gl_FragCoord.xy + vec2(float(i), float(j))) / resolution.xy).rgb;
        }
    }
    // Each 1-D dimension sums to Z, hence Z*Z for the 2-D kernel.
    return final_colour / (Z * Z);
}
複製代碼
// 來自:https://gl-transitions.com/editor/LinearBlur
// Multi-tap blur from gl-transitions' LinearBlur: samples a passes x passes
// grid of points displaced around _uv and returns their plain average.
vec4 blur(vec2 _uv, sampler2D texture) {
    float intensity = .2;
    const int passes = 6;
    // distance(0.5, .1) is 0.4, so the displacement is a fixed 0.02 here.
    float disp = intensity * (0.5 - distance(0.5, .1));
    vec4 accum = vec4(0.0);
    for (int xi = 0; xi < passes; xi++) {
        for (int yi = 0; yi < passes; yi++) {
            // Grid point in [-0.5, 0.5) x [-0.5, 0.5).
            vec2 grid = vec2(float(xi) / float(passes) - 0.5,
                             float(yi) / float(passes) - 0.5);
            accum += texture2D(texture, _uv + disp * grid);
        }
    }
    return accum / float(passes * passes);
}
複製代碼
下一篇 Shader 運動模糊 。
相關連接: