-
Notifications
You must be signed in to change notification settings - Fork 4.2k
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
reduce innerproduct gemm gpu memory usage (#2618)
- Loading branch information
Showing 10 changed files with 1,425 additions and 228 deletions.
There are no files selected for viewing
Large diffs are not rendered by default.
Oops, something went wrong.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,180 @@ | ||
// Tencent is pleased to support the open source community by making ncnn available. | ||
// | ||
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. | ||
// | ||
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except | ||
// in compliance with the License. You may obtain a copy of the License at | ||
// | ||
// https://opensource.org/licenses/BSD-3-Clause | ||
// | ||
// Unless required by applicable law or agreed to in writing, software distributed | ||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR | ||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the | ||
// specific language governing permissions and limitations under the License. | ||
|
||
#version 450

// Optional fp16 storage / arithmetic support, selected at shader-compile
// time by the NCNN_* preprocessor flags.
#if NCNN_fp16_storage
#extension GL_EXT_shader_16bit_storage: require
#endif
#if NCNN_fp16_arithmetic
#extension GL_EXT_shader_explicit_arithmetic_types_float16: require
#endif

// Layer options baked in as specialization constants at pipeline creation:
// whether a bias is added, and which activation (with up to two parameters)
// is applied after the matrix product.
layout (constant_id = 0) const int bias_term = 0;
layout (constant_id = 1) const int activation_type = 0;
layout (constant_id = 2) const float activation_param_0 = 0;
layout (constant_id = 3) const float activation_param_1 = 0;

// Input blob shape as specialization constants. A value of 0 means the
// shape is not known at pipeline-creation time; psc() presumably then
// falls back to the matching push-constant field below (ncnn convention)
// -- TODO confirm against ncnn's generated glsl macro header.
#define shape_constant_id_offset 4
layout (constant_id = shape_constant_id_offset + 0) const int dims = 0;
layout (constant_id = shape_constant_id_offset + 1) const int w = 0;
layout (constant_id = shape_constant_id_offset + 2) const int h = 0;
layout (constant_id = shape_constant_id_offset + 3) const int c = 0;
layout (constant_id = shape_constant_id_offset + 4) const int cstep = 0;

// Output blob shape, same convention as above.
layout (constant_id = shape_constant_id_offset + 5) const int outdims = 0;
layout (constant_id = shape_constant_id_offset + 6) const int outw = 0;
layout (constant_id = shape_constant_id_offset + 7) const int outh = 0;
layout (constant_id = shape_constant_id_offset + 8) const int outc = 0;
layout (constant_id = shape_constant_id_offset + 9) const int outcstep = 0;

#if NCNN_image_shader
// Image path: blobs are accessed as 3D images / samplers.
// unfp / imfmtc1 are ncnn precision macros defined outside this file.
layout (binding = 0) uniform unfp sampler3D bottom_blob;
layout (binding = 1, imfmtc1) writeonly uniform unfp image3D top_blob;
layout (binding = 2) uniform unfp sampler3D weight_blob;
layout (binding = 3) uniform unfp sampler3D bias_blob;
#else
// Buffer path: blobs are raw storage buffers of sfp (storage fp) elements.
layout (binding = 0) readonly buffer bottom_blob { sfp bottom_blob_data[]; };
layout (binding = 1) writeonly buffer top_blob { sfp top_blob_data[]; };
#if NCNN_fp16_packed || (NCNN_fp16_storage && !NCNN_fp16_arithmetic)
// GL_EXT_shader_16bit_storage does not define f16mat4 type :(
// so the weight matrix is stored as 4 consecutive vec4 columns instead.
layout (binding = 2) readonly buffer weight_blob { sfpvec4 weight_data[]; };
#else
layout (binding = 2) readonly buffer weight_blob { sfpmat4 weight_data[]; };
#endif
layout (binding = 3) readonly buffer bias_blob { sfpvec4 bias_data[]; };
#endif

// Runtime shapes, mirroring the specialization constants above so the same
// pipeline can serve dynamically-shaped dispatches.
layout (push_constant) uniform parameter
{
    int dims;
    int w;
    int h;
    int c;
    int cstep;

    int outdims;
    int outw;
    int outh;
    int outc;
    int outcstep;
} p;
|
||
// Inner-product (fully-connected) layer expressed as a GEMM:
// each invocation computes 4 consecutive output elements
// (columns gx*4 .. gx*4+3) of output row gy, accumulating
// 4 input scalars against a 4x4 weight tile per loop step.
void main()
{
    int gx = int(gl_GlobalInvocationID.x);
    int gy = int(gl_GlobalInvocationID.y);
    int gz = int(gl_GlobalInvocationID.z);

    // Output is a 2D matrix (outh rows x outw columns); only z-slice 0
    // is valid, and gx indexes groups of 4 output columns.
    if (gx * 4 >= psc(outw) || gy >= psc(outh) || gz >= 1)
        return;

    // Accumulator for the 4 output elements, seeded with the bias if any.
    afpvec4 sum;

    if (bias_term == 1)
    {
#if NCNN_image_shader
        sum = image3d_ld4(bias_blob, ivec3(gx, 0, 0));
#else
        sum = buffer_ld4(bias_data, gx);
#endif
    }
    else
    {
        sum = afpvec4(0.f);
    }

#if NCNN_image_shader
    // NOTE(review): the loop runs psc(w)/4 times, so any tail when
    // w % 4 != 0 is skipped -- presumably the host pads w to a multiple
    // of 4; confirm in the layer's host-side code.
    for (int i = 0; i < psc(w) / 4; i++)
    {
        // 4 consecutive scalars of input row gy.
        afpvec4 v;
        v.r = image3d_ld1(bottom_blob, ivec3(i * 4 + 0, gy, 0));
        v.g = image3d_ld1(bottom_blob, ivec3(i * 4 + 1, gy, 0));
        v.b = image3d_ld1(bottom_blob, ivec3(i * 4 + 2, gy, 0));
        v.a = image3d_ld1(bottom_blob, ivec3(i * 4 + 3, gy, 0));

        // 4x4 weight tile: rows = input channels i*4..i*4+3,
        // columns = the 4 outputs owned by this invocation (gx).
        afpmat4 k = afpmat4(
            image3d_ld4(weight_blob, ivec3(i * 4 + 0, gx, 0)),
            image3d_ld4(weight_blob, ivec3(i * 4 + 1, gx, 0)),
            image3d_ld4(weight_blob, ivec3(i * 4 + 2, gx, 0)),
            image3d_ld4(weight_blob, ivec3(i * 4 + 3, gx, 0))
        );

        // row-vector * matrix accumulate.
        sum += v * k;
    }
#else
    // Flat-buffer offsets: input row gy, and this invocation's slice of
    // the weight buffer (psc(w)/4 4x4 tiles per output group).
    int v_offset = gy * psc(w);
    int w_offset = gx * psc(w) / 4;

    // NOTE(review): same w % 4 == 0 assumption as the image path above.
    for (int i = 0; i < psc(w) / 4; i++)
    {
        afpvec4 v;
        v.r = buffer_ld1(bottom_blob_data, v_offset + i * 4 + 0);
        v.g = buffer_ld1(bottom_blob_data, v_offset + i * 4 + 1);
        v.b = buffer_ld1(bottom_blob_data, v_offset + i * 4 + 2);
        v.a = buffer_ld1(bottom_blob_data, v_offset + i * 4 + 3);

#if NCNN_fp16_packed || (NCNN_fp16_storage && !NCNN_fp16_arithmetic)
        // GL_EXT_shader_16bit_storage does not define f16mat4 type :(
        // reassemble the 4x4 tile from 4 consecutive vec4 columns.
        afpmat4 k = afpmat4(
            buffer_ld4(weight_data, (w_offset + i) * 4 + 0),
            buffer_ld4(weight_data, (w_offset + i) * 4 + 1),
            buffer_ld4(weight_data, (w_offset + i) * 4 + 2),
            buffer_ld4(weight_data, (w_offset + i) * 4 + 3)
        );
#else
        afpmat4 k = afpmat4(weight_data[w_offset + i]);
#endif

        sum += v * k;
    }
#endif

    // Fused activation, selected by specialization constant:
    // 1 = ReLU
    if (activation_type == 1)
    {
        sum = max(sum, afp(0.f));
    }
    // 2 = leaky ReLU (negative lanes scaled by slope)
    if (activation_type == 2)
    {
        const afp slope = afp(activation_param_0);
        sum = mix(sum, sum * afp(slope), lessThan(sum, afpvec4(0.f)));
    }
    // 3 = clip to [param_0, param_1]
    if (activation_type == 3)
    {
        const afp const_min = afp(activation_param_0);
        const afp const_max = afp(activation_param_1);
        sum = clamp(sum, const_min, const_max);
    }
    // 4 = sigmoid
    if (activation_type == 4)
    {
        sum = afp(1.f) / (afp(1.f) + exp(-sum));
    }
    // 5 = mish: x * tanh(softplus(x))
    if (activation_type == 5)
    {
        sum = sum * tanh(log(exp(sum) + afp(1.f)));
    }

    // Store the 4 results as scalars.
    // NOTE(review): all 4 stores happen unconditionally, so outw is
    // presumably a multiple of 4 or the output buffer is padded --
    // confirm against the host-side dispatch/allocation.
#if NCNN_image_shader
    image3d_st1(top_blob, ivec3(gx * 4 + 0, gy, 0), sum.r);
    image3d_st1(top_blob, ivec3(gx * 4 + 1, gy, 0), sum.g);
    image3d_st1(top_blob, ivec3(gx * 4 + 2, gy, 0), sum.b);
    image3d_st1(top_blob, ivec3(gx * 4 + 3, gy, 0), sum.a);
#else
    const int gi = gy * psc(outw) + gx * 4;
    buffer_st1(top_blob_data, gi + 0, sum.r);
    buffer_st1(top_blob_data, gi + 1, sum.g);
    buffer_st1(top_blob_data, gi + 2, sum.b);
    buffer_st1(top_blob_data, gi + 3, sum.a);
#endif
}
Oops, something went wrong.