#define GLSLIFY 1 breaks fragment shader added to postprocessing effect #131

Open
ajayns opened this issue Mar 20, 2020 · 0 comments
ajayns commented Mar 20, 2020

I'm using the postprocessing library with three.js to add an effect pass that uses a custom fragment shader. In my build config, I use rollup-plugin-glslify to process and bundle the shaders.
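For context, the glslify part of the build config looks roughly like this (a simplified sketch; the entry point, output settings, and file names are placeholders, not my exact config):

```js
// rollup.config.js (simplified)
import glslify from 'rollup-plugin-glslify';

export default {
  input: 'src/index.js',
  output: { file: 'dist/bundle.js', format: 'iife' },
  plugins: [
    // Transforms imported .glsl/.vert/.frag files through glslify,
    // which is the step that prepends "#define GLSLIFY 1" to each shader.
    glslify()
  ]
};
```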

This causes the program to crash with a GLSL compilation error because of the injected line #define GLSLIFY 1. Once that line is removed, everything works as expected.

The console error:

THREE.WebGLShader: gl.getShaderInfoLog() fragment
ERROR: 0:221: '#' : invalid character
ERROR: 0:221: 'define' : syntax error
1: precision highp float;
2: precision highp int;
3: #define HIGH_PRECISION
4: #define SHADER_NAME EffectMaterial
5: #define DEPTH_PACKING 0
6: #define ENCODE_OUTPUT 1
7: #define PERSPECTIVE_CAMERA 1
8: #define UV transformedUv
9: #define GAMMA_FACTOR 2
10: uniform mat4 viewMatrix;
11: uniform vec3 cameraPosition;
12: uniform bool isOrthographic;
13: 
14: vec4 LinearToLinear( in vec4 value ) {
15: 	return value;
16: }
17: vec4 GammaToLinear( in vec4 value, in float gammaFactor ) {
18: 	return vec4( pow( value.rgb, vec3( gammaFactor ) ), value.a );
19: }
20: vec4 LinearToGamma( in vec4 value, in float gammaFactor ) {
21: 	return vec4( pow( value.rgb, vec3( 1.0 / gammaFactor ) ), value.a );
22: }
23: vec4 sRGBToLinear( in vec4 value ) {
24: 	return vec4( mix( pow( value.rgb * 0.9478672986 + vec3( 0.0521327014 ), vec3( 2.4 ) ), value.rgb * 0.0773993808, vec3( lessThanEqual( value.rgb, vec3( 0.04045 ) ) ) ), value.a );
25: }
26: vec4 LinearTosRGB( in vec4 value ) {
27: 	return vec4( mix( pow( value.rgb, vec3( 0.41666 ) ) * 1.055 - vec3( 0.055 ), value.rgb * 12.92, vec3( lessThanEqual( value.rgb, vec3( 0.0031308 ) ) ) ), value.a );
28: }
29: vec4 RGBEToLinear( in vec4 value ) {
30: 	return vec4( value.rgb * exp2( value.a * 255.0 - 128.0 ), 1.0 );
31: }
32: vec4 LinearToRGBE( in vec4 value ) {
33: 	float maxComponent = max( max( value.r, value.g ), value.b );
34: 	float fExp = clamp( ceil( log2( maxComponent ) ), -128.0, 127.0 );
35: 	return vec4( value.rgb / exp2( fExp ), ( fExp + 128.0 ) / 255.0 );
36: }
37: vec4 RGBMToLinear( in vec4 value, in float maxRange ) {
38: 	return vec4( value.rgb * value.a * maxRange, 1.0 );
39: }
40: vec4 LinearToRGBM( in vec4 value, in float maxRange ) {
41: 	float maxRGB = max( value.r, max( value.g, value.b ) );
42: 	float M = clamp( maxRGB / maxRange, 0.0, 1.0 );
43: 	M = ceil( M * 255.0 ) / 255.0;
44: 	return vec4( value.rgb / ( M * maxRange ), M );
45: }
46: vec4 RGBDToLinear( in vec4 value, in float maxRange ) {
47: 	return vec4( value.rgb * ( ( maxRange / 255.0 ) / value.a ), 1.0 );
48: }
49: vec4 LinearToRGBD( in vec4 value, in float maxRange ) {
50: 	float maxRGB = max( value.r, max( value.g, value.b ) );
51: 	float D = max( maxRange / maxRGB, 1.0 );
52: 	D = min( floor( D ) / 255.0, 1.0 );
53: 	return vec4( value.rgb * ( D * ( 255.0 / maxRange ) ), D );
54: }
55: const mat3 cLogLuvM = mat3( 0.2209, 0.3390, 0.4184, 0.1138, 0.6780, 0.7319, 0.0102, 0.1130, 0.2969 );
56: vec4 LinearToLogLuv( in vec4 value )  {
57: 	vec3 Xp_Y_XYZp = cLogLuvM * value.rgb;
58: 	Xp_Y_XYZp = max( Xp_Y_XYZp, vec3( 1e-6, 1e-6, 1e-6 ) );
59: 	vec4 vResult;
60: 	vResult.xy = Xp_Y_XYZp.xy / Xp_Y_XYZp.z;
61: 	float Le = 2.0 * log2(Xp_Y_XYZp.y) + 127.0;
62: 	vResult.w = fract( Le );
63: 	vResult.z = ( Le - ( floor( vResult.w * 255.0 ) ) / 255.0 ) / 255.0;
64: 	return vResult;
65: }
66: const mat3 cLogLuvInverseM = mat3( 6.0014, -2.7008, -1.7996, -1.3320, 3.1029, -5.7721, 0.3008, -1.0882, 5.6268 );
67: vec4 LogLuvToLinear( in vec4 value ) {
68: 	float Le = value.z * 255.0 + value.w;
69: 	vec3 Xp_Y_XYZp;
70: 	Xp_Y_XYZp.y = exp2( ( Le - 127.0 ) / 2.0 );
71: 	Xp_Y_XYZp.z = Xp_Y_XYZp.y / value.y;
72: 	Xp_Y_XYZp.x = value.x * Xp_Y_XYZp.z;
73: 	vec3 vRGB = cLogLuvInverseM * Xp_Y_XYZp.rgb;
74: 	return vec4( max( vRGB, 0.0 ), 1.0 );
75: }
76: vec4 mapTexelToLinear( vec4 value ) { return LinearToLinear( value ); }
77: vec4 matcapTexelToLinear( vec4 value ) { return LinearToLinear( value ); }
78: vec4 envMapTexelToLinear( vec4 value ) { return LinearToLinear( value ); }
79: vec4 emissiveMapTexelToLinear( vec4 value ) { return LinearToLinear( value ); }
80: vec4 linearToOutputTexel( vec4 value ) { return LinearToLinear( value ); }
81: 
82: #define PI 3.14159265359
83: #define PI2 6.28318530718
84: #define PI_HALF 1.5707963267949
85: #define RECIPROCAL_PI 0.31830988618
86: #define RECIPROCAL_PI2 0.15915494
87: #define LOG2 1.442695
88: #define EPSILON 1e-6
89: #ifndef saturate
90: #define saturate(a) clamp( a, 0.0, 1.0 )
91: #endif
92: #define whiteComplement(a) ( 1.0 - saturate( a ) )
93: float pow2( const in float x ) { return x*x; }
94: float pow3( const in float x ) { return x*x*x; }
95: float pow4( const in float x ) { float x2 = x*x; return x2*x2; }
96: float average( const in vec3 color ) { return dot( color, vec3( 0.3333 ) ); }
97: highp float rand( const in vec2 uv ) {
98: 	const highp float a = 12.9898, b = 78.233, c = 43758.5453;
99: 	highp float dt = dot( uv.xy, vec2( a,b ) ), sn = mod( dt, PI );
100: 	return fract(sin(sn) * c);
101: }
102: #ifdef HIGH_PRECISION
103: 	float precisionSafeLength( vec3 v ) { return length( v ); }
104: #else
105: 	float max3( vec3 v ) { return max( max( v.x, v.y ), v.z ); }
106: 	float precisionSafeLength( vec3 v ) {
107: 		float maxComponent = max3( abs( v ) );
108: 		return length( v / maxComponent ) * maxComponent;
109: 	}
110: #endif
111: struct IncidentLight {
112: 	vec3 color;
113: 	vec3 direction;
114: 	bool visible;
115: };
116: struct ReflectedLight {
117: 	vec3 directDiffuse;
118: 	vec3 directSpecular;
119: 	vec3 indirectDiffuse;
120: 	vec3 indirectSpecular;
121: };
122: struct GeometricContext {
123: 	vec3 position;
124: 	vec3 normal;
125: 	vec3 viewDir;
126: #ifdef CLEARCOAT
127: 	vec3 clearcoatNormal;
128: #endif
129: };
130: vec3 transformDirection( in vec3 dir, in mat4 matrix ) {
131: 	return normalize( ( matrix * vec4( dir, 0.0 ) ).xyz );
132: }
133: vec3 inverseTransformDirection( in vec3 dir, in mat4 matrix ) {
134: 	return normalize( ( vec4( dir, 0.0 ) * matrix ).xyz );
135: }
136: vec3 projectOnPlane(in vec3 point, in vec3 pointOnPlane, in vec3 planeNormal ) {
137: 	float distance = dot( planeNormal, point - pointOnPlane );
138: 	return - distance * planeNormal + point;
139: }
140: float sideOfPlane( in vec3 point, in vec3 pointOnPlane, in vec3 planeNormal ) {
141: 	return sign( dot( point - pointOnPlane, planeNormal ) );
142: }
143: vec3 linePlaneIntersect( in vec3 pointOnLine, in vec3 lineDirection, in vec3 pointOnPlane, in vec3 planeNormal ) {
144: 	return lineDirection * ( dot( planeNormal, pointOnPlane - pointOnLine ) / dot( planeNormal, lineDirection ) ) + pointOnLine;
145: }
146: mat3 transposeMat3( const in mat3 m ) {
147: 	mat3 tmp;
148: 	tmp[ 0 ] = vec3( m[ 0 ].x, m[ 1 ].x, m[ 2 ].x );
149: 	tmp[ 1 ] = vec3( m[ 0 ].y, m[ 1 ].y, m[ 2 ].y );
150: 	tmp[ 2 ] = vec3( m[ 0 ].z, m[ 1 ].z, m[ 2 ].z );
151: 	return tmp;
152: }
153: float linearToRelativeLuminance( const in vec3 color ) {
154: 	vec3 weights = vec3( 0.2126, 0.7152, 0.0722 );
155: 	return dot( weights, color.rgb );
156: }
157: bool isPerspectiveMatrix( mat4 m ) {
158:   return m[ 2 ][ 3 ] == - 1.0;
159: }
160: vec3 packNormalToRGB( const in vec3 normal ) {
161: 	return normalize( normal ) * 0.5 + 0.5;
162: }
163: vec3 unpackRGBToNormal( const in vec3 rgb ) {
164: 	return 2.0 * rgb.xyz - 1.0;
165: }
166: const float PackUpscale = 256. / 255.;const float UnpackDownscale = 255. / 256.;
167: const vec3 PackFactors = vec3( 256. * 256. * 256., 256. * 256.,  256. );
168: const vec4 UnpackFactors = UnpackDownscale / vec4( PackFactors, 1. );
169: const float ShiftRight8 = 1. / 256.;
170: vec4 packDepthToRGBA( const in float v ) {
171: 	vec4 r = vec4( fract( v * PackFactors ), v );
172: 	r.yzw -= r.xyz * ShiftRight8;	return r * PackUpscale;
173: }
174: float unpackRGBAToDepth( const in vec4 v ) {
175: 	return dot( v, UnpackFactors );
176: }
177: vec4 encodeHalfRGBA ( vec2 v ) {
178: 	vec4 encoded = vec4( 0.0 );
179: 	const vec2 offset = vec2( 1.0 / 255.0, 0.0 );
180: 	encoded.xy = vec2( v.x, fract( v.x * 255.0 ) );
181: 	encoded.xy = encoded.xy - ( encoded.yy * offset );
182: 	encoded.zw = vec2( v.y, fract( v.y * 255.0 ) );
183: 	encoded.zw = encoded.zw - ( encoded.ww * offset );
184: 	return encoded;
185: }
186: vec2 decodeHalfRGBA( vec4 v ) {
187: 	return vec2( v.x + ( v.y / 255.0 ), v.z + ( v.w / 255.0 ) );
188: }
189: float viewZToOrthographicDepth( const in float viewZ, const in float near, const in float far ) {
190: 	return ( viewZ + near ) / ( near - far );
191: }
192: float orthographicDepthToViewZ( const in float linearClipZ, const in float near, const in float far ) {
193: 	return linearClipZ * ( near - far ) - near;
194: }
195: float viewZToPerspectiveDepth( const in float viewZ, const in float near, const in float far ) {
196: 	return (( near + viewZ ) * far ) / (( far - near ) * viewZ );
197: }
198: float perspectiveDepthToViewZ( const in float invClipZ, const in float near, const in float far ) {
199: 	return ( near * far ) / ( ( far - near ) * invClipZ - far );
200: }
201: #ifdef DITHERING
202: 	vec3 dithering( vec3 color ) {
203: 		float grid_position = rand( gl_FragCoord.xy );
204: 		vec3 dither_shift_RGB = vec3( 0.25 / 255.0, -0.25 / 255.0, 0.25 / 255.0 );
205: 		dither_shift_RGB = mix( 2.0 * dither_shift_RGB, -2.0 * dither_shift_RGB, grid_position );
206: 		return color + dither_shift_RGB;
207: 	}
208: #endif
209: uniform sampler2D inputBuffer;uniform sampler2D depthBuffer;uniform vec2 resolution;uniform vec2 texelSize;uniform float cameraNear;uniform float cameraFar;uniform float aspect;uniform float time;varying vec2 vUv;float readDepth(const in vec2 uv){
210: #if DEPTH_PACKING == 3201
211: return unpackRGBAToDepth(texture2D(depthBuffer,uv));
212: #else
213: return texture2D(depthBuffer,uv).r;
214: #endif
215: }float getViewZ(const in float depth){
216: #ifdef PERSPECTIVE_CAMERA
217: return perspectiveDepthToViewZ(depth,cameraNear,cameraFar);
218: #else
219: return orthographicDepthToViewZ(depth,cameraNear,cameraFar);
220: #endif
221: }#define GLSLIFY 1
222: uniform sampler2D e0UTexture;void e0MainUv(inout vec2 uv){vec4 tex=texture2D(e0UTexture,uv);float vx=-(tex.r*2.-1.);float vy=-(tex.g*2.-1.);float intensity=tex.b;float maxAmplitude=0.2;uv.x+=vx*intensity*maxAmplitude;uv.y+=vy*intensity*maxAmplitude;}
223: vec4 blend16(const in vec4 x,const in vec4 y,const in float opacity){return(1.0-(1.0-x)*(1.0-y))*opacity+x*(1.0-opacity);}void main(){vec2 transformedUv = vUv;
224: 	e0MainUv(UV);vec4 color0=texture2D(inputBuffer,UV);vec4 color1=vec4(0.0);gl_FragColor=color0;
225: #ifdef ENCODE_OUTPUT
226: gl_FragColor = linearToOutputTexel( gl_FragColor );
227: #endif
228: #ifdef DITHERING
229: 	gl_FragColor.rgb = dithering( gl_FragColor.rgb );
230: #endif
231: }
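Note that line 221 of the dump above is where the injected define ends up on the same line as the closing brace of the preceding function, which appears to be why the compiler rejects the # (a preprocessor directive has to start its own line). Since removing the line fixes it, stripping the define from the imported fragment source before handing it to postprocessing is a possible workaround; a minimal sketch (the shader path, effect name, and Effect subclass are illustrative placeholders based on the postprocessing docs, not my exact code):

```js
import { Effect } from 'postprocessing';
import fragmentShader from './effect.frag'; // bundled via rollup-plugin-glslify

// Remove the "#define GLSLIFY 1" line that glslify injects, since
// postprocessing concatenates this source into EffectMaterial and the
// define can land mid-line, which is invalid GLSL.
const cleanedFragment = fragmentShader.replace(/^[ \t]*#define GLSLIFY 1[ \t]*\r?\n?/m, '');

class MyEffect extends Effect {
  constructor() {
    super('MyEffect', cleanedFragment);
  }
}
```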