The data structure
//--------------
// data from application vertex buffer
//--------------
struct appdata {
float3 Position : POSITION;
float4 UV : TEXCOORD0;
float4 Normal : NORMAL;
float4 Tangent : TANGENT0;
float4 Binormal : BINORMAL0;
float4 Blendweight : TEXCOORD1;
float4 Blendindices : TEXCOORD2;
};
As mentioned before, TEXCOORD1 and TEXCOORD2 are used to pass the per-vertex bone weights and bone indices into the shader.
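For illustration, a vertex influenced by two bones could carry values like these (the numbers are made up):
// hypothetical example data for one skinned vertex
// Blendindices = float4(2, 5, 0, 0);     // which entries of the boneMatrix array affect this vertex
// Blendweight  = float4(0.7, 0.3, 0, 0); // how strongly each bone affects it (the weights sum to 1)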
This is the structure the vertex shader fills and passes on to the pixel shader.
//--------------
// data passed from vertex shader to pixel shader
//--------------
struct vertexOutput {
float4 HPosition : POSITION;
float2 UV : TEXCOORD0;
// The following values are passed in "World" coordinates since
// it tends to be the most flexible and easy for handling
// reflections, sky lighting, and other "global" effects.
float3 LightVec : TEXCOORD1;
float3 WorldNormal : TEXCOORD2;
//float3 WorldTangent : TEXCOORD3;
//float3 WorldBinormal : TEXCOORD4;
//float3 WorldView : TEXCOORD5;
float4 PositionLightSpace : TEXCOORD5;
float3 Position : TEXCOORD6;
};
PositionLightSpace is used for the shadow map projection. What is this for? We have a shadow map, fine, and now we render the object. We need the shadow map AND the depth of the rendered pixel as seen from the light at the same time, because we want to compare them. This is what ProjMatrix is for: it works like a second camera during the rendering process. The vertex position is calculated once for the current camera (using the world, view and projection matrices) and once for the light camera (using ProjMatrix), from which we only need the depth information.
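As a simplified sketch of what the full shaders below do (the names pixelDepth, shadowMapDepth and inShadow are only for illustration; the matrices and constants are the ones used later):
// in the vertex shader: the vertex as seen from the light "camera"
matrix LightWVP = mul(WorldXf, ProjMatrix);
float4 PosLightSpace = mul(Po, LightWVP);
// in the pixel shader: depth of this pixel from the light ...
float pixelDepth = PosLightSpace.z / PosLightSpace.w;
// ... compared against the depth stored in the shadow map:
// inShadow = (shadowMapDepth + SHADOW_EPSILON < pixelDepth)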
The shadow map data structure
//--------------
// shadow map
//--------------
struct IN_Depth
{
float3 Position : POSITION;
float4 Blendweight : TEXCOORD1;
float4 Blendindices : TEXCOORD2;
};
struct OUT_Depth
{
float4 HPosition : POSITION;
float2 Depth : TEXCOORD0;
};
We want to have animated shadows, that's why we use TEXCOORD1 and TEXCOORD2 here again.
Static vertex shader
The shader uses a simple "Lambert" lighting calculation here; we just add the calculation of PositionLightSpace. As you can see, the final pixel position uses the "WorldViewProjection" matrix, while PositionLightSpace simply uses the light's "WorldViewProjection" (WorldXf multiplied by ProjMatrix).
//--------------
// vertex shaders
//--------------
vertexOutput VS_Static(appdata IN)
{
vertexOutput OUT = (vertexOutput)0;
float3x3 rotate = float3x3(WorldXf[0].xyz, WorldXf[1].xyz, WorldXf[2].xyz);
OUT.WorldNormal = mul(IN.Normal.xyz,rotate); // rotate the normal into world space
//OUT.WorldTangent = mul(IN.Tangent,rotate).xyz;
//OUT.WorldBinormal = mul(IN.Binormal,rotate).xyz;
float4 Po = float4(IN.Position.xyz,1);
float3 Pw = mul(Po,WorldXf).xyz;
OUT.LightVec = (LightPosition - Pw);
#ifdef FLIP_TEXTURE_Y
OUT.UV = float2(IN.UV.x,(1.0-IN.UV.y));
#else /* !FLIP_TEXTURE_Y */
OUT.UV = IN.UV.xy;
#endif /* !FLIP_TEXTURE_Y */
//OUT.WorldView = normalize(ViewIXf[3].xyz - Pw);
OUT.HPosition = mul(Po,WvpXf);
OUT.Position = Pw; // world-space position, used for the light attenuation in the pixel shader
matrix LightWVP = mul(WorldXf, ProjMatrix);
OUT.PositionLightSpace = mul( Po, LightWVP );
return OUT;
}
Bone-animation vertex shader
Here we do the same, but before lighting we skin the vertex position and normal with the bone matrices.
vertexOutput VS_Animated(appdata IN)
{
vertexOutput OUT = (vertexOutput)0;
// bone animate the mesh
float3 netPosition = 0, netNormal = 0;
for (int i = 0; i < 4; i++)
{
float index = IN.Blendindices[i];
float3x4 model = float3x4(boneMatrix[index][0], boneMatrix[index][1], boneMatrix[index][2]);
float3 vec3 = mul(model, float4(IN.Position, 1));
vec3 = vec3 + boneMatrix[index][3].xyz;
float3x3 rotate = float3x3(model[0].xyz, model[1].xyz, model[2].xyz);
float3 norm3 = mul(rotate, IN.Normal.xyz);
netPosition += vec3.xyz * IN.Blendweight[i];
netNormal += norm3.xyz * IN.Blendweight[i];
}
//netPosition.x = -netPosition.x;
//netNormal.x = -netNormal.x;
float4 tempPos = float4(netPosition,1.0);
float3x3 rotate = float3x3(WorldXf[0].xyz, WorldXf[1].xyz, WorldXf[2].xyz);
OUT.WorldNormal = mul(netNormal,rotate).xyz;
//OUT.WorldTangent = mul(IN.Tangent,rotate).xyz;
//OUT.WorldBinormal = mul(IN.Binormal,rotate).xyz;
float4 Po = float4(netPosition,1);
float3 Pw = mul(Po,WorldXf).xyz;
OUT.LightVec = -(LightPosition - Pw); // note: sign is flipped compared to VS_Static
#ifdef FLIP_TEXTURE_Y
OUT.UV = float2(IN.UV.x,(1.0-IN.UV.y));
#else /* !FLIP_TEXTURE_Y */
OUT.UV = IN.UV.xy;
#endif /* !FLIP_TEXTURE_Y */
//OUT.WorldView = normalize(ViewIXf[3].xyz - Pw);
OUT.HPosition = mul(Po,WvpXf);
OUT.Position = Pw; // world-space position, used for the light attenuation in the pixel shader
matrix LightWVP = mul(WorldXf, ProjMatrix);
OUT.PositionLightSpace = mul( tempPos, LightWVP );
return OUT;
}
Shadow mapping vertex and pixel shader
OK! This is really simple now ;D
OUT_Depth VS_Depth(IN_Depth IN)
{
OUT_Depth OUT;
matrix LightWVP = mul( WorldXf, ProjMatrix );
OUT.HPosition = mul( float4(IN.Position,1), LightWVP);
OUT.Depth.xy = OUT.HPosition.zw;
return OUT;
}
OUT_Depth VS_AnimatedDepth(IN_Depth IN)
{
OUT_Depth OUT;
// bone animate the shadow mesh
float3 netPosition = 0, netNormal = 0;
for (int i = 0; i < 4; i++)
{
float index = IN.Blendindices[i];
float3x4 model = float3x4(boneMatrix[index][0], boneMatrix[index][1], boneMatrix[index][2]);
float3 vec3 = mul(model, float4(IN.Position, 1));
vec3 = vec3 + boneMatrix[index][3].xyz;
netPosition += vec3.xyz * IN.Blendweight[i];
}
float4 tempPos = float4(netPosition,1.0);
matrix LightWVP = mul( WorldXf, ProjMatrix );
OUT.HPosition = mul( tempPos, LightWVP);
OUT.Depth.xy = OUT.HPosition.zw;
return OUT;
}
float4 PS_Depth(OUT_Depth IN) : COLOR
{
return (IN.Depth.x / IN.Depth.y);
//return float4(1.0f,1.0f,1.0f,1.0f);
}
Final pixel shader
This way we get our final result!
//--------------
// pixel shader functions
//--------------
void pixelshader_shadowmap( float4 PosLightSpace, out float LightAmount )
{
float2 ShadowTexCoord;
ShadowTexCoord = 0.5* PosLightSpace.xy / PosLightSpace.w + float2( 0.5, 0.5 );
ShadowTexCoord.y = 1.0f - ShadowTexCoord.y;
#ifdef REDUCE_ALIASING
float2 Texelpos = SMAP_SIZE * ShadowTexCoord;
float2 Lerps = frac( Texelpos );
float Sample[4];
float scale = 1.0/SMAP_SIZE;
float q = PosLightSpace.z/PosLightSpace.w;
Sample[0] = (tex2D( DepthMap, ShadowTexCoord )
+ SHADOW_EPSILON < q)? SHADOW_INTESITY: 1.0f;
Sample[1] = (tex2D( DepthMap, ShadowTexCoord + float2(scale, 0) )
+ SHADOW_EPSILON < q)? SHADOW_INTESITY: 1.0f;
Sample[2] = (tex2D( DepthMap, ShadowTexCoord + float2(0, scale) )
+ SHADOW_EPSILON < q)? SHADOW_INTESITY: 1.0f;
Sample[3] = (tex2D( DepthMap, ShadowTexCoord + float2(scale, scale) )
+ SHADOW_EPSILON < q)? SHADOW_INTESITY: 1.0f;
LightAmount = lerp( lerp( Sample[0], Sample[1], Lerps.x ),
lerp( Sample[2], Sample[3], Lerps.x ), Lerps.y );
#else
float DepthShadowMap = tex2D( DepthMap, ShadowTexCoord ) + SHADOW_EPSILON;
LightAmount = DepthShadowMap < PosLightSpace.z/PosLightSpace.w ? SHADOW_INTESITY : 1.0f;
#endif
};
//--------------
// pixel shaders
//--------------
float4 PS_Diffuse(vertexOutput IN) : COLOR
{
float3 Ln = normalize(IN.LightVec);
float3 Nn = normalize(IN.WorldNormal);
float ldn = dot(Ln,Nn);
ldn = max(ldn,0.0);
float3 diffuseColor = tex2D(Base,IN.UV).rgb;
float3 distVec = IN.Position - LightPosition;
float dist = length(distVec);
float att = dist / LightRange;
float attf = saturate(1.0 - att*att); // quadratic falloff: 1 at the light, 0 at LightRange
float amount = 1.0f;
pixelshader_shadowmap( IN.PositionLightSpace, amount );
float3 result = (ldn * LightColor * attf * amount + Ambient) * diffuseColor; // * LightPower
// return as float4
return float4(result,Alpha);
}
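A quick worked example of the distance attenuation, with made-up numbers:
// dist = 50, LightRange = 100  ->  att  = 50 / 100 = 0.5
//                                  attf = saturate(1 - 0.5 * 0.5) = 0.75 (75% light)
// dist = 100 (full range)      ->  attf = saturate(1 - 1) = 0 (no light)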
Shader techniques (switched from DarkGDK)
//--------------
// techniques
//--------------
technique StaticDiffuse
{
pass p1
{
VertexShader = compile vs_2_0 VS_Static();
PixelShader = compile ps_2_0 PS_Diffuse();
}
}
technique AnimatedDiffuse
{
pass p1
{
VertexShader = compile vs_2_0 VS_Animated();
PixelShader = compile ps_2_0 PS_Diffuse();
}
}
technique DepthMap
{
pass p1
{
VertexShader = compile vs_2_0 VS_Depth();
PixelShader = compile ps_2_0 PS_Depth();
}
}
technique AnimatedDepthMap
{
pass p1
{
VertexShader = compile vs_2_0 VS_AnimatedDepth();
PixelShader = compile ps_2_0 PS_Depth();
}
}
This is the complete shader, broken into small parts. I have attached the complete file for easier use.