Unity3D - A shader that will clip a texture on an iOS device

I'm working with Unity shaders for the first time and I have a problem making a shader that draws only part of a texture. The code below works in a Mac standalone build, but not on an iOS device. How can I fix that?
Shader "Sprite/ClipArea"
{
Properties
{
_MainTex ("Base (RGB), Alpha (A)", 2D) = "white" {}
_Length ("Length", Range(0.0, 1.0)) = 1.0
_Width ("Width", Range(0.0, 1.0)) = 1.0
}
SubShader
{
LOD 200
Tags
{
"Queue" = "Transparent"
"IgnoreProjector" = "True"
"RenderType" = "Transparent"
}
Pass
{
Cull Off
Lighting Off
ZWrite Off
Offset -1, -1
Fog { Mode Off }
ColorMask RGB
Blend SrcAlpha OneMinusSrcAlpha
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
sampler2D _MainTex;
float4 _MainTex_ST;
float _Length;
float _Width;
struct appdata_t
{
float4 vertex : POSITION;
float2 texcoord : TEXCOORD0;
};
struct v2f
{
float4 vertex : POSITION;
float2 texcoord : TEXCOORD0;
};
v2f vert (appdata_t v)
{
v2f o;
o.vertex = mul(UNITY_MATRIX_MVP, v.vertex);
o.texcoord = v.texcoord;
return o;
}
half4 frag (v2f IN) : COLOR
{
if ((IN.texcoord.x<0) || (IN.texcoord.x>_Width) || (IN.texcoord.y<0) || (IN.texcoord.y>_Length))
{
clip(-1);
}
return tex2D(_MainTex, IN.texcoord);
}
ENDCG
}
}
}
Thanks!

On mobile devices it is advised not to use clip/discard, as it is a very expensive operation.
I have updated the fragment shader below to avoid clip/discard. Instead of clipping, return a fully transparent colour for the portions you don't want to display, and render the rest as is.
if ((IN.texcoord.x < 0) || (IN.texcoord.x > _Width) || (IN.texcoord.y < 0) || (IN.texcoord.y > _Length))
{
    half4 colorTransparent = half4(0, 0, 0, 0);
    return colorTransparent;
}
else
    return tex2D(_MainTex, IN.texcoord);
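For reference, the complete fragment function with that change applied would look like this (a sketch; the rest of the shader stays exactly as in the question, and the existing Blend SrcAlpha OneMinusSrcAlpha line is what makes the transparent output invisible):

half4 frag (v2f IN) : COLOR
{
    // Outside the visible area: return fully transparent instead of
    // discarding, avoiding the expensive clip/discard path on mobile GPUs.
    if ((IN.texcoord.x < 0) || (IN.texcoord.x > _Width) || (IN.texcoord.y < 0) || (IN.texcoord.y > _Length))
    {
        return half4(0, 0, 0, 0);
    }
    return tex2D(_MainTex, IN.texcoord);
}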

Related

Using normalized sampler coordinates in CIFilter kernel

I was working through this tutorial on custom CIFilters:
https://medium.com/@m_tuzer/using-metal-shading-language-for-custom-cikernels-metal-swift-7bc8e7e913e6
Everything works perfectly, except that the coordinates of the sampler are not normalized. So a condition like pos.y < 0.33 doesn't work; the kernel uses actual image coordinates.
Since the tutorial is old, there have probably been changes in CIFilter that "broke" this code. I looked through the documentation for CI kernels but could not find a way to get the normalized coordinates of a sampler inside the kernel.
Here is the code of the kernel:
#include <metal_stdlib>
using namespace metal;
#include <CoreImage/CoreImage.h> // (1)

float3 multiplyColors(float3, float3);

float3 multiplyColors(float3 mainColor, float3 colorMultiplier) { // (2)
    float3 color = float3(0, 0, 0);
    color.r = mainColor.r * colorMultiplier.r;
    color.g = mainColor.g * colorMultiplier.g;
    color.b = mainColor.b * colorMultiplier.b;
    return color;
};

extern "C" { namespace coreimage { // (3)
    float4 dyeInThree(sampler src, float3 redVector, float3 greenVector, float3 blueVector) {
        float2 pos = src.coord();
        float4 pixelColor = src.sample(pos); // (4)
        float3 pixelRGB = pixelColor.rgb;
        float3 color = float3(0, 0, 0);
        if (pos.y < 0.33) { // (5)
            color = multiplyColors(pixelRGB, redVector);
        } else if (pos.y >= 0.33 && pos.y < 0.66) {
            color = multiplyColors(pixelRGB, greenVector);
        } else {
            color = multiplyColors(pixelRGB, blueVector);
        }
        return float4(color, 1.0);
    }
}}
You can translate the source coordinates into relative values using the extent of the source like this:
#include <metal_stdlib>
using namespace metal;
#include <CoreImage/CoreImage.h>

extern "C" { namespace coreimage {
    float4 dyeInThree(sampler src, float3 redVector, float3 greenVector, float3 blueVector) {
        float2 pos = src.coord();
        float4 pixelColor = src.sample(pos);

        // transform to relative coordinates
        pos -= src.origin();
        pos /= src.size();

        float3 color = float3(0, 0, 0);
        if (pos.y < 0.33) {
            color = pixelColor.rgb * redVector;
        } else if (pos.y < 0.66) {
            color = pixelColor.rgb * greenVector;
        } else {
            color = pixelColor.rgb * blueVector;
        }
        return float4(color, pixelColor.a);
    }
}}

Vertex color is not interpolated in the context of ID3DXLine

I've created a standard Win32 DirectX9 window and I'm rendering to it using a custom effect. However, I have a problem where the colours of vertices are not interpolated in the result.
void CRender::Begin()
{
    perf.begin();
    // Capture device state so it can be restored later.
    // We use ID3DXLine::Begin() to fix some bugs that I don't know how to fix.
    mpLine->Begin();
    // Setup shader
    shader.Begin( static_cast<float>(FloatTime()) );
}

void CRender::End()
{
    // Reverse order of Begin()
    shader.End();
    mpLine->End();
}
The problem here lies with mpLine->Begin(): without calling it I get a perfectly nice interpolated triangle; with it, the whole triangle has the same colour as the first vertex.
Image for clarification: http://i.imgur.com/vKN4SnE.png
I am using ID3DXLine::Begin() just to set up the device state for me. The reason is that I'm rendering in the context of another program (a game) by hooking its EndScene(). The game may leave the device in an unusable state, causing rendering glitches in my overlay; all these problems go away when using ID3DXLine::Begin(), except that vertex colours are no longer interpolated (a possible render-state culprit is sketched after the effect source below).
Vertex declaration:
// Create the vertex declaration for use with the shaders.
static const D3DVERTEXELEMENT9 vformat[] =
{
    { 0, 0,  D3DDECLTYPE_FLOAT2,   D3DDECLMETHOD_DEFAULT, D3DDECLUSAGE_POSITION, 0 },
    { 0, 8,  D3DDECLTYPE_D3DCOLOR, D3DDECLMETHOD_DEFAULT, D3DDECLUSAGE_COLOR,    0 },
    { 0, 12, D3DDECLTYPE_FLOAT2,   D3DDECLMETHOD_DEFAULT, D3DDECLUSAGE_TEXCOORD, 0 },
    D3DDECL_END()
};
HRESULT hr = dev->CreateVertexDeclaration( vformat, &decl );
Effect source:
// Vertex shader input
struct VSIN
{
    float2 coord : POSITION;
    float4 color : COLOR0;
    float2 tex : TEXCOORD0;
};

// Vertex shader output / Pixel shader input
struct VSOUT
{
    float4 coord : POSITION;
    float4 color : COLOR0;
    float2 tex : TEXCOORD0;
    float2 pos : TEXCOORD1;
};

uniform float2 screen;
uniform float2x4 project;

float4 vstransform( float2 coord, const float2 shift )
{
    float2 final = ( mul( project, float4(coord.x, coord.y, 1, 1) ) + shift ) * 2 / screen;
    return float4( final.x - 1, 1 - final.y, 0, 1 );
}

VSOUT vsfix( VSIN data )
{
    VSOUT vert;
    const float2 shift = { -0.5f, -0.5f };
    vert.coord = vstransform( data.coord, shift );
    vert.color = data.color;
    vert.tex = data.tex;
    vert.pos = vert.coord.xy;
    return vert;
}

float4 diffuse( VSOUT vert ) : COLOR
{
    float4 px = vert.color;
    return px;
}

technique Diffuse
{
    pass p0
    {
        PixelShader = compile ps_2_0 diffuse();
        VertexShader = compile vs_2_0 vsfix();
    }
}
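One thing worth checking (an assumption on my part, not something confirmed in the post): D3DX helpers such as ID3DXLine::Begin() can leave D3DRS_SHADEMODE set to D3DSHADE_FLAT, and in D3D9 flat shading still applies to COLOR0/COLOR1 interpolators even when programmable shaders are used, which would produce exactly this "whole triangle takes the first vertex's colour" result. Forcing Gouraud shading back on in the effect pass would rule that out:

technique Diffuse
{
    pass p0
    {
        // Assumption: the device may be left in flat-shading mode by
        // ID3DXLine::Begin(); force per-vertex colour interpolation back on.
        ShadeMode = GOURAUD;
        PixelShader = compile ps_2_0 diffuse();
        VertexShader = compile vs_2_0 vsfix();
    }
}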

Fog shader camera

I have some difficulties with my vertex-fragment fog shader in Unity. I get a good visual result, but the problem is that the gradient is based on the camera's position: it moves as the camera moves. I don't know how to fix it.
Here is the shader code.
struct v2f {
    float4 pos : SV_POSITION;
    float4 grabUV : TEXCOORD0;
    float2 uv_depth : TEXCOORD1;
    float4 interpolatedRay : TEXCOORD2;
    float4 screenPos : TEXCOORD3;
};

v2f vert(appdata_base v) {
    v2f o;
    o.pos = mul(UNITY_MATRIX_MVP, v.vertex);
    o.uv_depth = v.texcoord.xy;
    o.grabUV = ComputeGrabScreenPos(o.pos);
    half index = v.vertex.z;
    o.screenPos = ComputeScreenPos(o.pos);
    o.interpolatedRay = mul(UNITY_MATRIX_MV, v.vertex);
    return o;
}

sampler2D _GrabTexture;
// assumed to be declared elsewhere in the original shader:
sampler2D _CameraDepthTexture;
float _Depth;
float _DepthScale;

float4 frag(v2f IN) : COLOR {
    float3 uv = UNITY_PROJ_COORD(IN.grabUV);
    float dpth = UNITY_SAMPLE_DEPTH(tex2Dproj(_CameraDepthTexture, uv));
    dpth = LinearEyeDepth(dpth);
    float4 wsPos = (IN.screenPos + dpth * IN.interpolatedRay); // Here is the problem but how to fix it
    float fogVert = max(0.0, (wsPos.y - _Depth) * (_DepthScale * 0.1f));
    fogVert *= fogVert;
    fogVert = (exp (-fogVert));
    return fogVert;
}
It seems to be a matrix problem:
o.interpolatedRay = mul(UNITY_MATRIX_MV, v.vertex);
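One hedged way to make the reconstruction camera-independent (a sketch, untested against this exact setup): build the ray in world space in the vertex shader and rescale it by the sampled eye depth in the fragment shader. _Object2World and _WorldSpaceCameraPos are the built-in names from that era of Unity (newer versions use unity_ObjectToWorld); signs may need adjusting for your Unity version.

// vertex shader: world-space ray from the camera to the vertex,
// with the vertex's view-space z stored in w (negative in view space)
o.interpolatedRay.xyz = mul(_Object2World, v.vertex).xyz - _WorldSpaceCameraPos;
o.interpolatedRay.w = mul(UNITY_MATRIX_MV, v.vertex).z;

// fragment shader: rescale the ray so its eye depth matches the depth
// buffer, giving the world position of the surface seen through this pixel
float3 wsPos = _WorldSpaceCameraPos + IN.interpolatedRay.xyz * (dpth / -IN.interpolatedRay.w);
float fogVert = max(0.0, (wsPos.y - _Depth) * (_DepthScale * 0.1f));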

HLSL point sprite texture coordinates work on ATI not NVIDIA

I am really stuck on this one. My HLSL for rendering point sprites with texture coordinates for a sprite sheet works fine on all ATI cards but not on any NVIDIA cards. On NVIDIA cards the passed texture coordinates map to the whole sprite sheet rather than a portion of it. Strange but it works fine on ATI cards. Am I missing something unique to ATI cards?
Here is my shader
struct VS_INPUT
{
    float4 Position : POSITION;
    float4 Color : COLOR;
    float4 Texture : TEXCOORD0;
    //float1 Psize : PSIZE0;
};

struct VS_OUTPUT
{
    float4 Position : POSITION;
    float4 Color : COLOR;
    float2 Texture : TEXCOORD0;
    float2 Texture_zw : TEXCOORD1;
    float1 Psize : PSIZE;
};

float4x4 WorldViewProj;
texture Tex <string name = "sprite_coin_test.dds";>;
sampler2D s_2D;
float offset_x = 0.0;
float offset_y = 0.0;

sampler S0 = sampler_state
{
    Texture = (Tex);
    MinFilter = ANISOTROPIC; //LINEAR;
    MagFilter = ANISOTROPIC; //LINEAR;
    MipFilter = LINEAR;
};

VS_OUTPUT vs_main( in VS_INPUT In )
{
    VS_OUTPUT Out = (VS_OUTPUT)0;                   //create an output vertex
    Out.Position = mul(In.Position, WorldViewProj); //apply vertex transformation
    Out.Texture = In.Texture;
    Out.Texture_zw = float2(In.Texture.z, In.Texture.w);
    Out.Color = In.Color;
    //Out.Psize = In.Psize;
    Out.Psize = (Out.Position.z) * 10.0;
    return Out;                                     //return output vertex
}

float4 PS_Particle_main(float2 vPos : TEXCOORD0, float2 text_zw : TEXCOORD1) : COLOR
{
    vPos.x *= offset_x;
    vPos.y *= offset_y;
    vPos += float2(text_zw[0], text_zw[1]);
    return tex2D(s_2D, vPos);
}

technique RenderVS
{
    pass p0
    {
        AlphaBlendEnable = true;
        AlphaTestEnable = false;
        SrcBlend = SRCALPHA;
        DestBlend = INVSRCALPHA;
        POINTSPRITEENABLE = true;
        POINTSCALEENABLE = true;
        POINTSIZE_MIN = 1.0f;
        POINTSIZE_MAX = 400.0f;
        POINTSCALE_A = 1.0f;
        POINTSCALE_B = 1.0f;
        POINTSCALE_C = 1.0f;
        ZWRITEENABLE = false;
        Sampler[0] = (S0);
        VertexShader = compile vs_1_1 vs_main();
        PixelShader = compile ps_2_0 PS_Particle_main();
    }
}
I had the same problem for a while and it cost me a lot of time. I have not found any documentation of this behaviour, but by testing on ATI and NVIDIA devices I found the difference. With point sprites, ATI works fine: it interpolates the texture coordinates properly into TEXCOORD0. NVIDIA does nearly the same, but it writes the generated point-sprite texture coordinates into every TEXCOORD interpolator, so any information you pass to the pixel shader through texture coordinates gets overwritten. I solved this by using a COLOR interpolator instead of a TEXCOORD interpolator. Very strange, but it works fine for me :) In your case it would be:
struct VS_OUTPUT
{
    float4 Position : POSITION;
    float4 Color : COLOR0;
    float2 Texture : TEXCOORD0;
    float2 Texture_zw : COLOR1;
    float1 Psize : PSIZE;
};
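The pixel shader input would then need to read the offset from COLOR1 as well (a sketch based on the shader in the question; note that in ps_2_0, COLOR interpolators may be clamped to the [0, 1] range and carry less precision than TEXCOORDs, which is acceptable here because sprite-sheet offsets already lie in [0, 1]):

float4 PS_Particle_main(float2 vPos : TEXCOORD0, float2 text_zw : COLOR1) : COLOR
{
    // Scale the per-sprite coordinates, then shift by the sheet
    // offset now carried in the COLOR1 interpolator.
    vPos.x *= offset_x;
    vPos.y *= offset_y;
    vPos += text_zw;
    return tex2D(s_2D, vPos);
}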

DX10 Skybox Shader

I'm trying to write a skybox shader in DX10 using the following HLSL code:
//////////////////////////////////////////////////////////////////////////
// world matrix for each rendered object
float4x4 g_mWorld;

// single cubemap texture
Texture2D g_tCubeMap;

// basic mirror texture sampler
SamplerState g_sSamplerMirror
{
    Filter = MIN_MAG_MIP_POINT;
    AddressU = MIRROR;
    AddressV = MIRROR;
};

//////////////////////////////////////////////////////////////////////////
// pre-defined vertex formats for vertex input layouts
struct VS_INPUT
{
    float3 Position : POSITION;
};

struct PS_INPUT
{
    float4 SPosition : SV_POSITION;
    float3 UV : TEXCOORD;
};

//////////////////////////////////////////////////////////////////////////
PS_INPUT VS_Default( VS_INPUT Input )
{
    PS_INPUT Output = (PS_INPUT)0;
    Output.SPosition = float4(Input.Position, 1.0f);
    Output.UV = normalize( mul( Output.SPosition, g_mWorld ) ).xyz;
    return Output;
}

//////////////////////////////////////////////////////////////////////////
float4 PS_Default( PS_INPUT Input ) : SV_TARGET0
{
    return float4( texCUBE( g_sSamplerMirror, Input.UV ) );
}

//////////////////////////////////////////////////////////////////////////
technique10 TECH_Default
{
    pass
    {
        SetVertexShader( CompileShader( vs_4_0, VS_Default() ) );
        SetPixelShader( CompileShader( ps_4_0, PS_Default() ) );
        SetGeometryShader( 0 );
    }
}
This gives the error "DX-9 style intrinsics are disabled when not in dx9 compatibility mode." on line 46:
return float4( texCUBE( g_sSamplerMirror, Input.UV ) );
Is there an alternative to texCUBE? How can I fix this without enabling dx9 compatibility mode?
Since you are using Shader Model 4 you should be able to create a TextureCube object and then call the Sample method.
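A minimal sketch of that change, replacing the Texture2D declaration and the texCUBE call (the rest of the effect stays the same):

// declare the cubemap with the SM4 resource type
TextureCube g_tCubeMap;

float4 PS_Default( PS_INPUT Input ) : SV_TARGET0
{
    // Sample replaces the DX9-style texCUBE intrinsic
    return g_tCubeMap.Sample( g_sSamplerMirror, Input.UV );
}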
