---
title: ToonPostProcess
date: 2024-05-15 16:50:13
excerpt:
tags:
rating: ⭐
---
# FFT
# Bloom
Bloom mainly comes in these variants:
- Bloom
- FFTBloom
- LensFlares
The BloomThreshold property is clamped with `ClampMin = "-1.0"`, `UIMax = "8.0"`.
The related logic is here:
```c++
if (bBloomSetupRequiredEnabled)
{
const float BloomThreshold = View.FinalPostProcessSettings.BloomThreshold;
FBloomSetupInputs SetupPassInputs;
SetupPassInputs.SceneColor = DownsampleInput;
SetupPassInputs.EyeAdaptationBuffer = EyeAdaptationBuffer;
SetupPassInputs.EyeAdaptationParameters = &EyeAdaptationParameters;
SetupPassInputs.LocalExposureParameters = &LocalExposureParameters;
SetupPassInputs.LocalExposureTexture = CVarBloomApplyLocalExposure.GetValueOnRenderThread() ? LocalExposureTexture : nullptr;
SetupPassInputs.BlurredLogLuminanceTexture = LocalExposureBlurredLogLumTexture;
SetupPassInputs.Threshold = BloomThreshold;
SetupPassInputs.ToonThreshold = View.FinalPostProcessSettings.ToonBloomThreshold;
DownsampleInput = AddBloomSetupPass(GraphBuilder, View, SetupPassInputs);
}
```
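A hypothetical sketch (not engine source) of how a ToonBloomThreshold could be declared in FPostProcessSettings (Scene.h), mirroring the existing BloomThreshold and the ClampMin/UIMax meta quoted above; the specifiers and edit-condition name are assumptions. It is then read as View.FinalPostProcessSettings.ToonBloomThreshold in the setup code above.
```c++
// Hypothetical declaration mirroring BloomThreshold; names/metadata are assumptions.
UPROPERTY(interp, BlueprintReadWrite, Category = "Lens|Bloom", AdvancedDisplay,
	meta = (ClampMin = "-1.0", UIMax = "8.0", DisplayName = "Toon Threshold", editcondition = "bOverride_ToonBloomThreshold"))
float ToonBloomThreshold;
```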
## FFTBloom
***The regular Bloom algorithm can only produce round light blobs; for custom-shaped flares you need FFTBloom.***
- FFT Bloom:https://zhuanlan.zhihu.com/p/611582936
- Unity FFT Bloom:https://github.com/AKGWSB/FFTConvolutionBloom
### Frequency domain and the convolution theorem
An image can be viewed as a two-dimensional signal, and a signal can be approximated by a linear combination of Sine & Cosine functions of **different frequencies**. For each frequency we multiply the function by a constant amplitude and add it to the final result; these amplitudes are called the **spectrum**. Note that all of the F_k are **complex numbers**.
![](https://pic2.zhimg.com/80/v2-64b4c2d33d90816cc9bfcf875f618d9f_720w.webp)
Each amplitude in the frequency domain no longer represents a single time-domain sample; instead it represents the **overall** contribution of that frequency band's Sine & Cosine functions to the time-domain signal. The frequency-domain signal contains all of the time-domain information of the input image, ***so the convolution theorem tells us that convolving the signals in the time domain is equivalent to a simple complex **multiplication** of the spectra (the coefficients V_k above) of the source image and the filter-kernel image in the frequency domain.***
![](https://pic1.zhimg.com/80/v2-abc8c8d19dc3ded6c282075cc4d2f022_720w.webp)
Element-wise multiplication is far faster than naive convolution, which needs a loop with accumulation. So the next goal is to find a way to connect an image signal with its frequency domain; in communications, the Fourier transform is the standard tool for converting between the time and frequency domains.
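Written out (the standard convolution theorem, added here for reference):
$$
(f * g)(x) = \int f(t)\,g(x - t)\,dt
\quad\Longleftrightarrow\quad
\mathcal{F}\{f * g\} = \mathcal{F}\{f\}\cdot\mathcal{F}\{g\}
$$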
### Related code
- C++
- AddFFTBloomPass()
- FBloomFinalizeApplyConstantsCS: finalizes the Bloom computation.
- AddTonemapPass(): consumes PassInputs.Bloom = Bloom together with PassInputs.SceneColorApplyParamaters.
- Shader

**FBloomFindKernelCenterCS** finds the center of the Bloom kernel (the brightest pixel in the texture) and records its position. It mainly computes the Luminance to locate the center region; there can be several center regions, which means the final SceneColor can contain more than one bloom highlight.
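A rough illustration of the idea (not the engine's actual FBloomFindKernelCenterCS; InputTexture, InputExtent and MaxLumAndPos are assumed names): pack a 16-bit float luminance into the high bits of a uint and take an atomic max, so the winning entry carries both the highest luminance and its pixel position.
```c++
Texture2D<float4> InputTexture;
int2 InputExtent;
RWStructuredBuffer<uint> MaxLumAndPos; // [0] = (f32tof16(Luminance) << 16) | flattened pixel index (sketch assumes it fits in 16 bits)

[numthreads(8, 8, 1)]
void FindKernelCenterCS(uint3 DispatchThreadId : SV_DispatchThreadID)
{
	if (all(DispatchThreadId.xy < uint2(InputExtent)))
	{
		const float3 Color = InputTexture.Load(int3(DispatchThreadId.xy, 0)).rgb;
		const float Luminance = dot(Color, float3(0.2126f, 0.7152f, 0.0722f));
		const uint Packed = (f32tof16(Luminance) << 16) | ((DispatchThreadId.y * InputExtent.x + DispatchThreadId.x) & 0xFFFFu);
		InterlockedMax(MaxLumAndPos[0], Packed);
	}
}
```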
# Useful code
The code lives in DeferredShadingCommon.ush:
```c++
// @param UV - UV space in the GBuffer textures (BufferSize resolution)
FGBufferData GetGBufferData(float2 UV, bool bGetNormalizedNormal = true)
{
#if GBUFFER_REFACTOR
return DecodeGBufferDataUV(UV,bGetNormalizedNormal);
#else
float4 GBufferA = Texture2DSampleLevel(SceneTexturesStruct.GBufferATexture, SceneTexturesStruct_GBufferATextureSampler, UV, 0);
float4 GBufferB = Texture2DSampleLevel(SceneTexturesStruct.GBufferBTexture, SceneTexturesStruct_GBufferBTextureSampler, UV, 0);
float4 GBufferC = Texture2DSampleLevel(SceneTexturesStruct.GBufferCTexture, SceneTexturesStruct_GBufferCTextureSampler, UV, 0);
float4 GBufferD = Texture2DSampleLevel(SceneTexturesStruct.GBufferDTexture, SceneTexturesStruct_GBufferDTextureSampler, UV, 0);
float CustomNativeDepth = Texture2DSampleLevel(SceneTexturesStruct.CustomDepthTexture, SceneTexturesStruct_CustomDepthTextureSampler, UV, 0).r;
// BufferToSceneTextureScale is necessary when translucent materials are rendered in a render target
// that has a different resolution than the scene color textures, e.g. r.SeparateTranslucencyScreenPercentage < 100.
int2 IntUV = (int2)trunc(UV * View.BufferSizeAndInvSize.xy * View.BufferToSceneTextureScale.xy);
uint CustomStencil = SceneTexturesStruct.CustomStencilTexture.Load(int3(IntUV, 0)) STENCIL_COMPONENT_SWIZZLE;
#if ALLOW_STATIC_LIGHTING
float4 GBufferE = Texture2DSampleLevel(SceneTexturesStruct.GBufferETexture, SceneTexturesStruct_GBufferETextureSampler, UV, 0);
#else
float4 GBufferE = 1;
#endif
float4 GBufferF = Texture2DSampleLevel(SceneTexturesStruct.GBufferFTexture, SceneTexturesStruct_GBufferFTextureSampler, UV, 0);
#if WRITES_VELOCITY_TO_GBUFFER
float4 GBufferVelocity = Texture2DSampleLevel(SceneTexturesStruct.GBufferVelocityTexture, SceneTexturesStruct_GBufferVelocityTextureSampler, UV, 0);
#else
float4 GBufferVelocity = 0;
#endif
float SceneDepth = CalcSceneDepth(UV);
return DecodeGBufferData(GBufferA, GBufferB, GBufferC, GBufferD, GBufferE, GBufferF, GBufferVelocity, CustomNativeDepth, CustomStencil, SceneDepth, bGetNormalizedNormal, CheckerFromSceneColorUV(UV));
#endif
}
// Minimal path for just the lighting model, used to branch around unlit pixels (skybox)
uint GetShadingModelId(float2 UV)
{
return DecodeShadingModelId(Texture2DSampleLevel(SceneTexturesStruct.GBufferBTexture, SceneTexturesStruct_GBufferBTextureSampler, UV, 0).a);
}
```
## ShadingModel check
```c++
bool IsToonShadingModel(float2 UV)
{
uint ShadingModel = DecodeShadingModelId(Texture2DSampleLevel(SceneTexturesStruct.GBufferBTexture, SceneTexturesStruct_GBufferBTextureSampler, UV, 0).a);
return ShadingModel == SHADINGMODELID_TOONSTANDARD
|| ShadingModel == SHADINGMODELID_PREINTEGRATED_SKIN;
}
```
P.S. The shader needs FSceneTextureShaderParameters / FSceneTextureUniformParameters added:
```c++
IMPLEMENT_STATIC_UNIFORM_BUFFER_STRUCT(FSceneTextureUniformParameters, "SceneTexturesStruct", SceneTextures);
BEGIN_SHADER_PARAMETER_STRUCT(FSceneTextureShaderParameters, ENGINE_API)
SHADER_PARAMETER_RDG_UNIFORM_BUFFER(FSceneTextureUniformParameters, SceneTextures)
SHADER_PARAMETER_RDG_UNIFORM_BUFFER(FMobileSceneTextureUniformParameters, MobileSceneTextures)
END_SHADER_PARAMETER_STRUCT()
```
# ToneMapping
- UE5 official docs, High Dynamic Range Display Output: https://dev.epicgames.com/documentation/en-us/unreal-engine/high-dynamic-range-display-output-in-unreal-engine?application_version=5.3
- ACES official docs: https://docs.acescentral.com/#aces-white-point-derivation
- https://modelviewer.dev/examples/tone-mapping
- [现代游戏图形中的sRGB18%灰-中性灰的定义](https://zhuanlan.zhihu.com/p/654557489)
- 游戏中的后处理渲染流水线、ACES、Tonemapping和 HDR:https://zhuanlan.zhihu.com/p/118272193
## ACES color spaces
The ACES standard defines the following gamuts and color spaces.
Gamuts:
- AP0: a gamut that contains all colors
- AP1: the working gamut
Color spaces:
- ACES2065-1 / ACES color space: uses the AP0 gamut; used for storing colors and handling color-space conversions
- ACEScg: uses the AP1 gamut; a linear working space for rendering computation
- ACEScc: uses the AP1 gamut; a logarithmic encoding used for color grading
- ACEScct: uses the AP1 gamut; similar to ACEScc but with a slightly different curve, suited to different grading scenarios
## UE5's ACES flow
The ACES Viewing Transform proceeds in the following order:
- **Look Modification Transform (LMT)** - This stage takes the ACES color-encoded image with a creative "look" (color grading and correction) applied and outputs an image to be rendered by the ACES Reference Rendering Transform (RRT) and Output Device Transform (ODT).
- **Reference Rendering Transform (RRT)** - Next, this stage takes the scene-referred color values and converts them to display-referred values. It makes the rendered image independent of any specific display while still guaranteeing a correct, wide gamut and dynamic range when output to a specific display (including displays that do not exist yet).
- **Output Device Transform (ODT)** - Finally, this stage takes the HDR output of the RRT and maps it to the different devices and color spaces they can actually display. Each output target therefore needs its own ODT, e.g. Rec709, Rec2020, DCI-P3.
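In short, the viewing chain applies the three transforms in this order:
$$
\text{DisplayOutput} = \mathrm{ODT}\big(\mathrm{RRT}(\mathrm{LMT}(I_{\text{ACES2065-1}}))\big)
$$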
## ToneMapping variants
- ShaderToy demos:
- https://www.shadertoy.com/view/McG3WW
- ACES
- Narkowicz 2015, "ACES Filmic Tone Mapping Curve" (see the sketch after this list)
- https://knarkowicz.wordpress.com/2016/01/06/aces-filmic-tone-mapping-curve/
- PBR Neutral https://modelviewer.dev/examples/tone-mapping
- Uncharted tonemapping
- http://filmicworlds.com/blog/filmic-tonemapping-operators/
- https://www.gdcvault.com/play/1012351/Uncharted-2-HDR
- AgX
- https://github.com/sobotka/AgX
- https://www.shadertoy.com/view/cd3XWr
- https://www.shadertoy.com/view/lslGzl
- https://www.shadertoy.com/view/Xstyzn
- GT-ToneMapping:https://github.com/yaoling1997/GT-ToneMapping
- Curve: https://www.desmos.com/calculator/gslcdxvipg?lang=zh-CN
- CCA-ToneMapping:?
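For reference, a minimal sketch of the Narkowicz 2015 ACES filmic fit from the link above (HLSL-style, linear color in and out):
```c++
float3 ACESFilm(float3 x)
{
	const float a = 2.51f;
	const float b = 0.03f;
	const float c = 2.43f;
	const float d = 0.59f;
	const float e = 0.14f;
	return saturate((x * (a * x + b)) / (x * (c * x + d) + e));
}
```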
## Related implementation in UE
Notes for the UE4 version: [[UE4 ToneMapping]]
FilmToneMap() in TonemapCommon.ush is called from CombineLUTsCommon(). The order is:
1. AddCombineLUTPass() => PostProcessCombineLUTs.usf
2. AddTonemapPass() => PostProcessTonemap.usf
```c++
void AddPostProcessingPasses()
{
...
{
FRDGTextureRef ColorGradingTexture = nullptr;
if (bPrimaryView)
{
ColorGradingTexture = AddCombineLUTPass(GraphBuilder, View);
}
// We can re-use the color grading texture from the primary view.
else if (View.GetTonemappingLUT())
{
ColorGradingTexture = TryRegisterExternalTexture(GraphBuilder, View.GetTonemappingLUT());
}
else
{
const FViewInfo* PrimaryView = static_cast<const FViewInfo*>(View.Family->Views[0]);
ColorGradingTexture = TryRegisterExternalTexture(GraphBuilder, PrimaryView->GetTonemappingLUT());
}
FTonemapInputs PassInputs;
PassSequence.AcceptOverrideIfLastPass(EPass::Tonemap, PassInputs.OverrideOutput);
PassInputs.SceneColor = SceneColorSlice;
PassInputs.Bloom = Bloom;
PassInputs.SceneColorApplyParamaters = SceneColorApplyParameters;
PassInputs.LocalExposureTexture = LocalExposureTexture;
PassInputs.BlurredLogLuminanceTexture = LocalExposureBlurredLogLumTexture;
PassInputs.LocalExposureParameters = &LocalExposureParameters;
PassInputs.EyeAdaptationParameters = &EyeAdaptationParameters;
PassInputs.EyeAdaptationBuffer = EyeAdaptationBuffer;
PassInputs.ColorGradingTexture = ColorGradingTexture;
PassInputs.bWriteAlphaChannel = AntiAliasingMethod == AAM_FXAA || bProcessSceneColorAlpha;
PassInputs.bOutputInHDR = bTonemapOutputInHDR;
SceneColor = AddTonemapPass(GraphBuilder, View, PassInputs);
}
...
}
```
```c++
SHADER_PARAMETER_STRUCT_INCLUDE(FSceneTextureShaderParameters, SceneTextures)
CommonPassParameters.SceneTextures = SceneTextures.GetSceneTextureShaderParameters(View.FeatureLevel);
```
## PostProcessCombineLUTs.usf
The function that updates the related variables is FCachedLUTSettings::GetCombineLUTParameters().
## PostProcessTonemap.usf
## Implementation approach
```c++
//BlueRose Modify
FGBufferData SamplerBuffer = GetGBufferData(UV * View.ResolutionFractionAndInv.x, false);
if (SamplerBuffer.CustomStencil > 1.0f && abs(SamplerBuffer.CustomDepth - SamplerBuffer.Depth) < 1)
{
// OutColor = SampleSceneColor(UV);
OutColor = TonemapCommonPS(UV, InVignette, GrainUV, ScreenPos, FullViewUV, SvPosition, Luminance);
}else
{
OutColor = TonemapCommonPS(UV, InVignette, GrainUV, ScreenPos, FullViewUV, SvPosition, Luminance);
}
//BlueRose Modify End
```
### TextureArray reference
FIESAtlasAddTextureCS::FParameters
- SHADER_PARAMETER_TEXTURE_ARRAY
- /Engine/Private/IESAtlas.usf
```c++
static void AddSlotsPassCS(
FRDGBuilder& GraphBuilder,
FGlobalShaderMap* ShaderMap,
const TArray<FAtlasSlot>& Slots,
FRDGTextureRef& OutAtlas)
{
FRDGTextureUAVRef AtlasTextureUAV = GraphBuilder.CreateUAV(OutAtlas);
TShaderMapRef<FIESAtlasAddTextureCS> ComputeShader(ShaderMap);
// Batch new slots into several passes
const uint32 SlotCountPerPass = 8u;
const uint32 PassCount = FMath::DivideAndRoundUp(uint32(Slots.Num()), SlotCountPerPass);
for (uint32 PassIt = 0; PassIt < PassCount; ++PassIt)
{
const uint32 SlotOffset = PassIt * SlotCountPerPass;
const uint32 SlotCount = SlotCountPerPass * (PassIt+1) <= uint32(Slots.Num()) ? SlotCountPerPass : uint32(Slots.Num()) - (SlotCountPerPass * PassIt);
FIESAtlasAddTextureCS::FParameters* Parameters = GraphBuilder.AllocParameters<FIESAtlasAddTextureCS::FParameters>();
Parameters->OutAtlasTexture = AtlasTextureUAV;
Parameters->AtlasResolution = OutAtlas->Desc.Extent;
Parameters->AtlasSliceCount = OutAtlas->Desc.ArraySize;
Parameters->ValidCount = SlotCount;
for (uint32 SlotIt = 0; SlotIt < SlotCountPerPass; ++SlotIt)
{
Parameters->InTexture[SlotIt] = GSystemTextures.BlackDummy->GetRHI();
Parameters->InSliceIndex[SlotIt].X = InvalidSlotIndex;
Parameters->InSampler[SlotIt] = TStaticSamplerState<SF_Bilinear, AM_Clamp, AM_Clamp, AM_Clamp>::GetRHI();
}
for (uint32 SlotIt = 0; SlotIt<SlotCount;++SlotIt)
{
const FAtlasSlot& Slot = Slots[SlotOffset + SlotIt];
check(Slot.SourceTexture);
Parameters->InTexture[SlotIt] = Slot.GetTextureRHI();
Parameters->InSliceIndex[SlotIt].X = Slot.SliceIndex;
}
const FIntVector DispatchCount = FComputeShaderUtils::GetGroupCount(FIntVector(Parameters->AtlasResolution.X, Parameters->AtlasResolution.Y, SlotCount), FIntVector(8, 8, 1));
FComputeShaderUtils::AddPass(GraphBuilder, RDG_EVENT_NAME("IESAtlas::AddTexture"), ComputeShader, Parameters, DispatchCount);
}
GraphBuilder.UseExternalAccessMode(OutAtlas, ERHIAccess::SRVMask);
}
```
```c++
Texture2D<float4> InTexture_0;
Texture2D<float4> InTexture_1;
Texture2D<float4> InTexture_2;
Texture2D<float4> InTexture_3;
Texture2D<float4> InTexture_4;
Texture2D<float4> InTexture_5;
Texture2D<float4> InTexture_6;
Texture2D<float4> InTexture_7;
uint4 InSliceIndex[8];
SamplerState InSampler_0;
SamplerState InSampler_1;
SamplerState InSampler_2;
SamplerState InSampler_3;
SamplerState InSampler_4;
SamplerState InSampler_5;
SamplerState InSampler_6;
SamplerState InSampler_7;
int2 AtlasResolution;
uint AtlasSliceCount;
uint ValidCount;
RWTexture2DArray<float> OutAtlasTexture;
[numthreads(8, 8, 1)]
void MainCS(uint3 DispatchThreadId : SV_DispatchThreadID)
{
if (all(DispatchThreadId.xy < uint2(AtlasResolution)))
{
const uint2 DstPixelPos = DispatchThreadId.xy;
uint DstSlice = 0;
const float2 SrcUV = (DstPixelPos + 0.5) / float2(AtlasResolution);
const uint SrcSlice = DispatchThreadId.z;
if (SrcSlice < ValidCount)
{
float Color = 0;
switch (SrcSlice)
{
case 0: Color = InTexture_0.SampleLevel(InSampler_0, SrcUV, 0).x; DstSlice = InSliceIndex[0].x; break;
case 1: Color = InTexture_1.SampleLevel(InSampler_1, SrcUV, 0).x; DstSlice = InSliceIndex[1].x; break;
case 2: Color = InTexture_2.SampleLevel(InSampler_2, SrcUV, 0).x; DstSlice = InSliceIndex[2].x; break;
case 3: Color = InTexture_3.SampleLevel(InSampler_3, SrcUV, 0).x; DstSlice = InSliceIndex[3].x; break;
case 4: Color = InTexture_4.SampleLevel(InSampler_4, SrcUV, 0).x; DstSlice = InSliceIndex[4].x; break;
case 5: Color = InTexture_5.SampleLevel(InSampler_5, SrcUV, 0).x; DstSlice = InSliceIndex[5].x; break;
case 6: Color = InTexture_6.SampleLevel(InSampler_6, SrcUV, 0).x; DstSlice = InSliceIndex[6].x; break;
case 7: Color = InTexture_7.SampleLevel(InSampler_7, SrcUV, 0).x; DstSlice = InSliceIndex[7].x; break;
}
// Ensure there is no NaN value
Color = -min(-Color, 0);
DstSlice = min(DstSlice, AtlasSliceCount-1);
OutAtlasTexture[uint3(DstPixelPos, DstSlice)] = Color;
}
}
}
```
### Swapping the ToneMapping method
```c++
// Tonemapped color in the AP1 gamut
float3 ToneMappedColorAP1 = FilmToneMap( ColorAP1 );
// float3 ToneMappedColorAP1 = ColorAP1;
// float3 ToneMappedColorAP1 = AGXToneMap(ColorAP1);
// float3 ToneMappedColorAP1 = GTToneMap(ColorAP1);
// float3 ToneMappedColorAP1 = PBRNeutralToneMap(ColorAP1);
```
# UE5 PostProcess: adding a Pass
- Adding a custom ComputeShader in UE4: https://zhuanlan.zhihu.com/p/413884878
- UE rendering study 2, custom PostProcess (Kuwahara filter): https://zhuanlan.zhihu.com/p/25790491262
# How to add a custom post-process Pass in UE5
## Inside the if (bPostProcessingEnabled) block
1. Add the pass's enum value and name string to the **EPass** enum and the **PassNames** array in AddPostProcessingPasses() (PostProcessing.cpp) (see the sketch after this list).
2. Before **PassSequence.Finalize()**, add `PassSequence.SetEnabled(EPass::XXX)` for the enum value added in step 1. This controls whether the pass takes effect.
3. Insert the pass's code at the position matching the order of the new pass from step 1. Note that the pass code differs before and after Tonemap.
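A minimal sketch of step 1, assuming a new pass named "Toon"; AddPostProcessingPasses() declares a local EPass enum and a matching PassNames table, and both must get the entry in the same position:
```c++
enum class EPass : uint32
{
	// ... existing passes (MotionBlur, Tonemap, FXAA, ...) ...
	Toon,   // new pass inserted where it should run in the chain
	MAX
};

const TCHAR* PassNames[] =
{
	// ... existing names ...
	TEXT("Toon"),
};
static_assert(static_cast<uint32>(EPass::MAX) == UE_ARRAY_COUNT(PassNames), "EPass does not match PassNames.");

// Later, before PassSequence.Finalize():
// PassSequence.SetEnabled(EPass::Toon, CVarToonPostProcess.GetValueOnRenderThread() != 0); // CVar name is hypothetical
```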
### Pass code
1. Before `MotionBlur` (before the BL_BeforeTonemapping post-process materials): draw with FScreenPassTexture SceneColor.
2. Between `MotionBlur` and `Tonemap`: draw with FScreenPassTextureSlice SceneColorSlice.
3. After `Tonemap`: draw with FScreenPassTexture SceneColor.
Between `MotionBlur` and `Tonemap` it looks roughly like this:
```c++
if (PassSequence.IsEnabled(EPass::XXX))
{
FXXXInputs PassInputs;
PassSequence.AcceptOverrideIfLastPass(EPass::XXX, PassInputs.OverrideOutput);
PassInputs.SceneColorSlice = SceneColorSlice;
SceneColorSlice = FScreenPassTextureSlice::CreateFromScreenPassTexture(GraphBuilder, AddXXXPass(GraphBuilder, View, PassInputs));
}
```
At the other positions it looks roughly like this:
```c++
if (PassSequence.IsEnabled(EPass::XXX))
{
FXXXInputs PassInputs;
PassSequence.AcceptOverrideIfLastPass(EPass::XXX, PassInputs.OverrideOutput);
PassInputs.SceneColor = SceneColor;   
SceneColor = AddXXXPass(GraphBuilder, View, PassInputs);
}
```
### UV conversion after upscaling
For passes after `Tonemap`, upscaling makes the viewport resolution differ from the buffer resolution, so you need `SHADER_PARAMETER(FScreenTransform, SvPositionToInputTextureUV)` together with `FScreenTransform::ChangeTextureBasisFromTo` to compute the transform. See FFXAAPS for reference.
SvPosition => ViewportUV => TextureUV
```c++
PassParameters->SvPositionToInputTextureUV = (
FScreenTransform::SvPositionToViewportUV(Output.ViewRect) *
FScreenTransform::ChangeTextureBasisFromTo(FScreenPassTextureViewport(Inputs.SceneColorSlice), FScreenTransform::ETextureBasis::ViewportUV, FScreenTransform::ETextureBasis::TextureUV));
```
Then in the shader:
```c++
Texture2D SceneColorTexture;
SamplerState SceneColorSampler;
FScreenTransform SvPositionToInputTextureUV;
void MainPS(
float4 SvPosition : SV_POSITION,
out float4 OutColor : SV_Target0)
{
float2 SceneColorUV = ApplyScreenTransform(SvPosition.xy, SvPositionToInputTextureUV);
OutColor.rgba = SceneColorTexture.SampleLevel(SceneColorSampler, SceneColorUV, 0).rgba;
}
```
## Inside the else block
Add code in the else block that disables the new pass. If you don't, switching to another view (for example the material editor Preview) will crash. For example:
```c++
if(bPostProcessingEnabled){
...
...
}else
{
PassSequence.SetEnabled(EPass::XXX, false);
PassSequence.SetEnabled(EPass::MotionBlur, false);
PassSequence.SetEnabled(EPass::Tonemap, true);
PassSequence.SetEnabled(EPass::FXAA, false);
PassSequence.SetEnabled(EPass::PostProcessMaterialAfterTonemapping, false);
PassSequence.SetEnabled(EPass::VisualizeDepthOfField, false);
PassSequence.SetEnabled(EPass::VisualizeLocalExposure, false);
PassSequence.Finalize();
...
}
```
## P.S.
1. For passes after `Tonemap`, upscaling makes the viewport resolution differ from the buffer resolution, so use `SHADER_PARAMETER(FScreenTransform, SvPositionToInputTextureUV)` together with `FScreenTransform::ChangeTextureBasisFromTo` to compute the transform; see FFXAAPS for reference.
2. The ToneMap pass takes an FScreenPassTextureSlice and outputs an FScreenPassTexture. If Tonemap is disabled, it simply runs `SceneColor = FScreenPassTexture(SceneColorSlice);`.
3. Between `MotionBlur` and `Tonemap`: draw with FScreenPassTextureSlice SceneColorSlice.
4. Before `MotionBlur` (before the BL_BeforeTonemapping post-process materials): draw with FScreenPassTexture SceneColor.
## FScreenPassTexture / FScreenPassTextureSlice conversion
Copying a slice into an FScreenPassTexture for a post-process material input:
```c++
FPostProcessMaterialInputs PassInputs;
PassSequence.AcceptOverrideIfLastPass(EPass::Tonemap, PassInputs.OverrideOutput);
PassInputs.SetInput(EPostProcessMaterialInput::SceneColor, FScreenPassTexture::CopyFromSlice(GraphBuilder, SceneColorSlice));
```
Creating an `FScreenPassTextureSlice`:
```c++
FScreenPassTextureSlice SceneColorSlice = FScreenPassTextureSlice::CreateFromScreenPassTexture(GraphBuilder, SceneColor);//FScreenPassTexture SceneColor
```
Converting a render result into an `FScreenPassTextureSlice`:
```c++
SceneColorSlice = FScreenPassTextureSlice::CreateFromScreenPassTexture(GraphBuilder, AddToonPostProcessBeforeTonemappingPass(GraphBuilder, View, PassInputs));
```
```c++
// Allows for the scene color to be the slice of an array between temporal upscaler and tonemaper.
FScreenPassTextureSlice SceneColorSlice = FScreenPassTextureSlice::CreateFromScreenPassTexture(GraphBuilder, SceneColor);
```

---
title: Toon ToneMapping
date: 2025-01-19 21:02:19
excerpt:
tags:
rating: ⭐
---

---
title: Untitled
date: 2024-09-26 18:41:24
excerpt:
tags:
rating: ⭐
---
# RenderBasePass()
The DepthStencil access passed into RenderBasePass() is determined as follows:
```c++
const FExclusiveDepthStencil::Type BasePassDepthStencilAccess =
bAllowReadOnlyDepthBasePass
? FExclusiveDepthStencil::DepthRead_StencilWrite
: FExclusiveDepthStencil::DepthWrite_StencilWrite;
```
FDeferredShadingSceneRenderer::RenderBasePass() =>
FDeferredShadingSceneRenderer::RenderBasePassInternal() =>
FBasePassMeshProcessor::TryAddMeshBatch =>
## Overall flow
1. Create and bind the MRTs and fetch the depth buffer.
```c++
const FExclusiveDepthStencil ExclusiveDepthStencil(BasePassDepthStencilAccess);
TStaticArray<FTextureRenderTargetBinding, MaxSimultaneousRenderTargets> BasePassTextures;
uint32 BasePassTextureCount = SceneTextures.GetGBufferRenderTargets(BasePassTextures);
Strata::AppendStrataMRTs(*this, BasePassTextureCount, BasePassTextures);
TArrayView<FTextureRenderTargetBinding> BasePassTexturesView = MakeArrayView(BasePassTextures.GetData(), BasePassTextureCount);
FRDGTextureRef BasePassDepthTexture = SceneTextures.Depth.Target;
```
2. GBuffer Clear
```c++
GraphBuilder.AddPass(RDG_EVENT_NAME("GBufferClear"), PassParameters, ERDGPassFlags::Raster,
[PassParameters, ColorLoadAction, SceneColorClearValue](FRHICommandList& RHICmdList)
{
// If no fast-clear action was used, we need to do an MRT shader clear.
if (ColorLoadAction == ERenderTargetLoadAction::ENoAction)
{
const FRenderTargetBindingSlots& RenderTargets = PassParameters->RenderTargets;
FLinearColor ClearColors[MaxSimultaneousRenderTargets];
FRHITexture* Textures[MaxSimultaneousRenderTargets];
int32 TextureIndex = 0;
RenderTargets.Enumerate([&](const FRenderTargetBinding& RenderTarget)
{
FRHITexture* TextureRHI = RenderTarget.GetTexture()->GetRHI();
ClearColors[TextureIndex] = TextureIndex == 0 ? SceneColorClearValue : TextureRHI->GetClearColor();
Textures[TextureIndex] = TextureRHI;
++TextureIndex;
});
// Clear color only; depth-stencil is fast cleared.
DrawClearQuadMRT(RHICmdList, true, TextureIndex, ClearColors, false, 0, false, 0);
}
});
```
3. RenderTargetBindingSlots
```c++
// Render targets bindings should remain constant at this point.
FRenderTargetBindingSlots BasePassRenderTargets = GetRenderTargetBindings(ERenderTargetLoadAction::ELoad, BasePassTexturesView);
BasePassRenderTargets.DepthStencil = FDepthStencilBinding(BasePassDepthTexture, ERenderTargetLoadAction::ELoad, ERenderTargetLoadAction::ELoad, ExclusiveDepthStencil);
```
4. RenderBasePassInternal()
5. RenderAnisotropyPass()
# MeshDraw
## RenderBasePassInternal()
RenderNaniteBasePass() is a lambda that ultimately calls **Nanite::DrawBasePass()** to render the BasePass for Nanite objects. The other related rendering code is as follows:
```c++
SCOPE_CYCLE_COUNTER(STAT_BasePassDrawTime);
RDG_EVENT_SCOPE(GraphBuilder, "BasePass");
RDG_GPU_STAT_SCOPE(GraphBuilder, Basepass);
const bool bDrawSceneViewsInOneNanitePass = Views.Num() > 1 && Nanite::ShouldDrawSceneViewsInOneNanitePass(Views[0]);
if (bParallelBasePass) // Parallel rendering path
{
RDG_WAIT_FOR_TASKS_CONDITIONAL(GraphBuilder, IsBasePassWaitForTasksEnabled());
for (int32 ViewIndex = 0; ViewIndex < Views.Num(); ++ViewIndex)
{
FViewInfo& View = Views[ViewIndex];
RDG_GPU_MASK_SCOPE(GraphBuilder, View.GPUMask);
RDG_EVENT_SCOPE_CONDITIONAL(GraphBuilder, Views.Num() > 1, "View%d", ViewIndex);
View.BeginRenderView();
const bool bLumenGIEnabled = GetViewPipelineState(View).DiffuseIndirectMethod == EDiffuseIndirectMethod::Lumen;
FMeshPassProcessorRenderState DrawRenderState;
SetupBasePassState(BasePassDepthStencilAccess, ViewFamily.EngineShowFlags.ShaderComplexity, DrawRenderState);
FOpaqueBasePassParameters* PassParameters = GraphBuilder.AllocParameters<FOpaqueBasePassParameters>();
PassParameters->View = View.GetShaderParameters();
PassParameters->ReflectionCapture = View.ReflectionCaptureUniformBuffer;
PassParameters->BasePass = CreateOpaqueBasePassUniformBuffer(GraphBuilder, View, ViewIndex, ForwardBasePassTextures, DBufferTextures, bLumenGIEnabled);
PassParameters->RenderTargets = BasePassRenderTargets;
PassParameters->RenderTargets.ShadingRateTexture = GVRSImageManager.GetVariableRateShadingImage(GraphBuilder, View, FVariableRateShadingImageManager::EVRSPassType::BasePass);
const bool bShouldRenderView = View.ShouldRenderView();
if (bShouldRenderView)
{
View.ParallelMeshDrawCommandPasses[EMeshPass::BasePass].BuildRenderingCommands(GraphBuilder, Scene->GPUScene, PassParameters->InstanceCullingDrawParams);
GraphBuilder.AddPass(
RDG_EVENT_NAME("BasePassParallel"),
PassParameters,
ERDGPassFlags::Raster | ERDGPassFlags::SkipRenderPass,
[this, &View, PassParameters](const FRDGPass* InPass, FRHICommandListImmediate& RHICmdList)
{
FRDGParallelCommandListSet ParallelCommandListSet(InPass, RHICmdList, GET_STATID(STAT_CLP_BasePass), View, FParallelCommandListBindings(PassParameters));
View.ParallelMeshDrawCommandPasses[EMeshPass::BasePass].DispatchDraw(&ParallelCommandListSet, RHICmdList, &PassParameters->InstanceCullingDrawParams);
});
}
const bool bShouldRenderViewForNanite = bNaniteEnabled && (!bDrawSceneViewsInOneNanitePass || ViewIndex == 0); // when bDrawSceneViewsInOneNanitePass, the first view should cover all the other atlased ones
if (bShouldRenderViewForNanite)
{
// Should always have a full Z prepass with Nanite
check(ShouldRenderPrePass());
// Render the BasePass for Nanite objects
RenderNaniteBasePass(View, ViewIndex);
}
// Render editor-only primitives
RenderEditorPrimitives(GraphBuilder, PassParameters, View, DrawRenderState, InstanceCullingManager);
// Render the atmosphere (sky pass)
if (bShouldRenderView && View.Family->EngineShowFlags.Atmosphere)
{
FOpaqueBasePassParameters* SkyPassPassParameters = GraphBuilder.AllocParameters<FOpaqueBasePassParameters>();
SkyPassPassParameters->BasePass = PassParameters->BasePass;
SkyPassPassParameters->RenderTargets = BasePassRenderTargets;
SkyPassPassParameters->View = View.GetShaderParameters();
SkyPassPassParameters->ReflectionCapture = View.ReflectionCaptureUniformBuffer;
View.ParallelMeshDrawCommandPasses[EMeshPass::SkyPass].BuildRenderingCommands(GraphBuilder, Scene->GPUScene, SkyPassPassParameters->InstanceCullingDrawParams);
GraphBuilder.AddPass(
RDG_EVENT_NAME("SkyPassParallel"),
SkyPassPassParameters,
ERDGPassFlags::Raster | ERDGPassFlags::SkipRenderPass,
[this, &View, SkyPassPassParameters](const FRDGPass* InPass, FRHICommandListImmediate& RHICmdList)
{
FRDGParallelCommandListSet ParallelCommandListSet(InPass, RHICmdList, GET_STATID(STAT_CLP_BasePass), View, FParallelCommandListBindings(SkyPassPassParameters));
View.ParallelMeshDrawCommandPasses[EMeshPass::SkyPass].DispatchDraw(&ParallelCommandListSet, RHICmdList, &SkyPassPassParameters->InstanceCullingDrawParams);
});
}
}
}
else
{
for (int32 ViewIndex = 0; ViewIndex < Views.Num(); ++ViewIndex)
{
FViewInfo& View = Views[ViewIndex];
RDG_GPU_MASK_SCOPE(GraphBuilder, View.GPUMask);
RDG_EVENT_SCOPE_CONDITIONAL(GraphBuilder, Views.Num() > 1, "View%d", ViewIndex);
View.BeginRenderView();
const bool bLumenGIEnabled = GetViewPipelineState(View).DiffuseIndirectMethod == EDiffuseIndirectMethod::Lumen;
FMeshPassProcessorRenderState DrawRenderState;
SetupBasePassState(BasePassDepthStencilAccess, ViewFamily.EngineShowFlags.ShaderComplexity, DrawRenderState);
FOpaqueBasePassParameters* PassParameters = GraphBuilder.AllocParameters<FOpaqueBasePassParameters>();
PassParameters->View = View.GetShaderParameters();
PassParameters->ReflectionCapture = View.ReflectionCaptureUniformBuffer;
PassParameters->BasePass = CreateOpaqueBasePassUniformBuffer(GraphBuilder, View, ViewIndex, ForwardBasePassTextures, DBufferTextures, bLumenGIEnabled);
PassParameters->RenderTargets = BasePassRenderTargets;
PassParameters->RenderTargets.ShadingRateTexture = GVRSImageManager.GetVariableRateShadingImage(GraphBuilder, View, FVariableRateShadingImageManager::EVRSPassType::BasePass);
const bool bShouldRenderView = View.ShouldRenderView();
if (bShouldRenderView)
{
View.ParallelMeshDrawCommandPasses[EMeshPass::BasePass].BuildRenderingCommands(GraphBuilder, Scene->GPUScene, PassParameters->InstanceCullingDrawParams);
GraphBuilder.AddPass(
RDG_EVENT_NAME("BasePass"),
PassParameters,
ERDGPassFlags::Raster,
[this, &View, PassParameters](FRHICommandList& RHICmdList)
{
SetStereoViewport(RHICmdList, View, 1.0f);
View.ParallelMeshDrawCommandPasses[EMeshPass::BasePass].DispatchDraw(nullptr, RHICmdList, &PassParameters->InstanceCullingDrawParams);
}
);
}
const bool bShouldRenderViewForNanite = bNaniteEnabled && (!bDrawSceneViewsInOneNanitePass || ViewIndex == 0); // when bDrawSceneViewsInOneNanitePass, the first view should cover all the other atlased ones
if (bShouldRenderViewForNanite)
{
// Should always have a full Z prepass with Nanite
check(ShouldRenderPrePass());
RenderNaniteBasePass(View, ViewIndex);
}
RenderEditorPrimitives(GraphBuilder, PassParameters, View, DrawRenderState, InstanceCullingManager);
if (bShouldRenderView && View.Family->EngineShowFlags.Atmosphere)
{
FOpaqueBasePassParameters* SkyPassParameters = GraphBuilder.AllocParameters<FOpaqueBasePassParameters>();
SkyPassParameters->BasePass = PassParameters->BasePass;
SkyPassParameters->RenderTargets = BasePassRenderTargets;
SkyPassParameters->View = View.GetShaderParameters();
SkyPassParameters->ReflectionCapture = View.ReflectionCaptureUniformBuffer;
View.ParallelMeshDrawCommandPasses[EMeshPass::SkyPass].BuildRenderingCommands(GraphBuilder, Scene->GPUScene, SkyPassParameters->InstanceCullingDrawParams);
GraphBuilder.AddPass(
RDG_EVENT_NAME("SkyPass"),
SkyPassParameters,
ERDGPassFlags::Raster,
[this, &View, SkyPassParameters](FRHICommandList& RHICmdList)
{
SetStereoViewport(RHICmdList, View, 1.0f);
View.ParallelMeshDrawCommandPasses[EMeshPass::SkyPass].DispatchDraw(nullptr, RHICmdList, &SkyPassParameters->InstanceCullingDrawParams);
}
);
}
}
}
```
### SetDepthStencilStateForBasePass()
```c++
void SetDepthStencilStateForBasePass(
FMeshPassProcessorRenderState& DrawRenderState,
ERHIFeatureLevel::Type FeatureLevel,
bool bDitheredLODTransition,
const FMaterial& MaterialResource,
bool bEnableReceiveDecalOutput,
bool bForceEnableStencilDitherState)
{
const bool bMaskedInEarlyPass = (MaterialResource.IsMasked() || bDitheredLODTransition) && MaskedInEarlyPass(GShaderPlatformForFeatureLevel[FeatureLevel]);
if (bEnableReceiveDecalOutput)
{
if (bMaskedInEarlyPass)
{
SetDepthStencilStateForBasePass_Internal<false, CF_Equal>(DrawRenderState, FeatureLevel);
}
else if (DrawRenderState.GetDepthStencilAccess() & FExclusiveDepthStencil::DepthWrite)
{
SetDepthStencilStateForBasePass_Internal<true, CF_GreaterEqual>(DrawRenderState, FeatureLevel);
}
else
{
SetDepthStencilStateForBasePass_Internal<false, CF_GreaterEqual>(DrawRenderState, FeatureLevel);
}
}
else if (bMaskedInEarlyPass)
{
DrawRenderState.SetDepthStencilState(TStaticDepthStencilState<false, CF_Equal>::GetRHI());
}
if (bForceEnableStencilDitherState)
{
SetDepthStencilStateForBasePass_Internal<false, CF_Equal>(DrawRenderState, FeatureLevel);
}
}
```
## AnisotropyPass
RT setup for the Anisotropy pass:
- RenderTarget: SceneTextures.GBufferF.
- DepthStencil: SceneTextures.Depth.Target, with **ERenderTargetLoadAction::ELoad** and **FExclusiveDepthStencil::DepthRead_StencilNop**.
### Pipeline state
In FAnisotropyMeshProcessor::CollectPSOInitializers():
```c++
ETextureCreateFlags GBufferFCreateFlags;
EPixelFormat GBufferFPixelFormat = FSceneTextures::GetGBufferFFormatAndCreateFlags(GBufferFCreateFlags);
AddRenderTargetInfo(GBufferFPixelFormat, GBufferFCreateFlags, RenderTargetsInfo);
SetupDepthStencilInfo(PF_DepthStencil, SceneTexturesConfig.DepthCreateFlags, ERenderTargetLoadAction::ELoad,
ERenderTargetLoadAction::ELoad, FExclusiveDepthStencil::DepthRead_StencilNop, RenderTargetsInfo);
```
```c++
void SetupDepthStencilInfo(
EPixelFormat DepthStencilFormat,
ETextureCreateFlags DepthStencilCreateFlags,
ERenderTargetLoadAction DepthTargetLoadAction,
ERenderTargetLoadAction StencilTargetLoadAction,
FExclusiveDepthStencil DepthStencilAccess,
FGraphicsPipelineRenderTargetsInfo& RenderTargetsInfo)
{
// Setup depth stencil state
RenderTargetsInfo.DepthStencilTargetFormat = DepthStencilFormat;
RenderTargetsInfo.DepthStencilTargetFlag = DepthStencilCreateFlags;
RenderTargetsInfo.DepthTargetLoadAction = DepthTargetLoadAction;
RenderTargetsInfo.StencilTargetLoadAction = StencilTargetLoadAction;
RenderTargetsInfo.DepthStencilAccess = DepthStencilAccess;
const ERenderTargetStoreAction StoreAction = EnumHasAnyFlags(RenderTargetsInfo.DepthStencilTargetFlag, TexCreate_Memoryless) ? ERenderTargetStoreAction::ENoAction : ERenderTargetStoreAction::EStore;
RenderTargetsInfo.DepthTargetStoreAction = RenderTargetsInfo.DepthStencilAccess.IsUsingDepth() ? StoreAction : ERenderTargetStoreAction::ENoAction;
RenderTargetsInfo.StencilTargetStoreAction = RenderTargetsInfo.DepthStencilAccess.IsUsingStencil() ? StoreAction : ERenderTargetStoreAction::ENoAction;
}
```
### ParallelRendering
The AnisotropyPass supports parallel rendering; whether to use it is decided by:
```c++
const bool bEnableParallelBasePasses = GRHICommandList.UseParallelAlgorithms() && CVarParallelBasePass.GetValueOnRenderThread();
```
The conditions are:
1. Whether the RHI supports parallel command list generation.
2. Whether the CVar r.ParallelBasePass enables parallel rendering.
From the AnisotropyPass you can see the differences between parallel and normal rendering:
1. The ERenderTargetLoadAction used when binding the FRenderTargetBinding differs: **ELoad for the parallel path** (the target is cleared beforehand with AddClearRenderTargetPass), **EClear for normal rendering**.
2. The AddPass call adds the **ERDGPassFlags::SkipRenderPass** flag.
3. The parallel path constructs an **FRDGParallelCommandListSet ParallelCommandListSet** inside AddPass and passes it to **DispatchDraw**; the normal path passes nullptr.
4. The normal path additionally calls **SetStereoViewport(RHICmdList, View);**, which essentially calls RHICmdList.SetViewport to set the viewport.
### Code
```c++
RDG_CSV_STAT_EXCLUSIVE_SCOPE(GraphBuilder, RenderAnisotropyPass);
SCOPED_NAMED_EVENT(FDeferredShadingSceneRenderer_RenderAnisotropyPass, FColor::Emerald);
SCOPE_CYCLE_COUNTER(STAT_AnisotropyPassDrawTime);
RDG_GPU_STAT_SCOPE(GraphBuilder, RenderAnisotropyPass);
for (int32 ViewIndex = 0; ViewIndex < Views.Num(); ViewIndex++)
{
FViewInfo& View = Views[ViewIndex];
if (View.ShouldRenderView())
{
FParallelMeshDrawCommandPass& ParallelMeshPass = View.ParallelMeshDrawCommandPasses[EMeshPass::AnisotropyPass];
if (!ParallelMeshPass.HasAnyDraw())
{
continue;
}
View.BeginRenderView();
auto* PassParameters = GraphBuilder.AllocParameters<FAnisotropyPassParameters>();
PassParameters->View = View.GetShaderParameters();
PassParameters->RenderTargets.DepthStencil = FDepthStencilBinding(SceneTextures.Depth.Target, ERenderTargetLoadAction::ELoad, FExclusiveDepthStencil::DepthRead_StencilNop);
ParallelMeshPass.BuildRenderingCommands(GraphBuilder, Scene->GPUScene, PassParameters->InstanceCullingDrawParams);
if (bDoParallelPass)
{
AddClearRenderTargetPass(GraphBuilder, SceneTextures.GBufferF);
PassParameters->RenderTargets[0] = FRenderTargetBinding(SceneTextures.GBufferF, ERenderTargetLoadAction::ELoad);
GraphBuilder.AddPass(
RDG_EVENT_NAME("AnisotropyPassParallel"),
PassParameters,
ERDGPassFlags::Raster | ERDGPassFlags::SkipRenderPass,
[this, &View, &ParallelMeshPass, PassParameters](const FRDGPass* InPass, FRHICommandListImmediate& RHICmdList)
{
FRDGParallelCommandListSet ParallelCommandListSet(InPass, RHICmdList, GET_STATID(STAT_CLP_AnisotropyPass), View, FParallelCommandListBindings(PassParameters));
ParallelMeshPass.DispatchDraw(&ParallelCommandListSet, RHICmdList, &PassParameters->InstanceCullingDrawParams);
});
}
else
{
PassParameters->RenderTargets[0] = FRenderTargetBinding(SceneTextures.GBufferF, ERenderTargetLoadAction::EClear);
GraphBuilder.AddPass(
RDG_EVENT_NAME("AnisotropyPass"),
PassParameters,
ERDGPassFlags::Raster,
[this, &View, &ParallelMeshPass, PassParameters](FRHICommandList& RHICmdList)
{
SetStereoViewport(RHICmdList, View);
ParallelMeshPass.DispatchDraw(nullptr, RHICmdList, &PassParameters->InstanceCullingDrawParams);
});
}
}
}
```

---
title: GBuffer&Material&BasePass
date: 2023-12-08 17:34:58
excerpt:
tags:
rating: ⭐
---
# GBuffer
In UE5.3 the engine calls
- WriteGBufferInfoAutogen()
- **EncodeGBufferToMRT()**
to dynamically generate the code of **EncodeGBufferToMRT()** in BasePassPixelShader.usf, and it also generates an AutogenShaderHeaders.ush file, located at
`Engine\Intermediate\ShaderAutogen\PCD3D_SM5` or `Engine\Intermediate\ShaderAutogen\PCD3D_ES3_1`
1. ***When adding members to FGBufferData, extra code logic needs to be added here.***
2. GBuffer precision is configured in FetchLegacyGBufferInfo().
3. Whether Velocity is written into the GBuffer is mainly controlled by the macro **WRITES_VELOCITY_TO_GBUFFER**. The logic that determines its value lives in **FShaderGlobalDefines FetchShaderGlobalDefines**, and it is primarily enabled through **r.VelocityOutputPass**.
	1. P.S. MSAA and VR never enable velocity output. There is also **r.Velocity.ForceOutput**, but in testing it still produces no output unless r.VelocityOutputPass is enabled. Also relevant: FPrimitiveSceneProxy's bAlwaysHasVelocity and bHasWorldPositionOffsetVelocity.
	2. Other related features: FSR, TSR.
4. How to add a GBuffer target:
	1. https://zhuanlan.zhihu.com/p/568775542
	2. https://zhuanlan.zhihu.com/p/677772284
## UE5 GBuffer contents
[[UE GBuffer存储数据]]
```c#
OutGBufferA(MRT1) = WorldNormal/PerObjectGBufferData (GBT_Float_16_16_16_16/GBT_Unorm_11_11_10/GBT_Unorm_8_8_8_8)
OutGBufferB(MRT2) = Metallic/Specular/Roughness/EncodeShadingModelIdAndSelectiveOutputMask (GBT_Float_16_16_16_16/GBT_Unorm_8_8_8_8)
OutGBufferC(MRT3) = BaseColor/GBufferAO (GBT_Unorm_8_8_8_8)
OutGBufferD = GBuffer.CustomData (GBT_Unorm_8_8_8_8)
OutGBufferE = GBuffer.PrecomputedShadowFactors (GBT_Unorm_8_8_8_8)
TargetVelocity / OutGBufferF = velocity / tangent (off by default; with depth <when Lumen + distance fields or ray tracing are enabled> GBC_Raw_Float_16_16_16_16, without depth GBC_Raw_Float_16_16)
TargetSeparatedMainDirLight = SingleLayerWater related (only enabled when SingleLayerWater is present, GBC_Raw_Float_11_11_10)
OutGBufferF = Anisotropy
// 0..1, 2 bits, use CastContactShadow(GBuffer) or HasDynamicIndirectShadowCasterRepresentation(GBuffer) to extract
half PerObjectGBufferData;
```
For GBuffer precision and target order, see FetchLegacyGBufferInfo().
- Without Velocity and Tangent:
	- OutGBufferD(MRT4)
	- OutGBufferE(MRT5)
	- TargetSeparatedMainDirLight(MRT6)
- With Velocity:
	- TargetVelocity(MRT4)
	- OutGBufferD(MRT5)
	- OutGBufferE(MRT6)
	- TargetSeparatedMainDirLight(MRT7)
- With Tangent:
	- OutGBufferF(MRT4)
	- OutGBufferD(MRT5)
	- OutGBufferE(MRT6)
	- TargetSeparatedMainDirLight(MRT7)
Conditions for the dynamic MRTs and the shader macros that gate them:
- OutGBufferE (PrecomputedShadowFactors): r.AllowStaticLighting = 1
	- GBUFFER_HAS_PRECSHADOWFACTOR
	- WRITES_PRECSHADOWFACTOR_ZERO
	- WRITES_PRECSHADOWFACTOR_TO_GBUFFER
- TargetVelocity: (IsUsingBasePassVelocity(Platform) || Layout == GBL_ForceVelocity) ? 1 : 0; // r.VelocityOutputPass = 1
	- When r.VelocityOutputPass = 1, velocity is also output for skeletal meshes and WPO materials. Since distance-field shadows and VSM are very likely to be used, all channels of the GBuffer velocity target are occupied.
	- GBUFFER_HAS_VELOCITY
	- WRITES_VELOCITY_TO_GBUFFER
- SingleLayerWater
	- Not written to the GBuffer by default; it requires: const bool bNeedsSeparateMainDirLightTexture = IsWaterDistanceFieldShadowEnabled(Parameters.Platform) || IsWaterVirtualShadowMapFilteringEnabled(Parameters.Platform);
		- r.Water.SingleLayer.ShadersSupportDistanceFieldShadow = 1
		- r.Water.SingleLayer.ShadersSupportVSMFiltering = 1
	- const bool bIsSingleLayerWater = Parameters.MaterialParameters.ShadingModels.HasShadingModel(MSM_SingleLayerWater);
- Tangent: false; it is currently stored via a separate set of MRTs.
	- ~~GBUFFER_HAS_TANGENT~~
### ToonGBuffer changes & data layout
```c#
OutGBufferA: PerObjectGBufferData => can store extra parameters for Toon rendering features.
OutGBufferB: Metallic/Specular/Roughness =>
? / SpecularPower (controls specular brightness and mask) / ? / ?
//ToonHairMask OffsetShadowMask/SpecularMask/SpecularValue
OutGBufferC: GBufferAO =>
ToonAO
OutGBufferD: CustomData.xyzw =>
ShadowColor.rgb / NoLOffset //ShadowColor can be computed in the material from the main light vector, ShadowStep and shadow feathering to build multi-step shadow colors.
OutGBufferE: GBuffer.PrecomputedShadowFactors.xyzw =>
ToonDataID / ToonOutlineDataID / OutlineMask (controls outline drawing and outline strength) / ToonObjectID (tells whether pixels belong to the same object)
TargetVelocity / OutGBufferF = velocity / tangent //outputting Velocity is not considered for now
? / ? / ? / ?
```
In the material editor, ToonDataID is stored in SubsurfaceColor.a, and ToonOutlineDataID is stored in CustomData1 (the pin is named ToonBufferB). CustomData1 is used because Subsurface already needs CustomData0 for its CurvatureMap.
Blue Protocol's approach:
![[蓝色协议的方案#GBuffer]]
***The extra macro logic is added in ShaderCompiler.cpp*** (see the sketch below):
- **GBUFFER_HAS_TOONDATA**
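A minimal sketch of that wiring, assuming the define is emitted from FShaderCompileUtilities in ShaderCompiler.cpp next to the other GBUFFER_HAS_* defines (the exact location and condition are assumptions):
```c++
// Sketch only: expose the Toon GBuffer layout to shaders as GBUFFER_HAS_TOONDATA.
const bool bHasToonData = IsUsingToonRendering(Parameters.Platform);
OutEnvironment.SetDefine(TEXT("GBUFFER_HAS_TOONDATA"), bHasToonData ? 1 : 0);
```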
### Modifying the GBuffer format
- [[#CalculateDerivedMaterialParameters() in ShaderMaterialDerivedHelpers.cpp]]: controls whether the MRT macros in BasePassPixelShader.usf are true.
- [[#ModifyBasePassCSPSCompilationEnvironment() in BasePassRendering.cpp]]: controls the RT precision related to Velocity and SingleLayerWater.
- [[#FetchLegacyGBufferInfo() in GBufferInfo.cpp]]: controls GBuffer precision and how the data is packed.
#### ModifyBasePassCSPSCompilationEnvironment() in BasePassRendering.cpp
```c++
void ModifyBasePassCSPSCompilationEnvironment()
{
...
const bool bOutputVelocity = (GBufferLayout == GBL_ForceVelocity) ||
FVelocityRendering::BasePassCanOutputVelocity(Parameters.Platform);
if (bOutputVelocity)
{
// As defined in BasePassPixelShader.usf. Also account for Strata setting velocity in slot 1 as described in FetchLegacyGBufferInfo.
const int32 VelocityIndex = Strata::IsStrataEnabled() ? 1 : (IsForwardShadingEnabled(Parameters.Platform) ? 1 : 4);
OutEnvironment.SetRenderTargetOutputFormat(VelocityIndex, PF_G16R16);
}
...
const bool bNeedsSeparateMainDirLightTexture = IsWaterDistanceFieldShadowEnabled(Parameters.Platform) || IsWaterVirtualShadowMapFilteringEnabled(Parameters.Platform);
if (bIsSingleLayerWater && bNeedsSeparateMainDirLightTexture)
{
// See FShaderCompileUtilities::FetchGBufferParamsRuntime for the details
const bool bHasTangent = false;
static const auto CVar = IConsoleManager::Get().FindTConsoleVariableDataInt(TEXT("r.AllowStaticLighting"));
bool bHasPrecShadowFactor = (CVar ? (CVar->GetValueOnAnyThread() != 0) : 1);
uint32 TargetSeparatedMainDirLight = 5;
if (bOutputVelocity == false && bHasTangent == false)
{
TargetSeparatedMainDirLight = 5;
if (bHasPrecShadowFactor)
{
TargetSeparatedMainDirLight = 6;
}
}
else if (bOutputVelocity)
{
TargetSeparatedMainDirLight = 6;
if (bHasPrecShadowFactor)
{
TargetSeparatedMainDirLight = 7;
}
}
else if (bHasTangent)
{
TargetSeparatedMainDirLight = 6;
if (bHasPrecShadowFactor)
{
TargetSeparatedMainDirLight = 7;
}
}
OutEnvironment.SetRenderTargetOutputFormat(TargetSeparatedMainDirLight, PF_FloatR11G11B10);
...
}
```
#### FetchLegacyGBufferInfo() in GBufferInfo.cpp
Controls GBuffer precision and how the data is packed.
#### CalculateDerivedMaterialParameters() in ShaderMaterialDerivedHelpers.cpp
```c++
else if (Mat.IS_BASE_PASS)
{
Dst.PIXELSHADEROUTPUT_BASEPASS = 1;
if (Dst.USES_GBUFFER)
{
Dst.PIXELSHADEROUTPUT_MRT0 = (!SrcGlobal.SELECTIVE_BASEPASS_OUTPUTS || Dst.NEEDS_BASEPASS_VERTEX_FOGGING || Mat.USES_EMISSIVE_COLOR || SrcGlobal.ALLOW_STATIC_LIGHTING || Mat.MATERIAL_SHADINGMODEL_SINGLELAYERWATER);
Dst.PIXELSHADEROUTPUT_MRT1 = ((!SrcGlobal.SELECTIVE_BASEPASS_OUTPUTS || !Mat.MATERIAL_SHADINGMODEL_UNLIT));
Dst.PIXELSHADEROUTPUT_MRT2 = ((!SrcGlobal.SELECTIVE_BASEPASS_OUTPUTS || !Mat.MATERIAL_SHADINGMODEL_UNLIT));
Dst.PIXELSHADEROUTPUT_MRT3 = ((!SrcGlobal.SELECTIVE_BASEPASS_OUTPUTS || !Mat.MATERIAL_SHADINGMODEL_UNLIT));
if (SrcGlobal.GBUFFER_HAS_VELOCITY || SrcGlobal.GBUFFER_HAS_TANGENT)
{
Dst.PIXELSHADEROUTPUT_MRT4 = Dst.WRITES_VELOCITY_TO_GBUFFER || SrcGlobal.GBUFFER_HAS_TANGENT;
Dst.PIXELSHADEROUTPUT_MRT5 = (!SrcGlobal.SELECTIVE_BASEPASS_OUTPUTS || Dst.WRITES_CUSTOMDATA_TO_GBUFFER);
Dst.PIXELSHADEROUTPUT_MRT6 = (Dst.GBUFFER_HAS_PRECSHADOWFACTOR && (!SrcGlobal.SELECTIVE_BASEPASS_OUTPUTS || (Dst.WRITES_PRECSHADOWFACTOR_TO_GBUFFER && !Mat.MATERIAL_SHADINGMODEL_UNLIT)));
}
else
{
Dst.PIXELSHADEROUTPUT_MRT4 = (!SrcGlobal.SELECTIVE_BASEPASS_OUTPUTS || Dst.WRITES_CUSTOMDATA_TO_GBUFFER);
Dst.PIXELSHADEROUTPUT_MRT5 = (Dst.GBUFFER_HAS_PRECSHADOWFACTOR && (!SrcGlobal.SELECTIVE_BASEPASS_OUTPUTS || (Dst.WRITES_PRECSHADOWFACTOR_TO_GBUFFER && !Mat.MATERIAL_SHADINGMODEL_UNLIT)));
}
}
else
{
Dst.PIXELSHADEROUTPUT_MRT0 = true;
// we also need MRT for thin translucency due to dual blending if we are not on the fallback path
Dst.PIXELSHADEROUTPUT_MRT1 = (Dst.WRITES_VELOCITY_TO_GBUFFER || (Mat.DUAL_SOURCE_COLOR_BLENDING_ENABLED && Dst.MATERIAL_WORKS_WITH_DUAL_SOURCE_COLOR_BLENDING));
}
}
}
```
This lives in FShaderCompileUtilities::ApplyDerivedDefines(); the new-version logic iterates over data produced by FetchLegacyGBufferInfo() in GBufferInfo.cpp.
```c++
#if 1
static bool bTestNewVersion = true;
if (bTestNewVersion)
{
//if (DerivedDefines.USES_GBUFFER)
{
for (int32 Iter = 0; Iter < FGBufferInfo::MaxTargets; Iter++)
{
if (bTargetUsage[Iter])
{
FString TargetName = FString::Printf(TEXT("PIXELSHADEROUTPUT_MRT%d"), Iter);
OutEnvironment.SetDefine(TargetName.GetCharArray().GetData(), TEXT("1"));
}
}
}
}
else
{
// This uses the legacy logic from CalculateDerivedMaterialParameters(); Just keeping it around momentarily for testing during the transition.
SET_COMPILE_BOOL_IF_TRUE(PIXELSHADEROUTPUT_MRT0)
SET_COMPILE_BOOL_IF_TRUE(PIXELSHADEROUTPUT_MRT1)
SET_COMPILE_BOOL_IF_TRUE(PIXELSHADEROUTPUT_MRT2)
SET_COMPILE_BOOL_IF_TRUE(PIXELSHADEROUTPUT_MRT3)
SET_COMPILE_BOOL_IF_TRUE(PIXELSHADEROUTPUT_MRT4)
SET_COMPILE_BOOL_IF_TRUE(PIXELSHADEROUTPUT_MRT5)
SET_COMPILE_BOOL_IF_TRUE(PIXELSHADEROUTPUT_MRT6)
}
#endif
```
### MaterialTemplate.ush
MaterialTemplate.ush defines many template functions; their actual bodies are filled in by **GetMaterialShaderCode()** in HLSLMaterialTranslator.h. These functions are ultimately called from BasePassPixelShader.usf.
bool bEnableExecutionFlow decides whether to use the new material HLSL generator; the default is 0.
```c++
static TAutoConsoleVariable<int32> CVarMaterialEnableNewHLSLGenerator(
TEXT("r.MaterialEnableNewHLSLGenerator"),
0,
TEXT("Enables the new (WIP) material HLSL generator.\n")
TEXT("0 - Don't allow\n")
TEXT("1 - Allow if enabled by material\n")
TEXT("2 - Force all materials to use new generator\n"),
ECVF_RenderThreadSafe | ECVF_ReadOnly);
```
This relates to the new material HLSL generator; the relevant generation code is **MaterialEmitHLSL()** => which calls **GenerateMaterialTemplateHLSL()**.
`bCompileForComputeShader = Material->IsLightFunction();`
GetPerInstanceCustomDataX comes in Vertex and Pixel variants.
#### FMaterialAttributes
In MaterialTemplate.ush, right after `/** Material declarations */`, the corresponding FMaterialAttributes struct is generated; you can inspect the result in the material editor's HLSL view. This involves:
- MaterialAttributeDefinitionMap.cpp: attributes are defined in FMaterialAttributeDefinitionMap::InitializeAttributeMap().
- HLSLMaterialTranslator.cpp: the `for (const FGuid& AttributeID : OrderedVisibleAttributes)` loop in GetMaterialShaderCode() generates the attribute struct and the attribute getter functions.
#### DerivativeAutogen.GenerateUsedFunctions()
```c++
{
FString DerivativeHelpers = DerivativeAutogen.GenerateUsedFunctions(*this);
FString DerivativeHelpersAndResources = DerivativeHelpers + ResourcesString;
//LazyPrintf.PushParam(*ResourcesString);
LazyPrintf.PushParam(*DerivativeHelpersAndResources);
}
```
#### GetMaterialEmissiveForCS() and other functions
```c++
if (bCompileForComputeShader)
{
LazyPrintf.PushParam(*GenerateFunctionCode(CompiledMP_EmissiveColorCS, BaseDerivativeVariation));
}
else
{
LazyPrintf.PushParam(TEXT("return 0"));
}
{
FLinearColor Extinction = Material->GetTranslucentMultipleScatteringExtinction();
LazyPrintf.PushParam(*FString::Printf(TEXT("return MaterialFloat3(%.5f, %.5f, %.5f)"), Extinction.R, Extinction.G, Extinction.B));
}
LazyPrintf.PushParam(*FString::Printf(TEXT("return %.5f"), Material->GetOpacityMaskClipValue()));
{
const FDisplacementScaling DisplacementScaling = Material->GetDisplacementScaling();
LazyPrintf.PushParam(*FString::Printf(TEXT("return %.5f"), FMath::Max(0.0f, DisplacementScaling.Magnitude)));
LazyPrintf.PushParam(*FString::Printf(TEXT("return %.5f"), FMath::Clamp(DisplacementScaling.Center, 0.0f, 1.0f)));
}
LazyPrintf.PushParam(!bEnableExecutionFlow ? *GenerateFunctionCode(MP_WorldPositionOffset, BaseDerivativeVariation) : TEXT("return Parameters.MaterialAttributes.WorldPositionOffset"));
LazyPrintf.PushParam(!bEnableExecutionFlow ? *GenerateFunctionCode(CompiledMP_PrevWorldPositionOffset, BaseDerivativeVariation) : TEXT("return 0.0f"));
LazyPrintf.PushParam(!bEnableExecutionFlow ? *GenerateFunctionCode(MP_CustomData0, BaseDerivativeVariation) : TEXT("return 0.0f"));
LazyPrintf.PushParam(!bEnableExecutionFlow ? *GenerateFunctionCode(MP_CustomData1, BaseDerivativeVariation) : TEXT("return 0.0f"));
```
%.5f means the value is printed as a float with 5 digits after the decimal point and the rest discarded; for example 5/2 printed with "%.5f" gives 2.50000.
#### MaterialCustomizedUVs & CustomInterpolators
- `for (uint32 CustomUVIndex = 0; CustomUVIndex < NumUserTexCoords; CustomUVIndex++)`
- `for (UMaterialExpressionVertexInterpolator* Interpolator : CustomVertexInterpolators)`
### Notes on adding ToonDataAssetID and ToonOutlineDataAssetID
1. FMaterialRenderProxy::UpdateDeferredCachedUniformExpressions()
2. FMaterialRenderProxy::EvaluateUniformExpressions()
3. FUniformExpressionSet::FillUniformBuffer()
4. EvaluatePreshader()
5. EvaluateParameter()
6. Context.MaterialRenderProxy->GetParameterValue()
You can see that the key data lives in the UniformExpressionSet; ParameterIndex here is computed via `EvaluateParameter(Stack, UniformExpressionSet, ReadPreshaderValue<uint16>(Data), Context);`.
```c++
const FMaterialNumericParameterInfo& Parameter = UniformExpressionSet->GetNumericParameter(ParameterIndex);
bool bFoundParameter = false;
// First allow proxy the chance to override parameter
if (Context.MaterialRenderProxy)
{
FMaterialParameterValue ParameterValue;
if (Context.MaterialRenderProxy->GetParameterValue(Parameter.ParameterType, Parameter.ParameterInfo, ParameterValue, Context))
{
Stack.PushValue(ParameterValue.AsShaderValue());
bFoundParameter = true;
}
}
bool FMaterialInstanceResource::GetParameterValue(EMaterialParameterType Type, const FHashedMaterialParameterInfo& ParameterInfo, FMaterialParameterValue& OutValue, const FMaterialRenderContext& Context) const
{
checkSlow(IsInParallelRenderingThread());
bool bResult = false;
// Check for hard-coded parameters
if (Type == EMaterialParameterType::Scalar && ParameterInfo.Name == GetSubsurfaceProfileParameterName())
{
check(ParameterInfo.Association == EMaterialParameterAssociation::GlobalParameter);
const USubsurfaceProfile* MySubsurfaceProfileRT = GetSubsurfaceProfileRT();
OutValue = GetSubsurfaceProfileId(MySubsurfaceProfileRT);
bResult = true;
}
else if (Type == EMaterialParameterType::Scalar && NumSpecularProfileRT() > 0)
{
for (uint32 It=0,Count=NumSpecularProfileRT();It<Count;++It)
{
if (ParameterInfo.Name == SpecularProfileAtlas::GetSpecularProfileParameterName(GetSpecularProfileRT(It)))
{
check(ParameterInfo.Association == EMaterialParameterAssociation::GlobalParameter);
OutValue = SpecularProfileAtlas::GetSpecularProfileId(GetSpecularProfileRT(It));
bResult = true;
break;
}
}
}
```
### Notes on the BasePass EncodeGBufferToMRT/DecodeGBufferDataDirect logic
The main logic lives in FShaderCompileUtilities::WriteGBufferInfoAutogen():
```c++
void FShaderCompileUtilities::WriteGBufferInfoAutogen(EShaderPlatform TargetPlatform, ERHIFeatureLevel::Type FeatureLevel = ERHIFeatureLevel::SM5)
{
FGBufferParams DefaultParams = FetchGBufferParamsPipeline(TargetPlatform, GBL_Default);
FScopeLock MapLock(&GCriticalSection);
// For now, the logic always calculates the new GBuffer, and if it's the first time, write it, otherwise check it hasn't changed. We are doing this for
// debugging, and in the near future it will only calculate the GBuffer on the first time only.
FGBufferInfo DefaultBufferInfo = FetchFullGBufferInfo(DefaultParams);
FString AutoGenDirectory = GetAutoGenDirectory(TargetPlatform);
FString AutogenHeaderFilename = AutoGenDirectory / TEXT("AutogenShaderHeaders.ush");
FString AutogenHeaderFilenameTemp = AutoGenDirectory / TEXT("AutogenShaderHeaders_temp.ush");
if (GLastGBufferIsValid[TargetPlatform])
{
const bool bSame = IsGBufferInfoEqual(GLastGBufferInfo[TargetPlatform], DefaultBufferInfo); // check whether the GBufferInfo matches; if not, the check() below asserts
check(bSame);
}
else
{
GLastGBufferIsValid[TargetPlatform] = true;
// should cache this properly, and serialize it, but this is a temporary fix.
GLastGBufferInfo[TargetPlatform] = DefaultBufferInfo;
FString OutputFileData;
OutputFileData += TEXT("// Copyright Epic Games, Inc. All Rights Reserved.\n");
OutputFileData += TEXT("\n");
OutputFileData += TEXT("#pragma once\n");
OutputFileData += TEXT("\n");
OutputFileData += TEXT("#if FEATURE_LEVEL >= FEATURE_LEVEL_SM5\n");
OutputFileData += TEXT("float SampleDeviceZFromSceneTexturesTempCopy(float2 UV)\n");
OutputFileData += TEXT("{\n");
OutputFileData += TEXT("\treturn SceneDepthTexture.SampleLevel(SceneDepthTextureSampler, UV, 0).r;\n");
OutputFileData += TEXT("}\n");
OutputFileData += TEXT("#endif\n");
OutputFileData += TEXT("\n");
OutputFileData += TEXT("#ifndef GBUFFER_LAYOUT\n");
OutputFileData += TEXT("#define GBUFFER_LAYOUT 0\n");
OutputFileData += TEXT("#endif\n");
OutputFileData += TEXT("\n");
for (uint32 Layout = 0; Layout < GBL_Num; ++Layout)
{
FGBufferParams Params = FetchGBufferParamsPipeline(TargetPlatform, (EGBufferLayout)Layout);
FGBufferInfo BufferInfo = FetchFullGBufferInfo(Params);
OutputFileData.Appendf(TEXT("#if GBUFFER_LAYOUT == %u\n\n"), Layout);
OutputFileData += CreateGBufferEncodeFunction(BufferInfo);
OutputFileData += TEXT("\n");
OutputFileData += CreateGBufferDecodeFunctionDirect(BufferInfo);
OutputFileData += TEXT("\n");
//OutputFileData += TEXT("#if SHADING_PATH_DEFERRED\n");
OutputFileData += TEXT("#if FEATURE_LEVEL >= FEATURE_LEVEL_SM5\n");
OutputFileData += TEXT("\n");
OutputFileData += CreateGBufferDecodeFunctionVariation(BufferInfo, EGBufferDecodeType::CoordUV, FeatureLevel);
OutputFileData += TEXT("\n");
OutputFileData += CreateGBufferDecodeFunctionVariation(BufferInfo, EGBufferDecodeType::CoordUInt, FeatureLevel);
OutputFileData += TEXT("\n");
OutputFileData += CreateGBufferDecodeFunctionVariation(BufferInfo, EGBufferDecodeType::SceneTextures, FeatureLevel);
OutputFileData += TEXT("\n");
OutputFileData += CreateGBufferDecodeFunctionVariation(BufferInfo, EGBufferDecodeType::SceneTexturesLoad, FeatureLevel);
OutputFileData += TEXT("\n");
OutputFileData += TEXT("#endif\n");
OutputFileData += TEXT("\n");
OutputFileData += TEXT("#endif\n");
OutputFileData += TEXT("\n");
}
...
}
```
What gets written depends on the FGBufferInfo obtained by these two lines: `FGBufferParams Params = FetchGBufferParamsPipeline(TargetPlatform, (EGBufferLayout)Layout);` and `FGBufferInfo BufferInfo = FetchFullGBufferInfo(Params);`.
![[ShaderGenerationUtil_CreateGBufferEncodeFunction.png|1200]]
## Deciding whether Toon is needed
In the material:
```c++
FMaterialRelevance UMaterialInterface::GetRelevance_Internal(const UMaterial* Material, ERHIFeatureLevel::Type InFeatureLevel) const
{
if(Material)
{
//YivanLee's Modify: this only targets characters, because it decides whether the Toon GBuffer is enabled; ToonLevel/ToonFoliage/ToonGrass do not need it here
bool bUseToonData = MaterialResource->GetShadingModels().HasAnyShadingModel({ MSM_ToonStandard, MSM_ToonSkin, MSM_ToonHair, MSM_ToonFace, MSM_ToonEyeBrow });
}
···
MaterialRelevance.bUsesToonData = bUseToonData;
···
}
```
In the render pipeline:
```c++
//RenderUtils.cpp
bool IsUsingToonRendering(const FStaticShaderPlatform Platform)
{
    static FShaderPlatformCachedIniValue<int32> PerPlatformCVar(TEXT("r.ToonRendering.Enable"));
    if (IsMobilePlatform(Platform) || IsForwardShadingEnabled(Platform)) // VR and mobile are not considered for now
    {
        return false;
    }
    else
    {
        return (PerPlatformCVar.Get(Platform) == 1);
    }
}
bool IsUsingToonOutline(const FStaticShaderPlatform Platform)
{
    static FShaderPlatformCachedIniValue<int32> PerPlatformCVar(TEXT("r.ToonRendering.ToonOutline"));
    return (PerPlatformCVar.Get(Platform) == 1) && IsUsingToonRendering(Platform);
}
bool IsUsingToonRimLighting(const FStaticShaderPlatform Platform)
{
    static FShaderPlatformCachedIniValue<int32> PerPlatformCVar(TEXT("r.ToonRendering.ToonRimLighting"));
    return (PerPlatformCVar.Get(Platform) == 1) && IsUsingToonRendering(Platform);
}
```
Li's ToonBuffer check logic:
```c++
bool FDeferredShadingSceneRenderer::ShouldRenderToonDataPass() const
{
if (!SupportsToonDataMaterials(FeatureLevel, ShaderPlatform))
{
return false;
}
if (IsForwardShadingEnabled(GetFeatureLevelShaderPlatform(FeatureLevel)))
{
return false;
}
for (auto& View : Views)
{
if (View.ShouldRenderView() && View.ParallelMeshDrawCommandPasses[EMeshPass::ToonDataPass].HasAnyDraw())
{
return true;
}
}
return false;
}
```
## Toon PerObjectGBufferData feature table
Starts from 3; 0, 1 and 2 are already taken.
- ?
## ToonBufferData
- ToonObjectID
```c++
struct FSceneDataIntermediates
{
uint PrimitiveId;
uint InstanceId;
uint ViewIndex;
uint CullingFlags;
// Index from which we load the instance info, needed for the
uint InstanceIdLoadIndex;
FInstanceSceneData InstanceData;
FPrimitiveSceneData Primitive;
};
struct FVertexFactoryIntermediatesCommon
{
/** Cached primitive and instance data */
FSceneDataIntermediates SceneData;
#if USE_INSTANCING || USE_INSTANCE_CULLING
FVertexFactoryInstanceInput InstanceInput;
#endif
#if USE_SPLINEDEFORM
FSplineMeshShaderParams SplineMeshParams;
#endif
};
FPrimitiveSceneData GetPrimitiveData(FVertexFactoryIntermediatesCommon Intermediates)
{
return Intermediates.SceneData.Primitive;
}
```
## Specular
- PBR specular: is it feasible to control it with Roughness? Does a mask texture need to be passed into the GBuffer?
- Custom specular: specular map, specular color, parameterized specular shape, multi-layer specular.
# BasePassPixelShader
Velocity-related code section:
```c++
#if USES_GBUFFER
// -0.5 .. 0.5, could be optimzed as lower quality noise would be sufficient
float QuantizationBias = PseudoRandom( MaterialParameters.SvPosition.xy ) - 0.5f;
GBuffer.IndirectIrradiance = IndirectIrradiance;
// this is the new encode, the older encode is the #else, keeping it around briefly until the new version is confirmed stable.
#if 1
{
// change this so that we can pack everything into the gbuffer, but leave this for now
#if GBUFFER_HAS_DIFFUSE_SAMPLE_OCCLUSION
GBuffer.GenericAO = float(GBuffer.DiffuseIndirectSampleOcclusion) * (1.0f / 255.0f);
#elif ALLOW_STATIC_LIGHTING
// No space for AO. Multiply IndirectIrradiance by AO instead of storing.
GBuffer.GenericAO = EncodeIndirectIrradiance(GBuffer.IndirectIrradiance * GBuffer.GBufferAO) + QuantizationBias * (1.0 / 255.0); // Stationary sky light path
#else
GBuffer.GenericAO = GBuffer.GBufferAO; // Movable sky light path
#endif
EncodeGBufferToMRT(Out, GBuffer, QuantizationBias);
if (GBuffer.ShadingModelID == SHADINGMODELID_UNLIT && !STRATA_ENABLED) // Do not touch what strata outputs
{
Out.MRT[1] = 0;
SetGBufferForUnlit(Out.MRT[2]);
Out.MRT[3] = 0;
Out.MRT[GBUFFER_HAS_VELOCITY ? 5 : 4] = 0;
Out.MRT[GBUFFER_HAS_VELOCITY ? 6 : 5] = 0;
}
#if SINGLE_LAYER_WATER_SEPARATED_MAIN_LIGHT
// In deferred, we always output the directional light in a separated buffer.
// This is used to apply distance field shadows or light function to the main directional light.
// Strata also writes it through MRT because this is faster than through UAV.
#if STRATA_ENABLED && STRATA_INLINE_SINGLELAYERWATER
Out.MRT[(GBUFFER_HAS_VELOCITY ? 2 : 1) + (GBUFFER_HAS_PRECSHADOWFACTOR ? 1 : 0)] = float4(SeparatedWaterMainDirLightLuminance * View.PreExposure, 1.0f);
#else
if (GBuffer.ShadingModelID == SHADINGMODELID_SINGLELAYERWATER)
{
Out.MRT[(GBUFFER_HAS_VELOCITY ? 6 : 5) + (GBUFFER_HAS_PRECSHADOWFACTOR ? 1 : 0)] = float4(SeparatedWaterMainDirLightLuminance * View.PreExposure, 1.0f);
}
#endif
#endif
}
```
# Vertex Color
## Blue Protocol
Used to store low-precision data; simple interpolation is enough.
- Vertex color:
	- R: shadow-region control (strength), 0~1
	- G: outline width
	- B: ToonAO
- Second vertex color set (UV Channel 1):
	- R: depth offset
	- G: ID used to distinguish different parts for the inner outline
Blue Protocol passes the shadow-region mask in R and the AO in B, whereas Guilty Gear conveys this information through textures instead; a small sketch of consuming this layout follows.
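A minimal HLSL-style sketch of reading this vertex-color layout (the `Interpolants` struct and member names are assumptions for illustration, not Blue Protocol's actual shader):
```c++
// Hypothetical unpack of the Blue Protocol-style per-vertex toon controls described above.
float ShadowRegionStrength = Interpolants.Color.r;      // 0~1 shadow-region control
float OutlineWidth         = Interpolants.Color.g;      // outline width
float ToonAO               = Interpolants.Color.b;      // baked toon AO
float DepthOffset          = Interpolants.TexCoord1.x;  // second set, R: depth offset
float InnerLinePartID      = Interpolants.TexCoord1.y;  // second set, G: inner-outline part ID
```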
## Guilty Gear
Offsets applied to the shadow threshold (see the shading section earlier: vertex AO + hand-painted correction).
R: shadow offset
G: factor for how much the outline expands with distance to the camera
B: outline Z-axis offset
# Guilty Gear
![](https://pic2.zhimg.com/80/v2-56012886fafbaf36932f03b0ad65a165_720w.jpg)
G is the shadow-control (AO) channel; R is the specular-intensity parameter, set higher on metal and glossy parts; the B channel controls the highlight: the maximum value gives full highlight strength, and the smaller the value, the fainter the highlight. ![](https://pic4.zhimg.com/80/v2-748ebbdd4da3efe74054c8215be8b023_720w.jpg)
![](https://pic2.zhimg.com/80/v2-74e1a9fba264af2b18e66616d9f86831_720w.jpg)
The article https://zhuanlan.zhihu.com/p/360229590 covers the approaches used by Honkai Impact 3rd and Genshin Impact.
Honkai Impact 3rd's LightMap computation:
```c++
half4 baseColor = SAMPLE_TEXTURE2D(_BaseMap, sampler_BaseMap, input.uv.xy);
half4 LightMapColor = SAMPLE_TEXTURE2D(_LightMap, sampler_LightMap, input.uv.xy);
half3 ShadowColor = baseColor.rgb * _ShadowMultColor.rgb;
half3 DarkShadowColor = baseColor.rgb * _DarkShadowMultColor.rgb;
//If SFactor == 0, ShallowShadowColor is the first-level shadow color; otherwise it is BaseColor.
float SWeight = (LightMapColor.g * input.color.r + input.lambert) * 0.5 + 1.125;
float SFactor = floor(SWeight - _ShadowArea);
half3 ShallowShadowColor = SFactor * baseColor.rgb + (1 - SFactor) * ShadowColor.rgb;
```
Second-level (dark) shadow computation:
```c++
//If SFactor == 0, DarkShadowColor is the second-level shadow color; otherwise it is the first-level shadow color.
SFactor = floor(SWeight - _DarkShadowArea);
DarkShadowColor = SFactor * (_FixDarkShadow * ShadowColor + (1 - _FixDarkShadow) * ShallowShadowColor) + (1 - SFactor) * DarkShadowColor;
// Smooth the shadow edges
half rampS = smoothstep(0, _ShadowSmooth, input.lambert - _ShadowArea);
half rampDS = smoothstep(0, _DarkShadowSmooth, input.lambert - _DarkShadowArea);
ShallowShadowColor.rgb = lerp(ShadowColor, baseColor.rgb, rampS);
DarkShadowColor.rgb = lerp(DarkShadowColor.rgb, ShadowColor, rampDS);
//If SFactor == 0, FinalColor is the second-level shadow; otherwise it is the first-level shadow.
SFactor = floor(LightMapColor.g * input.color.r + 0.9f);
half4 FinalColor;
FinalColor.rgb = SFactor * ShallowShadowColor + (1 - SFactor) * DarkShadowColor;
```
**Guilty Gear**
Offsets applied to the shadow threshold (see the shading section earlier: vertex AO + hand-painted correction).
G: factor for how much the outline expands with distance to the camera
B: outline Z-axis offset
A: outline thickness factor. 0.5 is the standard, 1 is maximum thickness, 0 means no outline.
# Blue Protocol
[[蓝色协议的方案]]
# miHoYo
View File
@@ -0,0 +1,11 @@
---
title: Untitled
date: 2025-01-15 16:33:08
excerpt:
tags:
rating: ⭐
---
Using UObject on the render thread causes crashes, so the workaround is to pass the needed parameters into the MaterialRenderProxy. That still leaves the problem that UToonDataAsset does not trigger the Material to refresh its MaterialRenderProxy. A minimal hand-off sketch follows the list below.
# Approach
1. UMaterialInstance::PreSave()
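A minimal sketch of the render-proxy hand-off described above, assuming a hypothetical FToonDataRenderProxy and parameter struct (only ENQUEUE_RENDER_COMMAND is the engine's API; everything else is a placeholder):
```c++
// Hypothetical sketch: copy the UToonDataAsset parameters into a plain-data struct on the game
// thread and hand it to the render thread, so no UObject is ever touched during rendering.
struct FToonRenderParameters
{
	FLinearColor ShadowColor = FLinearColor::White;
	float ShadowThreshold = 0.5f;
};

class FToonDataRenderProxy
{
public:
	FToonRenderParameters Parameters; // Only read on the render thread.
};

void PushToonParameters_GameThread(FToonDataRenderProxy* Proxy, const FToonRenderParameters& Params)
{
	// The lambda captures a value copy; the render thread never dereferences the asset.
	ENQUEUE_RENDER_COMMAND(UpdateToonDataProxy)(
		[Proxy, Params](FRHICommandListImmediate& RHICmdList)
		{
			Proxy->Parameters = Params;
		});
}
```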
View File
@@ -0,0 +1,114 @@
---
title: ToonShaderModel
date: 2023-12-18 10:00:34
excerpt:
tags:
rating: ⭐
---
# ToonStandardCel groundwork
## Diffuse
```c++
//Lighting.Diffuse *= AreaLight.FalloffColor * (Falloff * HalfLambert);
//TODO: add a shadow transition effect.
//ToonShadow types to add: 1. PBR (only the transition color is adjustable): Lighting.Diffuse = DiffuseColor * (1 / PI) * AreaLight.FalloffColor * (Falloff * HalfLambert); 2. ShadowColorTexture (compatible with a RampTexture, i.e. the Genshin-style effect; also needs a ShadowColorIntensity, which later has to be made exposure-aware): Lighting.Diffuse = DiffuseColor * (1 / PI);
//ToonLighting types to add: 1. PBR multi-light mode; 2. main light only (for cel).
//---------------------------------------------------------
//Specular
//ToonSpecular types to add: 1. PBR 2. anisotropic (DefaultLit) 3. anisotropic hair 4. custom highlight size and falloff https://zhuanlan.zhihu.com/p/361918341 5. custom highlight shape texture https://zhuanlan.zhihu.com/p/640258070 https://github.com/AnCG7/URPShaderCodeSample/blob/891034b3fa6e838e2b231390755479f0f649f181/Assets/Shaders/NPR/Cartoon/Stylized%20Highlight%20(Transform).shader#L2
```
**ToonShadow**
Built on top of the shadow-offset and shadow-feathering features:
1. PBR: compatible with UE's default shadowing; only the shadow transition color can be adjusted. Formula: `Lighting.Diffuse = DiffuseColor * (1 / PI) * AreaLight.FalloffColor * (Falloff * HalfLambert);`
2. ShadowColorTexture: defines the ShadowColor (a texture specifies the ShadowColor).
**ToonLighting**
1. PBR: UE's default lighting, multi-light mode.
2. Main light only (for cel shading).
## Specular
**ToonSpecular**
1. PBR: UE's default specular.
2. Anisotropic (DefaultLit): UE's default anisotropic specular.
3. Anisotropic hair: the **Kajiya-Kay** specular model (see the sketch below).
4. Custom highlight size and falloff: https://zhuanlan.zhihu.com/p/361918341
5. Custom highlight shape texture: https://zhuanlan.zhihu.com/p/640258070 https://github.com/AnCG7/URPShaderCodeSample/blob/891034b3fa6e838e2b231390755479f0f649f181/Assets/Shaders/NPR/Cartoon/Stylized%20Highlight%20(Transform).shader#L2
```
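A minimal HLSL-style sketch of the Kajiya-Kay term referenced in item 3; the shift and exponent parameters are placeholders, not values from this project:
```c++
// Kajiya-Kay style anisotropic hair highlight (sketch; parameter names are placeholders).
float3 ShiftTangent(float3 T, float3 N, float Shift)
{
	// Shift the tangent along the normal to move/break up the highlight band.
	return normalize(T + Shift * N);
}

float KajiyaKaySpecular(float3 T, float3 V, float3 L, float Exponent)
{
	float3 H = normalize(L + V);
	float TdotH = dot(T, H);
	float SinTH = sqrt(saturate(1.0f - TdotH * TdotH));
	float DirAtten = smoothstep(-1.0f, 0.0f, TdotH); // fade the lobe on the back side
	return DirAtten * pow(SinTH, Exponent);
}
```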
## Reproducing the Guilty Gear look
- Original talk video: https://www.bilibili.com/video/BV1Ps411C7mw/?share_source=copy_web&vd_source=fe8142e8e12816535feaeabd6f6cdc8e
- Original talk slides (PPT): https://www.ggxrd.com/Motomura_Junya_GuiltyGearXrd.pdf
- [【翻译】西川善司「实验做出的游戏图形」「GUILTY GEAR Xrd -SIGN-」中实现的「纯卡通动画的实时3D图形」的秘密前篇1](https://www.cnblogs.com/TracePlus/p/4205798.html "发布于 2015-01-06 12:56")
- [【翻译】西川善司「实验做出的游戏图形」「GUILTY GEAR Xrd -SIGN-」中实现的「纯卡通动画的实时3D图形」的秘密前篇2](https://www.cnblogs.com/TracePlus/p/4205834.html "发布于 2015-01-06 13:23")
- [【翻译】西川善司的「实验做出的游戏图形」「GUILTY GEAR Xrd -SIGN-」中实现的「纯卡通动画的实时3D图形」的秘密后篇](https://www.cnblogs.com/TracePlus/p/4205978.html "发布于 2015-01-06 14:23")
- Zhihu articles
1. [x] https://zhuanlan.zhihu.com/p/631214546
2. [ ] https://zhuanlan.zhihu.com/p/436850004
3. https://zhuanlan.zhihu.com/p/513598386
4. https://zhuanlan.zhihu.com/p/493802718
5. [x] https://zhuanlan.zhihu.com/p/513228315
1. Other people's demo videos (character victory animations).
	1. [ ] [Guraaaa's tech-art reel](https://www.bilibili.com/video/BV1xx4y1T7Er/?share_source=copy_web&vd_source=fe8142e8e12816535feaeabd6f6cdc8e&t=11)
### Textures
- base: base color
	- [x] rgb: lit-side color
	- [x] **a**: the basecolor alpha channel distinguishes the face, body, and hair regions. BGT: the inside of the hood and the face are black, hair is grey; MAY: hair is grey, body and head are black; RAM, JKO: body and hair are black; SOL: front of the face and hair are black, side of the face is white.
- ShadowMap (SSS)
	- [x] rgb: shadow-side color
	- **a**:
- ILM
	- [x] **ilmR**: specular intensity / ***specular type (to be verified)***; the intensity value is chosen according to the specular type (a small branching sketch follows this list).
		- `0`: no specular, no rim light. Used for the characters' eyes and eyebrows (one exception: JKO's hair).
		- `(0~50]`: analytic specular (?), with rim light. The main shaded regions: the characters' main clothing and skin.
		- `(50~100]`: painted specular (ILM.b), with rim light. The characters' **hair** and clothing.
		- `(100~150]`: analytic specular, no rim light. Metal edges or scratches on the body, e.g. SOL's weapon.
		- `(150~200]`: painted specular, no rim light. Most metal parts on the body, e.g. the rings on SOL's belt and shoulders.
		- `(200~255]`:
	- [x] ilmG: shadow and specular offset
	- [x] ilmB: specular mask / specular-type mask.
		- Current guess: the analytic specular uses the value to adjust highlight size (similar to Roughness); the painted specular uses this mask directly.
		- `0`: no specular
		- `128`: analytic-specular region.
		- `(128,255]`: remapped (128,255) => (0,1) and used as the specular mask
	- [x] ilmA: base line art; a UV-based stylized inner outline (see the earlier article on inner outlines for the principle): the inner-outline mask.
	- [x] detail: detail texture, texcoord[1] (the second UV set).
	- [x] decal: decals, done in a separate material.
- VertexColor
	- [x] VertexColor(R): AO
	- [x] VertexColor(G): presumably a PixelDepthOffset for the outline, to avoid problems when characters overlap each other.
		- ~~Xrd translation article: the factor for how much the outline expands over a given camera-distance range.~~
	- [x] VertexColor(B): outline thickness, i.e. the extrusion amount of the backface outline.
		- Xrd translation article: the outline's Z Offset value
	- ~~VertexColor(A)~~: does not exist
		- Xrd translation article: the outline thickness factor. 0.5 is standard, 1 is the thickest, 0 means no outline.
	- [x] OLM: skin mask (for some characters, RAM and JKO, it also covers the hair). The color values are not necessarily identical (RAM's differ).
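A small HLSL-style sketch of branching on the ILM red-channel ranges guessed above (the thresholds are this note's guesses, not confirmed values from the game):
```c++
// Hypothetical interpretation of the ILM.r ranges listed above (0-255 scale).
float IlmR = Ilm.r * 255.0f;
bool bNoSpecNoRim  = (IlmR <= 0.0f);
bool bAnalyticSpec = (IlmR > 0.0f  && IlmR <= 50.0f)  || (IlmR > 100.0f && IlmR <= 150.0f);
bool bPaintedSpec  = (IlmR > 50.0f && IlmR <= 100.0f) || (IlmR > 150.0f && IlmR <= 200.0f);
bool bHasRimLight  = (IlmR > 0.0f  && IlmR <= 100.0f); // rim light only in the low ranges
```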
### Rim Light
1. The main light direction is not fixed, but the rim light direction is.
2. Rim light is computed with NoL; its width in different regions is controlled by some parameter (presumably a hard cutoff). A sketch follows the reference images below.
3.
![[边缘光.png]]
![[BGT边缘光.png|500]]
![[BGT边缘光2.png|500]]
![[BGT边缘光3.png|500]]
![[BGT边缘光4——大佛.png|500]]
![[BGT边缘光5.png|500]]
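A minimal sketch of a fixed-direction rim term in that spirit (an assumption for illustration, not the game's shader):
```c++
// Hypothetical fixed-direction rim light: the rim direction is constant, the width is a parameter,
// and NoL is used so the rim only appears on the lit side.
float3 ToonRimLight(float3 N, float3 L, float3 FixedRimDir, float RimWidth, float3 RimColor)
{
	float RimNdotD = saturate(dot(N, normalize(FixedRimDir)));
	float NdotL    = saturate(dot(N, L));
	float Rim = step(1.0f - RimWidth, RimNdotD); // hard cutoff controlled by RimWidth
	return Rim * NdotL * RimColor;
}
```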
### TODO
- [x] Implement overlaying AmbientOcclusion onto the shadow result.
- [x] Add a ToonDataAsset option controlling whether shadows are received.
- [x] Render shadows with a proxy shadow mesh.
## UnityChan
## Blue Protocol
# Thick-paint (painterly) ShaderModel, by modifying the pre-integrated skin ShaderModel
View File
@@ -0,0 +1,18 @@
---
title: Toon多光源参考
date: 2025-03-27 19:01:13
excerpt:
tags:
rating: ⭐
---
# Overview
- [【UE5】卡通渲染着色篇3多光源](https://zhuanlan.zhihu.com/p/717533663)
	- DirectionalLight
		- The main light is chosen by comparing every light's Forward Shading Priority and brightness.
		- Then add a permutation to FDeferredLightPS that checks whether the current light is the main light, and set it up accordingly.
		- FlattenNormal can also be used to suppress high-frequency detail (see the sketch below).
	- PointLight
		- FlattenNormal (ShadingModels.ush) can be used to reduce high-frequency detail in point-light shading.
- YivanLee's multi-light approach
	- In ShadingModels.ush, render only the light/shadow term without color, so the light/shadow results of multiple lights can be merged.
	- After the Lighting Pass, add a LightingPostProcess pass that samples a Ramp from the merged light/shadow term to produce the final lit result.
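A minimal sketch of the normal-flattening idea mentioned above; the blend target and parameter are assumptions, not the exact FlattenNormal used in ShadingModels.ush:
```c++
// Hypothetical normal flattening: blend the shading normal toward a smoother reference
// direction (e.g. the view vector or a character "up" axis) to kill high-frequency lighting detail.
float3 FlattenToonNormal(float3 WorldNormal, float3 ReferenceDir, float Flatness)
{
	return normalize(lerp(WorldNormal, normalize(ReferenceDir), saturate(Flatness)));
}
```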
View File
@@ -0,0 +1,140 @@
---
title: 未命名
date: 2024-05-10 16:30:16
excerpt:
tags:
rating: ⭐
---
PostProcess
```c++
void AddPostProcessingPasses(
FRDGBuilder& GraphBuilder,
const FViewInfo& View,
int32 ViewIndex,
FSceneUniformBuffer& SceneUniformBuffer,
bool bAnyLumenActive,
bool bLumenGIEnabled,
EReflectionsMethod ReflectionsMethod,
const FPostProcessingInputs& Inputs,
const Nanite::FRasterResults* NaniteRasterResults,
FInstanceCullingManager& InstanceCullingManager,
FVirtualShadowMapArray* VirtualShadowMapArray,
FLumenSceneFrameTemporaries& LumenFrameTemporaries,
const FSceneWithoutWaterTextures& SceneWithoutWaterTextures,
FScreenPassTexture TSRMoireInput)
{
RDG_CSV_STAT_EXCLUSIVE_SCOPE(GraphBuilder, RenderPostProcessing);
QUICK_SCOPE_CYCLE_COUNTER(STAT_PostProcessing_Process);
check(IsInRenderingThread());
check(View.VerifyMembersChecks());
Inputs.Validate();
FScene* Scene = View.Family->Scene->GetRenderScene();
const FIntRect PrimaryViewRect = View.ViewRect;
const FSceneTextureParameters SceneTextureParameters = GetSceneTextureParameters(GraphBuilder, Inputs.SceneTextures);
const FScreenPassRenderTarget ViewFamilyOutput = FScreenPassRenderTarget::CreateViewFamilyOutput(Inputs.ViewFamilyTexture, View);
const FScreenPassTexture SceneDepth(SceneTextureParameters.SceneDepthTexture, PrimaryViewRect);
const FScreenPassTexture CustomDepth(Inputs.CustomDepthTexture, PrimaryViewRect);
const FScreenPassTexture Velocity(SceneTextureParameters.GBufferVelocityTexture, PrimaryViewRect);
const FScreenPassTexture BlackDummy(GSystemTextures.GetBlackDummy(GraphBuilder));
const FTranslucencyPassResources& PostDOFTranslucencyResources = Inputs.TranslucencyViewResourcesMap.Get(ETranslucencyPass::TPT_TranslucencyAfterDOF);
const FTranslucencyPassResources& PostMotionBlurTranslucencyResources = Inputs.TranslucencyViewResourcesMap.Get(ETranslucencyPass::TPT_TranslucencyAfterMotionBlur);
```
# BasePass
```c++
#if WITH_EDITOR
if (ViewFamily.EngineShowFlags.Wireframe)
{
checkf(ExclusiveDepthStencil.IsDepthWrite(), TEXT("Wireframe base pass requires depth-write, but it is set to read-only."));
BasePassTextureCount = 1;
BasePassTextures[0] = SceneTextures.EditorPrimitiveColor;
BasePassTexturesView = MakeArrayView(BasePassTextures.GetData(), BasePassTextureCount);
BasePassDepthTexture = SceneTextures.EditorPrimitiveDepth;
auto* PassParameters = GraphBuilder.AllocParameters<FRenderTargetParameters>();
PassParameters->RenderTargets = GetRenderTargetBindings(ERenderTargetLoadAction::EClear, BasePassTexturesView);
PassParameters->RenderTargets.DepthStencil = FDepthStencilBinding(BasePassDepthTexture, ERenderTargetLoadAction::EClear, ERenderTargetLoadAction::EClear, ExclusiveDepthStencil);
GraphBuilder.AddPass(RDG_EVENT_NAME("WireframeClear"), PassParameters, ERDGPassFlags::Raster, [](FRHICommandList&) {});
}
#endif
// Render targets bindings should remain constant at this point.
FRenderTargetBindingSlots BasePassRenderTargets = GetRenderTargetBindings(ERenderTargetLoadAction::ELoad, BasePassTexturesView);
BasePassRenderTargets.DepthStencil = FDepthStencilBinding(BasePassDepthTexture, ERenderTargetLoadAction::ELoad, ERenderTargetLoadAction::ELoad, ExclusiveDepthStencil);
FForwardBasePassTextures ForwardBasePassTextures{};
if (bForwardShadingEnabled)
{
ForwardBasePassTextures.SceneDepthIfResolved = SceneTextures.Depth.IsSeparate() ? SceneTextures.Depth.Resolve : nullptr;
ForwardBasePassTextures.ScreenSpaceAO = SceneTextures.ScreenSpaceAO;
ForwardBasePassTextures.ScreenSpaceShadowMask = ForwardShadowMaskTexture;
}
else if (!ExclusiveDepthStencil.IsDepthWrite())
{
// If depth write is not enabled, we can bound the depth texture as read only
ForwardBasePassTextures.SceneDepthIfResolved = SceneTextures.Depth.Resolve;
}
ForwardBasePassTextures.bIs24BitUnormDepthStencil = ForwardBasePassTextures.SceneDepthIfResolved ? GPixelFormats[ForwardBasePassTextures.SceneDepthIfResolved->Desc.Format].bIs24BitUnormDepthStencil : 1;
GraphBuilder.SetCommandListStat(GET_STATID(STAT_CLM_BasePass));
RenderBasePassInternal(GraphBuilder, SceneTextures, BasePassRenderTargets, BasePassDepthStencilAccess, ForwardBasePassTextures, DBufferTextures, bDoParallelBasePass, bRenderLightmapDensity, InstanceCullingManager, bNaniteEnabled, NaniteRasterResults);
GraphBuilder.SetCommandListStat(GET_STATID(STAT_CLM_AfterBasePass));
```
## Reading the GBufferTexture
Located in SceneRendering.h:
```c++
FORCEINLINE FSceneTextures& GetActiveSceneTextures() { return ViewFamily.GetSceneTextures(); }
```
Consider passing it as a function parameter:
- const FSceneTextures* SceneTextures
- const FSceneTextures& SceneTextures
- FSceneTextures& SceneTextures
# FScreenTransform computations in UE5.4
Reference: VisualizeMotionVectors.cpp
- FScreenTransform::SvPositionToViewportUV(Output.ViewRect): **SvPosition => ViewportUV**
	- `FScreenTransform SvPositionToViewportUV = FScreenTransform::SvPositionToViewportUV(Output.ViewRect);`
- FScreenTransform::ViewportUVToScreenPos(): **ViewportUV => ScreenPos**
- FScreenTransform::ChangeTextureBasisFromTo(): basis conversion. For example, the call below converts **ViewportUV => TextureUV**
	- ```FScreenTransform::ChangeTextureBasisFromTo(Inputs.SceneColor, FScreenTransform::ETextureBasis::ViewportUV, FScreenTransform::ETextureBasis::TextureUV);```
- **SvPosition => ScreenPos**: SvPositionToViewportUV * FScreenTransform::ViewportUVToScreenPos
These transforms handle the case where two render targets have different sizes after resampling.
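A hedged sketch following the composition pattern used in VisualizeMotionVectors.cpp (the member name on the pass-parameter struct is an assumption):
```c++
// Compose a single SvPosition => TextureUV transform so the shader can sample an input RT
// whose extent differs from the output viewport.
FScreenTransform SvPositionToInputTextureUV =
	FScreenTransform::SvPositionToViewportUV(Output.ViewRect) *
	FScreenTransform::ChangeTextureBasisFromTo(
		Inputs.SceneColor,
		FScreenTransform::ETextureBasis::ViewportUV,
		FScreenTransform::ETextureBasis::TextureUV);
PassParameters->SvPositionToInputTextureUV = SvPositionToInputTextureUV; // assumed parameter name
```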
# OutlinePass
## Relationship between distance and the post-process depth outline
Very close distances need special handling; see the sketch after the table.
- The maximum depth-sample value has a minimum of 100
| Distance | Min depth sample | Max depth sample |
| :-----: | :----: | :----: |
| 100cm | 1 | 10 |
| 200cm | 2 | 20 |
| 500cm | 5 | 50 |
| 1000cm | | 100 |
| 2000cm | | |
| 5000cm | | |
| 10000cm | | |
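Reading the ratios off the table (min ≈ 1% and max ≈ 10% of the view distance) together with the stated floor of 100, a distance-adaptive threshold might look like the sketch below; how that floor should behave at very close range is exactly the open question above:
```c++
// Hypothetical distance-adaptive depth-outline thresholds (ratios read off the table above).
float MinDepthDelta = SceneDepth * 0.01f;              // ~1% of the view distance
float MaxDepthDelta = max(SceneDepth * 0.1f, 100.0f);  // ~10% of the view distance, floored at 100
float OutlineMask   = smoothstep(MinDepthDelta, MaxDepthDelta, DepthDifference);
```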
# Issue Log
## Outlines at FarDepthValue getting clipped
- SkyAtmosphere.usf renders the sky dome onto pixels whose depth equals FarDepthValue, which covers up some outlines.
- HeightFogPixelShader.usf adjusts how the result is drawn by checking **DeviceZ != 0.0**; the draw uses PSO.BlendState = TStaticBlendState<CW_RGB, BO_Add, BF_One, BF_SourceAlpha>::GetRHI();
View File
@@ -0,0 +1,87 @@
---
title: 未命名
date: 2025-03-27 17:37:02
excerpt:
tags:
rating: ⭐
---
# Toon Lumen
## Normal-flattening approach
### ViewVector Fix WorldNormal
Reference:
- [【UE5】环境光与GI 2抹平法线](https://zhuanlan.zhihu.com/p/25839790454)
The normal is flattened in `LumenScreenProbeGather.usf`. The pass lives under:
- DiffuseIndirectAndAO
- LumenScreenProbeGather
- Integrate
- SimpleDiffuse/SupportImportanceSampleBRDF?/SupportAll?
Add the following code at this location:
```c++
if (IsValid(Material))
{
const float2 ScreenUV = (Coord.SvPosition + 0.5f) * View.BufferSizeAndInvSize.zw;
const float3 WorldPosition = GetWorldPositionFromScreenUV(ScreenUV, Material.SceneDepth);
const float3 WorldNormal = Material.WorldNormal;
//BlueRose Modify
if (Material.ShadingID == SHADINGMODELID_TOONSTANDARD)
{
//Flatten the normal using Yu-ki016's method
float3 V = normalize(LWCHackToFloat(PrimaryView.WorldCameraOrigin) - WorldPosition);
const uint ToonDataAssetID = GetToonDataAssetIDFromGBuffer(Material.GBufferData);
float DiffuseIndirectLightingFlatten = GetDiffuseIndirectLightingFlatten(ToonDataAssetID);
Material.WorldNormal = normalize(lerp(WorldNormal, V, DiffuseIndirectLightingFlatten));
}
//BlueRose Modify End
...
}
}
```
This method has a drawback: **when the camera orbits the character, the GI on the character changes quite noticeably**.
### Low-frequency WorldNormal & Spherical Harmonics
Reference:
- [[UFSH2024]用虚幻引擎5为《幻塔》定制高品质动画流程风格化渲染管线 | 晨风 Neverwind 完美世界游戏】【精准空降到 12:55】](https://www.bilibili.com/video/BV1rW2LYvEox/?share_source=copy_web&vd_source=fe8142e8e12816535feaeabd6f6cdc8e&t=775)
- [[UFSH2024]用虚幻引擎5为《幻塔》定制高品质动画流程风格化渲染管线 | 晨风 Neverwind 完美世界游戏】 【精准空降到 36:52】](https://www.bilibili.com/video/BV1rW2LYvEox/?share_source=copy_web&vd_source=fe8142e8e12816535feaeabd6f6cdc8e&t=2212)
- [ ] To implement: normal smoothing => saturation smoothing. (No details were given; the algorithm is unclear.)
- [[UFSH2024]用虚幻引擎5为《幻塔》定制高品质动画流程风格化渲染管线 | 晨风 Neverwind 完美世界游戏】【精准空降到 35:09】](https://www.bilibili.com/video/BV1rW2LYvEox/?share_source=copy_web&vd_source=fe8142e8e12816535feaeabd6f6cdc8e&t=2109)
- [ ] At scalability High and below, an octahedral map is stored: add an extra pass beforehand that smooths it by averaging over the hemisphere around the sampled normal direction.
- [ ] At Epic and above, a third-order SH is stored: keep the 1st-order (constant) band unchanged, smooth the 2nd/3rd-order coefficients by a factor, and add energy compensation so the result roughly matches PBR (see the sketch after the screenshots below).
![[Lumen_SHData.png|800]]
![[Lumen_能量补偿1.png|800]]
![[Lumen_能量补偿2.png|800]]
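A hedged sketch of the band-wise smoothing plus energy compensation described above, written against a plain order-3 SH coefficient layout (the layout, parameter names, and factors are assumptions, not the talk's implementation):
```c++
// Hypothetical band-wise SH smoothing: keep the constant band, attenuate the directional bands,
// then scale everything by an energy-compensation factor.
void SmoothThirdOrderSH(inout float3 SH[9], float DirectionalSmooth, float EnergyCompensation)
{
	// SH[0] is the constant (band 0) term; SH[1..3] are band 1; SH[4..8] are band 2.
	for (int i = 1; i < 9; ++i)
	{
		SH[i] *= 1.0f - saturate(DirectionalSmooth);
	}
	for (int j = 0; j < 9; ++j)
	{
		SH[j] *= EnergyCompensation;
	}
}
```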
How UE5 samples the SH irradiance map (ReflectionEnvironmentShared.ush):
```c++
/**
* Computes sky diffuse lighting from the SH irradiance map.
* This has the SH basis evaluation and diffuse convolution weights combined for minimal ALU's - see "Stupid Spherical Harmonics (SH) Tricks"
*/
float3 GetSkySHDiffuse(float3 Normal)
{
float4 NormalVector = float4(Normal, 1.0f);
float3 Intermediate0, Intermediate1, Intermediate2;
Intermediate0.x = dot(SkyIrradianceEnvironmentMap[0], NormalVector);
Intermediate0.y = dot(SkyIrradianceEnvironmentMap[1], NormalVector);
Intermediate0.z = dot(SkyIrradianceEnvironmentMap[2], NormalVector);
float4 vB = NormalVector.xyzz * NormalVector.yzzx;
Intermediate1.x = dot(SkyIrradianceEnvironmentMap[3], vB);
Intermediate1.y = dot(SkyIrradianceEnvironmentMap[4], vB);
Intermediate1.z = dot(SkyIrradianceEnvironmentMap[5], vB);
float vC = NormalVector.x * NormalVector.x - NormalVector.y * NormalVector.y;
Intermediate2 = SkyIrradianceEnvironmentMap[6].xyz * vC;
// max to not get negative colors
return max(0, Intermediate0 + Intermediate1 + Intermediate2);
}
```
## Cel adaptation
A Cel multi-light scheme is needed.
The rough idea: copy the Lumen result, then in the [[Toon多光源参考|Toon multi-light pass]] extract its luminance and sample a Ramp with that luminance to compute the final light/shadow result, as sketched below.
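A minimal HLSL-style sketch of the luminance-to-ramp remap (texture and sampler names are placeholders):
```c++
// Hypothetical post-Lumen toon remap: reduce the copied Lumen lighting to a luminance value,
// then look up a ramp so the indirect lighting picks up cel-style banding.
float3 ApplyToonRamp(float3 LumenLighting, Texture2D RampTexture, SamplerState RampSampler)
{
	float Lum = dot(LumenLighting, float3(0.299f, 0.587f, 0.114f)); // approximate luminance
	float3 Ramp = RampTexture.SampleLevel(RampSampler, float2(saturate(Lum), 0.5f), 0).rgb;
	return LumenLighting * Ramp;
}
```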
View File
@@ -0,0 +1,834 @@
---
title: ToonReflection
date: 2025-03-20 17:04:16
excerpt:
tags:
rating: ⭐
---
# Reflection-related passes
- ReflectionIndirect(None)
- [[#ReflectionEnvironmentAndSky]]
- DiffuseIndirectAndAO(Lumen/SSR)
- LumenReflections
- [[#DiffuseIndirectComposite]]
PS: depending on whether ViewPipelineState.DiffuseIndirectMethod is Lumen, the DiffuseIndirectAndAO pass runs either before the Light pass (GI method is Lumen) or after it (GI method is not Lumen).
## ReflectionEnvironmentAndSky
Located in IndirectLightRendering.cpp: RenderDeferredReflectionsAndSkyLighting() => `AddSkyReflectionPass()`.
With `DiffuseIndirectMethod = EDiffuseIndirectMethod::Lumen` (i.e. Lumen GI enabled), the subsequent logic is skipped if the reflection method is Lumen or SSR.
With Lumen GI disabled, the reflection methods are:
- Lumen: `RenderLumenReflections()`
- RT Reflection: `RenderRayTracingReflections()`
- SSR: `ScreenSpaceRayTracing::RenderScreenSpaceReflections()`
`RenderDeferredReflectionsAndSkyLighting()` mainly performs:
1. SkyLightDiffuse
	1. RenderDistanceFieldLighting()
		1. RenderDistanceFieldAOScreenGrid(): renders distance-field AO.
		2. RenderCapsuleShadowsForMovableSkylight(): renders capsule shadows.
2. ReflectionIndirect
	- RenderLumenReflections()
	- RenderRayTracingReflections()
	- RenderScreenSpaceReflections()
3. Denoise
	- Denoiser: IScreenSpaceDenoiser::DenoiseReflections()
	- TemporalFilter: AddTemporalAAPass()
4. RenderDeferredPlanarReflections(): composites the planar reflection result.
5. AddSkyReflectionPass()
Rough execution flow of each reflection method:
- LumenReflection
	1. Writes the result to FRDGTextureRef ReflectionsColor.
- SSR and RT
	1. Write the result into FRDGTextureRef Color of IScreenSpaceDenoiser::FReflectionsInputs DenoiserInputs.
	2. Run the corresponding denoiser.
	3. Assign the result to FRDGTextureRef ReflectionsColor.
- After the chosen reflection method has run, `AddSkyReflectionPass()` executes last.
FReflectionEnvironmentSkyLightingPS corresponds to `ReflectionEnvironmentSkyLighting()` in /Engine/Private/ReflectionEnvironmentPixelShader.usf.
### ReflectionEnvironmentSkyLighting
```c++
void ReflectionEnvironmentSkyLighting(
in float4 SvPosition : SV_Position,
out float4 OutColor : SV_Target0
#if STRATA_OPAQUE_ROUGH_REFRACTION_ENABLED
, out float3 OutOpaqueRoughRefractionSceneColor : SV_Target1
, out float3 OutSubSurfaceSceneColor : SV_Target2
#endif
)
{
ResolvedView = ResolveView();
//Compute BufferUV and ScreenPosition
uint2 PixelPos = SvPosition.xy;
float2 BufferUV = SvPositionToBufferUV(SvPosition);
float2 ScreenPosition = SvPositionToScreenPosition(SvPosition).xy;
OutColor = 0.0f;
#if STRATA_OPAQUE_ROUGH_REFRACTION_ENABLED
OutOpaqueRoughRefractionSceneColor = 0.0f;
OutSubSurfaceSceneColor = 0.0f;
#endif
#if STRATA_ENABLED
...
...
#else // STRATA_ENABLED
// Sample scene textures.
FGBufferData GBuffer = GetGBufferDataFromSceneTextures(BufferUV);
uint ShadingModelID = GBuffer.ShadingModelID;
const bool bUnlitMaterial = ShadingModelID == SHADINGMODELID_UNLIT;
float3 DiffuseColor = GBuffer.DiffuseColor;
float3 SpecularColor = GBuffer.SpecularColor;
RemapClearCoatDiffuseAndSpecularColor(GBuffer, ScreenPosition, DiffuseColor, SpecularColor);//Remap the diffuse and specular colors for clear-coat materials
// Sample the ambient occlusion that is dynamically generated every frame.
float AmbientOcclusion = AmbientOcclusionTexture.SampleLevel(AmbientOcclusionSampler, BufferUV, 0).r;
float3 BentNormal = GBuffer.WorldNormal;
#if APPLY_SKY_SHADOWING
{
BentNormal = UpsampleDFAO(BufferUV, GBuffer.Depth, GBuffer.WorldNormal);
}
#endif
#if ENABLE_DYNAMIC_SKY_LIGHT
BRANCH
if (!bUnlitMaterial) // Only light pixels marked as lit; unlit materials skip dynamic skylight GI.
{
float3 TranslatedWorldPosition = mul(float4(GetScreenPositionForProjectionType(ScreenPosition, GBuffer.Depth), GBuffer.Depth, 1), View.ScreenToTranslatedWorld).xyz;
const float CloudVolumetricAOShadow = GetCloudVolumetricAOShadow(TranslatedWorldPosition);//Fetches ShadowFrontDepthKm and MaxOpticalDepthMeanExtinction from the volumetric-cloud VolumetricCloudShadowMapTexture and computes the cloud shadow. This function is not enabled in UE5.3.
float3 SkyLighting = CloudVolumetricAOShadow * SkyLightDiffuse(GBuffer, AmbientOcclusion, BufferUV, ScreenPosition, BentNormal, DiffuseColor);
FLightAccumulator LightAccumulator = (FLightAccumulator)0;
const bool bNeedsSeparateSubsurfaceLightAccumulation = UseSubsurfaceProfile(ShadingModelID);
LightAccumulator_Add(LightAccumulator, SkyLighting, SkyLighting, 1.0f, bNeedsSeparateSubsurfaceLightAccumulation);
OutColor = LightAccumulator_GetResult(LightAccumulator);
}
#endif // ENABLE_DYNAMIC_SKY_LIGHT
BRANCH
if (!bUnlitMaterial && ShadingModelID != SHADINGMODELID_HAIR)//
{
OutColor.xyz += ReflectionEnvironment(GBuffer, AmbientOcclusion, BufferUV, ScreenPosition, SvPosition, BentNormal, SpecularColor, ShadingModelID);
}
#endif // STRATA_ENABLED
}
```
### SkyLightDiffuse
1. Compute float3 SkyLightingNormal and FSkyLightVisibilityData SkyVisData.
2. Compute Normal, ViewVector, NoV.
3. Extra handling for specific ShadingModels:
	1. SHADINGMODELID_TWOSIDED_FOLIAGE: evaluate SkySHDiffuse with the negated Normal, multiply by SubsurfaceColor and SkyVisData.SkyDiffuseLookUpMul, and accumulate into the result.
	2. SHADINGMODELID_SUBSURFACE, SHADINGMODELID_PREINTEGRATED_SKIN: extract SubsurfaceColor from the GBuffer and accumulate it into the result.
	3. SHADINGMODELID_CLOTH: extract ClothFuzz (SubsurfaceColor) from the GBuffer, multiply by CustomData.a, and accumulate into the result.
	4. SHADINGMODELID_HAIR:
		1. DiffuseColor = EvaluateEnvHair(GBuffer, V, N, L);
		2. SkyVisData.SkyDiffuseLookUpNormal = L;
		3. DiffuseWeight = 1.0f;
4. Call GetSkySHDiffuse() to compute the skylight contribution; GetSkySHDiffuse() essentially samples the SH irradiance map to obtain the skylight GI result.
### ReflectionEnvironment
```c++
float3 ReflectionEnvironment(FGBufferData GBuffer, float AmbientOcclusion, float2 BufferUV, float2 ScreenPosition, float4 SvPosition, float3 BentNormal, float3 SpecularColor, uint ShadingModelID)
{
float4 Color = float4(0, 0, 0, 1);
float IndirectIrradiance = GBuffer.IndirectIrradiance;
#if ENABLE_SKY_LIGHT && ALLOW_STATIC_LIGHTING
BRANCH
// Add in diffuse contribution from dynamic skylights so reflection captures will have something to mix with
if (ReflectionStruct.SkyLightParameters.y > 0 && ReflectionStruct.SkyLightParameters.z > 0)
{
//If the skylight and static lighting are enabled, sample the sky SH here and accumulate it into the indirect irradiance.
IndirectIrradiance += GetDynamicSkyIndirectIrradiance(BentNormal, GBuffer.WorldNormal);
}
#endif
//Compute the reflection vector, WorldNormal, and ViewVector
float3 TranslatedWorldPosition = mul(float4(GetScreenPositionForProjectionType(ScreenPosition, GBuffer.Depth), GBuffer.Depth, 1), View.ScreenToTranslatedWorld).xyz;
float3 CameraToPixel = normalize(TranslatedWorldPosition - View.TranslatedWorldCameraOrigin);
float3 ReflectionVector = reflect(CameraToPixel, GBuffer.WorldNormal);
float3 V = -CameraToPixel;
float3 N = GBuffer.WorldNormal;
const float3 SavedTopLayerNormal = N;
#if SUPPORTS_ANISOTROPIC_MATERIALS
ModifyGGXAnisotropicNormalRoughness(GBuffer.WorldTangent, GBuffer.Anisotropy, GBuffer.Roughness, N, V);
#endif
float3 R = 2 * dot( V, N ) * N - V;
float NoV = saturate( dot( N, V ) );
// Point lobe in off-specular peak direction
R = GetOffSpecularPeakReflectionDir(N, R, GBuffer.Roughness);
// Sample the SSR, planar reflection, RT reflection, or Lumen reflection result.
float4 ReflectionInput = Texture2DSample(ReflectionTexture, ReflectionTextureSampler, BufferUV);
Color = CompositeReflections(ReflectionInput, BufferUV, GBuffer.Roughness, ShadingModelID);//Color = float4(ReflectionInput.rgb, 1 - ReflectionInput.a)
#if RAY_TRACED_REFLECTIONS
float4 SavedColor = Color; // When a clear coat material is encountered, we save the reflection buffer color for it to not be affected by operations.
#endif
if(GBuffer.ShadingModelID == SHADINGMODELID_CLEAR_COAT )
{
#if RAY_TRACED_REFLECTIONS
Color = float4(0, 0, 0, 1); // Clear coat reflection is expected to be computed on a black background
#endif
const float ClearCoat = GBuffer.CustomData.x;
Color = lerp( Color, float4(0,0,0,1), ClearCoat );
#if CLEAR_COAT_BOTTOM_NORMAL
const float2 oct1 = ((float2(GBuffer.CustomData.a, GBuffer.CustomData.z) * 4) - (512.0/255.0)) + UnitVectorToOctahedron(GBuffer.WorldNormal);
const float3 ClearCoatUnderNormal = OctahedronToUnitVector(oct1);
const float3 BottomEffectiveNormal = ClearCoatUnderNormal;
R = 2 * dot( V, ClearCoatUnderNormal ) * ClearCoatUnderNormal - V;
#endif
}
float AO = GBuffer.GBufferAO * AmbientOcclusion;//AmbientOcclusion comes from SSAO, RTAO, DFAO, Lumen, etc.
float RoughnessSq = GBuffer.Roughness * GBuffer.Roughness;
float SpecularOcclusion = GetSpecularOcclusion(NoV, RoughnessSq, AO);
Color.a *= SpecularOcclusion;
#if FEATURE_LEVEL >= FEATURE_LEVEL_SM5
float2 LocalPosition = SvPosition.xy - View.ViewRectMin.xy;
uint GridIndex = ComputeLightGridCellIndex(uint2(LocalPosition.x, LocalPosition.y), GBuffer.Depth);
uint NumCulledEntryIndex = (ForwardLightData.NumGridCells + GridIndex) * NUM_CULLED_LIGHTS_GRID_STRIDE;
uint NumCulledReflectionCaptures = min(ForwardLightData.NumCulledLightsGrid[NumCulledEntryIndex + 0], ForwardLightData.NumReflectionCaptures);
uint DataStartIndex = ForwardLightData.NumCulledLightsGrid[NumCulledEntryIndex + 1];
#else
uint DataStartIndex = 0;
uint NumCulledReflectionCaptures = 0;
#endif
const FBxDFEnergyTerms EnergyTerms = ComputeGGXSpecEnergyTerms(GBuffer.Roughness, NoV, GBuffer.SpecularColor);
//Lighting for the regular reflection, or for the bottom layer of a clear-coat material
//Top of regular reflection or bottom layer of clear coat.
Color.rgb += View.PreExposure * GatherRadiance(Color.a, TranslatedWorldPosition, R, GBuffer.Roughness, BentNormal, IndirectIrradiance, GBuffer.ShadingModelID, NumCulledReflectionCaptures, DataStartIndex);
BRANCH
if( GBuffer.ShadingModelID == SHADINGMODELID_CLEAR_COAT)
{
const float ClearCoat = GBuffer.CustomData.x;
const float ClearCoatRoughness = GBuffer.CustomData.y;
// Restore saved values needed for the top layer.
GBuffer.WorldNormal = SavedTopLayerNormal;
// Recompute some values unaffected by anistropy for the top layer
N = GBuffer.WorldNormal;
R = 2 * dot(V, N) * N - V;
NoV = saturate(dot(N, V));
R = GetOffSpecularPeakReflectionDir(N, R, ClearCoatRoughness);
// TODO EnvBRDF should have a mask param
#if USE_ENERGY_CONSERVATION
Color.rgb *= EnergyTerms.E * (1 - ClearCoat);
#else
// Hack: Ensures when clear coat is >0, grazing angle does not get too much energy,
// but preserve response at normal incidence
float2 AB = PreIntegratedGF.SampleLevel(PreIntegratedGFSampler, float2(NoV, GBuffer.Roughness), 0).rg;
Color.rgb *= SpecularColor * AB.x + AB.y * saturate(50 * SpecularColor.g) * (1 - ClearCoat);
#endif
// F_Schlick
const float CoatF0 = 0.04f;
#if USE_ENERGY_CONSERVATION
float F = ComputeGGXSpecEnergyTerms(ClearCoatRoughness, NoV, CoatF0).E.x;
#else
float F = EnvBRDF(CoatF0, ClearCoatRoughness, NoV).x;
#endif
F *= ClearCoat;
float LayerAttenuation = (1 - F);
Color.rgb *= LayerAttenuation;
Color.a = F;
#if !RAY_TRACED_REFLECTIONS
Color.rgb += ReflectionInput.rgb * F;
Color.a *= 1 - ReflectionInput.a;
#endif
Color.a *= SpecularOcclusion;
float3 TopLayerR = 2 * dot( V, N ) * N - V;
Color.rgb += View.PreExposure * GatherRadiance(Color.a, TranslatedWorldPosition, TopLayerR, ClearCoatRoughness, BentNormal, IndirectIrradiance, GBuffer.ShadingModelID, NumCulledReflectionCaptures, DataStartIndex);
#if RAY_TRACED_REFLECTIONS
Color.rgb = SavedColor.rgb + Color.rgb * SavedColor.a; // Compose default clear coat reflection over regular refelction (using Premultiplied alpha where SaveColor.a=transmittance)
#endif
}
else
{
#if USE_ENERGY_CONSERVATION
Color.rgb *= EnergyTerms.E;
#else
Color.rgb *= EnvBRDF( SpecularColor, GBuffer.Roughness, NoV );
#endif
}
// Transform NaNs to black, transform negative colors to black.
return -min(-Color.rgb, 0.0);
}
```
### GatherRadiance()
GatherRadiance() mainly evaluates the SkyLightTexture (sky cubemap) and the ReflectionCaptures (box and sphere), and finally composites them using the previously computed Color.a.
```c++
float3 GatherRadiance(float CompositeAlpha, float3 TranslatedWorldPosition, float3 RayDirection, float Roughness, float3 BentNormal, float IndirectIrradiance, uint ShadingModelID, uint NumCulledReflectionCaptures, uint CaptureDataStartIndex)
{
// Indirect occlusion from DFAO, which should be applied to reflection captures and skylight specular, but not SSR
float IndirectSpecularOcclusion = 1.0f;
float3 ExtraIndirectSpecular = 0;
#if SUPPORT_DFAO_INDIRECT_OCCLUSION
float IndirectDiffuseOcclusion;
GetDistanceFieldAOSpecularOcclusion(BentNormal, RayDirection, Roughness, ShadingModelID == SHADINGMODELID_TWOSIDED_FOLIAGE, IndirectSpecularOcclusion, IndirectDiffuseOcclusion, ExtraIndirectSpecular);
// Apply DFAO to IndirectIrradiance before mixing with indirect specular
IndirectIrradiance *= IndirectDiffuseOcclusion;
#endif
const bool bCompositeSkylight = true;
return CompositeReflectionCapturesAndSkylightTWS(
CompositeAlpha,
TranslatedWorldPosition,
RayDirection,
Roughness,
IndirectIrradiance,
IndirectSpecularOcclusion,
ExtraIndirectSpecular,
NumCulledReflectionCaptures,
CaptureDataStartIndex,
0,
bCompositeSkylight);
}
```
The main logic is in CompositeReflectionCapturesAndSkylightTWS():
```c++
float3 CompositeReflectionCapturesAndSkylightTWS(
float CompositeAlpha,
float3 TranslatedWorldPosition,
float3 RayDirection,
float Roughness,
float IndirectIrradiance,
float IndirectSpecularOcclusion,
float3 ExtraIndirectSpecular,
uint NumCapturesAffectingTile,
uint CaptureDataStartIndex,
int SingleCaptureIndex,
bool bCompositeSkylight,
uint EyeIndex)
{
float Mip = ComputeReflectionCaptureMipFromRoughness(Roughness, View.ReflectionCubemapMaxMip);
float4 ImageBasedReflections = float4(0, 0, 0, CompositeAlpha);
float2 CompositedAverageBrightness = float2(0.0f, 1.0f);
#if REFLECTION_COMPOSITE_USE_BLENDED_REFLECTION_CAPTURES
// Accumulate reflections from captures affecting this tile, applying largest captures first so that the smallest ones display on top
//ReflectionCapture blending; larger captures are applied first
LOOP
for (uint TileCaptureIndex = 0; TileCaptureIndex < NumCapturesAffectingTile; TileCaptureIndex++)
{
BRANCH
if (ImageBasedReflections.a < 0.001)//If alpha drops below 0.001, stop the loop and end the compositing.
{
break;
}
//Compute CaptureIndex
uint CaptureIndex = 0;
#ifdef REFLECTION_COMPOSITE_NO_CULLING_DATA
CaptureIndex = TileCaptureIndex; // Go from 0 to NumCapturesAffectingTile as absolute index in capture array
#else
#if (INSTANCED_STEREO || MOBILE_MULTI_VIEW)//VR or mobile multi-view
BRANCH
if (EyeIndex == 0)
{
#endif
CaptureIndex = GetCulledLightDataGrid(CaptureDataStartIndex + TileCaptureIndex);
#if (INSTANCED_STEREO || MOBILE_MULTI_VIEW)
}
else
{
CaptureIndex = GetCulledLightDataGridISR(CaptureDataStartIndex + TileCaptureIndex);
}
#endif
#endif
FLWCVector3 CaptureWorldPosition = MakeLWCVector3(GetReflectionTilePosition(CaptureIndex).xyz, GetReflectionPositionAndRadius(CaptureIndex).xyz);
float3 CaptureTranslatedWorldPosition = LWCToFloat(LWCAdd(CaptureWorldPosition, ResolvedView.PreViewTranslation));
float CaptureRadius = GetReflectionPositionAndRadius(CaptureIndex).w;
float4 CaptureProperties = GetReflectionCaptureProperties(CaptureIndex);
float3 CaptureVector = TranslatedWorldPosition - CaptureTranslatedWorldPosition;
float CaptureVectorLength = sqrt(dot(CaptureVector, CaptureVector));
float NormalizedDistanceToCapture = saturate(CaptureVectorLength / CaptureRadius);
BRANCH
if (CaptureVectorLength < CaptureRadius)//The current pixel is within this ReflectionCapture's radius
{
float3 ProjectedCaptureVector = RayDirection;
float4 CaptureOffsetAndAverageBrightness = GetReflectionCaptureOffsetAndAverageBrightness(CaptureIndex);
// Fade out based on distance to capture
float DistanceAlpha = 0;
#define PROJECT_ONTO_SHAPE 1
#if PROJECT_ONTO_SHAPE
//Box-shaped reflection capture
#if REFLECTION_COMPOSITE_HAS_BOX_CAPTURES
#if REFLECTION_COMPOSITE_HAS_SPHERE_CAPTURES
// Box
BRANCH if (CaptureProperties.b > 0)
#endif
{
ProjectedCaptureVector = GetLookupVectorForBoxCapture(RayDirection, TranslatedWorldPosition, float4(CaptureTranslatedWorldPosition, CaptureRadius),
GetReflectionBoxTransform(CaptureIndex), GetReflectionBoxScales(CaptureIndex), CaptureOffsetAndAverageBrightness.xyz, DistanceAlpha);
}
#endif
//Sphere-shaped reflection capture
#if REFLECTION_COMPOSITE_HAS_SPHERE_CAPTURES
// Sphere
#if REFLECTION_COMPOSITE_HAS_BOX_CAPTURES
else
#endif
{
ProjectedCaptureVector = GetLookupVectorForSphereCapture(RayDirection, TranslatedWorldPosition, float4(CaptureTranslatedWorldPosition, CaptureRadius), NormalizedDistanceToCapture, CaptureOffsetAndAverageBrightness.xyz, DistanceAlpha);
}
#endif
#else
DistanceAlpha = 1.0;
#endif //PROJECT_ONTO_SHAPE
float CaptureArrayIndex = CaptureProperties.g;
{
//Sample the corresponding reflection cubemap; Sample.a is the distance-based alpha. The result is accumulated into ImageBasedReflections.
float4 Sample = ReflectionStruct.ReflectionCubemap.SampleLevel(ReflectionStruct.ReflectionCubemapSampler, float4(ProjectedCaptureVector, CaptureArrayIndex), Mip);
Sample.rgb *= CaptureProperties.r;
Sample *= DistanceAlpha;
// Under operator (back to front)
ImageBasedReflections.rgb += Sample.rgb * ImageBasedReflections.a * IndirectSpecularOcclusion;
ImageBasedReflections.a *= 1 - Sample.a;
float AverageBrightness = CaptureOffsetAndAverageBrightness.w;
CompositedAverageBrightness.x += AverageBrightness * DistanceAlpha * CompositedAverageBrightness.y;
CompositedAverageBrightness.y *= 1 - DistanceAlpha;
}
}
}
#else
//Non-blended (single) ReflectionCapture path
float3 ProjectedCaptureVector = RayDirection;
FLWCVector3 SingleCaptureWorldPosition = MakeLWCVector3(GetReflectionTilePosition(SingleCaptureIndex).xyz, GetReflectionPositionAndRadius(SingleCaptureIndex).xyz);
float3 SingleCaptureTranslatedWorldPosition = LWCToFloat(LWCAdd(SingleCaptureWorldPosition, ResolvedView.PreViewTranslation));
float SingleCaptureRadius = GetReflectionPositionAndRadius(SingleCaptureIndex).w;
float4 SingleCaptureOffsetAndAverageBrightness = GetReflectionCaptureOffsetAndAverageBrightness(SingleCaptureIndex);
float SingleCaptureBrightness = GetReflectionCaptureProperties(SingleCaptureIndex).x;
float SingleCaptureArrayIndex = GetReflectionCaptureProperties(SingleCaptureIndex).y;
#define APPROXIMATE_CONTINUOUS_SINGLE_CAPTURE_PARALLAX 0
#if APPROXIMATE_CONTINUOUS_SINGLE_CAPTURE_PARALLAX
float3 CaptureVector = TranslatedWorldPosition - SingleCaptureTranslatedWorldPosition;
float CaptureVectorLength = sqrt(dot(CaptureVector, CaptureVector));
float NormalizedDistanceToCapture = saturate(CaptureVectorLength / SingleCaptureRadius);
float UnusedDistanceAlpha = 0;
ProjectedCaptureVector = GetLookupVectorForSphereCapture(RayDirection, TranslatedWorldPosition, float4(SingleCaptureTranslatedWorldPosition, SingleCaptureRadius), NormalizedDistanceToCapture, SingleCaptureOffsetAndAverageBrightness.xyz, UnusedDistanceAlpha);
float x = saturate(NormalizedDistanceToCapture);
float DistanceAlpha = 1 - x * x * (3 - 2 * x);
// Lerp between sphere parallax corrected and infinite based on distance to shape
ProjectedCaptureVector = lerp(RayDirection, normalize(ProjectedCaptureVector), DistanceAlpha);
#endif
float4 Sample = TextureCubeArraySampleLevel(ReflectionStruct.ReflectionCubemap, ReflectionStruct.ReflectionCubemapSampler, ProjectedCaptureVector, SingleCaptureArrayIndex, Mip);
Sample.rgb *= SingleCaptureBrightness;
ImageBasedReflections = float4(Sample.rgb, 1 - Sample.a);
float AverageBrightness = SingleCaptureOffsetAndAverageBrightness.w;
CompositedAverageBrightness.x += AverageBrightness * CompositedAverageBrightness.y;
CompositedAverageBrightness.y = 0;
#endif
// Apply indirect lighting scale while we have only accumulated reflection captures
ImageBasedReflections.rgb *= View.PrecomputedIndirectSpecularColorScale;
CompositedAverageBrightness.x *= Luminance( View.PrecomputedIndirectSpecularColorScale );
#if ENABLE_SKY_LIGHT
BRANCH
if (ReflectionStruct.SkyLightParameters.y > 0 && bCompositeSkylight)
{
float SkyAverageBrightness = 1.0f;
// Without blend support the result is roughly SkyLightCubeMap * View.SkyLightColor. With blend support: lerp(Reflection, BlendDestinationReflection * View.SkyLightColor.rgb, ReflectionStruct.SkyLightParameters.w);
#if REFLECTION_COMPOSITE_SUPPORT_SKYLIGHT_BLEND
float3 SkyLighting = GetSkyLightReflectionSupportingBlend(RayDirection, Roughness, SkyAverageBrightness);
#else
float3 SkyLighting = GetSkyLightReflection(RayDirection, Roughness, SkyAverageBrightness);
#endif
// Normalize for static skylight types which mix with lightmaps, material ambient occlusion as well as diffuse/specular occlusion.
bool bNormalize = ReflectionStruct.SkyLightParameters.z < 1 && ALLOW_STATIC_LIGHTING;
FLATTEN
if (bNormalize)
{
ImageBasedReflections.rgb += ImageBasedReflections.a * SkyLighting * IndirectSpecularOcclusion;
CompositedAverageBrightness.x += SkyAverageBrightness * CompositedAverageBrightness.y;
}
else
{
ExtraIndirectSpecular += SkyLighting * IndirectSpecularOcclusion;
}
}
#endif
#if ALLOW_STATIC_LIGHTING
ImageBasedReflections.rgb *= ComputeMixingWeight(IndirectIrradiance, CompositedAverageBrightness.x, Roughness);
#endif
ImageBasedReflections.rgb += ImageBasedReflections.a * ExtraIndirectSpecular;
return ImageBasedReflections.rgb;
}
```
## DiffuseIndirectComposite
Located in IndirectLightRendering.cpp: `RenderDiffuseIndirectAndAmbientOcclusion()`
`RenderDiffuseIndirectAndAmbientOcclusion()` mainly performs:
1. Decide whether to skip the computation for the current view, based mainly on whether Lumen is enabled and on the bCompositeRegularLumenOnly argument.
2. SetupCommonDiffuseIndirectParameters()
3. Compute GI:
	- ScreenSpaceGI
	- RTGI
	- Lumen
		- Lumen Reflection: output to OutTextures.Textures[3].
		- SSR: output to OutTextures.Textures[3].
		- Other reflection methods output black.
4. Denoise SSGI and RTGI with the denoiser.
5. Render AO:
	- Disabled
	- RTAO
	- SSAO
	- Other, unimplemented methods raise an error.
6. Assign the rendered AO result to SceneTextures.ScreenSpaceAO.
7. RenderHairStrandsAmbientOcclusion()
8. Apply the GI to the rendered result.
9. ApplyAmbientCubemapComposite()
FDiffuseIndirectCompositePS corresponds to MainPS() in /Engine/Private/DiffuseIndirectComposite.usf.
### FDiffuseIndirectCompositePS
```c++
void MainPS(
float4 SvPosition : SV_POSITION
#if DIM_APPLY_DIFFUSE_INDIRECT
#if ENABLE_DUAL_SRC_BLENDING
, out float4 OutAddColor DUAL_SOURCE_BLENDING_SLOT(0) : SV_Target0
, out float4 OutMultiplyColor DUAL_SOURCE_BLENDING_SLOT(1) : SV_Target1
#else
, out float4 OutColor : SV_Target0
#endif
#else
, out float4 OutMultiplyColor : SV_Target0
#endif
)
{
const uint2 PixelPos = SvPosition.xy;
const float2 SceneBufferUV = SvPositionToBufferUV(SvPosition);
const float2 ScreenPosition = SvPositionToScreenPosition(SvPosition).xy;
#if !ENABLE_DUAL_SRC_BLENDING && DIM_APPLY_DIFFUSE_INDIRECT
float4 OutAddColor = float4(0.0, 0.0, 0.0, 0.0);
float4 OutMultiplyColor;
const float4 SceneColor = SceneColorTexture.SampleLevel(SceneColorSampler, SceneBufferUV, 0);
#endif
// Sample scene textures.
const FLumenMaterialData Material = ReadMaterialData(PixelPos, SceneBufferUV);
// Sample the ambient occlusion that is dynamically generated every frame.
const float DynamicAmbientOcclusion = AmbientOcclusionTexture.SampleLevel(AmbientOcclusionSampler, SceneBufferUV, 0).r;
// Compute the final ambient occlusion to be applied. Lumen handles material AO internally.
float FinalAmbientOcclusion = 1.0f;
#if DIM_APPLY_DIFFUSE_INDIRECT != DIM_APPLY_DIFFUSE_INDIRECT_SCREEN_PROBE_GATHER
{
float AOMask = IsValid(Material);
FinalAmbientOcclusion = lerp(1.0f, Material.MaterialAO * DynamicAmbientOcclusion, AOMask * AmbientOcclusionStaticFraction);
}
#endif
const float3 TranslatedWorldPosition = mul(float4(GetScreenPositionForProjectionType(ScreenPosition, Material.SceneDepth), Material.SceneDepth, 1), View.ScreenToTranslatedWorld).xyz;
const float3 N = Material.WorldNormal;
const float3 V = normalize(View.TranslatedWorldCameraOrigin - TranslatedWorldPosition);
const float NoV = saturate(dot(N, V));
// Apply diffuse indirect.
//The DIM_ prefix here marks shader permutation dimensions
#if DIM_APPLY_DIFFUSE_INDIRECT
OutAddColor = 0;
{
FDirectLighting IndirectLighting = (FDirectLighting)0;
if (IsValid(Material))
{
float3 DiffuseIndirectLighting = 0;
float3 RoughSpecularIndirectLighting = 0;
float4 SpecularIndirectLighting = 0;
//Fetch the data directly from the screen-space probes (Lumen)
#if DIM_APPLY_DIFFUSE_INDIRECT == DIM_APPLY_DIFFUSE_INDIRECT_SCREEN_PROBE_GATHER
DiffuseIndirectLighting = DiffuseIndirect_Textures_0.SampleLevel(GlobalPointClampedSampler, SceneBufferUV, 0).rgb;
RoughSpecularIndirectLighting = DiffuseIndirect_Textures_2.SampleLevel(GlobalPointClampedSampler, SceneBufferUV, 0).rgb;
SpecularIndirectLighting = DiffuseIndirect_Textures_3.SampleLevel(GlobalPointClampedSampler, SceneBufferUV, 0).rgba;
#else
{
//Set up the denoiser parameters
// Sample the output of the denoiser.
FSSDKernelConfig KernelConfig = CreateKernelConfig();
#if DEBUG_OUTPUT
{
KernelConfig.DebugPixelPosition = uint2(SvPosition.xy);
KernelConfig.DebugEventCounter = 0;
}
#endif
// Compile time.
KernelConfig.bSampleKernelCenter = true;
KernelConfig.BufferLayout = CONFIG_SIGNAL_INPUT_LAYOUT;
KernelConfig.bUnroll = true;
#if DIM_UPSCALE_DIFFUSE_INDIRECT
{
KernelConfig.SampleSet = SAMPLE_SET_2X2_BILINEAR;
KernelConfig.BilateralDistanceComputation = SIGNAL_WORLD_FREQUENCY_REF_METADATA_ONLY;
KernelConfig.WorldBluringDistanceMultiplier = 16.0;
KernelConfig.BilateralSettings[0] = BILATERAL_POSITION_BASED(3);
// SGPRs
KernelConfig.BufferSizeAndInvSize = View.BufferSizeAndInvSize * float4(0.5, 0.5, 2.0, 2.0);
KernelConfig.BufferBilinearUVMinMax = View.BufferBilinearUVMinMax;
}
#else
{
KernelConfig.SampleSet = SAMPLE_SET_1X1;
KernelConfig.bNormalizeSample = true;
// SGPRs
KernelConfig.BufferSizeAndInvSize = View.BufferSizeAndInvSize;
KernelConfig.BufferBilinearUVMinMax = View.BufferBilinearUVMinMax;
}
#endif
// VGPRs
KernelConfig.BufferUV = SceneBufferUV;
{
// STRATA_TODO: We use the top layer data, but we should resolve lighting for each BSDFs.
KernelConfig.CompressedRefSceneMetadata = MaterialToCompressedSceneMetadata(Material.SceneDepth, Material.WorldNormal, Material.Roughness, Material.ShadingID);
KernelConfig.RefBufferUV = SceneBufferUV;
KernelConfig.RefSceneMetadataLayout = METADATA_BUFFER_LAYOUT_DISABLED;
}
KernelConfig.HammersleySeed = Rand3DPCG16(int3(SvPosition.xy, View.StateFrameIndexMod8)).xy;
FSSDSignalAccumulatorArray UncompressedAccumulators = CreateSignalAccumulatorArray();
FSSDCompressedSignalAccumulatorArray CompressedAccumulators = CompressAccumulatorArray(
UncompressedAccumulators, CONFIG_ACCUMULATOR_VGPR_COMPRESSION);
AccumulateKernel(
KernelConfig,
DiffuseIndirect_Textures_0,
DiffuseIndirect_Textures_1,
DiffuseIndirect_Textures_2,
DiffuseIndirect_Textures_3,
/* inout */ UncompressedAccumulators,
/* inout */ CompressedAccumulators);
//PassDebugOutput[uint2(SvPosition.xy)] = float4(UncompressedAccumulators.Array[0].Moment1.SampleCount, 0, 0, 0);
FSSDSignalSample Sample;
#if DIM_UPSCALE_DIFFUSE_INDIRECT
Sample = NormalizeToOneSample(UncompressedAccumulators.Array[0].Moment1);
#else
Sample = UncompressedAccumulators.Array[0].Moment1;
#endif
//For SSGI/RTGI, assign the denoised and upsampled result directly to DiffuseIndirectLighting
#if DIM_APPLY_DIFFUSE_INDIRECT == DIM_APPLY_DIFFUSE_INDIRECT_SSGI || DIM_APPLY_DIFFUSE_INDIRECT == DIM_APPLY_DIFFUSE_INDIRECT_RTGI
{
DiffuseIndirectLighting = Sample.SceneColor.rgb;
}
#else
#error Unimplemented
#endif
}
#endif
#if STRATA_ENABLED
{
FStrataAddressing StrataAddressing = GetStrataPixelDataByteOffset(PixelPos, uint2(View.BufferSizeAndInvSize.xy), Strata.MaxBytesPerPixel);
FStrataPixelHeader StrataPixelHeader = UnpackStrataHeaderIn(Strata.MaterialTextureArray, StrataAddressing, Strata.TopLayerTexture);
if (StrataPixelHeader.GetMaterialMode() > HEADER_MATERIALMODE_NONE)
{
const FStrataDeferredLighting IndirectLighting_Strata = StrataIndirectLighting(
PixelPos,
Strata.MaterialTextureArray,
StrataAddressing,
StrataPixelHeader,
V,
DynamicAmbientOcclusion,
DiffuseIndirectLighting,
SpecularIndirectLighting);
OutAddColor = IndirectLighting_Strata.SceneColor;
#if STRATA_OPAQUE_ROUGH_REFRACTION_ENABLED
const uint2 OutCoord = PixelPos;
OutOpaqueRoughRefractionSceneColor[OutCoord] = IndirectLighting_Strata.OpaqueRoughRefractionSceneColor;
OutSubSurfaceSceneColor[OutCoord] = IndirectLighting_Strata.SubSurfaceSceneColor;
#endif
}
}
#else // STRATA_ENABLED
{
FGBufferData GBuffer = Material.GBufferData;
float3 DiffuseColor = bVisualizeDiffuseIndirect ? float3(.18f, .18f, .18f) : GBuffer.DiffuseColor;
float3 SpecularColor = GBuffer.SpecularColor;
#if DIM_APPLY_DIFFUSE_INDIRECT == DIM_APPLY_DIFFUSE_INDIRECT_SCREEN_PROBE_GATHER
RemapClearCoatDiffuseAndSpecularColor(GBuffer, NoV, DiffuseColor, SpecularColor);
#endif
//Fetch the AO
FShadingOcclusion Occlusion = GetShadingOcclusion(PixelPos, V, N, GBuffer.Roughness, GBuffer.BaseColor, DynamicAmbientOcclusion);
//Adjust DiffuseOcclusion for two-sided foliage and subsurface
if (GBuffer.ShadingModelID == SHADINGMODELID_TWOSIDED_FOLIAGE || GBuffer.ShadingModelID == SHADINGMODELID_SUBSURFACE)
{
Occlusion.DiffuseOcclusion = lerp(1, Occlusion.DiffuseOcclusion, LumenFoliageOcclusionStrength);
}
//Hair needs separate handling
if (GBuffer.ShadingModelID == SHADINGMODELID_HAIR)
{
float3 L = 0;
const float3 IndirectDiffuseColor = EvaluateEnvHair(GBuffer, V, N, L /*out*/);
IndirectLighting.Diffuse = DiffuseIndirectLighting * Occlusion.DiffuseOcclusion * IndirectDiffuseColor;
IndirectLighting.Specular = 0;
}
else
{
float3 BackfaceDiffuseIndirectLighting = 0;
//Two-sided foliage computes BackfaceDiffuseIndirectLighting. If backface diffuse is not supported, the pre-integrated term is added into DiffuseColor instead.
if (GBuffer.ShadingModelID == SHADINGMODELID_TWOSIDED_FOLIAGE)
{
float3 SubsurfaceColor = ExtractSubsurfaceColor(GBuffer);
if (bLumenSupportBackfaceDiffuse > 0)
{
BackfaceDiffuseIndirectLighting += SubsurfaceColor * DiffuseIndirect_Textures_1.SampleLevel(GlobalPointClampedSampler, SceneBufferUV, 0).rgb;
}
else
{
// Adding Subsurface energy to the diffuse lobe is a poor approximation when DiffuseColor is small and SubsurfaceColor is large
// Reduce the error by attenuating SubsurfaceColor, even though diffuse already has the 1/PI for Lambert.
const float PreintegratedTwoSidedBxDF = 1.0f / PI;
DiffuseColor += SubsurfaceColor * PreintegratedTwoSidedBxDF;
}
}
//Subsurface and pre-integrated skin add the subsurface color directly onto the diffuse.
if (GBuffer.ShadingModelID == SHADINGMODELID_SUBSURFACE || GBuffer.ShadingModelID == SHADINGMODELID_PREINTEGRATED_SKIN)
{
float3 SubsurfaceColor = ExtractSubsurfaceColor(GBuffer);
// Add subsurface energy to diffuse
DiffuseColor += SubsurfaceColor;
}
//Cloth handling
if (GBuffer.ShadingModelID == SHADINGMODELID_CLOTH)
{
float3 ClothFuzz = ExtractSubsurfaceColor(GBuffer);
DiffuseColor += ClothFuzz * GBuffer.CustomData.a;
}
//Final GI result
IndirectLighting.Diffuse = (DiffuseIndirectLighting * DiffuseColor + BackfaceDiffuseIndirectLighting) * Occlusion.DiffuseOcclusion;
IndirectLighting.Transmission = 0;
#if DIM_APPLY_DIFFUSE_INDIRECT == DIM_APPLY_DIFFUSE_INDIRECT_SCREEN_PROBE_GATHER
//Reflections computed by Lumen
RoughSpecularIndirectLighting *= Occlusion.SpecularOcclusion;
IndirectLighting.Specular = CombineRoughSpecular(GBuffer, HasBackfaceDiffuse(Material), NoV, SpecularIndirectLighting, RoughSpecularIndirectLighting, SpecularColor);
#else
//Reflection result for SSGI, RTGI, and other GI methods
IndirectLighting.Specular = AddContrastAndSpecularScale(SpecularIndirectLighting.xyz) * EnvBRDF(SpecularColor, GBuffer.Roughness, NoV);
#endif
}
}
#endif // STRATA_ENABLED
}
// Accumulate lighting into the final buffers
#if !STRATA_ENABLED
IndirectLighting.Specular *= GetSSSCheckerboadSpecularScale(PixelPos, Material.bNeedsSeparateLightAccumulation);
FLightAccumulator LightAccumulator = (FLightAccumulator)0;
LightAccumulator_Add(
LightAccumulator,
IndirectLighting.Diffuse + IndirectLighting.Specular,
IndirectLighting.Diffuse,
1.0f,
Material.bNeedsSeparateLightAccumulation);
OutAddColor = LightAccumulator_GetResult(LightAccumulator);
#endif // !STRATA_ENABLED
}
#endif
OutMultiplyColor = FinalAmbientOcclusion;
#if !ENABLE_DUAL_SRC_BLENDING && DIM_APPLY_DIFFUSE_INDIRECT
OutColor = SceneColor * OutMultiplyColor + OutAddColor;
#endif
}
```
PS.
1. The RoughSpecularIndirectLighting sampled from DiffuseIndirect_Textures_2 is only used in the Lumen path.