diff --git a/.obsidian/core-plugins.json b/.obsidian/core-plugins.json index eb383b1..6e4715f 100644 --- a/.obsidian/core-plugins.json +++ b/.obsidian/core-plugins.json @@ -6,7 +6,6 @@ "backlink", "canvas", "tag-pane", - "properties", "page-preview", "daily-notes", "note-composer", diff --git a/.obsidian/plugins/various-complements/data.json b/.obsidian/plugins/various-complements/data.json index f6d4ee2..ad55d6d 100644 --- a/.obsidian/plugins/various-complements/data.json +++ b/.obsidian/plugins/various-complements/data.json @@ -47,59 +47,11 @@ }, "showLogAboutPerformanceInConsole": false, "selectionHistoryTree": { - "以垂直关系依次旋转每个轴。": { - "以垂直关系依次旋转每个轴。": { + "2周左右。": { + "2周左右。": { "currentFile": { "count": 1, - "lastUpdated": 1698213001403 - } - } - }, - "设置Skeleton与PreviewMesh资产。": { - "设置Skeleton与PreviewMesh资产。": { - "currentFile": { - "count": 1, - "lastUpdated": 1699256932874 - } - } - }, - "。里面初始化了SourceSkeleton、TargetSkeleton、根骨骼重定向器、UIKRigProcessor、": { - "。里面初始化了SourceSkeleton、TargetSkeleton、根骨骼重定向器、UIKRigProcessor、": { - "currentFile": { - "count": 1, - "lastUpdated": 1699258408430 - } - } - }, - "TargetSkeleton、根骨骼重定向器、UIKRigProcessor、": { - "TargetSkeleton、根骨骼重定向器、UIKRigProcessor、": { - "currentFile": { - "count": 2, - "lastUpdated": 1699258527676 - } - } - }, - "使用最终finalyOffset": { - "使用最终finalyOffset": { - "currentFile": { - "count": 1, - "lastUpdated": 1699348533203 - } - } - }, - "Target": { - "Target": { - "currentFile": { - "count": 1, - "lastUpdated": 1699348599323 - } - } - }, - "遍历所有骨骼,复制SourceGlobalPose到CurrentGlobalTransforms;取得对应骨骼的Index以及父骨骼Index。(跳过根骨骼)": { - "遍历所有骨骼,复制SourceGlobalPose到CurrentGlobalTransforms;取得对应骨骼的Index以及父骨骼Index。(跳过根骨骼)": { - "currentFile": { - "count": 1, - "lastUpdated": 1699350988289 + "lastUpdated": 1703147471771 } } }, @@ -107,7 +59,15 @@ "c++17带来的代码变化": { "internalLink": { "count": 1, - "lastUpdated": 1699369392809 + "lastUpdated": 1703216792464 + } + } + }, + "AlbedoTint、CropEnabled、CropTranslation、CropRotation、CropExtent;texture_width、texture_position、texture_rotation、texture_scaleAndOpacity。": { + "AlbedoTint、CropEnabled、CropTranslation、CropRotation、CropExtent;texture_width、texture_position、texture_rotation、texture_scaleAndOpacity。": { + "currentFile": { + "count": 1, + "lastUpdated": 1703225931596 } } } diff --git a/.obsidian/plugins/workspaces-plus/data.json b/.obsidian/plugins/workspaces-plus/data.json index bc5b812..8b41768 100644 --- a/.obsidian/plugins/workspaces-plus/data.json +++ b/.obsidian/plugins/workspaces-plus/data.json @@ -2,5 +2,14 @@ "showInstructions": true, "showDeletePrompt": true, "saveOnSwitch": false, - "saveOnChange": false + "saveOnChange": false, + "workspaceSettings": false, + "systemDarkMode": false, + "globalSettings": {}, + "activeWorkspaceDesktop": "阅读模式", + "activeWorkspaceMobile": "", + "reloadLivePreview": false, + "workspaceSwitcherRibbon": false, + "modeSwitcherRibbon": false, + "replaceNativeRibbon": false } \ No newline at end of file diff --git a/.obsidian/workspaces.json b/.obsidian/workspaces.json index 33976eb..9b4444a 100644 --- a/.obsidian/workspaces.json +++ b/.obsidian/workspaces.json @@ -179,7 +179,8 @@ "direction": "horizontal", "width": 229 }, - "active": "c8875a5f97f4a74d" + "active": "c8875a5f97f4a74d", + "workspaces-plus:settings-v1": {} }, "阅读模式": { "main": { @@ -363,8 +364,9 @@ "direction": "horizontal", "width": 245 }, - "active": "0d255cdd69daa9bf" + "active": "0d255cdd69daa9bf", + "workspaces-plus:settings-v1": {} } }, - "active": "编辑模式" + "active": "阅读模式" } \ No newline at end 
of file diff --git a/02-Note/DAWA/AI偶像陪伴项目/AI虚拟偶像陪伴项目开发计划与阶段目标.md b/02-Note/DAWA/AI偶像陪伴项目/AI虚拟偶像陪伴 & AI虚拟直播间 开发计划与阶段目标.md similarity index 64% rename from 02-Note/DAWA/AI偶像陪伴项目/AI虚拟偶像陪伴项目开发计划与阶段目标.md rename to 02-Note/DAWA/AI偶像陪伴项目/AI虚拟偶像陪伴 & AI虚拟直播间 开发计划与阶段目标.md index b6c6d6c..cb784d4 100644 --- a/02-Note/DAWA/AI偶像陪伴项目/AI虚拟偶像陪伴项目开发计划与阶段目标.md +++ b/02-Note/DAWA/AI偶像陪伴项目/AI虚拟偶像陪伴 & AI虚拟直播间 开发计划与阶段目标.md @@ -1,3 +1,41 @@ +# AI虚拟直播间需求&任务整理 +## 实现目标&内容&工期 +>拿给EOE老板看的Demo版本 + +1. 控制AI行为以及数据交互的导播台应用(Electron + Server) + 1. 实现目标 + 1. =>AI服务器 + 1. 能够发送文字、语音数据以及其他指令到AI服务器。 + 2. 能够接收文字、音频、CSV、FBX数据。 + 2. =>渲染机器 + 1. 能够转发音频、CSV、FBX数据到渲染机中。 + 2. 实现内容:基础Electron用户界面以及对应的Server(第一版放在一起后续分离),工期 2周左右。 + 1. 转发数据逻辑 + 2. 用户界面 + 3. 未来TODO: + 1. 自定VMC协议用来传输音频等其他数据 +2. 渲染机(UE插件) + 1. 导播台应用=> + 1. 制作角色蓝图、动画蓝图以及状态机,可以播放音频、CSV、FBX。 + 2. 接收导播台(AI服务器的控制指令) + 2. 实现内容:编写插件实现对应的数据接收节点。 + +## 任务分配 +1. 楼嘉杰搭建基础原型 +2. Electron的UI美化&细化 => 前端 + 1. 搭建基础Electron的VUE模板。 + 2. 将项目传到公司Gitlab平台上。 +3. 插件配套场景搭建 =>杨萱羽 + 1. 从EOE场景里扣一个像直播舞台的场景。 + 2. + +## 存在问题 +- [x] ~~渲染机是否需要传递数据到导播台(渲染机干预导播台的一些行为)?~~ +- [x] 导播台除了文字、音频还有哪些数据会传递到AI端? + - 目前只有文字 +- [x] ChatGPT会过期。ChatGPT账号池已经完成,开发时可以手动续费。 + +# AI虚拟偶像陪伴 ## 阶段技术需求 & 实现目标 ### 第零阶段(快速简历可供AI迭代的基础程序) 1. Express Http服务器。 diff --git a/03-UnrealEngine/Editor/FBXAnimation导入逻辑.md b/03-UnrealEngine/Editor/FBXAnimation导入&导出逻辑.md similarity index 61% rename from 03-UnrealEngine/Editor/FBXAnimation导入逻辑.md rename to 03-UnrealEngine/Editor/FBXAnimation导入&导出逻辑.md index 9040e4e..52fe5a6 100644 --- a/03-UnrealEngine/Editor/FBXAnimation导入逻辑.md +++ b/03-UnrealEngine/Editor/FBXAnimation导入&导出逻辑.md @@ -111,4 +111,61 @@ UE5中FBXSDK相关函数调用方式: # 参考 1. Interchange\\Runtime\\Source\\Parsers 1. InterchangeFbxParser.Build.cs - 2. FbxInclude.h:FBXSDK头文件包含问题。 \ No newline at end of file + 2. FbxInclude.h:FBXSDK头文件包含问题。 + +# UE5中使用FBXSDK导出动画逻辑 +1. FFbxExporter::ExportSkeletalMeshToFbx => FFbxExporter::ExportAnimSequence => FFbxExporter::ExportAnimSequenceToFbx +2. FFbxExporter::CorrectAnimTrackInterpolation + +直接导出会有问题,所以UE在这里做了一步Correct: +```c++ +// The curve code doesn't differentiate between angles and other data, so an interpolation from 179 to -179 +// will cause the bone to rotate all the way around through 0 degrees. So here we make a second pass over the +// rotation tracks to convert the angles into a more interpolation-friendly format. 
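+//
+// 举例说明(以下为假设的关键帧数值,仅作示意):若上一个关键帧为 179,当前关键帧为 -179,
+// 直接插值得到的 DeltaAngle = -358,骨骼会反向绕过 0 度旋转将近一整圈;
+// 下面的修正检测到 DeltaAngle <= -180 后令 CurrentAngleOffset += 360,
+// 当前关键帧被改写为 -179 + 360 = 181,插值因此只是 179 -> 181 的 2 度小步。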
+void FFbxExporter::CorrectAnimTrackInterpolation( TArray<FbxNode*>& BoneNodes, FbxAnimLayer* InAnimLayer )
+{
+	// Add the animation data to the bone nodes
+	for(int32 BoneIndex = 0; BoneIndex < BoneNodes.Num(); ++BoneIndex)
+	{
+		FbxNode* CurrentBoneNode = BoneNodes[BoneIndex];
+
+		// Fetch the AnimCurves
+		FbxAnimCurve* Curves[3];
+		Curves[0] = CurrentBoneNode->LclRotation.GetCurve(InAnimLayer, FBXSDK_CURVENODE_COMPONENT_X, true);
+		Curves[1] = CurrentBoneNode->LclRotation.GetCurve(InAnimLayer, FBXSDK_CURVENODE_COMPONENT_Y, true);
+		Curves[2] = CurrentBoneNode->LclRotation.GetCurve(InAnimLayer, FBXSDK_CURVENODE_COMPONENT_Z, true);
+
+		for(int32 CurveIndex = 0; CurveIndex < 3; ++CurveIndex)
+		{
+			FbxAnimCurve* CurrentCurve = Curves[CurveIndex];
+			CurrentCurve->KeyModifyBegin();
+
+			float CurrentAngleOffset = 0.f;
+			for(int32 KeyIndex = 1; KeyIndex < CurrentCurve->KeyGetCount(); ++KeyIndex)
+			{
+				float PreviousOutVal = CurrentCurve->KeyGetValue( KeyIndex-1 );
+				float CurrentOutVal = CurrentCurve->KeyGetValue( KeyIndex );
+
+				float DeltaAngle = (CurrentOutVal + CurrentAngleOffset) - PreviousOutVal;
+
+				if(DeltaAngle >= 180)
+				{
+					CurrentAngleOffset -= 360;
+				}
+				else if(DeltaAngle <= -180)
+				{
+					CurrentAngleOffset += 360;
+				}
+
+				CurrentOutVal += CurrentAngleOffset;
+
+				CurrentCurve->KeySetValue(KeyIndex, CurrentOutVal);
+			}
+
+			CurrentCurve->KeyModifyEnd();
+		}
+	}
+}
+```
diff --git a/03-UnrealEngine/Gameplay/Http/UE5 Http相关.md b/03-UnrealEngine/Gameplay/Http/UE5 Http相关.md
new file mode 100644
index 0000000..0e24592
--- /dev/null
+++ b/03-UnrealEngine/Gameplay/Http/UE5 Http相关.md
@@ -0,0 +1,17 @@
+---
+title: UE5 Http相关
+date: 2023-11-30 21:31:42
+excerpt: 
+tags: 
+rating: ⭐
+---
+
+# 案例
+https://dev.epicgames.com/community/learning/tutorials/ZdXD/call-rest-api-using-http-json-from-ue5-c
+
+# Base64
+https://zhuanlan.zhihu.com/p/344540241
+
+# Gzip
+https://forums.unrealengine.com/t/why-does-fcompression-not-support-gzip-for-uncompressmemory/357255/3
+https://github.com/gtreshchev/RuntimeArchiver
\ No newline at end of file
diff --git a/03-UnrealEngine/Rendering/3D高斯/Sibr相关笔记.md b/03-UnrealEngine/Rendering/3D高斯/Sibr相关笔记.md
new file mode 100644
index 0000000..0d7446e
--- /dev/null
+++ b/03-UnrealEngine/Rendering/3D高斯/Sibr相关笔记.md
@@ -0,0 +1,73 @@
+---
+title: Sibr相关笔记
+date: 2023-12-29 16:20:43
+excerpt: 
+tags: 
+rating: ⭐
+---
+
+# 前言
+- 文档:https://sibr.gitlabpages.inria.fr
+- 代码:https://gitlab.inria.fr/sibr/sibr_core
+	- 案例代码
+		- [renderer/SimpleView.hpp](https://gitlab.inria.fr/mbenadel/sibr_simple/-/blob/master/renderer/SimpleView.hpp)&[renderer/SimpleView.cpp](https://gitlab.inria.fr/mbenadel/sibr_simple/-/blob/master/renderer/SimpleView.cpp)
+		- [renderer/SimpleRenderer.hpp](https://gitlab.inria.fr/mbenadel/sibr_simple/-/blob/master/renderer/SimpleRenderer.hpp)&[renderer/SimpleRenderer.cpp](https://gitlab.inria.fr/mbenadel/sibr_simple/-/blob/master/renderer/SimpleRenderer.cpp)
+		- [Simple SIBR Project](https://gitlab.inria.fr/sibr/projects/simple)
+		- [SIBR/OptiX integration example](https://sibr.gitlabpages.inria.fr/docs/0.9.6/optixPage.html)
+		- [Tensorflow/OpenGL Interop for SIBR](https://sibr.gitlabpages.inria.fr/docs/0.9.6/tfgl_interopPage.html)
+	- Shader:需要将你自己编写的Shader放入**renderer/shaders**文件夹中
+- 关键词:
+	- Structure-from-Motion (SfM)
+	- Multi-View Stereo (MVS)
+
+## 功能
+https://sibr.gitlabpages.inria.fr/docs/0.9.6/projects.html
+
+- [Sample algorithms & toolboxes](https://sibr.gitlabpages.inria.fr/docs/0.9.6/sibr_projects_samples.html)
+	- 
[Dataset Preprocessing Tools](https://sibr.gitlabpages.inria.fr/docs/0.9.6/sibr_projects_dataset_tools.html) ([https://gitlab.inria.fr/sibr/sibr_core](https://gitlab.inria.fr/sibr/sibr_core)) + - [Unstructured Lumigraph Rendering (ULR)](https://sibr.gitlabpages.inria.fr/docs/0.9.6/ulrPage.html) ([https://gitlab.inria.fr/sibr/sibr_core](https://gitlab.inria.fr/sibr/sibr_core)) +- [Our algorithms](https://sibr.gitlabpages.inria.fr/docs/0.9.6/sibr_projects_ours.html) + - [Exploiting Repetitions for IBR of Facades](https://sibr.gitlabpages.inria.fr/docs/0.9.6/facade_repetitionsPage.html) ([https://gitlab.inria.fr/sibr/projects/facades-repetitions/facade_repetitions](https://gitlab.inria.fr/sibr/projects/facades-repetitions/facade_repetitions)) (Exploiting Repetitions for IBR of Facades (paper reference :[http://www-sop.inria.fr/reves/Basilic/2018/RBDD18/](http://www-sop.inria.fr/reves/Basilic/2018/RBDD18/))) + - [Deep Blending for Free-Viewpoint Image-Based Rendering – Scalable Inside-Out Image-Based Rendering](https://sibr.gitlabpages.inria.fr/docs/0.9.6/inside_out_deep_blendingPage.html) ([https://gitlab.inria.fr/sibr/projects/inside_out_deep_blending](https://gitlab.inria.fr/sibr/projects/inside_out_deep_blending)) (Deep Blending for Free-Viewpoint Image-Based Rendering, paper references: [http://www-sop.inria.fr/reves/Basilic/2018/HPPFDB18/](http://www-sop.inria.fr/reves/Basilic/2018/HPPFDB18/) , [http://visual.cs.ucl.ac.uk/pubs/deepblending/](http://visual.cs.ucl.ac.uk/pubs/deepblending/) ; Scalable Inside-Out Image-Based Rendering, paper references: [http://www-sop.inria.fr/reves/Basilic/2016/HRDB16](http://www-sop.inria.fr/reves/Basilic/2016/HRDB16) , [http://visual.cs.ucl.ac.uk/pubs/insideout/](http://visual.cs.ucl.ac.uk/pubs/insideout/) ) + - [Multi-view relighting using a geometry-aware network](https://sibr.gitlabpages.inria.fr/docs/0.9.6/outdoorRelightingPage.html) ([https://gitlab.inria.fr/sibr/projects/outdoor_relighting](https://gitlab.inria.fr/sibr/projects/outdoor_relighting)) (Multi-view Relighting Using a Geometry-Aware Network; paper reference ([https://www-sop.inria.fr/reves/Basilic/2019/PGZED19/](https://www-sop.inria.fr/reves/Basilic/2019/PGZED19/)) ) + - [Image-Based Rendering of Cars using Semantic Labels and Approximate Reflection Flow](https://sibr.gitlabpages.inria.fr/docs/0.9.6/semantic_reflectionsPage.html) ([https://gitlab.inria.fr/sibr/projects/semantic-reflections/semantic_reflections](https://gitlab.inria.fr/sibr/projects/semantic-reflections/semantic_reflections)) (Image-Based Rendering of Cars using Semantic Labels and Approximate Reflection Flow (paper reference : [http://www-sop.inria.fr/reves/Basilic/2020/RPHD20/](http://www-sop.inria.fr/reves/Basilic/2020/RPHD20/))) + - [Depth Synthesis and Local Warps for plausible image-based navigation - Bayesian approach for selective image-based rendering using superpixels](https://sibr.gitlabpages.inria.fr/docs/0.9.6/spixelwarpPage.html) ([https://gitlab.inria.fr/sprakash/spixelwarp](https://gitlab.inria.fr/sprakash/spixelwarp)) (Depth Synthesis and Local Warps for plausible image-based navigation, paper reference: [http://www-sop.inria.fr/reves/Basilic/2013/CDSD13/](http://www-sop.inria.fr/reves/Basilic/2013/CDSD13/) ; Bayesian approach for selective image-based rendering using superpixels, paper reference: [http://www-sop.inria.fr/reves/Basilic/2015/ODD15/](http://www-sop.inria.fr/reves/Basilic/2015/ODD15/) )) + - [Glossy Probe Reprojection for Interactive Global 
Illumination](https://sibr.gitlabpages.inria.fr/docs/0.9.6/synthetic_ibrPage.html) ([https://gitlab.inria.fr/sibr/projects/glossy-probes/synthetic_ibr](https://gitlab.inria.fr/sibr/projects/glossy-probes/synthetic_ibr)) (Glossy Probe Reprojection for Interactive Global Illumination (paper reference : [http://www-sop.inria.fr/reves/Basilic/2020/RLPWSD20/](http://www-sop.inria.fr/reves/Basilic/2020/RLPWSD20/)))
+- [Other algorithms](https://sibr.gitlabpages.inria.fr/docs/0.9.6/sibr_projects_others.html)
+	- [Soft3D](https://sibr.gitlabpages.inria.fr/docs/0.9.6/soft3dPage.html) ([https://gitlab.inria.fr/sibr/projects/soft3d](https://gitlab.inria.fr/sibr/projects/soft3d)) (Soft 3D Reconstruction for View Synthesis (paper reference : [https://ericpenner.github.io/soft3d/](https://ericpenner.github.io/soft3d/)))
+- [Integrated toolboxes](https://sibr.gitlabpages.inria.fr/docs/0.9.6/sibr_projects_toolbox.html)
+	- [Core framework of FRIBR](https://sibr.gitlabpages.inria.fr/docs/0.9.6/fribrFrameworkPage.html) ([https://gitlab.inria.fr/sibr/fribr_framework](https://gitlab.inria.fr/sibr/fribr_framework)) (Core framework of FRIBR)
+	- [SIBR/OptiX integration example](https://sibr.gitlabpages.inria.fr/docs/0.9.6/optixPage.html) ([https://gitlab.inria.fr/sibr/projects/optix](https://gitlab.inria.fr/sibr/projects/optix)) (SIBR/OptiX integration example)
+	- [Simple SIBR Project](https://sibr.gitlabpages.inria.fr/docs/0.9.6/simplePage.html) ([https://gitlab.inria.fr/sibr/projects/simple](https://gitlab.inria.fr/sibr/projects/simple)) (A simple sample SIBR project for you to base your projects on)
+	- [Tensorflow/OpenGL Interop for SIBR](https://sibr.gitlabpages.inria.fr/docs/0.9.6/tfgl_interopPage.html) ([https://gitlab.inria.fr/sibr/tfgl_interop](https://gitlab.inria.fr/sibr/tfgl_interop)) (Tensorflow GL interoperability dependencies and cuda code)
+
+项目结构:
+- `renderer/`: contains your library code and configuration
+- `preprocess/`: contains your preprocesses listed by directory, and the configuration CMake file to list them
+- `apps/`: contains your apps listed by directory, and the configuration CMake file to list them
+- `documentation/`: contains additional doxygen documentation
+
+# SIBR数据集创建方式
+**SIBR**本身定义了一种数据格式
+
+可以使用**RealityCapture**或者**Colmap**创建原生的SIBR数据集,也可以根据文档使用SFM或者MVS系统创建兼容数据集合。
+- [如何从 Reality Capture 创建数据集](https://sibr.gitlabpages.inria.fr/docs/0.9.6/HowToCapreal.html)
+- [如何从 Colmap 创建数据集](https://sibr.gitlabpages.inria.fr/docs/0.9.6/HowToColmap.html)
\ No newline at end of file
diff --git 
a/03-UnrealEngine/Rendering/RenderFeature/LOD相关代码笔记.md b/03-UnrealEngine/Rendering/RenderFeature/LOD相关代码笔记.md
new file mode 100644
index 0000000..cc1517c
--- /dev/null
+++ b/03-UnrealEngine/Rendering/RenderFeature/LOD相关代码笔记.md
@@ -0,0 +1,20 @@
+---
+title: LOD相关代码笔记
+date: 2023-12-28 15:13:44
+excerpt: 
+tags: 
+rating: ⭐
+---
+# UE5中的带有LOD名称的相关代码
+- Plugin
+	- Mesh LOD Toolset
+		- `Engine\Plugins\Experimental\MeshLODToolset\Source\MeshLODToolset\Public\Graphs\GenerateStaticMeshLODProcess.h`
+	- Proxy LOD Plugin
+		- `Engine\Plugins\Experimental\ProxyLODPlugin\Source\ProxyLOD\PrivateProxyLODPlugin.cpp`
+	- Skeletal Mesh Simplifier
+		- `Engine\Plugins\Experimental\SkeletalReduction\Source\Private\SkeletalMeshReductionPlugin.cpp`
+
+# UGenerateStaticMeshLODProcess
+- FGenerateStaticMeshLODAssetOperatorOp
+	- FGenerateStaticMeshLODAssetOperatorOp::CalculateResult()
+	- void FGenerateMeshLODGraph::EvaluateResult
\ No newline at end of file
diff --git a/03-UnrealEngine/Rendering/RenderFeature/UE5 3DGaussians 插件笔记.md b/03-UnrealEngine/Rendering/RenderFeature/UE5 3DGaussians 插件笔记.md
new file mode 100644
index 0000000..c1f3481
--- /dev/null
+++ b/03-UnrealEngine/Rendering/RenderFeature/UE5 3DGaussians 插件笔记.md
@@ -0,0 +1,111 @@
+---
+title: UE5 3DGaussians 插件笔记
+date: 2023-12-22 11:44:33
+excerpt: 
+tags: 
+rating: ⭐
+---
+
+# c++
+插件的c++部分主要实现了
+- FThreeDGaussians——可以理解为一个场景或者根节点
+	- FThreeDGaussiansTree——类似BVH的空间切分树
+	- FThreeDGaussiansData——具体数据
+- ply点云文件导入,流程如下
+	- FThreeDGaussiansImporterModule::PluginButtonClicked()
+		- LoadPly(),载入`TArray`数据。
+		- 进行排序
+			- 初始化一个`TArray unsorted`并且进行排序。
+			- 取得各种排序用参数DO_SPLIT_BY_3D_MORTON_ORDER、DO_SPLIT_BY_DISTANCE、MAX_TEXTURE_WIDHT、MAX_NUM_PARTICLES
+			- 采用莫顿码分割法、距离排序法。
+				- 莫顿码分割法:使用莫顿码进行排序,之后进行空间分割,构建一个三维加速结构。当当前区域点云数量小于MAX_NUM_PARTICLES后调用CreateDatum()。
+				- 距离排序法:根据Position上三个分量中最大绝对值进行排序,之后调用CreateDatum()。
+		- CreateDatum()
+			- Sort3dMortonOrder()排序。
+			- CreateExr()创建Exr Texture文件。
+		- 将上一步创建的文件导入UE。
+		- CreateActorBpSubclass(),创建3DGaussians蓝图Actor,并且查找SetData函数并且将数据塞入。
+
+## FThreeDGaussians代码
+```c++
+USTRUCT(BlueprintType)
+struct FThreeDGaussiansData
+{
+	GENERATED_BODY()
+public:
+	FThreeDGaussiansData() {}
+	FThreeDGaussiansData(const TArray<UTexture2D*>& textures, const FVector3f& in_minPos, const FVector3f& in_maxPos)
+	{
+		minPos = in_minPos;
+		maxPos = in_maxPos;
+		textureWidth = textures[0]->GetSizeX();
+		position = textures[0];
+		rotation = textures[1];
+		scaleAndOpacity = textures[2];
+
+		for (int i = 3; i < textures.Num(); i++)
+		{
+			sh.Add(textures[i]);
+		}
+	}
+
+	UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "3D Gaussians") FVector3f minPos = FVector3f::Zero();
+	UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "3D Gaussians") FVector3f maxPos = FVector3f::Zero();
+	UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "3D Gaussians") int32 textureWidth = -1;
+	UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "3D Gaussians") UTexture2D* position;
+	UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "3D Gaussians") UTexture2D* rotation;
+	UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "3D Gaussians") UTexture2D* scaleAndOpacity;
+	UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "3D Gaussians") TArray<UTexture2D*> sh;
+};
+
+/** 类似BVH的空间数据结构 */
+USTRUCT(BlueprintType)
+struct FThreeDGaussiansTree
+{
+	GENERATED_BODY()
+public:
+	FThreeDGaussiansTree() {}
+
+	// Axis for split (x=0, y=1, z=2)
+	UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "3D Gaussians") int32 splitAxis = -1;
+	// max value of the position of gaussian in child0 or leaf0 in "splitAxis" axis
+	UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "3D Gaussians") float splitValue = 0.0f;
+
+	// index of child tree node (Index of TArray<FThreeDGaussiansTree> tree)
+	UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "3D Gaussians") int32 childIndex0 = -1;
+	UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "3D Gaussians") int32 childIndex1 = -1;
+
+	// index of child data node (Index of TArray<FThreeDGaussiansData> data)
+	UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "3D Gaussians") int32 leafIndex0 = -1;
+	UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "3D Gaussians") int32 leafIndex1 = -1;
+};
+
+/* 作为3D高斯数据的载荷 */
+USTRUCT(BlueprintType)
+struct FThreeDGaussians
+{
+	GENERATED_BODY()
+public:
+	FThreeDGaussians() {}
+
+	UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "3D Gaussians") TArray<FThreeDGaussiansData> data;
+	UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "3D Gaussians") TArray<FThreeDGaussiansTree> tree;
+};
+```
+
+# BP_3D_Gaussians_Base
+- BeginPlay:判断三维加速结构是否还有子节点,如果有则开启Tick进行排序。
+- Tick:根据摄像机位置对三维加速结构进行排序。
+- ConstructionScript:
+	1. 添加Niagara粒子组件,一个FThreeDGaussiansData生成一个粒子组件。
+	2. 设置Niagara资产:NS_3D_Gaussians_sh0_mesh(勾选mesh选项)、NS_3D_Gaussians_sh0(SH角度)、NS_3D_Gaussians_sh1、NS_3D_Gaussians_sh2、NS_3D_Gaussians_sh3
+	3. 设置粒子材质属性:
+		1. AlbedoTint
+		2. 剔除设置:CropEnabled、CropTranslation、CropRotation、CropExtent
+		3. 数据贴图(FThreeDGaussiansData):texture_width、texture_position、texture_rotation、texture_scaleAndOpacity。
+		4. SH数据贴图(FThreeDGaussiansData):根据角度设置Niagara里texture_sh_X的贴图。
+		5. 设置剔除空间 CropTranslations、CropRotators、CropExtents、KillTranslations、KillRotators、KillExtents。
+
+# Niagara
+剔除方式:
+
+## 材质
diff --git a/03-UnrealEngine/Rendering/RenderingPipeline/ShaderModel添加.md b/03-UnrealEngine/Rendering/RenderingPipeline/ShaderModel添加.md
index bae45d9..7aa4a0e 100644
--- a/03-UnrealEngine/Rendering/RenderingPipeline/ShaderModel添加.md
+++ b/03-UnrealEngine/Rendering/RenderingPipeline/ShaderModel添加.md
@@ -113,4 +113,27 @@ else if(ShadingModel == SHADINGMODELID_NPRSHADING)
 ## 光照实现
 - ShadingModel.ush,BxDF实现。
-- DeferredLightingCommon.ush,延迟光照实现。
\ No newline at end of file
+- DeferredLightingCommon.ush,延迟光照实现。
+
+# UE5版本
+- 5.2 https://zhuanlan.zhihu.com/p/658700282
+- 5.1 https://zhuanlan.zhihu.com/p/565837897
+
+# 相关文件
+- UMaterialInterface、UMaterial、UMaterialInstance、FMaterial、FMaterialResource、FMaterialRenderProxy、FMaterialInstanceResource、FDefaultMaterialInstance
+- MaterialShader.cpp
+- HLSLMaterialTranslator.cpp
+- MaterialHLSLEmitter:塞入ShaderModel宏
+- Material.cpp:开启引脚
+- MaterialAttributeDefinitionMap.cpp
+- ShaderMaterialDerivedHelpers.cpp
+- ShaderGenerationUtil.cpp
+- ShadingCommon.ush
+
+# Material编辑器相关代码
+- FMaterialInstanceBasePropertyOverrides
+- FMaterialInstanceParameterDetails
+
+
+https://zhuanlan.zhihu.com/p/565776677
+https://www.cnblogs.com/timlly/p/15109132.html
\ No newline at end of file
diff --git a/03-UnrealEngine/Rendering/Shader/UE GBuffer存储数据.md b/03-UnrealEngine/Rendering/Shader/UE GBuffer存储数据.md
index f300cf3..b1580d3 100644
--- a/03-UnrealEngine/Rendering/Shader/UE GBuffer存储数据.md
+++ b/03-UnrealEngine/Rendering/Shader/UE GBuffer存储数据.md
@@ -21,7 +21,7 @@ https://zhuanlan.zhihu.com/p/400677108
 ## Depth Stencil
 [0] sandbox bit (bit to be use by any rendering passes, but must be properly reset to 0 after using) STENCIL
 LOD抖动过渡,后处理,贴花,Local光源都有使用,用来绘制临时Mask
-**[1] unallocated**
+**[001] unallocated**
 **[2] unallocated**
 [3] Temporal AA mask for translucent object. 
标记后TAA将不再读取History数据,用来处理半透物体的TAA问题 diff --git a/03-UnrealEngine/卡通渲染相关资料/卡通渲染开发总览.md b/03-UnrealEngine/卡通渲染相关资料/卡通渲染开发总览.md index 2633d44..168b533 100644 --- a/03-UnrealEngine/卡通渲染相关资料/卡通渲染开发总览.md +++ b/03-UnrealEngine/卡通渲染相关资料/卡通渲染开发总览.md @@ -12,7 +12,7 @@ rating: ⭐ - 参考对象分析 - 星穹铁道 - 破晓传说 - - 蓝色协议 + - 蓝色协议3. - 非人学园2 - 少女前线2 追放 - 明日方舟终末地 @@ -29,6 +29,13 @@ rating: ⭐ 5. 实现一波Anti-Lut。 6. Toon曝光偏移、模仿李兄的暗处的ShadowColor适配。 +# 渲染功能兼容的游戏 +- [ ] 罪恶装备 +- [ ] 原神 +- [ ] 星穹铁道 +- [ ] 绝区零 +- [ ] 蓝色协议 + # 卡通渲染引擎功能总览(按照渲染顺序进行排序) - GBuffer For ToonShaderModel - [[GBuffer&Material&BasePass]] @@ -102,7 +109,6 @@ rating: ⭐ - [ ] 丝袜 https://zhuanlan.zhihu.com/p/636157482 [[厚涂风格研究与开发笔记]] - - ShaderModel - 分阶着色 - 二阶化: diff --git a/03-UnrealEngine/卡通渲染相关资料/渲染功能/ShaderModel/GBuffer&Material&BasePass.md b/03-UnrealEngine/卡通渲染相关资料/渲染功能/ShaderModel/GBuffer&Material&BasePass.md index 95cd8db..3188d14 100644 --- a/03-UnrealEngine/卡通渲染相关资料/渲染功能/ShaderModel/GBuffer&Material&BasePass.md +++ b/03-UnrealEngine/卡通渲染相关资料/渲染功能/ShaderModel/GBuffer&Material&BasePass.md @@ -1,73 +1,73 @@ --- -title: Material&BasePass +title: GBuffer&Material&BasePass date: 2023-12-08 17:34:58 excerpt: tags: rating: ⭐ --- -# GBuffer + +# # GBuffer +UE5 GBuffer内容: +[[UE GBuffer存储数据]] ```c# OutGBufferA = WorldNormal/PerObjectGBufferData OutGBufferB = Metallic/Specular/Roughness/EncodeShadingModelIdAndSelectiveOutputMask(GBuffer.ShadingModelID, GBuffer.SelectiveOutputMask); OutGBufferC = BaseColor/GBufferAO OutGBufferD = GBuffer.CustomData; OutGBufferE = GBuffer.PrecomputedShadowFactors; + +// 0..1, 2 bits, use CastContactShadow(GBuffer) or HasDynamicIndirectShadowCasterRepresentation(GBuffer) to extract +half PerObjectGBufferData; ``` + +ToonGBuffer修改&数据存储: ```c# -GBufferB:Metallic/Specular/Roughness=>ToonHairMask OffsetShadowMask/SpcularMask/SpecularValue -OutGBufferD = CustomData.xyzw=》ShaderColor.rgb/NoL -OutGBufferE = GBuffer.PrecomputedShadowFactors.xyzw=》 /RimLightMask/DiffuseOffset/RimLightWidth -OutGBufferF = velocity => OutlineWidth/OutlineID/OutlinePaint/OutlineZShift +OutGBufferA:PerObjectGBufferData => 可以存储额外的有关Tonn渲染功能参数。 +OutGBufferB:Metallic/Specular/Roughness => + ? / SpcularMask(控制高光形状与Mask) / ? / ? + //ToonHairMask OffsetShadowMask/SpcularMask/SpecularValue +OutGBufferD:CustomData.xyzw => + ShaderColor.rgb / NoLOffset //ShadowColor这里可以在Material里通过主光向量、ShadowStep、Shadow羽化计算多层阴影效果。 +OutGBufferE:GBuffer.PrecomputedShadowFactors.xyzw => + ToonDataID/ ToonObjectID(判断是否是一个物体) /OutlineMask 控制Outline绘制以及Outline强度 / ToonAO +OutGBufferF:velocity => + ? / ? / ? / ? ``` -``` -| GBuffer | 表头 | -| -------- | ------------------------------------------------------------------------------------- | -| GBufferB | OffsetShadowMask SpcularMask SpecularValue EncodeShadingModelIdAndSelectiveOutputMask | -| GBufferD | ShaderColor.rgb NoL | -| GBufferE | | -| GBufferF | ID | -``` +蓝色协议的方案 +![[蓝色协议的方案#GBuffer]] -## BaseColor与ShadowColor -- 原神里ShadowColor还会接收其他物体的阴影投射,没有自投影;蓝色协议可能也没有自投影。 - -BaseColor与ShadowColor的过渡需要Step、Feather、Offset等参数,可以直接制作一个HalfLambert的渐变贴图之后使用View传递。因为有多个贴图所以还需要ID贴图指定。但这样需要考虑一个问题: +## Toon PerObjectGBufferData具体功能表 +从3开始,0、1、2已被占用。 +- ? -- 一个物体上的同一个ID区域的BaseColor与ShadowColor是否都是一样的 -- 如果不一样就需要再传递一个ShadowColor.rgb到GBuffer里。 -- 不管如何手绘的补充暗部也是需要加到GBuffer中的 - -这决定传递到View里面的渐变贴图是彩色还是暗色 +## ToonData -### 预计算贴图方案(构想) -Toon渲染一般会使用HalfLambda。之后使用Feather、Step等参数对过渡边界进行调整 -使用 渐变贴图查表来实现 渐变、二阶化。以此代替羽化、step等参数。 -使用ID贴图指定,或者通过BaseColor值来查询? 
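+
+下面给出上面提到的渐变贴图查表(Ramp LUT)思路的一个最小示意(函数名与数值均为假设,并非引擎或项目代码):先用HalfLambert把N·L重映射到0~1作为U坐标,再按材质ID选择一条一维Ramp采样,阴影过渡的形状完全由Ramp像素决定,以此代替Step、Feather等参数。
+```c++
+#include <algorithm>
+#include <array>
+#include <cstdio>
+
+// 一维Ramp:从暗部到亮部的8个采样点,过渡形状直接画在Ramp里
+struct FToonRamp { std::array<float, 8> Shade; };
+
+// Half-Lambert:把N·L从[-1,1]重映射到[0,1],背光面也能保留渐变
+float HalfLambert(float NdotL) { return NdotL * 0.5f + 0.5f; }
+
+// 按U坐标对Ramp做线性过滤采样,代替Step/Feather参数
+float SampleRamp(const FToonRamp& Ramp, float U)
+{
+	U = std::clamp(U, 0.0f, 1.0f);
+	const float X = U * (Ramp.Shade.size() - 1);
+	const int I = static_cast<int>(X);
+	const int J = std::min<int>(I + 1, static_cast<int>(Ramp.Shade.size()) - 1);
+	const float T = X - static_cast<float>(I);
+	return Ramp.Shade[I] * (1.0f - T) + Ramp.Shade[J] * T;
+}
+
+int main()
+{
+	// 两条假设的Ramp,由ID贴图(或BaseColor)选择:硬二阶 与 柔和过渡
+	const FToonRamp Ramps[2] = {
+		{ { 0.35f, 0.35f, 0.35f, 0.35f, 1.0f, 1.0f, 1.0f, 1.0f } },
+		{ { 0.35f, 0.4f, 0.5f, 0.7f, 0.9f, 1.0f, 1.0f, 1.0f } },
+	};
+	const float TestNdotL[] = { -0.5f, 0.0f, 0.3f, 0.8f };
+	for (float NdotL : TestNdotL)
+	{
+		std::printf("NdotL=%+.1f Hard=%.2f Soft=%.2f\n", NdotL,
+			SampleRamp(Ramps[0], HalfLambert(NdotL)),
+			SampleRamp(Ramps[1], HalfLambert(NdotL)));
+	}
+	return 0;
+}
+```
+这样美术只需要改Ramp贴图就能调整明暗交界的位置与软硬,不必逐材质调参数;ID贴图负责选择使用哪条Ramp。
+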
## 高光 - PBR高光(使用Roughness控制是否可行?是否需要传入GBuffer一个Mask贴图) - 自定义高光:高光贴图、高光颜色、参数化高光形状、多层高光 -## 描边 -- 原神的描边好像是后处理 -- 蓝色协议 -![[08-Assets/Images/ImageBag/UrealEngineNPR/原神_描边.png]] -![[08-Assets/Images/ImageBag/UrealEngineNPR/原神截图_描边.png]] - -TODO:考虑使用顶点色来控制宽度,使用顶点色G - -## 顶点色 +# 顶点色 +## 蓝色协议 用于存储一些低精度数据,插值即可 -- R: -- G:描边宽度 -- B: +- 顶点色: + - R:阴影区域控制(强度) 0~1 + - G:描边宽度 + - B:ToonAO +- 第二套顶点色(UV Channel1): + - R:深度偏移 + - G:用来区分内轮廓不同部位的ID -蓝色协议的R:阴影区域标记 与 B:Ao,而罪恶装备使用贴图来传递信息。 - -## lightmap -### 罪恶装备 -![](https://pic2.zhimg.com/80/v2-56012886fafbaf36932f03b0ad65a165_720w.jpg),G为阴影控(AO),R为高光强度参数,金属和光滑材质的部分设置的更大一些。B通道:用于照明控制。最大值为高光,反之,值越小高光越淡。![](https://pic4.zhimg.com/80/v2-748ebbdd4da3efe74054c8215be8b023_720w.jpg) +蓝色协议的R:阴影区域标记 与 B:AO,而罪恶装备使用贴图来传递信息。 +## 罪恶装备 +对阴影判断阈值的偏移。(见前面着色部分,顶点AO+手绘修正) +R:阴影偏移 +G:轮廓线根据与相机的距离扩大多少的系数 +B:等高线 Z 轴偏移值 + +# 罪恶装备 +![](https://pic2.zhimg.com/80/v2-56012886fafbaf36932f03b0ad65a165_720w.jpg)8,G为阴影控(AO),R为高光强度参数,金属和光滑材质的部分设置的更大一些。B通道:用于照明控制。最大值为高光,反之,值越小高光越淡。![](https://pic4.zhimg.com/80/v2-748ebbdd4da3efe74054c8215be8b023_720w.jpg) ![](https://pic2.zhimg.com/80/v2-74e1a9fba264af2b18e66616d9f86831_720w.jpg) https://zhuanlan.zhihu.com/p/360229590一文中介绍了崩坏3与原神的计算方式 @@ -110,7 +110,7 @@ G : 轮廓线根据与相机的距离扩大多少的系数 B : 等高线 Z 轴偏移值 A : 轮廓厚度系数。0.5为标准,1为最大厚度,0为无等高线 -### 蓝色协议 +# 蓝色协议 [[蓝色协议的方案]] -### 米哈游 +# 米哈游 diff --git a/03-UnrealEngine/卡通渲染相关资料/渲染功能/ShaderModel/ToonShaderModel.md b/03-UnrealEngine/卡通渲染相关资料/渲染功能/ShaderModel/ToonShaderModel.md new file mode 100644 index 0000000..781b4a1 --- /dev/null +++ b/03-UnrealEngine/卡通渲染相关资料/渲染功能/ShaderModel/ToonShaderModel.md @@ -0,0 +1,12 @@ +--- +title: ToonShaderModel +date: 2023-12-18 10:00:34 +excerpt: +tags: +rating: ⭐ +--- + +# ToonStandard(Cel打底) + + +# 厚涂 ShaderModel(通过修改预积分ShaderModel) \ No newline at end of file diff --git a/03-UnrealEngine/卡通渲染相关资料/渲染功能/描边/描边.md b/03-UnrealEngine/卡通渲染相关资料/渲染功能/描边/描边.md index 9930683..414b970 100644 --- a/03-UnrealEngine/卡通渲染相关资料/渲染功能/描边/描边.md +++ b/03-UnrealEngine/卡通渲染相关资料/渲染功能/描边/描边.md @@ -9,6 +9,11 @@ rating: ⭐ - 后处理描边 - MeshDraw描边 +# 其他游戏做法 +## 蓝色协议 +采用后处理、Backface以及预绘制描边。 +[[蓝色协议的方案#轮廓]] + # 杂项 ## 李兄实现Outline思路 ### Depth与Normal描边 diff --git a/03-UnrealEngine/卡通渲染相关资料/演讲笔记/蓝色协议的方案.md b/03-UnrealEngine/卡通渲染相关资料/演讲笔记/蓝色协议的方案.md index 7aa90cf..803824c 100644 --- a/03-UnrealEngine/卡通渲染相关资料/演讲笔记/蓝色协议的方案.md +++ b/03-UnrealEngine/卡通渲染相关资料/演讲笔记/蓝色协议的方案.md @@ -107,12 +107,12 @@ TAA会导致勾线变糊,所以用了UE的Responsive AA,用stencil标记了 ## GBuffer ```c# -GBufferB:Metallic/Specular/Roughness=>ToonHairMask OffsetShadowMask/SpcularMask/SpecularValue -OutGBufferD = CustomData.xyzw=》ShaderColor.rgb/NoL -OutGBufferE = GBuffer.PrecomputedShadowFactors.xyzw=》 /RimLightMask/DiffuseOffset/RimLightWidth +GBufferB:Metallic/Specular/Roughness => ToonHairMask OffsetShadowMask/SpcularMask/SpecularValue +OutGBufferD = CustomData.xyzw => ShaderColor.rgb/NoL +OutGBufferE = GBuffer.PrecomputedShadowFactors.xyzw => /RimLightMask/DiffuseOffset/RimLightWidth OutGBufferF = velocity => OutlineWidth/OutlineID/OutlinePaint/OutlineZShift ``` -关闭角色的预计算阴影。 +PS.需要关闭角色的预计算阴影。 ## 管线 ![](https://pic2.zhimg.com/80/v2-e0df1cfdf229e2c50f6a70ca0ee99101_720w.jpg) \ No newline at end of file diff --git a/07-Other/Node.js/前后端API管理工具.md b/07-Other/Node.js/前后端API管理工具.md new file mode 100644 index 0000000..f498538 --- /dev/null +++ b/07-Other/Node.js/前后端API管理工具.md @@ -0,0 +1 @@ +1. 
Apifox \ No newline at end of file diff --git a/07-Other/Node.js/在Nodejs中使用Axios下载文件的方法.md b/07-Other/Node.js/在Nodejs中使用Axios下载文件的方法.md new file mode 100644 index 0000000..7214724 --- /dev/null +++ b/07-Other/Node.js/在Nodejs中使用Axios下载文件的方法.md @@ -0,0 +1,45 @@ +# 参考 +- https://blog.csdn.net/clearlxj/article/details/108264141 + +>查看源码发现axios返回的内容默认是Stream格式的; +如果没有设置responseType的话,返回内容将会从Stream转为Buffer再转为String; +如果responseType为stream的话不进行转换; +如果responseType为arraybuffer的话将Stream转为Buffer; +但是如果想把Buffer转为String再转回Buffer的话将会出问题(默认使用utf8进行Buffer的编码和解码),有的文件这样是可以的,但是Excel文件这样做的话两次Buffer的值是不同的;但是转为base64的话是都可以的。 + + +# 设置responseType为stream +```js +const result = await require('axios')({ url, method, data, responseType: 'stream' }); +result.data.pipe(require('fs').createWriteStream(saveFilePath)); +``` + + +提供buffer转Stream并保存为文件的方法 +```js +function bufferToStream(bufferData, saveFilePath) { + return new Promise((res, rej) => { + const bufferStream = new stream.PassThrough(); + bufferStream.end(bufferData); + const ws = fs.createWriteStream(saveFilePath); + bufferStream.pipe(ws).on('finish', () => { + res(saveFilePath); + }); + }); +} +``` + + +# 设置responseType为arraybuffer +```js +const result = await require('axios')({ url, method, data, responseType: 'arraybuffer' }); +const bufferData = result.data; +await bufferToStream(bufferData, saveFilePath); +``` + +# 不设置responseType,设置responseEncoding为base64 +```js +const result = await require('axios')({ url, method, data, responseEncoding: 'base64' }); +const bufferData = Buffer.from(result.data, 'base64'); +await bufferToStream(bufferData, saveFilePath); +``` \ No newline at end of file diff --git a/08-Assets/Images/ImageBag/Images/20231025141028.png b/08-Assets/Images/ImageBag/Images/20231025141028.png new file mode 100644 index 0000000..19808b8 Binary files /dev/null and b/08-Assets/Images/ImageBag/Images/20231025141028.png differ diff --git a/08-Assets/Images/ImageBag/Images/20231025141119.png b/08-Assets/Images/ImageBag/Images/20231025141119.png new file mode 100644 index 0000000..19808b8 Binary files /dev/null and b/08-Assets/Images/ImageBag/Images/20231025141119.png differ diff --git a/08-Assets/Images/ImageBag/Images/20231025144447.png b/08-Assets/Images/ImageBag/Images/20231025144447.png new file mode 100644 index 0000000..f576bdb Binary files /dev/null and b/08-Assets/Images/ImageBag/Images/20231025144447.png differ diff --git a/08-Assets/Images/ImageBag/RuntimeModifyAnimSequence.png b/08-Assets/Images/ImageBag/RuntimeModifyAnimSequence.png new file mode 100644 index 0000000..9f855a5 Binary files /dev/null and b/08-Assets/Images/ImageBag/RuntimeModifyAnimSequence.png differ diff --git a/document/UrealEngineNPR渲染实现/蓝色协议的方案.html.md b/document/UrealEngineNPR渲染实现/蓝色协议的方案.html.md deleted file mode 100644 index e69de29..0000000