From 13a3603578f87ee47eb69e26271dc67d2d7df209 Mon Sep 17 00:00:00 2001
From: Campbell Barton
Date: Wed, 15 May 2024 23:39:31 +1000
Subject: [PATCH] Move addons from the addons repo to "addons_core"

Ref: !121825
---
 scripts/addons_core/bl_pkg/Makefile | 136 +
 scripts/addons_core/bl_pkg/__init__.py | 545 +++
 .../addons_core/bl_pkg/bl_extension_cli.py | 829 ++++
 .../addons_core/bl_pkg/bl_extension_local.py | 46 +
 .../addons_core/bl_pkg/bl_extension_notify.py | 378 ++
 .../addons_core/bl_pkg/bl_extension_ops.py | 2337 ++++++
 scripts/addons_core/bl_pkg/bl_extension_ui.py | 807 ++++
 .../addons_core/bl_pkg/bl_extension_utils.py | 1305 ++++++
 scripts/addons_core/bl_pkg/cli/blender_ext.py | 2923 ++++++
 .../bl_pkg/example_extension/AUTHORS | 5 +
 .../bl_pkg/example_extension/__init__.py | 8 +
 .../example_extension/blender_manifest.toml | 17 +
 scripts/addons_core/bl_pkg/readme.rst | 135 +
 .../tests/modules/http_server_context.py | 103 +
 .../tests/modules/python_wheel_generate.py | 153 +
 .../addons_core/bl_pkg/tests/test_blender.py | 199 +
 scripts/addons_core/bl_pkg/tests/test_cli.py | 420 ++
 .../bl_pkg/tests/test_cli_blender.py | 515 +++
 scripts/addons_core/bl_pkg/wheel_manager.py | 380 ++
 scripts/addons_core/copy_global_transform.py | 1100 +++++
 scripts/addons_core/hydra_storm/__init__.py | 33 +
 scripts/addons_core/hydra_storm/engine.py | 47 +
 scripts/addons_core/hydra_storm/properties.py | 62 +
 scripts/addons_core/hydra_storm/ui.py | 259 ++
 scripts/addons_core/io_anim_bvh/__init__.py | 388 ++
 scripts/addons_core/io_anim_bvh/export_bvh.py | 294 ++
 scripts/addons_core/io_anim_bvh/import_bvh.py | 783 ++++
 scripts/addons_core/io_curve_svg/__init__.py | 67 +
 .../addons_core/io_curve_svg/import_svg.py | 1904 ++++++++
 .../addons_core/io_curve_svg/svg_colors.py | 153 +
 scripts/addons_core/io_curve_svg/svg_util.py | 107 +
 .../addons_core/io_curve_svg/svg_util_test.py | 166 +
 .../addons_core/io_mesh_uv_layout/__init__.py | 300 ++
 .../io_mesh_uv_layout/export_uv_eps.py | 82 +
 .../io_mesh_uv_layout/export_uv_png.py | 119 +
 .../io_mesh_uv_layout/export_uv_svg.py | 54 +
 scripts/addons_core/io_scene_fbx/__init__.py | 733 +++
 .../addons_core/io_scene_fbx/data_types.py | 62 +
 .../addons_core/io_scene_fbx/encode_bin.py | 434 ++
 .../io_scene_fbx/export_fbx_bin.py | 3742 +++++++++
 scripts/addons_core/io_scene_fbx/fbx2json.py | 341 ++
 scripts/addons_core/io_scene_fbx/fbx_utils.py | 1931 ++++
 .../io_scene_fbx/fbx_utils_threading.py | 194 +
 .../addons_core/io_scene_fbx/import_fbx.py | 4024 +++++++++
 scripts/addons_core/io_scene_fbx/json2fbx.py | 165 +
 scripts/addons_core/io_scene_fbx/parse_fbx.py | 274 ++
 .../addons_core/io_scene_gltf2/__init__.py | 1972 ++++
 .../io_scene_gltf2/blender/__init__.py | 3 +
 .../blender/com/gltf2_blender_conversion.py | 215 +
 .../blender/com/gltf2_blender_data_path.py | 80 +
 .../blender/com/gltf2_blender_default.py | 15 +
 .../blender/com/gltf2_blender_extras.py | 87 +
 .../blender/com/gltf2_blender_json.py | 27 +
 .../com/gltf2_blender_material_helpers.py | 32 +
 .../blender/com/gltf2_blender_math.py | 209 +
 .../blender/com/gltf2_blender_ui.py | 734 +++
 .../blender/com/gltf2_blender_utils.py | 70 +
 .../gltf2_blender_gather_fcurves_animation.py | 55 +
 ...2_blender_gather_fcurves_channel_target.py | 53 +
 .../gltf2_blender_gather_fcurves_channels.py | 377 ++
 .../gltf2_blender_gather_fcurves_keyframes.py | 208 +
 .../gltf2_blender_gather_fcurves_sampler.py | 231 +
 .../animation/gltf2_blender_gather_action.py | 740 +++
 .../gltf2_blender_gather_animation_utils.py | 291 ++
.../gltf2_blender_gather_animations.py | 22 + .../animation/gltf2_blender_gather_drivers.py | 79 + .../gltf2_blender_gather_keyframes.py | 127 + .../gltf2_blender_gather_scene_animation.py | 231 + .../animation/gltf2_blender_gather_tracks.py | 718 +++ .../armature/armature_action_sampled.py | 88 + .../armature/armature_channel_target.py | 54 + .../sampled/armature/armature_channels.py | 216 + .../sampled/armature/armature_keyframes.py | 93 + .../sampled/armature/armature_sampler.py | 231 + ...ltf2_blender_gather_data_channel_target.py | 48 + .../gltf2_blender_gather_data_channels.py | 113 + .../gltf2_blender_gather_data_keyframes.py | 142 + .../data/gltf2_blender_gather_data_sampler.py | 136 + ...blender_gather_animation_sampling_cache.py | 662 +++ ...f2_blender_gather_object_action_sampled.py | 78 + ...f2_blender_gather_object_channel_target.py | 51 + .../gltf2_blender_gather_object_channels.py | 125 + .../gltf2_blender_gather_object_keyframes.py | 86 + .../gltf2_blender_gather_object_sampler.py | 171 + .../gltf2_blender_gather_sk_action_sampled.py | 62 + .../gltf2_blender_gather_sk_channel_target.py | 38 + .../gltf2_blender_gather_sk_channels.py | 75 + .../gltf2_blender_gather_sk_keyframes.py | 109 + .../gltf2_blender_gather_sk_sampler.py | 112 + .../blender/exp/gltf2_blender_export.py | 402 ++ .../blender/exp/gltf2_blender_gather.py | 126 + .../exp/gltf2_blender_gather_accessors.py | 189 + .../blender/exp/gltf2_blender_gather_cache.py | 153 + .../exp/gltf2_blender_gather_cameras.py | 152 + .../exp/gltf2_blender_gather_joints.py | 110 + .../exp/gltf2_blender_gather_light_spots.py | 47 + .../exp/gltf2_blender_gather_lights.py | 196 + .../blender/exp/gltf2_blender_gather_mesh.py | 160 + .../blender/exp/gltf2_blender_gather_nodes.py | 530 +++ ...tf2_blender_gather_primitive_attributes.py | 222 + .../exp/gltf2_blender_gather_primitives.py | 351 ++ ...gltf2_blender_gather_primitives_extract.py | 1435 ++++++ .../exp/gltf2_blender_gather_sampler.py | 187 + .../blender/exp/gltf2_blender_gather_skins.py | 148 + .../blender/exp/gltf2_blender_gather_tree.py | 904 ++++ .../blender/exp/gltf2_blender_get.py | 33 + .../exp/gltf2_blender_gltf2_exporter.py | 554 +++ ...tf2_blender_gather_materials_anisotropy.py | 260 ++ ...ltf2_blender_gather_materials_clearcoat.py | 156 + ...gltf2_blender_gather_materials_emission.py | 115 + .../gltf2_blender_gather_materials_ior.py | 45 + .../gltf2_blender_gather_materials_sheen.py | 129 + ...gltf2_blender_gather_materials_specular.py | 140 + ...2_blender_gather_materials_transmission.py | 76 + ...gltf2_blender_gather_materials_variants.py | 19 + .../gltf2_blender_gather_materials_volume.py | 118 + .../extensions/gltf2_blender_image.py | 373 ++ .../material/gltf2_blender_gather_image.py | 439 ++ .../gltf2_blender_gather_materials.py | 717 +++ ...gather_materials_pbr_metallic_roughness.py | 260 ++ .../gltf2_blender_gather_materials_unlit.py | 180 + .../material/gltf2_blender_gather_texture.py | 244 + .../gltf2_blender_gather_texture_info.py | 287 ++ .../gltf2_blender_search_node_tree.py | 845 ++++ .../gltf2_blender_KHR_materials_anisotropy.py | 116 + ...der_KHR_materials_pbrSpecularGlossiness.py | 203 + .../imp/gltf2_blender_KHR_materials_unlit.py | 56 + .../blender/imp/gltf2_blender_animation.py | 161 + .../imp/gltf2_blender_animation_node.py | 189 + .../imp/gltf2_blender_animation_pointer.py | 714 +++ .../imp/gltf2_blender_animation_utils.py | 69 + .../imp/gltf2_blender_animation_weight.py | 95 + .../blender/imp/gltf2_blender_camera.py | 75 + 
.../blender/imp/gltf2_blender_gltf.py | 624 +++ .../blender/imp/gltf2_blender_image.py | 102 + .../blender/imp/gltf2_blender_light.py | 143 + .../blender/imp/gltf2_blender_material.py | 99 + .../imp/gltf2_blender_material_utils.py | 196 + .../blender/imp/gltf2_blender_mesh.py | 863 ++++ .../blender/imp/gltf2_blender_node.py | 326 ++ .../imp/gltf2_blender_pbrMetallicRoughness.py | 849 ++++ .../blender/imp/gltf2_blender_scene.py | 121 + .../blender/imp/gltf2_blender_texture.py | 233 + .../blender/imp/gltf2_blender_vnode.py | 584 +++ .../gltf2_io_draco_compression_extension.py | 138 + .../addons_core/io_scene_gltf2/io/__init__.py | 5 + .../io_scene_gltf2/io/com/gltf2_io.py | 1215 +++++ .../io/com/gltf2_io_constants.py | 159 + .../io_scene_gltf2/io/com/gltf2_io_debug.py | 119 + .../gltf2_io_draco_compression_extension.py | 58 + .../io/com/gltf2_io_extensions.py | 30 + .../io/com/gltf2_io_lights_punctual.py | 68 + .../io_scene_gltf2/io/com/gltf2_io_path.py | 19 + .../io/com/gltf2_io_variants.py | 32 + .../io/exp/gltf2_io_binary_data.py | 35 + .../io_scene_gltf2/io/exp/gltf2_io_buffer.py | 54 + .../gltf2_io_draco_compression_extension.py | 173 + .../io_scene_gltf2/io/exp/gltf2_io_export.py | 122 + .../io/exp/gltf2_io_image_data.py | 48 + .../io/exp/gltf2_io_user_extensions.py | 17 + .../io_scene_gltf2/io/imp/__init__.py | 5 + .../io_scene_gltf2/io/imp/gltf2_io_binary.py | 215 + .../io_scene_gltf2/io/imp/gltf2_io_gltf.py | 198 + .../io/imp/gltf2_io_user_extensions.py | 13 + scripts/addons_core/pose_library/__init__.py | 55 + .../addons_core/pose_library/asset_browser.py | 99 + .../addons_core/pose_library/conversion.py | 62 + scripts/addons_core/pose_library/functions.py | 57 + scripts/addons_core/pose_library/gui.py | 254 ++ scripts/addons_core/pose_library/keymaps.py | 38 + scripts/addons_core/pose_library/operators.py | 460 ++ .../addons_core/pose_library/pose_creation.py | 411 ++ .../addons_core/pose_library/pose_usage.py | 51 + scripts/addons_core/ui_translate/__init__.py | 61 + .../ui_translate/edit_translation.py | 408 ++ scripts/addons_core/ui_translate/settings.py | 194 + .../addons_core/ui_translate/update_addon.py | 377 ++ .../addons_core/ui_translate/update_repo.py | 256 ++ scripts/addons_core/ui_translate/update_ui.py | 272 ++ .../viewport_vr_preview/__init__.py | 52 + .../viewport_vr_preview/action_map.py | 165 + .../viewport_vr_preview/action_map_io.py | 346 ++ .../viewport_vr_preview/configs/default.py | 424 ++ .../viewport_vr_preview/defaults.py | 1501 ++++++ .../addons_core/viewport_vr_preview/gui.py | 284 ++ .../viewport_vr_preview/operators.py | 540 +++ .../viewport_vr_preview/properties.py | 228 + .../viewport_vr_preview/versioning.py | 38 + 188 files changed, 63801 insertions(+) create mode 100644 scripts/addons_core/bl_pkg/Makefile create mode 100644 scripts/addons_core/bl_pkg/__init__.py create mode 100644 scripts/addons_core/bl_pkg/bl_extension_cli.py create mode 100644 scripts/addons_core/bl_pkg/bl_extension_local.py create mode 100644 scripts/addons_core/bl_pkg/bl_extension_notify.py create mode 100644 scripts/addons_core/bl_pkg/bl_extension_ops.py create mode 100644 scripts/addons_core/bl_pkg/bl_extension_ui.py create mode 100644 scripts/addons_core/bl_pkg/bl_extension_utils.py create mode 100755 scripts/addons_core/bl_pkg/cli/blender_ext.py create mode 100644 scripts/addons_core/bl_pkg/example_extension/AUTHORS create mode 100644 scripts/addons_core/bl_pkg/example_extension/__init__.py create mode 100644 
scripts/addons_core/bl_pkg/example_extension/blender_manifest.toml create mode 100644 scripts/addons_core/bl_pkg/readme.rst create mode 100644 scripts/addons_core/bl_pkg/tests/modules/http_server_context.py create mode 100644 scripts/addons_core/bl_pkg/tests/modules/python_wheel_generate.py create mode 100644 scripts/addons_core/bl_pkg/tests/test_blender.py create mode 100644 scripts/addons_core/bl_pkg/tests/test_cli.py create mode 100644 scripts/addons_core/bl_pkg/tests/test_cli_blender.py create mode 100644 scripts/addons_core/bl_pkg/wheel_manager.py create mode 100644 scripts/addons_core/copy_global_transform.py create mode 100644 scripts/addons_core/hydra_storm/__init__.py create mode 100644 scripts/addons_core/hydra_storm/engine.py create mode 100644 scripts/addons_core/hydra_storm/properties.py create mode 100644 scripts/addons_core/hydra_storm/ui.py create mode 100644 scripts/addons_core/io_anim_bvh/__init__.py create mode 100644 scripts/addons_core/io_anim_bvh/export_bvh.py create mode 100644 scripts/addons_core/io_anim_bvh/import_bvh.py create mode 100644 scripts/addons_core/io_curve_svg/__init__.py create mode 100644 scripts/addons_core/io_curve_svg/import_svg.py create mode 100644 scripts/addons_core/io_curve_svg/svg_colors.py create mode 100644 scripts/addons_core/io_curve_svg/svg_util.py create mode 100755 scripts/addons_core/io_curve_svg/svg_util_test.py create mode 100644 scripts/addons_core/io_mesh_uv_layout/__init__.py create mode 100644 scripts/addons_core/io_mesh_uv_layout/export_uv_eps.py create mode 100644 scripts/addons_core/io_mesh_uv_layout/export_uv_png.py create mode 100644 scripts/addons_core/io_mesh_uv_layout/export_uv_svg.py create mode 100644 scripts/addons_core/io_scene_fbx/__init__.py create mode 100644 scripts/addons_core/io_scene_fbx/data_types.py create mode 100644 scripts/addons_core/io_scene_fbx/encode_bin.py create mode 100644 scripts/addons_core/io_scene_fbx/export_fbx_bin.py create mode 100755 scripts/addons_core/io_scene_fbx/fbx2json.py create mode 100644 scripts/addons_core/io_scene_fbx/fbx_utils.py create mode 100644 scripts/addons_core/io_scene_fbx/fbx_utils_threading.py create mode 100644 scripts/addons_core/io_scene_fbx/import_fbx.py create mode 100755 scripts/addons_core/io_scene_fbx/json2fbx.py create mode 100644 scripts/addons_core/io_scene_fbx/parse_fbx.py create mode 100755 scripts/addons_core/io_scene_gltf2/__init__.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/__init__.py create mode 100755 scripts/addons_core/io_scene_gltf2/blender/com/gltf2_blender_conversion.py create mode 100755 scripts/addons_core/io_scene_gltf2/blender/com/gltf2_blender_data_path.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/com/gltf2_blender_default.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/com/gltf2_blender_extras.py create mode 100755 scripts/addons_core/io_scene_gltf2/blender/com/gltf2_blender_json.py create mode 100755 scripts/addons_core/io_scene_gltf2/blender/com/gltf2_blender_material_helpers.py create mode 100755 scripts/addons_core/io_scene_gltf2/blender/com/gltf2_blender_math.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/com/gltf2_blender_ui.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/com/gltf2_blender_utils.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/exp/animation/fcurves/gltf2_blender_gather_fcurves_animation.py create mode 100644 
scripts/addons_core/io_scene_gltf2/blender/exp/animation/fcurves/gltf2_blender_gather_fcurves_channel_target.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/exp/animation/fcurves/gltf2_blender_gather_fcurves_channels.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/exp/animation/fcurves/gltf2_blender_gather_fcurves_keyframes.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/exp/animation/fcurves/gltf2_blender_gather_fcurves_sampler.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/exp/animation/gltf2_blender_gather_action.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/exp/animation/gltf2_blender_gather_animation_utils.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/exp/animation/gltf2_blender_gather_animations.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/exp/animation/gltf2_blender_gather_drivers.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/exp/animation/gltf2_blender_gather_keyframes.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/exp/animation/gltf2_blender_gather_scene_animation.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/exp/animation/gltf2_blender_gather_tracks.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/armature/armature_action_sampled.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/armature/armature_channel_target.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/armature/armature_channels.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/armature/armature_keyframes.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/armature/armature_sampler.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/data/gltf2_blender_gather_data_channel_target.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/data/gltf2_blender_gather_data_channels.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/data/gltf2_blender_gather_data_keyframes.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/data/gltf2_blender_gather_data_sampler.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/gltf2_blender_gather_animation_sampling_cache.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/object/gltf2_blender_gather_object_action_sampled.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/object/gltf2_blender_gather_object_channel_target.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/object/gltf2_blender_gather_object_channels.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/object/gltf2_blender_gather_object_keyframes.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/object/gltf2_blender_gather_object_sampler.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/shapekeys/gltf2_blender_gather_sk_action_sampled.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/shapekeys/gltf2_blender_gather_sk_channel_target.py create mode 100644 
scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/shapekeys/gltf2_blender_gather_sk_channels.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/shapekeys/gltf2_blender_gather_sk_keyframes.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/shapekeys/gltf2_blender_gather_sk_sampler.py create mode 100755 scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_export.py create mode 100755 scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_accessors.py create mode 100755 scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_cache.py create mode 100755 scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_cameras.py create mode 100755 scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_joints.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_light_spots.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_lights.py create mode 100755 scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_mesh.py create mode 100755 scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_nodes.py create mode 100755 scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_primitive_attributes.py create mode 100755 scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_primitives.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_primitives_extract.py create mode 100755 scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_sampler.py create mode 100755 scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_skins.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_tree.py create mode 100755 scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_get.py create mode 100755 scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gltf2_exporter.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/exp/material/extensions/gltf2_blender_gather_materials_anisotropy.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/exp/material/extensions/gltf2_blender_gather_materials_clearcoat.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/exp/material/extensions/gltf2_blender_gather_materials_emission.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/exp/material/extensions/gltf2_blender_gather_materials_ior.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/exp/material/extensions/gltf2_blender_gather_materials_sheen.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/exp/material/extensions/gltf2_blender_gather_materials_specular.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/exp/material/extensions/gltf2_blender_gather_materials_transmission.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/exp/material/extensions/gltf2_blender_gather_materials_variants.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/exp/material/extensions/gltf2_blender_gather_materials_volume.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/exp/material/extensions/gltf2_blender_image.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/exp/material/gltf2_blender_gather_image.py create mode 100644 
scripts/addons_core/io_scene_gltf2/blender/exp/material/gltf2_blender_gather_materials.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/exp/material/gltf2_blender_gather_materials_pbr_metallic_roughness.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/exp/material/gltf2_blender_gather_materials_unlit.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/exp/material/gltf2_blender_gather_texture.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/exp/material/gltf2_blender_gather_texture_info.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/exp/material/gltf2_blender_search_node_tree.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_KHR_materials_anisotropy.py create mode 100755 scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_KHR_materials_pbrSpecularGlossiness.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_KHR_materials_unlit.py create mode 100755 scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_animation.py create mode 100755 scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_animation_node.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_animation_pointer.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_animation_utils.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_animation_weight.py create mode 100755 scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_camera.py create mode 100755 scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_gltf.py create mode 100755 scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_image.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_light.py create mode 100755 scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_material.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_material_utils.py create mode 100755 scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_mesh.py create mode 100755 scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_node.py create mode 100755 scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_pbrMetallicRoughness.py create mode 100755 scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_scene.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_texture.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_vnode.py create mode 100644 scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_io_draco_compression_extension.py create mode 100755 scripts/addons_core/io_scene_gltf2/io/__init__.py create mode 100755 scripts/addons_core/io_scene_gltf2/io/com/gltf2_io.py create mode 100755 scripts/addons_core/io_scene_gltf2/io/com/gltf2_io_constants.py create mode 100755 scripts/addons_core/io_scene_gltf2/io/com/gltf2_io_debug.py create mode 100644 scripts/addons_core/io_scene_gltf2/io/com/gltf2_io_draco_compression_extension.py create mode 100644 scripts/addons_core/io_scene_gltf2/io/com/gltf2_io_extensions.py create mode 100644 scripts/addons_core/io_scene_gltf2/io/com/gltf2_io_lights_punctual.py create mode 100644 scripts/addons_core/io_scene_gltf2/io/com/gltf2_io_path.py create mode 100644 scripts/addons_core/io_scene_gltf2/io/com/gltf2_io_variants.py create mode 100755 scripts/addons_core/io_scene_gltf2/io/exp/gltf2_io_binary_data.py create mode 100755 
scripts/addons_core/io_scene_gltf2/io/exp/gltf2_io_buffer.py create mode 100644 scripts/addons_core/io_scene_gltf2/io/exp/gltf2_io_draco_compression_extension.py create mode 100755 scripts/addons_core/io_scene_gltf2/io/exp/gltf2_io_export.py create mode 100755 scripts/addons_core/io_scene_gltf2/io/exp/gltf2_io_image_data.py create mode 100644 scripts/addons_core/io_scene_gltf2/io/exp/gltf2_io_user_extensions.py create mode 100755 scripts/addons_core/io_scene_gltf2/io/imp/__init__.py create mode 100755 scripts/addons_core/io_scene_gltf2/io/imp/gltf2_io_binary.py create mode 100755 scripts/addons_core/io_scene_gltf2/io/imp/gltf2_io_gltf.py create mode 100644 scripts/addons_core/io_scene_gltf2/io/imp/gltf2_io_user_extensions.py create mode 100644 scripts/addons_core/pose_library/__init__.py create mode 100644 scripts/addons_core/pose_library/asset_browser.py create mode 100644 scripts/addons_core/pose_library/conversion.py create mode 100644 scripts/addons_core/pose_library/functions.py create mode 100644 scripts/addons_core/pose_library/gui.py create mode 100644 scripts/addons_core/pose_library/keymaps.py create mode 100644 scripts/addons_core/pose_library/operators.py create mode 100644 scripts/addons_core/pose_library/pose_creation.py create mode 100644 scripts/addons_core/pose_library/pose_usage.py create mode 100644 scripts/addons_core/ui_translate/__init__.py create mode 100644 scripts/addons_core/ui_translate/edit_translation.py create mode 100644 scripts/addons_core/ui_translate/settings.py create mode 100644 scripts/addons_core/ui_translate/update_addon.py create mode 100644 scripts/addons_core/ui_translate/update_repo.py create mode 100644 scripts/addons_core/ui_translate/update_ui.py create mode 100644 scripts/addons_core/viewport_vr_preview/__init__.py create mode 100644 scripts/addons_core/viewport_vr_preview/action_map.py create mode 100644 scripts/addons_core/viewport_vr_preview/action_map_io.py create mode 100644 scripts/addons_core/viewport_vr_preview/configs/default.py create mode 100644 scripts/addons_core/viewport_vr_preview/defaults.py create mode 100644 scripts/addons_core/viewport_vr_preview/gui.py create mode 100644 scripts/addons_core/viewport_vr_preview/operators.py create mode 100644 scripts/addons_core/viewport_vr_preview/properties.py create mode 100644 scripts/addons_core/viewport_vr_preview/versioning.py diff --git a/scripts/addons_core/bl_pkg/Makefile b/scripts/addons_core/bl_pkg/Makefile new file mode 100644 index 00000000000..0551dd44f9c --- /dev/null +++ b/scripts/addons_core/bl_pkg/Makefile @@ -0,0 +1,136 @@ +# SPDX-FileCopyrightText: 2011-2023 Blender Authors +# +# SPDX-License-Identifier: GPL-2.0-or-later + +# note: this isn't needed for building, +# its just for some convenience targets. + +# Needed for when tests are run from another directory: `make -C ./path/to/this/directory` +BASE_DIR := ${CURDIR} + +PY_FILES=$(shell find ./ -type f -name '*.py') +# Filter out files which use `bpy`. +PY_FILES_MYPY=$(filter-out \ + ./__init__.py \ + ./bl_extension_cli.py \ + ./bl_extension_local.py \ + ./bl_extension_monkeypatch.py \ + ./bl_extension_notify.py \ + ./bl_extension_ops.py \ + ./bl_extension_ui.py \ + ./bl_extension_utils.py \ + ./wheel_manager.py \ + ./example_extension/__init__.py \ + ./example_extension/foo.py \ + ,$(PY_FILES)) + +PY_FILES_MYPY_STANDALONE= \ + ./bl_extension_utils.py \ + ./bl_extension_cli.py \ + ./wheel_manager.py + +EXTRA_WATCH_FILES=Makefile + +# For tests that launch Blender directly. 
+BLENDER_BIN?=$(shell which blender) +PYTHON_BIN?=$(shell which python3) + +pep8: FORCE + @flake8 $(PY_FILES) --ignore=E501,E302,E123,E126,E128,E129,E124,E122,W504 + +# `--no-namespace-packages` is needed otherwise `./cli/blender_ext.py` loads in parent modules +# (the Blender add-on which imports `bpy`). +check_mypy: FORCE + @mypy --no-namespace-packages --strict $(PY_FILES_MYPY) + @mypy --strict --follow-imports=skip $(PY_FILES_MYPY_STANDALONE) + +check_ruff: FORCE + @env --chdir="$(BASE_DIR)" ruff check $(PY_FILES_MYPY) + @env --chdir="$(BASE_DIR)" ruff check $(PY_FILES_MYPY_STANDALONE) + +check_pylint: + @env --chdir="$(BASE_DIR)" \ + pylint $(PY_FILES) \ + --disable=C0111,C0301,C0302,C0103,C0415,R1705,R0902,R0903,R0913,E0611,E0401,I1101,R0801,C0209,W0511,W0718,W0719,C0413,R0911,R0912,R0914,R0915 + +# python3 ./tests/test_cli.py +test: FORCE + @env --chdir="$(BASE_DIR)" \ + USE_HTTP=0 \ + $(PYTHON_BIN) ./tests/test_cli.py + @env --chdir="$(BASE_DIR)" \ + USE_HTTP=1 \ + $(PYTHON_BIN) ./tests/test_cli.py + +# NOTE: these rely on the blender binary. +test_blender: FORCE + @env --chdir="$(BASE_DIR)" \ + ASAN_OPTIONS=check_initialization_order=0:leak_check_at_exit=0 \ + $(BLENDER_BIN) --background --factory-startup -noaudio --python ./tests/test_blender.py -- --verbose + +watch_test_blender: FORCE + @cd "$(BASE_DIR)" && \ + while true; do \ + $(MAKE) test_blender; \ + inotifywait -q -e close_write $(EXTRA_WATCH_FILES) $(PY_FILES) ; \ + tput clear; \ + done + +test_cli_blender: FORCE + @env BLENDER_BIN=$(BLENDER_BIN) \ + $(PYTHON_BIN) ./tests/test_cli_blender.py + +watch_test_cli_blender: FORCE + @while true; do \ + env BLENDER_BIN=$(BLENDER_BIN) \ + $(MAKE) test_cli_blender; \ + inotifywait -q -e close_write $(EXTRA_WATCH_FILES) $(PY_FILES) ; \ + tput clear; \ + done + + +# https://www.cyberciti.biz/faq/howto-create-linux-ram-disk-filesystem/ +# mkfs -q /dev/ram1 8192 +# mkdir -p /ramcache +# sudo mount /dev/ram1 /ramcache +# sudo chmod 777 /ramcache +# mkdir /ramcache/tmp + +watch_test: FORCE + @cd "$(BASE_DIR)" && \ + while true; do \ + $(MAKE) test; \ + inotifywait -q -e close_write $(EXTRA_WATCH_FILES) $(PY_FILES) ; \ + tput clear; \ + done + +watch_check_mypy: + @cd "$(BASE_DIR)" && \ + while true; do \ + $(MAKE) check_mypy; \ + inotifywait -q -e close_write $(EXTRA_WATCH_FILES) \ + $(PY_FILES_MYPY) \ + ./bl_extension_utils.py ; \ + tput clear; \ + done + +watch_check_ruff: + @cd "$(BASE_DIR)" && \ + while true; do \ + $(MAKE) check_ruff; \ + inotifywait -q -e close_write $(EXTRA_WATCH_FILES) \ + $(PY_FILES_MYPY) \ + ./bl_extension_utils.py ; \ + tput clear; \ + done + +watch_check_pylint: + @cd "$(BASE_DIR)" && \ + while true; do \ + $(MAKE) check_pylint; \ + inotifywait -q -e close_write $(EXTRA_WATCH_FILES) $(PY_FILES) ; \ + tput clear; \ + done + + +FORCE: diff --git a/scripts/addons_core/bl_pkg/__init__.py b/scripts/addons_core/bl_pkg/__init__.py new file mode 100644 index 00000000000..926784f00b3 --- /dev/null +++ b/scripts/addons_core/bl_pkg/__init__.py @@ -0,0 +1,545 @@ +# SPDX-FileCopyrightText: 2023 Blender Foundation +# +# SPDX-License-Identifier: GPL-2.0-or-later + +bl_info = { + "name": "Blender Extensions", + "author": "Campbell Barton", + "version": (0, 0, 1), + "blender": (4, 0, 0), + "location": "Edit -> Preferences -> Extensions", + "description": "Extension repository support for remote repositories", + "warning": "", + # "doc_url": "{BLENDER_MANUAL_URL}/addons/bl_pkg/bl_pkg.html", + "support": 'OFFICIAL', + "category": "System", +} + +if "bpy" in locals(): + import 
importlib + from . import ( + bl_extension_ops, + bl_extension_ui, + bl_extension_utils, + ) + importlib.reload(bl_extension_ops) + importlib.reload(bl_extension_ui) + importlib.reload(bl_extension_utils) + del ( + bl_extension_ops, + bl_extension_ui, + bl_extension_utils, + ) + del importlib + +import bpy + +from bpy.props import ( + BoolProperty, + EnumProperty, + IntProperty, + StringProperty, +) + +from bpy.types import ( + AddonPreferences, +) + + +class BlExtPreferences(AddonPreferences): + bl_idname = __name__ + timeout: IntProperty( + name="Time Out", + default=10, + ) + show_development_reports: BoolProperty( + name="Show Development Reports", + description=( + "Show the result of running commands in the main interface " + "this has the advantage that multiple processes that run at once have their errors properly grouped " + "which is not the case for reports which are mixed together" + ), + default=False, + ) + + +class StatusInfoUI: + __slots__ = ( + # The the title of the status/notification. + "title", + # The result of an operation. + "log", + # Set to true when running (via a modal operator). + "running", + ) + + def __init__(self): + self.log = [] + self.title = "" + self.running = False + + def from_message(self, title, text): + log_new = [] + for line in text.split("\n"): + if not (line := line.rstrip()): + continue + # Don't show any prefix for "Info" since this is implied. + log_new.append(('STATUS', line.removeprefix("Info: "))) + if not log_new: + return + + self.title = title + self.running = False + self.log = log_new + + +def cookie_from_session(): + # This path is a unique string for this session. + # Don't use a constant as it may be changed at run-time. + return bpy.app.tempdir + + +# ----------------------------------------------------------------------------- +# Shared Low Level Utilities + +def repo_paths_or_none(repo_item): + if (directory := repo_item.directory) == "": + return None, None + if repo_item.use_remote_url: + if not (remote_url := repo_item.remote_url): + return None, None + else: + remote_url = "" + return directory, remote_url + + +def repo_active_or_none(): + prefs = bpy.context.preferences + if not prefs.experimental.use_extension_repos: + return + + extensions = prefs.extensions + active_extension_index = extensions.active_repo + try: + active_repo = None if active_extension_index < 0 else extensions.repos[active_extension_index] + except IndexError: + active_repo = None + return active_repo + + +def print_debug(*args, **kw): + if not bpy.app.debug: + return + print(*args, **kw) + + +use_repos_to_notify = False + + +def repos_to_notify(): + repos_notify = [] + if not bpy.app.background: + # To use notifications on startup requires: + # - The splash displayed. + # - The status bar displayed. + # + # Since it's not all that common to disable the status bar just run notifications + # if any repositories are marked to run notifications. + + prefs = bpy.context.preferences + if prefs.experimental.use_extension_repos: + extension_repos = prefs.extensions.repos + for repo_item in extension_repos: + if not repo_item.enabled: + continue + if not repo_item.use_sync_on_startup: + continue + if not repo_item.use_remote_url: + continue + # Invalid, if there is no remote path this can't update. 
+ if not repo_item.remote_url: + continue + repos_notify.append(repo_item) + return repos_notify + + +# ----------------------------------------------------------------------------- +# Handlers + +@bpy.app.handlers.persistent +def extenion_repos_sync(*_): + # This is called from operators (create or an explicit call to sync) + # so calling a modal operator is "safe". + if (active_repo := repo_active_or_none()) is None: + return + + print_debug("SYNC:", active_repo.name) + # There may be nothing to upgrade. + + from contextlib import redirect_stdout + import io + stdout = io.StringIO() + + with redirect_stdout(stdout): + bpy.ops.bl_pkg.repo_sync_all('INVOKE_DEFAULT', use_active_only=True) + + if text := stdout.getvalue(): + repo_status_text.from_message("Sync \"{:s}\"".format(active_repo.name), text) + + +@bpy.app.handlers.persistent +def extenion_repos_upgrade(*_): + # This is called from operators (create or an explicit call to sync) + # so calling a modal operator is "safe". + if (active_repo := repo_active_or_none()) is None: + return + + print_debug("UPGRADE:", active_repo.name) + + from contextlib import redirect_stdout + import io + stdout = io.StringIO() + + with redirect_stdout(stdout): + bpy.ops.bl_pkg.pkg_upgrade_all('INVOKE_DEFAULT', use_active_only=True) + + if text := stdout.getvalue(): + repo_status_text.from_message("Upgrade \"{:s}\"".format(active_repo.name), text) + + +@bpy.app.handlers.persistent +def extenion_repos_files_clear(directory, _): + # Perform a "safe" file deletion by only removing files known to be either + # packages or known extension meta-data. + # + # Safer because removing a repository which points to an arbitrary path + # has the potential to wipe user data #119481. + import shutil + import os + from .bl_extension_utils import scandir_with_demoted_errors + # Unlikely but possible a new repository is immediately removed before initializing, + # avoid errors in this case. + if not os.path.isdir(directory): + return + + if os.path.isdir(path := os.path.join(directory, ".blender_ext")): + try: + shutil.rmtree(path) + except BaseException as ex: + print("Failed to remove files", ex) + + for entry in scandir_with_demoted_errors(directory): + if not entry.is_dir(): + continue + path = entry.path + if not os.path.exists(os.path.join(path, "blender_manifest.toml")): + continue + try: + shutil.rmtree(path) + except BaseException as ex: + print("Failed to remove files", ex) + + +# ----------------------------------------------------------------------------- +# Wrap Handlers + +_monkeypatch_extenions_repos_update_dirs = set() + + +def monkeypatch_extenions_repos_update_pre_impl(): + _monkeypatch_extenions_repos_update_dirs.clear() + + extension_repos = bpy.context.preferences.extensions.repos + for repo_item in extension_repos: + if not repo_item.enabled: + continue + directory, _repo_path = repo_paths_or_none(repo_item) + if directory is None: + continue + + _monkeypatch_extenions_repos_update_dirs.add(directory) + + +def monkeypatch_extenions_repos_update_post_impl(): + import os + from . import bl_extension_ops + + bl_extension_ops.repo_cache_store_refresh_from_prefs() + + # Refresh newly added directories. + extension_repos = bpy.context.preferences.extensions.repos + for repo_item in extension_repos: + if not repo_item.enabled: + continue + directory, _repo_path = repo_paths_or_none(repo_item) + if directory is None: + continue + # Happens for newly added extension directories. 
+ if not os.path.exists(directory): + continue + if directory in _monkeypatch_extenions_repos_update_dirs: + continue + # Ignore missing because the new repo might not have a JSON file. + repo_cache_store.refresh_remote_from_directory(directory=directory, error_fn=print, force=True) + repo_cache_store.refresh_local_from_directory(directory=directory, error_fn=print, ignore_missing=True) + + _monkeypatch_extenions_repos_update_dirs.clear() + + +@bpy.app.handlers.persistent +def monkeypatch_extensions_repos_update_pre(*_): + print_debug("PRE:") + try: + monkeypatch_extenions_repos_update_pre_impl() + except BaseException as ex: + print_debug("ERROR", str(ex)) + try: + monkeypatch_extensions_repos_update_pre._fn_orig() + except BaseException as ex: + print_debug("ERROR", str(ex)) + + +@bpy.app.handlers.persistent +def monkeypatch_extenions_repos_update_post(*_): + print_debug("POST:") + try: + monkeypatch_extenions_repos_update_post._fn_orig() + except BaseException as ex: + print_debug("ERROR", str(ex)) + try: + monkeypatch_extenions_repos_update_post_impl() + except BaseException as ex: + print_debug("ERROR", str(ex)) + + +def monkeypatch_install(): + import addon_utils + + handlers = bpy.app.handlers._extension_repos_update_pre + fn_orig = addon_utils._initialize_extension_repos_pre + fn_override = monkeypatch_extensions_repos_update_pre + for i, fn in enumerate(handlers): + if fn is fn_orig: + handlers[i] = fn_override + fn_override._fn_orig = fn_orig + break + + handlers = bpy.app.handlers._extension_repos_update_post + fn_orig = addon_utils._initialize_extension_repos_post + fn_override = monkeypatch_extenions_repos_update_post + for i, fn in enumerate(handlers): + if fn is fn_orig: + handlers[i] = fn_override + fn_override._fn_orig = fn_orig + break + + +def monkeypatch_uninstall(): + handlers = bpy.app.handlers._extension_repos_update_pre + fn_override = monkeypatch_extensions_repos_update_pre + for i in range(len(handlers)): + fn = handlers[i] + if fn is fn_override: + handlers[i] = fn_override._fn_orig + del fn_override._fn_orig + break + + handlers = bpy.app.handlers._extension_repos_update_post + fn_override = monkeypatch_extenions_repos_update_post + for i in range(len(handlers)): + fn = handlers[i] + if fn is fn_override: + handlers[i] = fn_override._fn_orig + del fn_override._fn_orig + break + + +# Text to display in the UI (while running...). +repo_status_text = StatusInfoUI() + +# Singleton to cache all repositories JSON data and handles refreshing. 
+repo_cache_store = None + + +# ----------------------------------------------------------------------------- +# Theme Integration + +def theme_preset_draw(menu, context): + from .bl_extension_utils import ( + pkg_theme_file_list, + ) + layout = menu.layout + repos_all = [ + repo_item for repo_item in context.preferences.extensions.repos + if repo_item.enabled + ] + if not repos_all: + return + import os + menu_idname = type(menu).__name__ + for i, pkg_manifest_local in enumerate(repo_cache_store.pkg_manifest_from_local_ensure(error_fn=print)): + if pkg_manifest_local is None: + continue + repo_item = repos_all[i] + directory = repo_item.directory + for pkg_idname, value in pkg_manifest_local.items(): + if value["type"] != "theme": + continue + + theme_dir, theme_files = pkg_theme_file_list(directory, pkg_idname) + for filename in theme_files: + props = layout.operator(menu.preset_operator, text=bpy.path.display_name(filename)) + props.filepath = os.path.join(theme_dir, filename) + props.menu_idname = menu_idname + + +def cli_extension(argv): + from . import bl_extension_cli + return bl_extension_cli.cli_extension_handler(argv) + + +# ----------------------------------------------------------------------------- +# Registration + +classes = ( + BlExtPreferences, +) + +cli_commands = [] + + +def register(): + # pylint: disable-next=global-statement + global repo_cache_store + + from bpy.types import WindowManager + from . import ( + bl_extension_ops, + bl_extension_ui, + bl_extension_utils, + ) + + if repo_cache_store is None: + repo_cache_store = bl_extension_utils.RepoCacheStore() + else: + repo_cache_store.clear() + bl_extension_ops.repo_cache_store_refresh_from_prefs() + + for cls in classes: + bpy.utils.register_class(cls) + + bl_extension_ops.register() + bl_extension_ui.register() + + WindowManager.extension_search = StringProperty( + name="Filter", + description="Filter by extension name, author & category", + options={'TEXTEDIT_UPDATE'}, + ) + WindowManager.extension_type = EnumProperty( + items=( + ('ALL', "All", "Show all extensions"), + None, + ('ADDON', "Add-ons", "Only show add-ons"), + ('THEME', "Themes", "Only show themes"), + ), + name="Filter by Type", + description="Show extensions by type", + default='ALL', + ) + WindowManager.extension_enabled_only = BoolProperty( + name="Show Enabled Extensions", + description="Only show enabled extensions", + ) + WindowManager.extension_updates_only = BoolProperty( + name="Show Updates Available", + description="Only show extensions with updates available", + ) + WindowManager.extension_installed_only = BoolProperty( + name="Show Installed Extensions", + description="Only show installed extensions", + ) + WindowManager.extension_show_legacy_addons = BoolProperty( + name="Show Legacy Add-Ons", + description="Only show extensions, hiding legacy add-ons", + default=True, + ) + + from bl_ui.space_userpref import USERPREF_MT_interface_theme_presets + USERPREF_MT_interface_theme_presets.append(theme_preset_draw) + + handlers = bpy.app.handlers._extension_repos_sync + handlers.append(extenion_repos_sync) + + handlers = bpy.app.handlers._extension_repos_upgrade + handlers.append(extenion_repos_upgrade) + + handlers = bpy.app.handlers._extension_repos_files_clear + handlers.append(extenion_repos_files_clear) + + cli_commands.append(bpy.utils.register_cli_command("extension", cli_extension)) + + global use_repos_to_notify + if (repos_notify := repos_to_notify()): + use_repos_to_notify = True + from . 
import bl_extension_notify + bl_extension_notify.register(repos_notify) + del repos_notify + + monkeypatch_install() + + +def unregister(): + # pylint: disable-next=global-statement + global repo_cache_store + + from bpy.types import WindowManager + from . import ( + bl_extension_ops, + bl_extension_ui, + ) + + bl_extension_ops.unregister() + bl_extension_ui.unregister() + + del WindowManager.extension_search + del WindowManager.extension_type + del WindowManager.extension_enabled_only + del WindowManager.extension_installed_only + del WindowManager.extension_show_legacy_addons + + for cls in classes: + bpy.utils.unregister_class(cls) + + if repo_cache_store is None: + pass + else: + repo_cache_store.clear() + repo_cache_store = None + + from bl_ui.space_userpref import USERPREF_MT_interface_theme_presets + USERPREF_MT_interface_theme_presets.remove(theme_preset_draw) + + handlers = bpy.app.handlers._extension_repos_sync + if extenion_repos_sync in handlers: + handlers.remove(extenion_repos_sync) + + handlers = bpy.app.handlers._extension_repos_upgrade + if extenion_repos_upgrade in handlers: + handlers.remove(extenion_repos_upgrade) + + handlers = bpy.app.handlers._extension_repos_files_clear + if extenion_repos_files_clear in handlers: + handlers.remove(extenion_repos_files_clear) + + for cmd in cli_commands: + bpy.utils.unregister_cli_command(cmd) + cli_commands.clear() + + global use_repos_to_notify + if use_repos_to_notify: + use_repos_to_notify = False + from . import bl_extension_notify + bl_extension_notify.unregister() + + monkeypatch_uninstall() diff --git a/scripts/addons_core/bl_pkg/bl_extension_cli.py b/scripts/addons_core/bl_pkg/bl_extension_cli.py new file mode 100644 index 00000000000..e40fbd9a51d --- /dev/null +++ b/scripts/addons_core/bl_pkg/bl_extension_cli.py @@ -0,0 +1,829 @@ +# SPDX-FileCopyrightText: 2023 Blender Foundation +# +# SPDX-License-Identifier: GPL-2.0-or-later + +""" +Command line access for extension operations see: + + blender --command extension --help +""" + +__all__ = ( + "cli_extension_handler", +) + +import argparse +import os +import sys + +from typing import ( + Any, + Dict, + List, + Optional, + Tuple, + Union, +) + +show_color = ( + False if os.environ.get("NO_COLOR") else + sys.stdout.isatty() +) + + +if show_color: + color_codes = { + 'black': '\033[0;30m', + 'bright_gray': '\033[0;37m', + 'blue': '\033[0;34m', + 'white': '\033[1;37m', + 'green': '\033[0;32m', + 'bright_blue': '\033[1;34m', + 'cyan': '\033[0;36m', + 'bright_green': '\033[1;32m', + 'red': '\033[0;31m', + 'bright_cyan': '\033[1;36m', + 'purple': '\033[0;35m', + 'bright_red': '\033[1;31m', + 'yellow': '\033[0;33m', + 'bright_purple': '\033[1;35m', + 'dark_gray': '\033[1;30m', + 'bright_yellow': '\033[1;33m', + 'normal': '\033[0m', + } + + def colorize(text: str, color: str) -> str: + return (color_codes[color] + text + color_codes["normal"]) +else: + def colorize(text: str, color: str) -> str: + return text + +# ----------------------------------------------------------------------------- +# Wrap Operators + + +def blender_preferences_write() -> bool: + import bpy # type: ignore + try: + ok = 'FINISHED' in bpy.ops.wm.save_userpref() + except RuntimeError as ex: + print("Failed to write preferences: {!r}".format(ex)) + ok = False + return ok + + +# ----------------------------------------------------------------------------- +# Argument Implementation (Utilities) + +class subcmd_utils: + + def __new__(cls) -> Any: + raise RuntimeError("{:s} should not be instantiated".format(cls)) 
+ + @staticmethod + def sync( + *, + show_done: bool = True, + ) -> bool: + import bpy + try: + bpy.ops.bl_pkg.repo_sync_all() + if show_done: + sys.stdout.write("Done...\n\n") + except BaseException: + print("Error synchronizing") + import traceback + traceback.print_exc() + return False + return True + + @staticmethod + def _expand_package_ids( + packages: List[str], + *, + use_local: bool, + ) -> Union[List[Tuple[int, str]], str]: + # Takes a terse lists of package names and expands to repo index and name list, + # returning an error string if any can't be resolved. + from . import repo_cache_store + from .bl_extension_ops import extension_repos_read + + repo_map = {} + errors = [] + + repos_all = extension_repos_read() + for ( + repo_index, + pkg_manifest, + ) in enumerate( + repo_cache_store.pkg_manifest_from_local_ensure(error_fn=print) + if use_local else + repo_cache_store.pkg_manifest_from_remote_ensure(error_fn=print) + ): + # Show any exceptions created while accessing the JSON, + repo = repos_all[repo_index] + repo_map[repo.module] = (repo_index, set(pkg_manifest.keys())) + + repos_and_packages = [] + + for pkg_id_full in packages: + repo_id, pkg_id = pkg_id_full.rpartition(".")[0::2] + if not pkg_id: + errors.append("Malformed package name \"{:s}\", expected \"repo_id.pkg_id\"!".format(pkg_id_full)) + continue + if repo_id: + repo_index, repo_packages = repo_map.get(repo_id, (-1, ())) + if repo_index == -1: + errors.append("Repository \"{:s}\" not found in [{:s}]!".format( + repo_id, + ", ".join(sorted("\"{:s}\"".format(x) for x in repo_map.keys())) + )) + continue + else: + repo_index = -1 + for repo_id_iter, (repo_index_iter, repo_packages_iter) in repo_map.items(): + if pkg_id in repo_packages_iter: + repo_index = repo_index_iter + break + if repo_index == -1: + if use_local: + errors.append("Package \"{:s}\" not installed in local repositories!".format(pkg_id)) + else: + errors.append("Package \"{:s}\" not found in remote repositories!".format(pkg_id)) + continue + repos_and_packages.append((repo_index, pkg_id)) + + if errors: + return "\n".join(errors) + + return repos_and_packages + + @staticmethod + def expand_package_ids_from_remote(packages: List[str]) -> Union[List[Tuple[int, str]], str]: + return subcmd_utils._expand_package_ids(packages, use_local=False) + + @staticmethod + def expand_package_ids_from_local(packages: List[str]) -> Union[List[Tuple[int, str]], str]: + return subcmd_utils._expand_package_ids(packages, use_local=True) + + +# ----------------------------------------------------------------------------- +# Argument Implementation (Queries) + +class subcmd_query: + + def __new__(cls) -> Any: + raise RuntimeError("{:s} should not be instantiated".format(cls)) + + @staticmethod + def list( + *, + sync: bool, + ) -> bool: + + def list_item( + pkg_id: str, + item_remote: Optional[Dict[str, Any]], + item_local: Optional[Dict[str, Any]], + ) -> None: + # Both can't be None. 
+ assert item_remote is not None or item_local is not None + + if item_remote is not None: + item_version = item_remote["version"] + if item_local is None: + item_local_version = None + is_outdated = False + else: + item_local_version = item_local["version"] + is_outdated = item_local_version != item_version + + if item_local is not None: + if is_outdated: + status_info = " [{:s}]".format(colorize("outdated: {:s} -> {:s}".format( + item_local_version, + item_version, + ), "red")) + else: + status_info = " [{:s}]".format(colorize("installed", "green")) + else: + status_info = "" + item = item_remote + else: + # All local-only packages are installed. + status_info = " [{:s}]".format(colorize("installed", "green")) + assert isinstance(item_local, dict) + item = item_local + + print( + " {:s}{:s}: {:s}".format( + pkg_id, + status_info, + colorize("\"{:s}\", {:s}".format(item["name"], item.get("tagline", "")), "dark_gray"), + )) + + if sync: + if not subcmd_utils.sync(): + return False + + # NOTE: exactly how this data is extracted is rather arbitrary. + # This uses the same code paths as drawing code. + from .bl_extension_ops import extension_repos_read + from . import repo_cache_store + + repos_all = extension_repos_read() + + for repo_index, ( + pkg_manifest_remote, + pkg_manifest_local, + ) in enumerate(zip( + repo_cache_store.pkg_manifest_from_remote_ensure(error_fn=print), + repo_cache_store.pkg_manifest_from_local_ensure(error_fn=print), + )): + # Show any exceptions created while accessing the JSON, + repo = repos_all[repo_index] + + print("Repository: \"{:s}\" (id={:s})".format(repo.name, repo.module)) + if pkg_manifest_remote is not None: + for pkg_id, item_remote in pkg_manifest_remote.items(): + if pkg_manifest_local is not None: + item_local = pkg_manifest_local.get(pkg_id) + else: + item_local = None + list_item(pkg_id, item_remote, item_local) + else: + for pkg_id, item_local in pkg_manifest_local.items(): + list_item(pkg_id, None, item_local) + + return True + + +# ----------------------------------------------------------------------------- +# Argument Implementation (Packages) + +class subcmd_pkg: + + def __new__(cls) -> Any: + raise RuntimeError("{:s} should not be instantiated".format(cls)) + + @staticmethod + def update( + *, + sync: bool, + ) -> bool: + if sync: + if not subcmd_utils.sync(): + return False + + import bpy + try: + bpy.ops.bl_pkg.pkg_upgrade_all() + except RuntimeError: + return False # The error will have been printed. + return True + + @staticmethod + def install( + *, + sync: bool, + packages: List[str], + enable_on_install: bool, + no_prefs: bool, + ) -> bool: + if sync: + if not subcmd_utils.sync(): + return False + + # Expand all package ID's. + repos_and_packages = subcmd_utils.expand_package_ids_from_remote(packages) + if isinstance(repos_and_packages, str): + sys.stderr.write(repos_and_packages) + sys.stderr.write("\n") + return False + + import bpy + for repo_index, pkg_id in repos_and_packages: + bpy.ops.bl_pkg.pkg_mark_set( + repo_index=repo_index, + pkg_id=pkg_id, + ) + + try: + bpy.ops.bl_pkg.pkg_install_marked(enable_on_install=enable_on_install) + except RuntimeError: + return False # The error will have been printed. + + if not no_prefs: + if enable_on_install: + blender_preferences_write() + + return True + + @staticmethod + def remove( + *, + packages: List[str], + no_prefs: bool, + ) -> bool: + # Expand all package ID's. 
+ repos_and_packages = subcmd_utils.expand_package_ids_from_local(packages) + if isinstance(repos_and_packages, str): + sys.stderr.write(repos_and_packages) + sys.stderr.write("\n") + return False + + import bpy + for repo_index, pkg_id in repos_and_packages: + bpy.ops.bl_pkg.pkg_mark_set(repo_index=repo_index, pkg_id=pkg_id) + + try: + bpy.ops.bl_pkg.pkg_uninstall_marked() + except RuntimeError: + return False # The error will have been printed. + + if not no_prefs: + blender_preferences_write() + + return True + + @staticmethod + def install_file( + *, + filepath: str, + repo_id: str, + enable_on_install: bool, + no_prefs: bool, + ) -> bool: + import bpy + + # Blender's operator requires an absolute path. + filepath = os.path.abspath(filepath) + + try: + bpy.ops.bl_pkg.pkg_install_files( + filepath=filepath, + repo=repo_id, + enable_on_install=enable_on_install, + ) + except RuntimeError: + return False # The error will have been printed. + except BaseException as ex: + sys.stderr.write(str(ex)) + sys.stderr.write("\n") + + if not no_prefs: + if enable_on_install: + blender_preferences_write() + + return True + + +# ----------------------------------------------------------------------------- +# Argument Implementation (Repositories) + +class subcmd_repo: + + def __new__(cls) -> Any: + raise RuntimeError("{:s} should not be instantiated".format(cls)) + + @staticmethod + def list() -> bool: + from .bl_extension_ops import extension_repos_read + repos_all = extension_repos_read() + for repo in repos_all: + print("{:s}:".format(repo.module)) + print(" name: \"{:s}\"".format(repo.name)) + print(" directory: \"{:s}\"".format(repo.directory)) + if url := repo.remote_url: + print(" url: \"{:s}\"".format(url)) + + return True + + @staticmethod + def add( + *, + name: str, + id: str, + directory: str, + url: str, + cache: bool, + clear_all: bool, + no_prefs: bool, + ) -> bool: + from bpy import context + + extension_repos = context.preferences.extensions.repos + if clear_all: + while extension_repos: + extension_repos.remove(extension_repos[0]) + + repo = extension_repos.new( + name=name, + module=id, + custom_directory=directory, + remote_url=url, + ) + repo.use_cache = cache + + if not no_prefs: + blender_preferences_write() + + return True + + @staticmethod + def remove( + *, + id: str, + no_prefs: bool, + ) -> bool: + from bpy import context + extension_repos = context.preferences.extensions.repos + extension_repos_module_map = {repo.module: repo for repo in extension_repos} + repo = extension_repos_module_map.get(id) + if repo is None: + sys.stderr.write("Repository: \"{:s}\" not found in [{:s}]\n".format( + id, + ", ".join(["\"{:s}\"".format(x) for x in sorted(extension_repos_module_map.keys())]) + )) + return False + extension_repos.remove(repo) + print("Removed repo \"{:s}\"".format(id)) + + if not no_prefs: + blender_preferences_write() + + return True + + +# ----------------------------------------------------------------------------- +# Command Line Argument Definitions + +def arg_handle_int_as_bool(value: str) -> bool: + result = int(value) + if result not in {0, 1}: + raise argparse.ArgumentTypeError("Expected a 0 or 1") + return bool(result) + + +def generic_arg_sync(subparse: argparse.ArgumentParser) -> None: + subparse.add_argument( + "-s", + "--sync", + dest="sync", + action="store_true", + default=False, + help=( + "Sync the remote directory before performing the action." 
+ ), + ) + + +def generic_arg_enable_on_install(subparse: argparse.ArgumentParser) -> None: + subparse.add_argument( + "-e", + "--enable", + dest="enable", + action="store_true", + default=False, + help=( + "Enable the extension after installation." + ), + ) + + +def generic_arg_no_prefs(subparse: argparse.ArgumentParser) -> None: + subparse.add_argument( + "--no-prefs", + dest="no_prefs", + action="store_true", + default=False, + help=( + "Treat the user-preferences as read-only,\n" + "preventing updates for operations that would otherwise modify them.\n" + "This means removing extensions or repositories for example, wont update the user-preferences." + ), + ) + + +def generic_arg_package_list_positional(subparse: argparse.ArgumentParser) -> None: + subparse.add_argument( + dest="packages", + metavar="PACKAGES", + type=str, + help=( + "The packages to operate on (separated by ``,`` without spaces)." + ), + ) + + +def generic_arg_package_file_positional(subparse: argparse.ArgumentParser) -> None: + subparse.add_argument( + dest="file", + metavar="FILE", + type=str, + help=( + "The packages file." + ), + ) + + +def generic_arg_repo_id(subparse: argparse.ArgumentParser) -> None: + subparse.add_argument( + "-r", + "--repo", + dest="repo", + type=str, + help=( + "The repository identifier." + ), + required=True, + ) + + +def generic_arg_package_repo_id_positional(subparse: argparse.ArgumentParser) -> None: + subparse.add_argument( + dest="id", + metavar="ID", + type=str, + help=( + "The repository identifier." + ), + ) + + +# ----------------------------------------------------------------------------- +# Blender Package Manipulation + +def cli_extension_args_list(subparsers: "argparse._SubParsersAction[argparse.ArgumentParser]") -> None: + # Implement "list". + subparse = subparsers.add_parser( + "list", + help="List all packages.", + description=( + "List packages from all enabled repositories." + ), + formatter_class=argparse.RawTextHelpFormatter, + ) + generic_arg_sync(subparse) + + subparse.set_defaults( + func=lambda args: subcmd_query.list( + sync=args.sync, + ), + ) + + +def cli_extension_args_sync(subparsers: "argparse._SubParsersAction[argparse.ArgumentParser]") -> None: + # Implement "sync". + subparse = subparsers.add_parser( + "sync", + help="Synchronize with remote repositories.", + description=( + "Download package information for remote repositories." + ), + formatter_class=argparse.RawTextHelpFormatter, + ) + subparse.set_defaults( + func=lambda args: subcmd_utils.sync(show_done=False), + ) + + +def cli_extension_args_upgrade(subparsers: "argparse._SubParsersAction[argparse.ArgumentParser]") -> None: + # Implement "update". + subparse = subparsers.add_parser( + "update", + help="Upgrade any outdated packages.", + description=( + "Download and update any outdated packages." + ), + formatter_class=argparse.RawTextHelpFormatter, + ) + generic_arg_sync(subparse) + + subparse.set_defaults( + func=lambda args: subcmd_pkg.update(sync=args.sync), + ) + + +def cli_extension_args_install(subparsers: "argparse._SubParsersAction[argparse.ArgumentParser]") -> None: + # Implement "install". 
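+    # Example invocation (the package IDs here are placeholders):
+    #   blender --command extension install -s my_package_a,my_package_b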
+ subparse = subparsers.add_parser( + "install", + help="Install packages.", + formatter_class=argparse.RawTextHelpFormatter, + ) + generic_arg_sync(subparse) + generic_arg_package_list_positional(subparse) + + generic_arg_enable_on_install(subparse) + generic_arg_no_prefs(subparse) + + subparse.set_defaults( + func=lambda args: subcmd_pkg.install( + sync=args.sync, + packages=args.packages.split(","), + enable_on_install=args.enable, + no_prefs=args.no_prefs, + ), + ) + + +def cli_extension_args_install_file(subparsers: "argparse._SubParsersAction[argparse.ArgumentParser]") -> None: + # Implement "install-file". + subparse = subparsers.add_parser( + "install-file", + help="Install package from file.", + description=( + "Install a package file into a local repository." + ), + formatter_class=argparse.RawTextHelpFormatter, + ) + + generic_arg_package_file_positional(subparse) + generic_arg_repo_id(subparse) + + generic_arg_enable_on_install(subparse) + generic_arg_no_prefs(subparse) + + subparse.set_defaults( + func=lambda args: subcmd_pkg.install_file( + filepath=args.file, + repo_id=args.repo, + enable_on_install=args.enable, + no_prefs=args.no_prefs, + ), + ) + + +def cli_extension_args_remove(subparsers: "argparse._SubParsersAction[argparse.ArgumentParser]") -> None: + # Implement "remove". + subparse = subparsers.add_parser( + "remove", + help="Remove packages.", + description=( + "Disable & remove package(s)." + ), + formatter_class=argparse.RawTextHelpFormatter, + ) + generic_arg_package_list_positional(subparse) + generic_arg_no_prefs(subparse) + + subparse.set_defaults( + func=lambda args: subcmd_pkg.remove( + packages=args.packages.split(","), + no_prefs=args.no_prefs, + ), + ) + + +# ----------------------------------------------------------------------------- +# Blender Repository Manipulation + +def cli_extension_args_repo_list(subparsers: "argparse._SubParsersAction[argparse.ArgumentParser]") -> None: + # Implement "repo-list". + subparse = subparsers.add_parser( + "repo-list", + help="List repositories.", + description=( + "List all repositories stored in Blender's preferences." + ), + formatter_class=argparse.RawTextHelpFormatter, + ) + subparse.set_defaults( + func=lambda args: subcmd_repo.list(), + ) + + +def cli_extension_args_repo_add(subparsers: "argparse._SubParsersAction[argparse.ArgumentParser]") -> None: + # Implement "repo-add". + subparse = subparsers.add_parser( + "repo-add", + help="Add repository.", + description=( + "Add a new local or remote repository." + ), + formatter_class=argparse.RawTextHelpFormatter, + ) + generic_arg_package_repo_id_positional(subparse) + + # Optional. + subparse.add_argument( + "--name", + dest="name", + type=str, + default="", + metavar="NAME", + help=( + "The name to display in the interface (optional)." + ), + ) + + subparse.add_argument( + "--directory", + dest="directory", + type=str, + default="", + help=( + "The directory where the repository stores local files (optional).\n" + "When omitted a directory in the users directory is automatically selected." + ), + ) + subparse.add_argument( + "--url", + dest="url", + type=str, + default="", + metavar="URL", + help=( + "The URL, for remote repositories (optional).\n" + "When omitted the repository is considered \"local\"\n" + "as it is not connected to an external repository,\n" + "where packages may be installed by file or managed manually." 
+ ), + ) + + subparse.add_argument( + "--cache", + dest="cache", + metavar="BOOLEAN", + type=arg_handle_int_as_bool, + default=True, + help=( + "Use package cache (default=1)." + ), + ) + + subparse.add_argument( + "--clear-all", + dest="clear_all", + action="store_true", + help=( + "Clear all repositories before adding, simplifies test setup." + ), + ) + + generic_arg_no_prefs(subparse) + + subparse.set_defaults( + func=lambda args: subcmd_repo.add( + id=args.id, + name=args.name, + directory=args.directory, + url=args.url, + cache=args.cache, + clear_all=args.clear_all, + no_prefs=args.no_prefs, + ), + ) + + +def cli_extension_args_repo_remove(subparsers: "argparse._SubParsersAction[argparse.ArgumentParser]") -> None: + # Implement "repo-remove". + subparse = subparsers.add_parser( + "repo-remove", + help="Remove repository.", + description=( + "Remove a repository." + ), + formatter_class=argparse.RawTextHelpFormatter, + ) + generic_arg_package_repo_id_positional(subparse) + generic_arg_no_prefs(subparse) + + subparse.set_defaults( + func=lambda args: subcmd_repo.remove( + id=args.id, + no_prefs=args.no_prefs, + ), + ) + + +# ----------------------------------------------------------------------------- +# Implement Additional Arguments + +def cli_extension_args_extra(subparsers: "argparse._SubParsersAction[argparse.ArgumentParser]") -> None: + # Package commands. + cli_extension_args_list(subparsers) + cli_extension_args_sync(subparsers) + cli_extension_args_upgrade(subparsers) + cli_extension_args_install(subparsers) + cli_extension_args_install_file(subparsers) + cli_extension_args_remove(subparsers) + + # Preference commands. + cli_extension_args_repo_list(subparsers) + cli_extension_args_repo_add(subparsers) + cli_extension_args_repo_remove(subparsers) + + +def cli_extension_handler(args: List[str]) -> int: + from .cli import blender_ext + result = blender_ext.main( + args, + args_internal=False, + args_extra_subcommands_fn=cli_extension_args_extra, + prog="blender --command extension", + ) + # Needed as the import isn't followed by `mypy`. + assert isinstance(result, int) + return result diff --git a/scripts/addons_core/bl_pkg/bl_extension_local.py b/scripts/addons_core/bl_pkg/bl_extension_local.py new file mode 100644 index 00000000000..b2838622693 --- /dev/null +++ b/scripts/addons_core/bl_pkg/bl_extension_local.py @@ -0,0 +1,46 @@ +# SPDX-FileCopyrightText: 2023 Blender Foundation +# +# SPDX-License-Identifier: GPL-2.0-or-later + +""" +High level API for managing an extension local site-packages and wheels. + +NOTE: this is a standalone module. +""" + +__all__ = ( + "sync", +) + + +import os +import sys + +from .wheel_manager import WheelSource + +from typing import ( + List, +) + + +def sync( + *, + local_dir: str, + wheel_list: List[WheelSource], +) -> None: + from . 
import wheel_manager + local_dir_site_packages = os.path.join( + local_dir, + "lib", + "python{:d}.{:d}".format(sys.version_info.major, sys.version_info.minor), + "site-packages", + ) + + wheel_manager.apply_action( + local_dir=local_dir, + local_dir_site_packages=local_dir_site_packages, + wheel_list=wheel_list, + ) + if os.path.exists(local_dir_site_packages): + if local_dir_site_packages not in sys.path: + sys.path.append(local_dir_site_packages) diff --git a/scripts/addons_core/bl_pkg/bl_extension_notify.py b/scripts/addons_core/bl_pkg/bl_extension_notify.py new file mode 100644 index 00000000000..109be539b81 --- /dev/null +++ b/scripts/addons_core/bl_pkg/bl_extension_notify.py @@ -0,0 +1,378 @@ +# SPDX-FileCopyrightText: 2023 Blender Foundation +# +# SPDX-License-Identifier: GPL-2.0-or-later + +""" +Startup notifications. +""" + +__all__ = ( + "register", + "unregister", +) + + +import os +import bpy + +from . import bl_extension_ops +from . import bl_extension_utils + +# Request the processes exit, then wait for them to exit. +# NOTE(@ideasman42): This is all well and good but any delays exiting are unwanted, +# only keep this as a reference and in case we can speed up forcing them to exit. +USE_GRACEFUL_EXIT = False + + +# ----------------------------------------------------------------------------- +# Internal Utilities + +def sync_status_count_outdated_extensions(repos_notify): + from . import repo_cache_store + + repos_notify_directories = [repo_item.directory for repo_item in repos_notify] + + package_count = 0 + + for ( + pkg_manifest_remote, + pkg_manifest_local, + ) in zip( + repo_cache_store.pkg_manifest_from_remote_ensure( + error_fn=print, + directory_subset=repos_notify_directories, + ), + repo_cache_store.pkg_manifest_from_local_ensure( + error_fn=print, + directory_subset=repos_notify_directories, + # Needed as these have been updated. + check_files=True, + ), + ): + if pkg_manifest_remote is None: + continue + if pkg_manifest_local is None: + continue + + for pkg_id, item_remote in pkg_manifest_remote.items(): + item_local = pkg_manifest_local.get(pkg_id) + if item_local is None: + # Not installed. + continue + + if item_remote["version"] != item_local["version"]: + package_count += 1 + return package_count + + +# ----------------------------------------------------------------------------- +# Update Iterator +# +# This is a black-box which handled running the updates, yielding status text. + +def sync_apply_locked(repos_notify, repos_notify_files, unique_ext): + """ + Move files with a unique extension to their final location + with a locked repository to ensure multiple Blender instances never overwrite + repositories at the same time. + + Lock the repositories for the shortest time reasonably possible. + If locking fails, this is OK as it's possible another Blender got here first. + + Another reason this is needed is exiting Blender will close the sync sub-processes, + this is OK as long as the final location of the repositories JSON isn't being written + to the moment Blender and it's sub-processes exit. + """ + # TODO: handle the case of cruft being left behind, perhaps detect previous + # files created with a `unique_ext` (`@{HEX}` extension) and removing them. + # Although this shouldn't happen on a regular basis. Only when exiting immediately after launching + # Blender and even then the user would need to be *lucky*. + from . 
import cookie_from_session + + any_lock_errors = False + repo_directories = [repo_item.directory for repo_item in repos_notify] + with bl_extension_utils.RepoLockContext( + repo_directories=repo_directories, + cookie=cookie_from_session(), + ) as lock_result: + for directory, repo_files in zip(repo_directories, repos_notify_files): + repo_files = [os.path.join(directory, filepath_rel) for filepath_rel in repo_files] + + if (lock_result_for_repo := lock_result[directory]) is not None: + print("Warning \"{:s}\" locking \"{:s}\"".format(lock_result_for_repo, repr(directory))) + any_lock_errors = True + for filepath in repo_files: + try: + os.remove(filepath) + except Exception as ex: + print("Failed to remove file:", ex) + continue + + # Locking worked, rename the files. + for filepath in repo_files: + filepath_dst = filepath[:-len(unique_ext)] + try: + os.remove(filepath_dst) + except Exception as ex: + print("Failed to remove file before renaming:", ex) + continue + os.rename(filepath, filepath_dst) + return any_lock_errors + + +def sync_status_generator(repos_notify): + + # Generator results... + # -> None: do nothing. + # -> (text, ICON_ID, NUMBER_OF_UPDATES) + + # ################ # + # Setup The Update # + # ################ # + + yield None + + # An extension unique to this session. + unique_ext = "@{:x}".format(os.getpid()) + + from functools import partial + + cmd_batch_partial = [] + for repo_item in repos_notify: + # Local only repositories should still refresh, but not run the sync. + assert repo_item.remote_url + cmd_batch_partial.append(partial( + bl_extension_utils.repo_sync, + directory=repo_item.directory, + remote_url=repo_item.remote_url, + online_user_agent=bl_extension_ops.online_user_agent_from_blender(), + # Never sleep while there is no input, as this blocks Blender. + use_idle=False, + # Needed so the user can exit blender without warnings about a broken pipe. + # TODO: write to a temporary location, once done: + # There is no chance of corrupt data as the data isn't written directly to the target JSON. + force_exit_ok=not USE_GRACEFUL_EXIT, + extension_override=unique_ext, + )) + + yield None + + # repos_lock = [repo_item.directory for repo_item in self.repos_notify] + + # Lock repositories. + # self.repo_lock = bl_extension_utils.RepoLock(repo_directories=repos_lock, cookie=cookie_from_session()) + + import atexit + + cmd_batch = None + + def cmd_force_quit(): + if cmd_batch is None: + return + cmd_batch.exec_non_blocking(request_exit=True) + + if USE_GRACEFUL_EXIT: + import time + # Force all commands to close. + while not cmd_batch.exec_non_blocking(request_exit=True).all_complete: + # Avoid high CPU usage on exit. + time.sleep(0.01) + + atexit.register(cmd_force_quit) + + cmd_batch = bl_extension_utils.CommandBatch( + # Used as a prefix in status. + title="Update", + batch=cmd_batch_partial, + ) + del cmd_batch_partial + + yield None + + # ############## # + # Run The Update # + # ############## # + + # The count is unknown. + update_total = -1 + any_lock_errors = False + + repos_notify_files = [[] for _ in repos_notify] + + is_debug = bpy.app.debug + while True: + command_result = cmd_batch.exec_non_blocking( + # TODO: if Blender requested an exit... this should request exit here. + request_exit=False, + ) + # Forward new messages to reports. 
+ msg_list_per_command = cmd_batch.calc_status_log_since_last_request_or_none() + if msg_list_per_command is not None: + for i, msg_list in enumerate(msg_list_per_command): + for (ty, msg) in msg_list: + if ty == 'PATH': + if not msg.endswith(unique_ext): + print("Unexpected path:", msg) + repos_notify_files[i].append(msg) + continue + + if not is_debug: + continue + + # TODO: output this information to a place for users, if they want to debug. + if len(msg_list_per_command) > 1: + # These reports are flattened, note the process number that fails so + # whoever is reading the reports can make sense of the messages. + msg = "{:s} (process {:d} of {:d})".format(msg, i + 1, len(msg_list_per_command)) + if ty == 'STATUS': + print('INFO', msg) + else: + print(ty, msg) + + # TODO: more elegant way to detect changes. + # Re-calculating the same information each time then checking if it's different isn't great. + if command_result.status_data_changed: + if command_result.all_complete: + any_lock_errors = sync_apply_locked(repos_notify, repos_notify_files, unique_ext) + update_total = sync_status_count_outdated_extensions(repos_notify) + yield (cmd_batch.calc_status_data(), update_total, any_lock_errors) + else: + yield None + + if command_result.all_complete: + break + + atexit.unregister(cmd_force_quit) + + # ################### # + # Finalize The Update # + # ################### # + + yield None + + # Unlock repositories. + # lock_result_any_failed_with_report(self, self.repo_lock.release(), report_type='WARNING') + # self.repo_lock = None + + +# ----------------------------------------------------------------------------- +# Private API + +# The timer before running the timer (initially). +TIME_WAIT_INIT = 0.05 +# The time between calling the timer. +TIME_WAIT_STEP = 0.1 + +state_text = ( + "Checking for updates...", +) + + +class NotifyHandle: + __slots__ = ( + "splash_region", + "state", + + "sync_generator", + "sync_info", + ) + + def __init__(self, repos_notify): + self.splash_region = None + self.state = 0 + # We could start the generator separately, this seems OK here for now. + self.sync_generator = iter(sync_status_generator(repos_notify)) + # TEXT/ICON_ID/COUNT + self.sync_info = None + + +# When non-null, the timer is running. +_notify = None + + +def _region_exists(region): + # TODO: this is a workaround for there being no good way to inspect temporary regions. + # A better solution could be to store the `PyObject` in the `ARegion` so that it gets invalidated when freed. + # This is a bigger change though - so use the context override as a way to check if a region is valid. + exists = False + try: + with bpy.context.temp_override(region=region): + exists = True + except TypeError: + pass + return exists + + +def _ui_refresh_timer(): + if _notify is None: + return None + + default_wait = TIME_WAIT_STEP + + sync_info = next(_notify.sync_generator, ...) + # If the generator exited, early exit here. + if sync_info is ...: + return None + if sync_info is None: + # Nothing changed, no action is needed (waiting for a response). + return default_wait + + # Re-display. + assert isinstance(sync_info, tuple) + assert len(sync_info) == 3 + + _notify.sync_info = sync_info + + # Check if the splash_region is valid. + if _notify.splash_region is not None: + if not _region_exists(_notify.splash_region): + _notify.splash_region = None + return None + _notify.splash_region.tag_redraw() + _notify.splash_region.tag_refresh_ui() + + # TODO: redraw the status bar. 
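+    # Returning the delay (in seconds) keeps this `bpy.app.timers` callback scheduled,
+    # returning None (above) stops it.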
+ + return default_wait + + +def splash_draw_status_fn(self, context): + if _notify.splash_region is None: + _notify.splash_region = context.region_popup + + if _notify.sync_info is None: + self.layout.label(text="Updates starting...") + else: + status_data, update_count, any_lock_errors = _notify.sync_info + text, icon = bl_extension_utils.CommandBatch.calc_status_text_icon_from_data(status_data, update_count) + if any_lock_errors: + text = text + " - failed to acquire lock!" + row = self.layout.row(align=True) + if update_count > 0: + row.operator("bl_pkg.extensions_show_for_update", text=text, icon=icon) + else: + row.label(text=text, icon=icon) + + self.layout.separator() + self.layout.separator() + + +# ----------------------------------------------------------------------------- +# Public API + + +def register(repos_notify): + global _notify + _notify = NotifyHandle(repos_notify) + bpy.types.WM_MT_splash.append(splash_draw_status_fn) + bpy.app.timers.register(_ui_refresh_timer, first_interval=TIME_WAIT_INIT) + + +def unregister(): + global _notify + assert _notify is not None + _notify = None + + bpy.types.WM_MT_splash.remove(splash_draw_status_fn) + # This timer is responsible for un-registering itself. + # `bpy.app.timers.unregister(_ui_refresh_timer)` diff --git a/scripts/addons_core/bl_pkg/bl_extension_ops.py b/scripts/addons_core/bl_pkg/bl_extension_ops.py new file mode 100644 index 00000000000..125f21908fe --- /dev/null +++ b/scripts/addons_core/bl_pkg/bl_extension_ops.py @@ -0,0 +1,2337 @@ +# SPDX-FileCopyrightText: 2023 Blender Foundation +# +# SPDX-License-Identifier: GPL-2.0-or-later + +""" +Blender, thin wrapper around ``blender_extension_utils``. +Where the operator shows progress, any errors and supports canceling operations. +""" + +__all__ = ( + "extension_repos_read", +) + +import os + +from functools import partial + +from typing import ( + NamedTuple, +) + +import bpy + +from bpy.types import ( + Operator, +) +from bpy.props import ( + BoolProperty, + CollectionProperty, + EnumProperty, + StringProperty, + IntProperty, +) +from bpy.app.translations import ( + pgettext_iface as iface_, +) + +# Localize imports. +from . import ( + bl_extension_utils, +) # noqa: E402 + +from . import ( + repo_status_text, + cookie_from_session, +) + +from .bl_extension_utils import ( + RepoLock, + RepoLockContext, +) + +rna_prop_url = StringProperty(name="URL", subtype='FILE_PATH', options={'HIDDEN'}) +rna_prop_directory = StringProperty(name="Repo Directory", subtype='FILE_PATH') +rna_prop_repo_index = IntProperty(name="Repo Index", default=-1) +rna_prop_remote_url = StringProperty(name="Repo URL", subtype='FILE_PATH') +rna_prop_pkg_id = StringProperty(name="Package ID") + +rna_prop_enable_on_install = BoolProperty( + name="Enable on Install", + description="Enable after installing", + default=True, +) +rna_prop_enable_on_install_type_map = { + "add-on": "Enable Add-on", + "theme": "Set Current Theme", +} + + +def rna_prop_repo_enum_local_only_itemf(_self, context): + if context is None: + result = [] + else: + result = [ + ( + repo_item.module, + repo_item.name if repo_item.enabled else (repo_item.name + " (disabled)"), + "", + ) + for repo_item in repo_iter_valid_local_only(context) + ] + # Prevent the strings from being freed. + rna_prop_repo_enum_local_only_itemf._result = result + return result + + +is_background = bpy.app.background + +# Execute tasks concurrently. +is_concurrent = True + +# Selected check-boxes. 
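+# Each entry is a (pkg_id, repo_index) pair, see `_pkg_marked_by_repo`.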
+blender_extension_mark = set() +blender_extension_show = set() + + +# Map the enum value to the value in the manifest. +blender_filter_by_type_map = { + "ALL": "", + "ADDON": "add-on", + "KEYMAP": "keymap", + "THEME": "theme", +} + + +# ----------------------------------------------------------------------------- +# Signal Context Manager (Catch Control-C) +# + + +class CheckSIGINT_Context: + __slots__ = ( + "has_interrupt", + "_old_fn", + ) + + def _signal_handler_sigint(self, _, __): + self.has_interrupt = True + print("INTERRUPT") + + def __init__(self): + self.has_interrupt = False + self._old_fn = None + + def __enter__(self): + import signal + self._old_fn = signal.signal(signal.SIGINT, self._signal_handler_sigint) + return self + + def __exit__(self, _ty, _value, _traceback): + import signal + signal.signal(signal.SIGINT, self._old_fn or signal.SIG_DFL) + + +# ----------------------------------------------------------------------------- +# Internal Utilities +# + +def extension_url_find_repo_index_and_pkg_id(url): + from .bl_extension_utils import ( + pkg_manifest_archive_url_abs_from_remote_url, + ) + from .bl_extension_ops import ( + extension_repos_read, + ) + # return repo_index, pkg_id + from . import repo_cache_store + + # NOTE: we might want to use `urllib.parse.urlsplit` so it's possible to include variables in the URL. + url_basename = url.rpartition("/")[2] + + repos_all = extension_repos_read() + + for repo_index, ( + pkg_manifest_remote, + pkg_manifest_local, + ) in enumerate(zip( + repo_cache_store.pkg_manifest_from_remote_ensure(error_fn=print), + repo_cache_store.pkg_manifest_from_local_ensure(error_fn=print), + )): + # It's possible the remote repo could not be connected to when syncing. + # Allow it to be None without raising an exception. + if pkg_manifest_remote is None: + continue + + repo = repos_all[repo_index] + remote_url = repo.remote_url + if not remote_url: + continue + for pkg_id, item_remote in pkg_manifest_remote.items(): + archive_url = item_remote["archive_url"] + archive_url_basename = archive_url.rpartition("/")[2] + # First compare the filenames, if this matches, check the full URL. + if url_basename != archive_url_basename: + continue + + # Calculate the absolute URL. + archive_url_abs = pkg_manifest_archive_url_abs_from_remote_url(remote_url, archive_url) + if archive_url_abs == url: + return repo_index, repo.name, pkg_id, item_remote, pkg_manifest_local.get(pkg_id) + + return -1, "", "", None, None + + +def online_user_agent_from_blender(): + # NOTE: keep this brief and avoid `platform.platform()` which could identify individual users. + # Produces something like this: `Blender/4.2.0 (Linux x86_64; cycle=alpha)` or similar. + import platform + return "Blender/{:d}.{:d}.{:d} ({:s} {:s}; cycle={:s})".format( + *bpy.app.version, + platform.system(), + platform.machine(), + bpy.app.version_cycle, + ) + + +def lock_result_any_failed_with_report(op, lock_result, report_type='ERROR'): + """ + Convert any locking errors from ``bl_extension_utils.RepoLock.acquire`` into reports. + + Note that we might want to allow some repositories not to lock and still proceed (in the future). 
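+
+    Returns True when locking any repository failed, so callers can cancel the operation.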
+ """ + any_errors = False + for directory, lock_result_for_repo in lock_result.items(): + if lock_result_for_repo is None: + continue + print("Error \"{:s}\" locking \"{:s}\"".format(lock_result_for_repo, repr(directory))) + op.report({report_type}, lock_result_for_repo) + any_errors = True + return any_errors + + +def pkg_info_check_exclude_filter_ex(name, tagline, search_lower): + return ( + (search_lower in name.lower() or search_lower in iface_(name).lower()) or + (search_lower in tagline.lower() or search_lower in iface_(tagline).lower()) + ) + + +def pkg_info_check_exclude_filter(item, search_lower): + return pkg_info_check_exclude_filter_ex(item["name"], item["tagline"], search_lower) + + +def extension_theme_enable_filepath(filepath): + bpy.ops.script.execute_preset( + filepath=filepath, + menu_idname="USERPREF_MT_interface_theme_presets", + ) + + +def extension_theme_enable(repo_directory, pkg_idname): + from .bl_extension_utils import ( + pkg_theme_file_list, + ) + # Enable the theme. + theme_dir, theme_files = pkg_theme_file_list(repo_directory, pkg_idname) + + # NOTE: a theme package can contain multiple themes, in this case just use the first + # as the list is sorted and picking any theme is arbitrary if there are multiple. + if not theme_files: + return + + extension_theme_enable_filepath(os.path.join(theme_dir, theme_files[0])) + + +def repo_iter_valid_local_only(context): + from . import repo_paths_or_none + extension_repos = context.preferences.extensions.repos + for repo_item in extension_repos: + if not repo_item.enabled: + continue + # Ignore repositories that have invalid settings. + directory, remote_url = repo_paths_or_none(repo_item) + if directory is None: + continue + if remote_url: + continue + yield repo_item + + +class RepoItem(NamedTuple): + name: str + directory: str + remote_url: str + module: str + use_cache: bool + + +def repo_cache_store_refresh_from_prefs(include_disabled=False): + from . import repo_cache_store + from . import repo_paths_or_none + extension_repos = bpy.context.preferences.extensions.repos + repos = [] + for repo_item in extension_repos: + if not include_disabled: + if not repo_item.enabled: + continue + directory, remote_url = repo_paths_or_none(repo_item) + if directory is None: + continue + repos.append((directory, remote_url)) + + repo_cache_store.refresh_from_repos(repos=repos) + + +def _preferences_ensure_disabled(*, repo_item, pkg_id_sequence, default_set): + import sys + import addon_utils + + result = {} + errors = [] + + def handle_error(ex): + print("Error:", ex) + errors.append(str(ex)) + + modules_clear = [] + + module_base_elem = ("bl_ext", repo_item.module) + + repo_module = sys.modules.get(".".join(module_base_elem)) + if repo_module is None: + print("Repo module \"{:s}\" not in \"sys.modules\", unexpected!".format(".".join(module_base_elem))) + + for pkg_id in pkg_id_sequence: + addon_module_elem = (*module_base_elem, pkg_id) + addon_module_name = ".".join(addon_module_elem) + loaded_default, loaded_state = addon_utils.check(addon_module_name) + + result[addon_module_name] = loaded_default, loaded_state + + # Not loaded or default, skip. + if not (loaded_default or loaded_state): + continue + + # This report isn't needed, it just shows a warning in the case of irregularities + # which may be useful when debugging issues. 
+        if repo_module is not None:
+            if not hasattr(repo_module, pkg_id):
+                print("Repo module \"{:s}.{:s}\" not a sub-module!".format(".".join(module_base_elem), pkg_id))
+
+        addon_utils.disable(addon_module_name, default_set=default_set, handle_error=handle_error)
+
+        modules_clear.append(pkg_id)
+
+    # Clear modules.
+
+    # Extensions, repository & final `.` to ensure the module is part of the repository.
+    prefix_base = ".".join(module_base_elem) + "."
+    # Needed for `startswith` check.
+    prefix_addon_modules = {prefix_base + pkg_id for pkg_id in modules_clear}
+    # Needed for `startswith` check (sub-modules).
+    prefix_addon_modules_base = tuple([module + "." for module in prefix_addon_modules])
+
+    # NOTE(@ideasman42): clearing the modules is not great practice,
+    # however we need to ensure this is fully un-loaded then reloaded.
+    for key in list(sys.modules.keys()):
+        if not key.startswith(prefix_base):
+            continue
+        if not (
+                # This module is the add-on.
+                key in prefix_addon_modules or
+                # This module is a sub-module of the add-on.
+                key.startswith(prefix_addon_modules_base)
+        ):
+            continue
+
+        # Use pop instead of del because there is a (very) small chance
+        # that classes defined in a removed module define a `__del__` method that manipulates modules.
+        sys.modules.pop(key, None)
+
+    # Now remove the module from its parent (when found).
+    # Although in most cases this isn't needed because disabling the add-on typically deletes the module,
+    # don't report a warning if this is the case.
+    if repo_module is not None:
+        for pkg_id in pkg_id_sequence:
+            if not hasattr(repo_module, pkg_id):
+                continue
+            delattr(repo_module, pkg_id)
+
+    return result, errors
+
+
+def _preferences_ensure_enabled(*, repo_item, pkg_id_sequence, result, handle_error):
+    import addon_utils
+    for addon_module_name, (loaded_default, loaded_state) in result.items():
+        # The module was not loaded, so no need to restore it.
+        if not loaded_state:
+            continue
+
+        addon_utils.enable(addon_module_name, default_set=loaded_default, handle_error=handle_error)
+
+
+def _preferences_ensure_enabled_all(*, addon_restore, handle_error):
+    for repo_item, pkg_id_sequence, result in addon_restore:
+        _preferences_ensure_enabled(
+            repo_item=repo_item,
+            pkg_id_sequence=pkg_id_sequence,
+            result=result,
+            handle_error=handle_error,
+        )
+
+
+def _preferences_install_post_enable_on_install(
+        *,
+        directory,
+        pkg_manifest_local,
+        pkg_id_sequence,
+        # These were already installed and an attempt to enable them will have already been made.
+        pkg_id_sequence_upgrade,
+        handle_error,
+):
+    import addon_utils
+
+    # It only ever makes sense to enable one theme.
+    has_theme = False
+
+    repo_item = _extensions_repo_from_directory(directory)
+    for pkg_id in pkg_id_sequence:
+        item_local = pkg_manifest_local.get(pkg_id)
+        if item_local is None:
+            # Unlikely but possible, do nothing in this case.
+            print("Package should have been installed but not found:", pkg_id)
+            return
+
+        if item_local["type"] == "add-on":
+            # Check if the add-on will have been enabled from re-installing.
+ if pkg_id in pkg_id_sequence_upgrade: + continue + + addon_module_name = "bl_ext.{:s}.{:s}".format(repo_item.module, pkg_id) + addon_utils.enable(addon_module_name, default_set=True, handle_error=handle_error) + elif item_local["type"] == "theme": + if has_theme: + continue + extension_theme_enable(directory, pkg_id) + has_theme = True + + +def _preferences_ui_redraw(): + for win in bpy.context.window_manager.windows: + for area in win.screen.areas: + if area.type != 'PREFERENCES': + continue + area.tag_redraw() + + +def _preferences_ui_refresh_addons(): + import addon_utils + # TODO: make a public method. + addon_utils.modules._is_first = True + + +def _preferences_ensure_sync(): + # TODO: define when/where exactly sync should be ensured. + # This is a general issue: + from . import repo_cache_store + sync_required = False + for repo_index, ( + pkg_manifest_remote, + pkg_manifest_local, + ) in enumerate(zip( + repo_cache_store.pkg_manifest_from_remote_ensure(error_fn=print), + repo_cache_store.pkg_manifest_from_local_ensure(error_fn=print), + )): + if pkg_manifest_remote is None: + sync_required = True + break + if pkg_manifest_local is None: + sync_required = True + break + + if sync_required: + for wm in bpy.data.window_managers: + for win in wm.windows: + win.cursor_set('WAIT') + try: + bpy.ops.bl_pkg.repo_sync_all() + except BaseException as ex: + print("Sync failed:", ex) + + for wm in bpy.data.window_managers: + for win in wm.windows: + win.cursor_set('DEFAULT') + + +def extension_repos_read_index(index, *, include_disabled=False): + from . import repo_paths_or_none + extension_repos = bpy.context.preferences.extensions.repos + index_test = 0 + for repo_item in extension_repos: + if not include_disabled: + if not repo_item.enabled: + continue + directory, remote_url = repo_paths_or_none(repo_item) + if directory is None: + continue + + if index == index_test: + return RepoItem( + name=repo_item.name, + directory=directory, + remote_url=remote_url, + module=repo_item.module, + use_cache=repo_item.use_cache, + ) + index_test += 1 + return None + + +def extension_repos_read(*, include_disabled=False, use_active_only=False): + from . import repo_paths_or_none + extensions = bpy.context.preferences.extensions + extension_repos = extensions.repos + result = [] + + if use_active_only: + try: + extension_active = extension_repos[extensions.active_repo] + except IndexError: + return result + + extension_repos = [extension_active] + del extension_active + + for repo_item in extension_repos: + if not include_disabled: + if not repo_item.enabled: + continue + + # Ignore repositories that have invalid settings. 
+        directory, remote_url = repo_paths_or_none(repo_item)
+        if directory is None:
+            continue
+
+        result.append(RepoItem(
+            name=repo_item.name,
+            directory=directory,
+            remote_url=remote_url,
+            module=repo_item.module,
+            use_cache=repo_item.use_cache,
+        ))
+    return result
+
+
+def _extension_repos_index_from_directory(directory):
+    directory = os.path.normpath(directory)
+    repos_all = extension_repos_read()
+    for i, repo_item in enumerate(repos_all):
+        if os.path.normpath(repo_item.directory) == directory:
+            return i
+    if os.path.exists(directory):
+        for i, repo_item in enumerate(repos_all):
+            if os.path.normpath(repo_item.directory) == directory:
+                return i
+    return -1
+
+
+def _extensions_repo_from_directory(directory):
+    repos_all = extension_repos_read()
+    repo_index = _extension_repos_index_from_directory(directory)
+    if repo_index == -1:
+        return None
+    return repos_all[repo_index]
+
+
+def _extensions_repo_from_directory_and_report(directory, report_fn):
+    if not directory:
+        report_fn({'ERROR'}, "Directory not set")
+        return None
+
+    repo_item = _extensions_repo_from_directory(directory)
+    if repo_item is None:
+        report_fn({'ERROR'}, "Directory has no repo entry: {:s}".format(directory))
+        return None
+    return repo_item
+
+
+def _pkg_marked_by_repo(pkg_manifest_all):
+    # NOTE: pkg_manifest_all can be from local or remote source.
+    wm = bpy.context.window_manager
+    search_lower = wm.extension_search.lower()
+    filter_by_type = blender_filter_by_type_map[wm.extension_type]
+
+    repo_pkg_map = {}
+    for pkg_id, repo_index in blender_extension_mark:
+        # While this should be prevented, any marked packages out of the range will cause problems, skip them.
+        if repo_index >= len(pkg_manifest_all):
+            continue
+
+        pkg_manifest = pkg_manifest_all[repo_index]
+        item = pkg_manifest.get(pkg_id)
+        if item is None:
+            continue
+        if filter_by_type and (filter_by_type != item["type"]):
+            continue
+        if search_lower and not pkg_info_check_exclude_filter(item, search_lower):
+            continue
+
+        pkg_list = repo_pkg_map.get(repo_index)
+        if pkg_list is None:
+            pkg_list = repo_pkg_map[repo_index] = []
+        pkg_list.append(pkg_id)
+    return repo_pkg_map
+
+
+# -----------------------------------------------------------------------------
+# Wheel Handling
+#
+
+def _extensions_wheel_filter_for_platform(wheels):
+
+    # Copied from `wheel.bwheel_dist.get_platform(..)` which isn't part of Python.
+    # This misses some additional checks which aren't supported by official Blender builds,
+    # it's highly doubtful users ever run into this but we could extend this if it's really needed.
+    # (e.g. `linux-i686` on 64 bit systems & `linux-armv7l`).
+    import sysconfig
+    platform_tag_current = sysconfig.get_platform().replace("-", "_")
+
+    # https://packaging.python.org/en/latest/specifications/binary-distribution-format/#file-name-convention
+    # This also defines the name spec:
+    # `{distribution}-{version}(-{build tag})?-{python tag}-{abi tag}-{platform tag}.whl`
+
+    wheels_compatible = []
+    for wheel in wheels:
+        wheel_filename = wheel.rsplit("/", 1)[-1]
+
+        # Handled by validation (paranoid).
+        if not wheel_filename.lower().endswith(".whl"):
+            print("Error: wheel doesn't end with \".whl\", skipping!")
+            continue
+
+        wheel_filename_split = wheel_filename[:-4].split("-")
+        # Skipping, should never happen as validation will fail,
+        # keep paranoid check although this might be removed in the future.
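+        # For example `example_pkg-1.0.0-py3-none-any.whl` splits into 5 parts
+        # (6 when a build tag is present), the last 3 being the Python/ABI/platform tags.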
+ if not (5 <= len(wheel_filename_split) <= 6): + print("Error: wheel doesn't follow naming spec \"{:s}\"".format(wheel_filename)) + continue + # TODO: Match Python & ABI tags. + _python_tag, _abi_tag, platform_tag = wheel_filename_split[-3:] + + if platform_tag in {"any", platform_tag_current}: + pass + elif platform_tag_current.startswith("macosx_") and ( + # FIXME: `macosx_11.00` should be `macosx_11_0`. + platform_tag.startswith("macosx_") and + # Ignore the MACOSX version, ensure `arm64` suffix. + platform_tag.endswith("_" + platform_tag_current.rpartition("_")[2]) + ): + pass + elif platform_tag_current.startswith("linux_") and ( + # May be `manylinux1` or `manylinux2010`. + platform_tag.startswith("manylinux") and + # Match against the architecture: `linux_x86_64` -> `_x86_64` (ensure the same suffix). + # The GLIBC version is ignored because it will often be older. + # Although we will probably want to detect incompatible GLIBC versions eventually. + platform_tag.endswith("_" + platform_tag_current.partition("_")[2]) + ): + pass + else: + # Useful to know, can quiet print in the future. + print( + "Skipping wheel for other system", + "({:s} != {:s}):".format(platform_tag, platform_tag_current), + wheel_filename, + ) + continue + + wheels_compatible.append(wheel) + return wheels_compatible + + +def _extensions_repo_sync_wheels(repo_cache_store): + """ + This function collects all wheels from all packages and ensures the packages are either extracted or removed + when they are no longer used. + """ + from .bl_extension_local import sync + + repos_all = extension_repos_read() + + wheel_list = [] + for repo_index, pkg_manifest_local in enumerate(repo_cache_store.pkg_manifest_from_local_ensure(error_fn=print)): + repo = repos_all[repo_index] + repo_module = repo.module + repo_directory = repo.directory + for pkg_id, item_local in pkg_manifest_local.items(): + pkg_dirpath = os.path.join(repo_directory, pkg_id) + wheels_rel = item_local.get("wheels", None) + if wheels_rel is None: + continue + if not isinstance(wheels_rel, list): + continue + + # Filter only the wheels for this platform. + wheels_rel = _extensions_wheel_filter_for_platform(wheels_rel) + if not wheels_rel: + continue + + wheels_abs = [] + for filepath_rel in wheels_rel: + filepath_abs = os.path.join(pkg_dirpath, filepath_rel) + if not os.path.exists(filepath_abs): + continue + wheels_abs.append(filepath_abs) + + if not wheels_abs: + continue + + unique_pkg_id = "{:s}.{:s}".format(repo_module, pkg_id) + wheel_list.append((unique_pkg_id, wheels_abs)) + + extensions = bpy.utils.user_resource('EXTENSIONS') + local_dir = os.path.join(extensions, ".local") + + sync( + local_dir=local_dir, + wheel_list=wheel_list, + ) + + +# ----------------------------------------------------------------------------- +# Theme Handling +# + +def _preferences_theme_state_create(): + from .bl_extension_utils import ( + file_mtime_or_none, + scandir_with_demoted_errors, + ) + filepath = bpy.context.preferences.themes[0].filepath + if not filepath: + return None, None + + if (result := file_mtime_or_none(filepath)) is not None: + return result, filepath + + # It's possible the XML was renamed after upgrading, detect another. + dirpath = os.path.dirname(filepath) + + # Not essential, just avoids a demoted error from `scandir` which seems like it may be a bug. + if not os.path.exists(dirpath): + return None, None + + filepath = "" + for entry in scandir_with_demoted_errors(dirpath): + if entry.is_dir(): + continue + # There must only ever be one. 
+ if entry.name.lower().endswith(".xml"): + if (result := file_mtime_or_none(entry.path)) is not None: + return result, filepath + return None, None + + +def _preferences_theme_state_restore(state): + state_update = _preferences_theme_state_create() + # Unchanged, return. + if state == state_update: + return + + # Uninstall: + # The current theme was an extension that was uninstalled. + if state[0] is not None and state_update[0] is None: + bpy.ops.preferences.reset_default_theme() + return + + # Update: + if state_update[0] is not None: + extension_theme_enable_filepath(state_update[1]) + + +# ----------------------------------------------------------------------------- +# Internal Implementation +# + +def _is_modal(op): + if is_background: + return False + if not op.options.is_invoke: + return False + return True + + +class CommandHandle: + __slots__ = ( + "modal_timer", + "cmd_batch", + "wm", + "request_exit", + ) + + def __init__(self): + self.modal_timer = None + self.cmd_batch = None + self.wm = None + self.request_exit = None + + @staticmethod + def op_exec_from_iter(op, context, cmd_batch, is_modal): + if not is_modal: + with CheckSIGINT_Context() as sigint_ctx: + has_request_exit = cmd_batch.exec_blocking( + report_fn=_report, + request_exit_fn=lambda: sigint_ctx.has_interrupt, + concurrent=is_concurrent, + ) + if has_request_exit: + op.report({'WARNING'}, "Command interrupted") + return {'FINISHED'} + + return {'FINISHED'} + + handle = CommandHandle() + handle.cmd_batch = cmd_batch + handle.modal_timer = context.window_manager.event_timer_add(0.01, window=context.window) + handle.wm = context.window_manager + + handle.wm.modal_handler_add(op) + op._runtime_handle = handle + return {'RUNNING_MODAL'} + + def op_modal_step(self, op, context): + command_result = self.cmd_batch.exec_non_blocking( + request_exit=self.request_exit, + ) + + # Forward new messages to reports. + msg_list_per_command = self.cmd_batch.calc_status_log_since_last_request_or_none() + if msg_list_per_command is not None: + for i, msg_list in enumerate(msg_list_per_command, 1): + for (ty, msg) in msg_list: + if len(msg_list_per_command) > 1: + # These reports are flattened, note the process number that fails so + # whoever is reading the reports can make sense of the messages. + msg = "{:s} (process {:d} of {:d})".format(msg, i, len(msg_list_per_command)) + if ty == 'STATUS': + op.report({'INFO'}, msg) + else: + op.report({'WARNING'}, msg) + del msg_list_per_command + + # Avoid high CPU usage by only redrawing when there has been a change. + msg_list = self.cmd_batch.calc_status_log_or_none() + if msg_list is not None: + context.workspace.status_text_set( + " | ".join( + ["{:s}: {:s}".format(ty, str(msg)) for (ty, msg) in msg_list] + ) + ) + + # Setting every time is a bit odd. but OK. 
+ repo_status_text.title = self.cmd_batch.title + repo_status_text.log = msg_list + repo_status_text.running = True + _preferences_ui_redraw() + + if command_result.all_complete: + self.wm.event_timer_remove(self.modal_timer) + del op._runtime_handle + context.workspace.status_text_set(None) + repo_status_text.running = False + return {'FINISHED'} + + return {'RUNNING_MODAL'} + + def op_modal_impl(self, op, context, event): + refresh = False + if event.type == 'TIMER': + refresh = True + elif event.type == 'ESC': + if not self.request_exit: + print("Request exit!") + self.request_exit = True + refresh = True + + if refresh: + return self.op_modal_step(op, context) + return {'RUNNING_MODAL'} + + +def _report(ty, msg): + if ty == 'DONE': + assert msg == "" + return + + if is_background: + print(ty, msg) + return + + +def _repo_dir_and_index_get(repo_index, directory, report_fn): + if repo_index != -1: + repo_item = extension_repos_read_index(repo_index) + directory = repo_item.directory if (repo_item is not None) else "" + if not directory: + report_fn({'ERROR'}, "Repository not set") + return directory + + +# ----------------------------------------------------------------------------- +# Public Repository Actions +# + +class _BlPkgCmdMixIn: + """ + Utility to execute mix-in. + + Sub-class must define. + - bl_idname + - bl_label + - exec_command_iter + - exec_command_finish + """ + cls_slots = ( + "_runtime_handle", + ) + + @classmethod + def __init_subclass__(cls) -> None: + for attr in ("exec_command_iter", "exec_command_finish"): + if getattr(cls, attr) is getattr(_BlPkgCmdMixIn, attr): + raise Exception("Subclass did not define 'exec_command_iter'!") + + def exec_command_iter(self, is_modal): + raise Exception("Subclass must define!") + + def exec_command_finish(self): + raise Exception("Subclass must define!") + + def error_fn_from_exception(self, ex): + # A bit silly setting every time, but it's needed to ensure there is a title. + repo_status_text.log.append(("ERROR", str(ex))) + + def execute(self, context): + is_modal = _is_modal(self) + cmd_batch = self.exec_command_iter(is_modal) + # It's possible the action could not be started. + # In this case `exec_command_iter` should report an error. + if cmd_batch is None: + return {'CANCELLED'} + + # Needed in cast there are no commands within `cmd_batch`, + # the title should still be set. 
+ repo_status_text.title = cmd_batch.title + + result = CommandHandle.op_exec_from_iter(self, context, cmd_batch, is_modal) + if 'FINISHED' in result: + self.exec_command_finish() + return result + + def modal(self, context, event): + result = self._runtime_handle.op_modal_impl(self, context, event) + if 'FINISHED' in result: + self.exec_command_finish() + return result + + +class BlPkgDummyProgress(Operator, _BlPkgCmdMixIn): + bl_idname = "bl_pkg.dummy_progress" + bl_label = "Ext Demo" + __slots__ = _BlPkgCmdMixIn.cls_slots + + def exec_command_iter(self, is_modal): + return bl_extension_utils.CommandBatch( + title="Dummy Progress", + batch=[ + partial( + bl_extension_utils.dummy_progress, + use_idle=is_modal, + ), + ], + ) + + def exec_command_finish(self): + _preferences_ui_redraw() + + +class BlPkgRepoSync(Operator, _BlPkgCmdMixIn): + bl_idname = "bl_pkg.repo_sync" + bl_label = "Ext Repo Sync" + __slots__ = _BlPkgCmdMixIn.cls_slots + + repo_directory: rna_prop_directory + repo_index: rna_prop_repo_index + + def exec_command_iter(self, is_modal): + directory = _repo_dir_and_index_get(self.repo_index, self.repo_directory, self.report) + if not directory: + return None + + if (repo_item := _extensions_repo_from_directory_and_report(directory, self.report)) is None: + return None + + if not os.path.exists(directory): + try: + os.makedirs(directory) + except BaseException as ex: + self.report({'ERROR'}, str(ex)) + return {'CANCELLED'} + + # Needed to refresh. + self.repo_directory = directory + + # Lock repositories. + self.repo_lock = RepoLock(repo_directories=[directory], cookie=cookie_from_session()) + if lock_result_any_failed_with_report(self, self.repo_lock.acquire()): + return None + + cmd_batch = [] + if repo_item.remote_url: + cmd_batch.append( + partial( + bl_extension_utils.repo_sync, + directory=directory, + remote_url=repo_item.remote_url, + online_user_agent=online_user_agent_from_blender(), + use_idle=is_modal, + ) + ) + + return bl_extension_utils.CommandBatch( + title="Sync", + batch=cmd_batch, + ) + + def exec_command_finish(self): + from . import repo_cache_store + + repo_cache_store_refresh_from_prefs() + repo_cache_store.refresh_remote_from_directory( + directory=self.repo_directory, + error_fn=self.error_fn_from_exception, + force=True, + ) + + # Unlock repositories. + lock_result_any_failed_with_report(self, self.repo_lock.release(), report_type='WARNING') + del self.repo_lock + + _preferences_ui_redraw() + + +class BlPkgRepoSyncAll(Operator, _BlPkgCmdMixIn): + bl_idname = "bl_pkg.repo_sync_all" + bl_label = "Ext Repo Sync All" + __slots__ = _BlPkgCmdMixIn.cls_slots + + use_active_only: BoolProperty( + name="Active Only", + description="Only sync the active repository", + ) + + def exec_command_iter(self, is_modal): + use_active_only = self.use_active_only + repos_all = extension_repos_read(use_active_only=use_active_only) + + if not repos_all: + self.report({'INFO'}, "No repositories to sync") + return None + + for repo_item in repos_all: + if not os.path.exists(repo_item.directory): + try: + os.makedirs(repo_item.directory) + except BaseException as ex: + self.report({'WARNING'}, str(ex)) + return None + + cmd_batch = [] + for repo_item in repos_all: + # Local only repositories should still refresh, but not run the sync. 
+ if repo_item.remote_url: + cmd_batch.append(partial( + bl_extension_utils.repo_sync, + directory=repo_item.directory, + remote_url=repo_item.remote_url, + online_user_agent=online_user_agent_from_blender(), + use_idle=is_modal, + )) + + repos_lock = [repo_item.directory for repo_item in repos_all] + + # Lock repositories. + self.repo_lock = RepoLock(repo_directories=repos_lock, cookie=cookie_from_session()) + if lock_result_any_failed_with_report(self, self.repo_lock.acquire()): + return None + + return bl_extension_utils.CommandBatch( + title="Sync \"{:s}\"".format(repos_all[0].name) if use_active_only else "Sync All", + batch=cmd_batch, + ) + + def exec_command_finish(self): + from . import repo_cache_store + + repo_cache_store_refresh_from_prefs() + + for repo_item in extension_repos_read(): + repo_cache_store.refresh_remote_from_directory( + directory=repo_item.directory, + error_fn=self.error_fn_from_exception, + force=True, + ) + + # Unlock repositories. + lock_result_any_failed_with_report(self, self.repo_lock.release(), report_type='WARNING') + del self.repo_lock + + _preferences_ui_redraw() + + +class BlPkgPkgUpgradeAll(Operator, _BlPkgCmdMixIn): + bl_idname = "bl_pkg.pkg_upgrade_all" + bl_label = "Ext Package Upgrade All" + __slots__ = _BlPkgCmdMixIn.cls_slots + ( + "_repo_directories", + ) + + use_active_only: BoolProperty( + name="Active Only", + description="Only sync the active repository", + ) + + def exec_command_iter(self, is_modal): + from . import repo_cache_store + self._repo_directories = set() + self._addon_restore = [] + self._theme_restore = _preferences_theme_state_create() + + use_active_only = self.use_active_only + repos_all = extension_repos_read(use_active_only=use_active_only) + repo_directory_supset = [repo_entry.directory for repo_entry in repos_all] if use_active_only else None + + if not repos_all: + self.report({'INFO'}, "No repositories to upgrade") + return None + + # NOTE: Unless we have a "clear-cache" operator - there isn't a great place to apply cache-clearing. + # So when cache is disabled simply clear all cache before performing an update. + # Further, individual install & remove operation will manage the cache + # for the individual packages being installed or removed. + for repo_item in repos_all: + if repo_item.use_cache: + continue + bl_extension_utils.pkg_repo_cache_clear(repo_item.directory) + + # Track add-ons to disable before uninstalling. + handle_addons_info = [] + + packages_to_upgrade = [[] for _ in range(len(repos_all))] + package_count = 0 + + pkg_manifest_local_all = list(repo_cache_store.pkg_manifest_from_local_ensure( + error_fn=self.error_fn_from_exception, + directory_subset=repo_directory_supset, + )) + for repo_index, pkg_manifest_remote in enumerate(repo_cache_store.pkg_manifest_from_remote_ensure( + error_fn=self.error_fn_from_exception, + directory_subset=repo_directory_supset, + )): + if pkg_manifest_remote is None: + continue + + pkg_manifest_local = pkg_manifest_local_all[repo_index] + if pkg_manifest_local is None: + continue + + for pkg_id, item_remote in pkg_manifest_remote.items(): + item_local = pkg_manifest_local.get(pkg_id) + if item_local is None: + # Not installed. 
+ continue + + if item_remote["version"] != item_local["version"]: + packages_to_upgrade[repo_index].append(pkg_id) + package_count += 1 + + if packages_to_upgrade[repo_index]: + handle_addons_info.append((repos_all[repo_index], list(packages_to_upgrade[repo_index]))) + + cmd_batch = [] + for repo_index, pkg_id_sequence in enumerate(packages_to_upgrade): + if not pkg_id_sequence: + continue + repo_item = repos_all[repo_index] + cmd_batch.append(partial( + bl_extension_utils.pkg_install, + directory=repo_item.directory, + remote_url=repo_item.remote_url, + pkg_id_sequence=pkg_id_sequence, + online_user_agent=online_user_agent_from_blender(), + use_cache=repo_item.use_cache, + use_idle=is_modal, + )) + self._repo_directories.add(repo_item.directory) + + if not cmd_batch: + self.report({'INFO'}, "No installed packages to update") + return None + + # Lock repositories. + self.repo_lock = RepoLock(repo_directories=list(self._repo_directories), cookie=cookie_from_session()) + if lock_result_any_failed_with_report(self, self.repo_lock.acquire()): + return None + + for repo_item, pkg_id_sequence in handle_addons_info: + result, errors = _preferences_ensure_disabled( + repo_item=repo_item, + pkg_id_sequence=pkg_id_sequence, + default_set=False, + ) + self._addon_restore.append((repo_item, pkg_id_sequence, result)) + + return bl_extension_utils.CommandBatch( + title=( + "Update {:d} Package(s) from \"{:s}\"".format(package_count, repos_all[0].name) if use_active_only else + "Update {:d} Package(s)".format(package_count) + ), + batch=cmd_batch, + ) + + def exec_command_finish(self): + + # Unlock repositories. + lock_result_any_failed_with_report(self, self.repo_lock.release(), report_type='WARNING') + del self.repo_lock + + # Refresh installed packages for repositories that were operated on. + from . import repo_cache_store + for directory in self._repo_directories: + repo_cache_store.refresh_local_from_directory( + directory=directory, + error_fn=self.error_fn_from_exception, + ) + + # TODO: it would be nice to include this message in the banner. + def handle_error(ex): + self.report({'ERROR'}, str(ex)) + + _preferences_ensure_enabled_all( + addon_restore=self._addon_restore, + handle_error=handle_error, + ) + _preferences_theme_state_restore(self._theme_restore) + + _preferences_ui_redraw() + _preferences_ui_refresh_addons() + + +class BlPkgPkgInstallMarked(Operator, _BlPkgCmdMixIn): + bl_idname = "bl_pkg.pkg_install_marked" + bl_label = "Ext Package Install_marked" + __slots__ = _BlPkgCmdMixIn.cls_slots + ( + "_repo_directories", + "_repo_map_packages_addon_only", + ) + + enable_on_install: rna_prop_enable_on_install + + def exec_command_iter(self, is_modal): + from . import repo_cache_store + repos_all = extension_repos_read() + pkg_manifest_remote_all = list(repo_cache_store.pkg_manifest_from_remote_ensure( + error_fn=self.error_fn_from_exception, + )) + repo_pkg_map = _pkg_marked_by_repo(pkg_manifest_remote_all) + self._repo_directories = set() + self._repo_map_packages_addon_only = [] + package_count = 0 + + cmd_batch = [] + for repo_index, pkg_id_sequence in sorted(repo_pkg_map.items()): + repo_item = repos_all[repo_index] + # Filter out already installed. 
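+            # Refreshing the local manifest here means the installed check below reflects what is currently on disk.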
+ pkg_manifest_local = repo_cache_store.refresh_local_from_directory( + directory=repo_item.directory, + error_fn=self.error_fn_from_exception, + ) + if pkg_manifest_local is None: + continue + pkg_id_sequence = [pkg_id for pkg_id in pkg_id_sequence if pkg_id not in pkg_manifest_local] + if not pkg_id_sequence: + continue + + cmd_batch.append(partial( + bl_extension_utils.pkg_install, + directory=repo_item.directory, + remote_url=repo_item.remote_url, + pkg_id_sequence=pkg_id_sequence, + online_user_agent=online_user_agent_from_blender(), + use_cache=repo_item.use_cache, + use_idle=is_modal, + )) + self._repo_directories.add(repo_item.directory) + package_count += len(pkg_id_sequence) + + # Filter out non add-on extensions. + pkg_manifest_remote = pkg_manifest_remote_all[repo_index] + + pkg_id_sequence_addon_only = [ + pkg_id for pkg_id in pkg_id_sequence if pkg_manifest_remote[pkg_id]["type"] == "add-on"] + if pkg_id_sequence_addon_only: + self._repo_map_packages_addon_only.append((repo_item.directory, pkg_id_sequence_addon_only)) + + if not cmd_batch: + self.report({'ERROR'}, "No un-installed packages marked") + return None + + # Lock repositories. + self.repo_lock = RepoLock(repo_directories=list(self._repo_directories), cookie=cookie_from_session()) + if lock_result_any_failed_with_report(self, self.repo_lock.acquire()): + return None + + return bl_extension_utils.CommandBatch( + title="Install {:d} Marked Package(s)".format(package_count), + batch=cmd_batch, + ) + + def exec_command_finish(self): + + # Unlock repositories. + lock_result_any_failed_with_report(self, self.repo_lock.release(), report_type='WARNING') + del self.repo_lock + + # Refresh installed packages for repositories that were operated on. + from . import repo_cache_store + for directory in self._repo_directories: + repo_cache_store.refresh_local_from_directory( + directory=directory, + error_fn=self.error_fn_from_exception, + ) + + _extensions_repo_sync_wheels(repo_cache_store) + + # TODO: it would be nice to include this message in the banner. + def handle_error(ex): + self.report({'ERROR'}, str(ex)) + + for directory, pkg_id_sequence in self._repo_map_packages_addon_only: + + pkg_manifest_local = repo_cache_store.refresh_local_from_directory( + directory=directory, + error_fn=self.error_fn_from_exception, + ) + + if self.enable_on_install: + _preferences_install_post_enable_on_install( + directory=directory, + pkg_manifest_local=pkg_manifest_local, + pkg_id_sequence=pkg_id_sequence, + # Installed packages are always excluded. + pkg_id_sequence_upgrade=[], + handle_error=handle_error, + ) + + _preferences_ui_redraw() + _preferences_ui_refresh_addons() + + +class BlPkgPkgUninstallMarked(Operator, _BlPkgCmdMixIn): + bl_idname = "bl_pkg.pkg_uninstall_marked" + bl_label = "Ext Package Uninstall_marked" + __slots__ = _BlPkgCmdMixIn.cls_slots + ( + "_repo_directories", + ) + + def exec_command_iter(self, is_modal): + from . import repo_cache_store + # TODO: check if the packages are already installed (notify the user). + # Perhaps re-install? + repos_all = extension_repos_read() + pkg_manifest_local_all = list(repo_cache_store.pkg_manifest_from_local_ensure( + error_fn=self.error_fn_from_exception, + )) + repo_pkg_map = _pkg_marked_by_repo(pkg_manifest_local_all) + package_count = 0 + + self._repo_directories = set() + self._theme_restore = _preferences_theme_state_create() + + # Track add-ons to disable before uninstalling. 
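+        # Each entry is a `(repo_item, pkg_id_sequence)` pair; the add-ons are disabled
+        # once the repositories have been locked, just before the uninstall commands run.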
+ handle_addons_info = [] + + cmd_batch = [] + for repo_index, pkg_id_sequence in sorted(repo_pkg_map.items()): + repo_item = repos_all[repo_index] + + # Filter out not installed. + pkg_manifest_local = repo_cache_store.refresh_local_from_directory( + directory=repo_item.directory, + error_fn=self.error_fn_from_exception, + ) + if pkg_manifest_local is None: + continue + pkg_id_sequence = [pkg_id for pkg_id in pkg_id_sequence if pkg_id in pkg_manifest_local] + if not pkg_id_sequence: + continue + + cmd_batch.append( + partial( + bl_extension_utils.pkg_uninstall, + directory=repo_item.directory, + pkg_id_sequence=pkg_id_sequence, + use_idle=is_modal, + )) + self._repo_directories.add(repo_item.directory) + package_count += len(pkg_id_sequence) + + handle_addons_info.append((repo_item, pkg_id_sequence)) + + if not cmd_batch: + self.report({'ERROR'}, "No installed packages marked") + return None + + # Lock repositories. + self.repo_lock = RepoLock(repo_directories=list(self._repo_directories), cookie=cookie_from_session()) + if lock_result_any_failed_with_report(self, self.repo_lock.acquire()): + return None + + for repo_item, pkg_id_sequence in handle_addons_info: + # No need to store the result (`_`) because the add-ons aren't going to be enabled again. + _, errors = _preferences_ensure_disabled( + repo_item=repo_item, + pkg_id_sequence=pkg_id_sequence, + default_set=True, + ) + + return bl_extension_utils.CommandBatch( + title="Uninstall {:d} Marked Package(s)".format(package_count), + batch=cmd_batch, + ) + + def exec_command_finish(self): + + # Unlock repositories. + lock_result_any_failed_with_report(self, self.repo_lock.release(), report_type='WARNING') + del self.repo_lock + + # Refresh installed packages for repositories that were operated on. + from . import repo_cache_store + for directory in self._repo_directories: + repo_cache_store.refresh_local_from_directory( + directory=directory, + error_fn=self.error_fn_from_exception, + ) + + _extensions_repo_sync_wheels(repo_cache_store) + + _preferences_theme_state_restore(self._theme_restore) + + _preferences_ui_redraw() + _preferences_ui_refresh_addons() + + +class BlPkgPkgInstallFiles(Operator, _BlPkgCmdMixIn): + """Install an extension from a file into a locally managed repository""" + bl_idname = "bl_pkg.pkg_install_files" + bl_label = "Install from Disk" + __slots__ = _BlPkgCmdMixIn.cls_slots + ( + "repo_directory", + "pkg_id_sequence" + ) + _drop_variables = None + + filter_glob: StringProperty(default="*.zip", options={'HIDDEN'}) + + directory: StringProperty( + name="Directory", + subtype='DIR_PATH', + default="", + ) + files: CollectionProperty( + type=bpy.types.OperatorFileListElement, + options={'HIDDEN', 'SKIP_SAVE'} + ) + + # Use for for scripts. + filepath: StringProperty( + subtype='FILE_PATH', + ) + + repo: EnumProperty( + name="Local Repository", + items=rna_prop_repo_enum_local_only_itemf, + description="The local repository to install extensions into", + ) + + enable_on_install: rna_prop_enable_on_install + + # Only used for code-path for dropping an extension. + url: rna_prop_url + + def exec_command_iter(self, is_modal): + from .bl_extension_utils import ( + pkg_manifest_dict_from_file_or_error, + ) + + self._addon_restore = [] + self._theme_restore = _preferences_theme_state_create() + + # Happens when run from scripts and this argument isn't passed in. + if not self.properties.is_property_set("repo"): + self.report({'ERROR'}, "Repository not set") + return None + + # Repository accessed. 
+ repo_module_name = self.repo + repo_item = next( + (repo_item for repo_item in extension_repos_read() if repo_item.module == repo_module_name), + None, + ) + # This should really never happen as poll means this shouldn't be possible. + assert repo_item is not None + del repo_module_name + # Done with the repository. + + source_files = [os.path.join(file.name) for file in self.files] + source_directory = self.directory + # Support a single `filepath`, more convenient when calling from scripts. + if not (source_directory and source_files): + source_directory, source_file = os.path.split(self.filepath) + if not (source_directory and source_file): + # Be specific with this error as a vague message is confusing when files + # are passed via the command line. + if source_directory or source_file: + if source_file: + self.report({'ERROR'}, "Unable to install from relative path") + else: + self.report({'ERROR'}, "Unable to install a directory") + else: + self.report({'ERROR'}, "Unable to install from disk, no paths were defined") + return None + source_files = [source_file] + del source_file + assert len(source_files) > 0 + + # Make absolute paths. + source_files = [os.path.join(source_directory, filename) for filename in source_files] + + # Extract meta-data from package files. + # Note that errors are ignored here, let the underlying install operation do this. + pkg_id_sequence = [] + for source_filepath in source_files: + result = pkg_manifest_dict_from_file_or_error(source_filepath) + if isinstance(result, str): + continue + pkg_id = result["id"] + if pkg_id in pkg_id_sequence: + continue + pkg_id_sequence.append(pkg_id) + + directory = repo_item.directory + assert directory != "" + + # Collect package ID's. + self.repo_directory = directory + self.pkg_id_sequence = pkg_id_sequence + + # Detect upgrade. + if pkg_id_sequence: + from . import repo_cache_store + pkg_manifest_local = repo_cache_store.refresh_local_from_directory( + directory=self.repo_directory, + error_fn=self.error_fn_from_exception, + ) + if pkg_manifest_local is not None: + pkg_id_sequence_upgrade = [pkg_id for pkg_id in pkg_id_sequence if pkg_id in pkg_manifest_local] + if pkg_id_sequence_upgrade: + result, errors = _preferences_ensure_disabled( + repo_item=repo_item, + pkg_id_sequence=pkg_id_sequence_upgrade, + default_set=False, + ) + self._addon_restore.append((repo_item, pkg_id_sequence_upgrade, result)) + del repo_cache_store, pkg_manifest_local + + # Lock repositories. + self.repo_lock = RepoLock(repo_directories=[repo_item.directory], cookie=cookie_from_session()) + if lock_result_any_failed_with_report(self, self.repo_lock.acquire()): + return None + + return bl_extension_utils.CommandBatch( + title="Install Package Files", + batch=[ + partial( + bl_extension_utils.pkg_install_files, + directory=directory, + files=source_files, + use_idle=is_modal, + ) + ], + ) + + def exec_command_finish(self): + + # Refresh installed packages for repositories that were operated on. + from . import repo_cache_store + + # Re-generate JSON meta-data from TOML files (needed for offline repository). + repo_cache_store.refresh_remote_from_directory( + directory=self.repo_directory, + error_fn=self.error_fn_from_exception, + force=True, + ) + + # Unlock repositories. 
+ lock_result_any_failed_with_report(self, self.repo_lock.release(), report_type='WARNING') + del self.repo_lock + + pkg_manifest_local = repo_cache_store.refresh_local_from_directory( + directory=self.repo_directory, + error_fn=self.error_fn_from_exception, + ) + + _extensions_repo_sync_wheels(repo_cache_store) + + # TODO: it would be nice to include this message in the banner. + + def handle_error(ex): + self.report({'ERROR'}, str(ex)) + + _preferences_ensure_enabled_all( + addon_restore=self._addon_restore, + handle_error=handle_error, + ) + _preferences_theme_state_restore(self._theme_restore) + + if self._addon_restore: + pkg_id_sequence_upgrade = self._addon_restore[0][1] + else: + pkg_id_sequence_upgrade = [] + + if self.enable_on_install: + _preferences_install_post_enable_on_install( + directory=self.repo_directory, + pkg_manifest_local=pkg_manifest_local, + pkg_id_sequence=self.pkg_id_sequence, + pkg_id_sequence_upgrade=pkg_id_sequence_upgrade, + handle_error=handle_error, + ) + + _preferences_ui_redraw() + _preferences_ui_refresh_addons() + + @classmethod + def poll(cls, context): + if next(repo_iter_valid_local_only(context), None) is None: + cls.poll_message_set("There must be at least one \"Local\" repository set to install extensions into") + return False + return True + + def invoke(self, context, event): + if self.properties.is_property_set("url"): + return self._invoke_for_drop(context, event) + + # Ensure the value is marked as set (else an error is reported). + self.repo = self.repo + + context.window_manager.fileselect_add(self) + return {'RUNNING_MODAL'} + + def draw(self, context): + if self._drop_variables is not None: + return self._draw_for_drop(context) + + # Override draw because the repository names may be over-long and not fit well in the UI. + # Show the text & repository names in two separate rows. + layout = self.layout + col = layout.column() + col.label(text="Local Repository:") + col.prop(self, "repo", text="") + + layout.prop(self, "enable_on_install") + + def _invoke_for_drop(self, context, event): + self._drop_variables = True + # Drop logic. + url = self.url + print("DROP FILE:", url) + + from .bl_extension_ops import repo_iter_valid_local_only + from .bl_extension_utils import pkg_manifest_dict_from_file_or_error + + if not list(repo_iter_valid_local_only(bpy.context)): + self.report({'ERROR'}, "No Local Repositories") + return {'CANCELLED'} + + if isinstance(result := pkg_manifest_dict_from_file_or_error(url), str): + self.report({'ERROR'}, "Error in manifest {:s}".format(result)) + return {'CANCELLED'} + + pkg_id = result["id"] + pkg_type = result["type"] + del result + + self._drop_variables = pkg_id, pkg_type + + # Set to it's self to the property is considered "set". 
+ self.repo = self.repo + self.filepath = url + + wm = context.window_manager + wm.invoke_props_dialog(self) + + return {'RUNNING_MODAL'} + + def _draw_for_drop(self, context): + + layout = self.layout + layout.operator_context = 'EXEC_DEFAULT' + + pkg_id, pkg_type = self._drop_variables + + layout.label(text="Local Repository") + layout.prop(self, "repo", text="") + + layout.prop(self, "enable_on_install", text=rna_prop_enable_on_install_type_map[pkg_type]) + + +class BlPkgPkgInstall(Operator, _BlPkgCmdMixIn): + bl_idname = "bl_pkg.pkg_install" + bl_label = "Install Extension" + __slots__ = _BlPkgCmdMixIn.cls_slots + + _drop_variables = None + + repo_directory: rna_prop_directory + repo_index: rna_prop_repo_index + + pkg_id: rna_prop_pkg_id + + enable_on_install: rna_prop_enable_on_install + + # Only used for code-path for dropping an extension. + url: rna_prop_url + + def exec_command_iter(self, is_modal): + self._addon_restore = [] + self._theme_restore = _preferences_theme_state_create() + + directory = _repo_dir_and_index_get(self.repo_index, self.repo_directory, self.report) + if not directory: + return None + self.repo_directory = directory + + if (repo_item := _extensions_repo_from_directory_and_report(directory, self.report)) is None: + return None + + if not (pkg_id := self.pkg_id): + self.report({'ERROR'}, "Package ID not set") + return None + + # Detect upgrade. + from . import repo_cache_store + pkg_manifest_local = repo_cache_store.refresh_local_from_directory( + directory=self.repo_directory, + error_fn=self.error_fn_from_exception, + ) + is_installed = pkg_manifest_local is not None and (pkg_id in pkg_manifest_local) + del repo_cache_store, pkg_manifest_local + + if is_installed: + pkg_id_sequence = (pkg_id,) + result, errors = _preferences_ensure_disabled( + repo_item=repo_item, + pkg_id_sequence=pkg_id_sequence, + default_set=False, + ) + self._addon_restore.append((repo_item, pkg_id_sequence, result)) + del pkg_id_sequence + + # Lock repositories. + self.repo_lock = RepoLock(repo_directories=[repo_item.directory], cookie=cookie_from_session()) + if lock_result_any_failed_with_report(self, self.repo_lock.acquire()): + return None + + return bl_extension_utils.CommandBatch( + title="Install Package", + batch=[ + partial( + bl_extension_utils.pkg_install, + directory=directory, + remote_url=repo_item.remote_url, + pkg_id_sequence=(pkg_id,), + online_user_agent=online_user_agent_from_blender(), + use_cache=repo_item.use_cache, + use_idle=is_modal, + ) + ], + ) + + def exec_command_finish(self): + + # Unlock repositories. + lock_result_any_failed_with_report(self, self.repo_lock.release(), report_type='WARNING') + del self.repo_lock + + # Refresh installed packages for repositories that were operated on. + from . import repo_cache_store + pkg_manifest_local = repo_cache_store.refresh_local_from_directory( + directory=self.repo_directory, + error_fn=self.error_fn_from_exception, + ) + + _extensions_repo_sync_wheels(repo_cache_store) + + # TODO: it would be nice to include this message in the banner. 
+ def handle_error(ex): + self.report({'ERROR'}, str(ex)) + + _preferences_ensure_enabled_all( + addon_restore=self._addon_restore, + handle_error=handle_error, + ) + _preferences_theme_state_restore(self._theme_restore) + + if self._addon_restore: + pkg_id_sequence_upgrade = self._addon_restore[0][1] + else: + pkg_id_sequence_upgrade = [] + + if self.enable_on_install: + _preferences_install_post_enable_on_install( + directory=self.repo_directory, + pkg_manifest_local=pkg_manifest_local, + pkg_id_sequence=(self.pkg_id,), + pkg_id_sequence_upgrade=pkg_id_sequence_upgrade, + handle_error=handle_error, + ) + + _preferences_ui_redraw() + _preferences_ui_refresh_addons() + + def invoke(self, context, event): + # Only for drop logic! + if self.properties.is_property_set("url"): + return self._invoke_for_drop(context, event) + + return self.execute(context) + + def _invoke_for_drop(self, context, event): + url = self.url + print("DROP URL:", url) + + _preferences_ensure_sync() + + repo_index, repo_name, pkg_id, item_remote, item_local = extension_url_find_repo_index_and_pkg_id(url) + + if repo_index == -1: + self.report({'ERROR'}, "Extension: URL not found in remote repositories!\n{:s}".format(url)) + return {'CANCELLED'} + + if item_local is not None: + self.report({'ERROR'}, "Extension: \"{:s}\" Already installed!".format(pkg_id)) + return {'CANCELLED'} + + self._drop_variables = repo_index, repo_name, pkg_id, item_remote + + self.repo_index = repo_index + self.pkg_id = pkg_id + + wm = context.window_manager + wm.invoke_props_dialog(self) + return {'RUNNING_MODAL'} + + def draw(self, context): + if self._drop_variables is not None: + return self._draw_for_drop(context) + + def _draw_for_drop(self, context): + from .bl_extension_ui import ( + size_as_fmt_string, + ) + layout = self.layout + + repo_index, repo_name, pkg_id, item_remote = self._drop_variables + + layout.label(text="Do you want to install the following {:s}?".format(item_remote["type"])) + + col = layout.column(align=True) + col.label(text="Name: {:s}".format(item_remote["name"])) + col.label(text="Repository: {:s}".format(repo_name)) + col.label(text="Size: {:s}".format(size_as_fmt_string(item_remote["archive_size"], precision=0))) + del col + + layout.separator() + + layout.prop(self, "enable_on_install", text=rna_prop_enable_on_install_type_map[item_remote["type"]]) + + +class BlPkgPkgUninstall(Operator, _BlPkgCmdMixIn): + bl_idname = "bl_pkg.pkg_uninstall" + bl_label = "Ext Package Uninstall" + __slots__ = _BlPkgCmdMixIn.cls_slots + + repo_directory: rna_prop_directory + repo_index: rna_prop_repo_index + + pkg_id: rna_prop_pkg_id + + def exec_command_iter(self, is_modal): + + self._theme_restore = _preferences_theme_state_create() + + directory = _repo_dir_and_index_get(self.repo_index, self.repo_directory, self.report) + if not directory: + return None + self.repo_directory = directory + + if (repo_item := _extensions_repo_from_directory_and_report(directory, self.report)) is None: + return None + + if not (pkg_id := self.pkg_id): + self.report({'ERROR'}, "Package ID not set") + return None + + _, errors = _preferences_ensure_disabled( + repo_item=repo_item, + pkg_id_sequence=(pkg_id,), + default_set=True, + ) + + # Lock repositories. 
+        self.repo_lock = RepoLock(repo_directories=[repo_item.directory], cookie=cookie_from_session())
+        if lock_result_any_failed_with_report(self, self.repo_lock.acquire()):
+            return None
+
+        return bl_extension_utils.CommandBatch(
+            title="Uninstall Package",
+            batch=[
+                partial(
+                    bl_extension_utils.pkg_uninstall,
+                    directory=directory,
+                    pkg_id_sequence=(pkg_id, ),
+                    use_idle=is_modal,
+                ),
+            ],
+        )
+
+    def exec_command_finish(self):
+
+        # Refresh installed packages for repositories that were operated on.
+        from . import repo_cache_store
+
+        repo_item = _extensions_repo_from_directory(self.repo_directory)
+        if repo_item.remote_url == "":
+            # Re-generate JSON meta-data from TOML files (needed for offline repository).
+            # NOTE: This could be slow with many local extensions,
+            # we could simply remove the package that was uninstalled.
+            repo_cache_store.refresh_remote_from_directory(
+                directory=self.repo_directory,
+                error_fn=self.error_fn_from_exception,
+                force=True,
+            )
+        del repo_item
+
+        # Unlock repositories.
+        lock_result_any_failed_with_report(self, self.repo_lock.release(), report_type='WARNING')
+        del self.repo_lock
+
+        repo_cache_store.refresh_local_from_directory(
+            directory=self.repo_directory,
+            error_fn=self.error_fn_from_exception,
+        )
+
+        _extensions_repo_sync_wheels(repo_cache_store)
+
+        _preferences_theme_state_restore(self._theme_restore)
+
+        _preferences_ui_redraw()
+        _preferences_ui_refresh_addons()
+
+
+class BlPkgPkgDisable_TODO(Operator):
+    """Turn off this extension"""
+    bl_idname = "bl_pkg.extension_disable"
+    bl_label = "Disable extension"
+
+    def execute(self, _context):
+        self.report({'WARNING'}, "Disabling themes is not yet supported")
+        return {'CANCELLED'}
+
+
+class BlPkgPkgThemeEnable(Operator):
+    """Turn on this theme"""
+    bl_idname = "bl_pkg.extension_theme_enable"
+    bl_label = "Enable theme extension"
+
+    pkg_id: rna_prop_pkg_id
+    repo_index: rna_prop_repo_index
+
+    def execute(self, context):
+        repo_item = extension_repos_read_index(self.repo_index)
+        extension_theme_enable(repo_item.directory, self.pkg_id)
+        print(repo_item.directory, self.pkg_id)
+        return {'FINISHED'}
+
+
+class BlPkgPkgThemeDisable(Operator):
+    """Turn off this theme"""
+    bl_idname = "bl_pkg.extension_theme_disable"
+    bl_label = "Disable theme extension"
+
+    pkg_id: rna_prop_pkg_id
+    repo_index: rna_prop_repo_index
+
+    def execute(self, context):
+        import os
+        repo_item = extension_repos_read_index(self.repo_index)
+        dirpath = os.path.join(repo_item.directory, self.pkg_id)
+        if os.path.samefile(dirpath, os.path.dirname(context.preferences.themes[0].filepath)):
+            bpy.ops.preferences.reset_default_theme()
+        return {'FINISHED'}
+
+
+# -----------------------------------------------------------------------------
+# Non Wrapped Actions
+#
+# These actions don't wrap command line access.
+#
+# NOTE: create/destroy might not be best names.
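+#
+# For example, the mark & show operators below are plain operators (they don't spawn the
+# command line tool), so they can be called from scripts as well as from the UI. A minimal
+# sketch, assuming at least one repository is configured (the package ID and index are
+# placeholder values):
+#
+#     bpy.ops.bl_pkg.pkg_mark_set(pkg_id="example_package", repo_index=0)
+#     bpy.ops.bl_pkg.pkg_mark_clear(pkg_id="example_package", repo_index=0)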
+ + +class BlPkgDisplayErrorsClear(Operator): + bl_idname = "bl_pkg.pkg_display_errors_clear" + bl_label = "Clear Status" + + def execute(self, _context): + from .bl_extension_ui import display_errors + display_errors.clear() + _preferences_ui_redraw() + return {'FINISHED'} + + +class BlPkgStatusClear(Operator): + bl_idname = "bl_pkg.pkg_status_clear" + bl_label = "Clear Status" + + def execute(self, _context): + repo_status_text.running = False + repo_status_text.log.clear() + _preferences_ui_redraw() + return {'FINISHED'} + + +class BlPkgPkgMarkSet(Operator): + bl_idname = "bl_pkg.pkg_mark_set" + bl_label = "Mark Package" + + pkg_id: rna_prop_pkg_id + repo_index: rna_prop_repo_index + + def execute(self, _context): + key = (self.pkg_id, self.repo_index) + blender_extension_mark.add(key) + _preferences_ui_redraw() + return {'FINISHED'} + + +class BlPkgPkgMarkClear(Operator): + bl_idname = "bl_pkg.pkg_mark_clear" + bl_label = "Mark Package" + + pkg_id: rna_prop_pkg_id + repo_index: rna_prop_repo_index + + def execute(self, _context): + key = (self.pkg_id, self.repo_index) + blender_extension_mark.discard(key) + _preferences_ui_redraw() + return {'FINISHED'} + + +class BlPkgPkgShowSet(Operator): + bl_idname = "bl_pkg.pkg_show_set" + bl_label = "Show Package Set" + + pkg_id: rna_prop_pkg_id + repo_index: rna_prop_repo_index + + def execute(self, _context): + key = (self.pkg_id, self.repo_index) + blender_extension_show.add(key) + _preferences_ui_redraw() + return {'FINISHED'} + + +class BlPkgPkgShowClear(Operator): + bl_idname = "bl_pkg.pkg_show_clear" + bl_label = "Show Package Clear" + + pkg_id: rna_prop_pkg_id + repo_index: rna_prop_repo_index + + def execute(self, _context): + key = (self.pkg_id, self.repo_index) + blender_extension_show.discard(key) + _preferences_ui_redraw() + return {'FINISHED'} + + +class BlPkgPkgShowSettings(Operator): + bl_idname = "bl_pkg.pkg_show_settings" + bl_label = "Show Settings" + + pkg_id: rna_prop_pkg_id + repo_index: rna_prop_repo_index + + def execute(self, _context): + repo_item = extension_repos_read_index(self.repo_index) + bpy.ops.preferences.addon_show(module="bl_ext.{:s}.{:s}".format(repo_item.module, self.pkg_id)) + return {'FINISHED'} + + +# ----------------------------------------------------------------------------- +# Testing Operators +# + + +class BlPkgObsoleteMarked(Operator): + """Zeroes package versions, useful for development - to test upgrading""" + bl_idname = "bl_pkg.obsolete_marked" + bl_label = "Obsolete Marked" + + def execute(self, _context): + from . 
import ( + repo_cache_store, + ) + + repos_all = extension_repos_read() + pkg_manifest_local_all = list(repo_cache_store.pkg_manifest_from_local_ensure(error_fn=print)) + repo_pkg_map = _pkg_marked_by_repo(pkg_manifest_local_all) + found = False + + repos_lock = [repos_all[repo_index].directory for repo_index in sorted(repo_pkg_map.keys())] + + with RepoLockContext(repo_directories=repos_lock, cookie=cookie_from_session()) as lock_result: + if lock_result_any_failed_with_report(self, lock_result): + return {'CANCELLED'} + + directories_update = set() + + for repo_index, pkg_id_sequence in sorted(repo_pkg_map.items()): + repo_item = repos_all[repo_index] + pkg_manifest_local = repo_cache_store.refresh_local_from_directory( + repo_item.directory, + error_fn=print, + ) + found_for_repo = False + for pkg_id in pkg_id_sequence: + is_installed = pkg_id in pkg_manifest_local + if not is_installed: + continue + + bl_extension_utils.pkg_make_obsolete_for_testing(repo_item.directory, pkg_id) + found = True + found_for_repo = True + + if found_for_repo: + directories_update.add(repo_item.directory) + + if not found: + self.report({'ERROR'}, "No installed packages marked") + return {'CANCELLED'} + + for directory in directories_update: + repo_cache_store.refresh_remote_from_directory( + directory=directory, + error_fn=print, + force=True, + ) + repo_cache_store.refresh_local_from_directory( + directory=directory, + error_fn=print, + ) + _preferences_ui_redraw() + + return {'FINISHED'} + + +class BlPkgRepoLock(Operator): + """Lock repositories - to test locking""" + bl_idname = "bl_pkg.repo_lock" + bl_label = "Lock Repository (Testing)" + + lock = None + + def execute(self, _context): + repos_all = extension_repos_read() + repos_lock = [repo_item.directory for repo_item in repos_all] + + lock_handle = RepoLock(repo_directories=repos_lock, cookie=cookie_from_session()) + lock_result = lock_handle.acquire() + if lock_result_any_failed_with_report(self, lock_result): + # At least one lock failed, unlock all and return. + lock_handle.release() + return {'CANCELLED'} + + self.report({'INFO'}, "Locked {:d} repos(s)".format(len(lock_result))) + BlPkgRepoLock.lock = lock_handle + return {'FINISHED'} + + +class BlPkgRepoUnlock(Operator): + """Unlock repositories - to test unlocking""" + bl_idname = "bl_pkg.repo_unlock" + bl_label = "Unlock Repository (Testing)" + + def execute(self, _context): + lock_handle = BlPkgRepoLock.lock + if lock_handle is None: + self.report({'ERROR'}, "Lock not held!") + return {'CANCELLED'} + + lock_result = lock_handle.release() + + BlPkgRepoLock.lock = None + + if lock_result_any_failed_with_report(self, lock_result): + # This isn't canceled, but there were issues unlocking. + return {'FINISHED'} + + self.report({'INFO'}, "Unlocked {:d} repos(s)".format(len(lock_result))) + return {'FINISHED'} + + +# NOTE: this is a modified version of `PREFERENCES_OT_addon_show`. +# It would make most sense to extend this operator to support showing extensions to upgrade (eventually). +class BlPkgShowUpgrade(Operator): + """Show add-on preferences""" + bl_idname = "bl_pkg.extensions_show_for_update" + bl_label = "" + bl_options = {'INTERNAL'} + + def execute(self, context): + wm = context.window_manager + prefs = context.preferences + + prefs.active_section = 'ADDONS' + prefs.view.show_addons_enabled_only = False + + # Show only extensions that will be updated. 
+ wm.extension_installed_only = False + wm.extension_updates_only = True + + bpy.ops.screen.userpref_show('INVOKE_DEFAULT') + + return {'FINISHED'} + + +class BlPkgOnlineAccess(Operator): + """Handle online access""" + bl_idname = "bl_pkg.extension_online_access" + bl_label = "" + bl_options = {'INTERNAL'} + + enable: BoolProperty( + name="Enable", + default=False, + ) + + def execute(self, context): + prefs = context.preferences + + remote_url = "https://extensions.blender.org/api/v1/extensions" + + if self.enable: + extension_repos = prefs.extensions.repos + repo_found = None + for repo in extension_repos: + if repo.remote_url == remote_url: + repo_found = repo + break + if repo_found: + repo_found.enabled = True + else: + # While not expected, we want to know if this ever occurs, don't fail silently. + self.report({'WARNING'}, "Repository \"{:s}\" not found!".format(remote_url)) + + prefs.extensions.use_online_access_handled = True + + return {'FINISHED'} + + +class BlPkgEnableNotInstalled(Operator): + """Turn on this extension""" + bl_idname = "bl_pkg.extensions_enable_not_installed" + bl_label = "Enable Extension" + + @classmethod + def poll(cls, context): + cls.poll_message_set("Extension needs to be installed before it can be enabled") + return False + + def execute(self, context): + # This operator only exists to be able to show disabled check-boxes for extensions + # while giving users a reasonable explanation on why is that. + return {'CANCELLED'} + + +# ----------------------------------------------------------------------------- +# Register +# +classes = ( + BlPkgRepoSync, + BlPkgRepoSyncAll, + + BlPkgPkgInstallFiles, + BlPkgPkgInstall, + BlPkgPkgUninstall, + BlPkgPkgDisable_TODO, + + BlPkgPkgThemeEnable, + BlPkgPkgThemeDisable, + + BlPkgPkgUpgradeAll, + BlPkgPkgInstallMarked, + BlPkgPkgUninstallMarked, + + # UI only operator (to select a package). + BlPkgDisplayErrorsClear, + BlPkgStatusClear, + BlPkgPkgShowSet, + BlPkgPkgShowClear, + BlPkgPkgMarkSet, + BlPkgPkgMarkClear, + BlPkgPkgShowSettings, + + BlPkgObsoleteMarked, + BlPkgRepoLock, + BlPkgRepoUnlock, + + BlPkgShowUpgrade, + BlPkgOnlineAccess, + + # Dummy, just shows a message. + BlPkgEnableNotInstalled, + + # Dummy commands (for testing). + BlPkgDummyProgress, +) + + +def register(): + for cls in classes: + bpy.utils.register_class(cls) + + +def unregister(): + for cls in classes: + bpy.utils.unregister_class(cls) + + +if __name__ == "__main__": + register() diff --git a/scripts/addons_core/bl_pkg/bl_extension_ui.py b/scripts/addons_core/bl_pkg/bl_extension_ui.py new file mode 100644 index 00000000000..9b8478066ee --- /dev/null +++ b/scripts/addons_core/bl_pkg/bl_extension_ui.py @@ -0,0 +1,807 @@ +# SPDX-FileCopyrightText: 2023 Blender Foundation +# +# SPDX-License-Identifier: GPL-2.0-or-later + +""" +GUI (WARNING) this is a hack! +Written to allow a UI without modifying Blender. +""" + +__all__ = ( + "display_errors", + "register", + "unregister", +) + +import bpy + +from bpy.types import ( + Menu, + Panel, +) + +from bl_ui.space_userpref import ( + USERPREF_PT_addons, +) + +from . 
import repo_status_text + + +# ----------------------------------------------------------------------------- +# Generic Utilities + + +def size_as_fmt_string(num: float, *, precision: int = 1) -> str: + for unit in ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB"): + if abs(num) < 1024.0: + return "{:3.{:d}f}{:s}".format(num, precision, unit) + num /= 1024.0 + unit = "yb" + return "{:.{:d}f}{:s}".format(num, precision, unit) + + +def sizes_as_percentage_string(size_partial: int, size_final: int) -> str: + if size_final == 0: + percent = 0.0 + else: + size_partial = min(size_partial, size_final) + percent = size_partial / size_final + + return "{:-6.2f}%".format(percent * 100) + + +def license_info_to_text(license_list): + # See: https://spdx.org/licenses/ + # - Note that we could include all, for now only common, GPL compatible licenses. + # - Note that many of the human descriptions are not especially more humanly readable + # than the short versions, so it's questionable if we should attempt to add all of these. + _spdx_id_to_text = { + "GPL-2.0-only": "GNU General Public License v2.0 only", + "GPL-2.0-or-later": "GNU General Public License v2.0 or later", + "GPL-3.0-only": "GNU General Public License v3.0 only", + "GPL-3.0-or-later": "GNU General Public License v3.0 or later", + } + result = [] + for item in license_list: + if item.startswith("SPDX:"): + item = item[5:] + item = _spdx_id_to_text.get(item, item) + result.append(item) + return ", ".join(result) + + +def pkg_repo_and_id_from_theme_path(repos_all, filepath): + import os + if not filepath: + return None + + # Strip the `theme.xml` filename. + dirpath = os.path.dirname(filepath) + repo_directory, pkg_id = os.path.split(dirpath) + for repo_index, repo in enumerate(repos_all): + if not os.path.samefile(repo_directory, repo.directory): + continue + return repo_index, pkg_id + return None + + +# ----------------------------------------------------------------------------- +# Extensions UI (Legacy) + +def extensions_panel_draw_legacy_addons( + layout, + context, + *, + search_lower, + enabled_only, + installed_only, + used_addon_module_name_map, +): + # NOTE: this duplicates logic from `USERPREF_PT_addons` eventually this logic should be used instead. + # Don't de-duplicate the logic as this is a temporary state - as long as extensions remains experimental. + import addon_utils + from bpy.app.translations import ( + pgettext_iface as iface_, + ) + from .bl_extension_ops import ( + pkg_info_check_exclude_filter_ex, + ) + + addons = [ + (mod, addon_utils.module_bl_info(mod)) + for mod in addon_utils.modules(refresh=False) + ] + + # Initialized on demand. 
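+    # (used by `USERPREF_PT_addons.is_user_addon` when drawing expanded add-ons).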
+ user_addon_paths = [] + + for mod, bl_info in addons: + module_name = mod.__name__ + is_extension = addon_utils.check_extension(module_name) + if is_extension: + continue + + if search_lower and ( + not pkg_info_check_exclude_filter_ex( + bl_info["name"], + bl_info["description"], + search_lower, + ) + ): + continue + + is_enabled = module_name in used_addon_module_name_map + if enabled_only and (not is_enabled): + continue + + col_box = layout.column() + box = col_box.box() + colsub = box.column() + row = colsub.row(align=True) + + row.operator( + "preferences.addon_expand", + icon='DISCLOSURE_TRI_DOWN' if bl_info["show_expanded"] else 'DISCLOSURE_TRI_RIGHT', + emboss=False, + ).module = module_name + + row.operator( + "preferences.addon_disable" if is_enabled else "preferences.addon_enable", + icon='CHECKBOX_HLT' if is_enabled else 'CHECKBOX_DEHLT', text="", + emboss=False, + ).module = module_name + + sub = row.row() + sub.active = is_enabled + sub.label(text="Legacy: " + bl_info["name"]) + + if bl_info["warning"]: + sub.label(icon='ERROR') + + row_right = row.row() + row_right.alignment = 'RIGHT' + + row_right.label(text="Installed ") + row_right.active = False + + if bl_info["show_expanded"]: + split = box.split(factor=0.15) + col_a = split.column() + col_b = split.column() + if value := bl_info["description"]: + col_a.label(text="Description:") + col_b.label(text=iface_(value)) + + col_a.label(text="File:") + col_b.label(text=mod.__file__, translate=False) + + if value := bl_info["author"]: + col_a.label(text="Author:") + col_b.label(text=value.split("<", 1)[0].rstrip(), translate=False) + if value := bl_info["version"]: + col_a.label(text="Version:") + col_b.label(text=".".join(str(x) for x in value), translate=False) + if value := bl_info["warning"]: + col_a.label(text="Warning:") + col_b.label(text=" " + iface_(value), icon='ERROR') + del value + + # Include for consistency. + col_a.label(text="Type:") + col_b.label(text="add-on") + + user_addon = USERPREF_PT_addons.is_user_addon(mod, user_addon_paths) + + if bl_info["doc_url"] or bl_info.get("tracker_url"): + split = box.row().split(factor=0.15) + split.label(text="Internet:") + sub = split.row() + if bl_info["doc_url"]: + sub.operator( + "wm.url_open", text="Documentation", icon='HELP', + ).url = bl_info["doc_url"] + # Only add "Report a Bug" button if tracker_url is set + # or the add-on is bundled (use official tracker then). + if bl_info.get("tracker_url"): + sub.operator( + "wm.url_open", text="Report a Bug", icon='URL', + ).url = bl_info["tracker_url"] + elif not user_addon: + addon_info = ( + "Name: %s %s\n" + "Author: %s\n" + ) % (bl_info["name"], str(bl_info["version"]), bl_info["author"]) + props = sub.operator( + "wm.url_open_preset", text="Report a Bug", icon='URL', + ) + props.type = 'BUG_ADDON' + props.id = addon_info + + if user_addon: + rowsub = col_b.row() + rowsub.alignment = 'RIGHT' + rowsub.operator( + "preferences.addon_remove", text="Uninstall", icon='CANCEL', + ).module = module_name + + if is_enabled: + if (addon_preferences := used_addon_module_name_map[module_name].preferences) is not None: + USERPREF_PT_addons.draw_addon_preferences(layout, context, addon_preferences) + + +# ----------------------------------------------------------------------------- +# Extensions UI + +class display_errors: + """ + This singleton class is used to store errors which are generated while drawing, + note that these errors are reasonably obscure, examples are: + - Failure to parse the repository JSON file. 
+ - Failure to access the file-system for reading where the repository is stored. + + The current and previous state are compared, when they match no drawing is done, + this allows the current display errors to be dismissed. + """ + errors_prev = [] + errors_curr = [] + + @staticmethod + def clear(): + display_errors.errors_prev = display_errors.errors_curr + + @staticmethod + def draw(layout): + if display_errors.errors_curr == display_errors.errors_prev: + return + box_header = layout.box() + # Don't clip longer names. + row = box_header.split(factor=0.9) + row.label(text="Repository Access Errors:", icon='ERROR') + rowsub = row.row(align=True) + rowsub.alignment = 'RIGHT' + rowsub.operator("bl_pkg.pkg_display_errors_clear", text="", icon='X', emboss=False) + + box_contents = box_header.box() + for err in display_errors.errors_curr: + box_contents.label(text=err) + + +def extensions_panel_draw_online_extensions_request_impl( + self, + context, +): + layout = self.layout + layout_header, layout_panel = layout.panel("advanced", default_closed=False) + layout_header.label(text="Online Extensions") + if layout_panel is not None: + # Text wrapping isn't supported, manually wrap. + for line in ( + "Welcome! Access community-made add-ons and themes from the", + "extensions.blender.org repository.", + "", + "This requires internet access.", + ): + layout_panel.label(text=line) + + row = layout.row() + row.operator("bl_pkg.extension_online_access", text="Dismiss", icon='X').enable = False + row.operator("bl_pkg.extension_online_access", text="Enable Repository", icon='CHECKMARK').enable = True + + +def extensions_panel_draw_impl( + self, + context, + search_lower, + filter_by_type, + enabled_only, + updates_only, + installed_only, + show_legacy_addons, + show_development, +): + """ + Show all the items... we may want to paginate at some point. + """ + import os + from .bl_extension_ops import ( + blender_extension_mark, + blender_extension_show, + extension_repos_read, + pkg_info_check_exclude_filter, + repo_cache_store_refresh_from_prefs, + ) + + from . import repo_cache_store + + # This isn't elegant, but the preferences aren't available on registration. + if not repo_cache_store.is_init(): + repo_cache_store_refresh_from_prefs() + + layout = self.layout + + prefs = context.preferences + + if updates_only: + installed_only = True + show_legacy_addons = False + + # Define a top-most column to place warnings (if-any). + # Needed so the warnings aren't mixed in with other content. + layout_topmost = layout.column() + + repos_all = extension_repos_read() + + # To access enabled add-ons. + show_addons = filter_by_type in {"", "add-on"} + show_themes = filter_by_type in {"", "theme"} + if show_addons: + used_addon_module_name_map = {addon.module: addon for addon in prefs.addons} + if show_themes: + active_theme_info = pkg_repo_and_id_from_theme_path(repos_all, prefs.themes[0].filepath) + + # Collect exceptions accessing repositories, and optionally show them. 
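+    # Each entry is a message string, handed to `display_errors` once drawing is done
+    # so it can be shown in the dismissible "Repository Access Errors" box.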
+    errors_on_draw = []
+
+    remote_ex = None
+    local_ex = None
+
+    def error_fn_remote(ex):
+        nonlocal remote_ex
+        remote_ex = ex
+
+    def error_fn_local(ex):
+        nonlocal local_ex
+        local_ex = ex
+
+    for repo_index, (
+            pkg_manifest_remote,
+            pkg_manifest_local,
+    ) in enumerate(zip(
+        repo_cache_store.pkg_manifest_from_remote_ensure(error_fn=error_fn_remote),
+        repo_cache_store.pkg_manifest_from_local_ensure(error_fn=error_fn_local),
+    )):
+        # Show any exceptions created while accessing the JSON,
+        # if the JSON has an IO error while being read or if the directory doesn't exist.
+        # In general users should _not_ see these kinds of errors, however we cannot prevent
+        # IO errors in general and it is better to show a warning than to ignore the error entirely
+        # or cause a trace-back which breaks the UI.
+        if (remote_ex is not None) or (local_ex is not None):
+            repo = repos_all[repo_index]
+            # NOTE: `FileNotFoundError` occurs when a repository has been added but has not yet updated from its remote.
+            # We may want a way for users to know a repository is missing from the view and they need to run update
+            # to access its extensions.
+            if remote_ex is not None:
+                if isinstance(remote_ex, FileNotFoundError) and (remote_ex.filename == repo.directory):
+                    pass
+                else:
+                    errors_on_draw.append("Remote of \"{:s}\": {:s}".format(repo.name, str(remote_ex)))
+                remote_ex = None
+
+            if local_ex is not None:
+                if isinstance(local_ex, FileNotFoundError) and (local_ex.filename == repo.directory):
+                    pass
+                else:
+                    errors_on_draw.append("Local of \"{:s}\": {:s}".format(repo.name, str(local_ex)))
+                local_ex = None
+            continue
+
+        if pkg_manifest_remote is None:
+            repo = repos_all[repo_index]
+            has_remote = (repo.remote_url != "")
+            if has_remote:
+                # NOTE: it would be nice to detect when the repository ran sync and it failed.
+                # This isn't such an important distinction though, the main thing users should be aware of
+                # is that a "sync" is required.
+                errors_on_draw.append("Repository: \"{:s}\" must sync with the remote repository.".format(repo.name))
+            del repo
+            continue
+        else:
+            repo = repos_all[repo_index]
+            has_remote = (repo.remote_url != "")
+            del repo
+
+        for pkg_id, item_remote in pkg_manifest_remote.items():
+            if filter_by_type and (filter_by_type != item_remote["type"]):
+                continue
+            if search_lower and (not pkg_info_check_exclude_filter(item_remote, search_lower)):
+                continue
+
+            item_local = pkg_manifest_local.get(pkg_id)
+            is_installed = item_local is not None
+
+            if installed_only and (is_installed == 0):
+                continue
+
+            is_addon = False
+            is_theme = False
+            match item_remote["type"]:
+                case "add-on":
+                    is_addon = True
+                case "theme":
+                    is_theme = True
+
+            if is_addon:
+                if is_installed:
+                    # Currently we only need to know the module name once installed.
+                    addon_module_name = "bl_ext.{:s}.{:s}".format(repos_all[repo_index].module, pkg_id)
+                    is_enabled = addon_module_name in used_addon_module_name_map
+
+                else:
+                    is_enabled = False
+                    addon_module_name = None
+            elif is_theme:
+                is_enabled = (repo_index, pkg_id) == active_theme_info
+                addon_module_name = None
+            else:
+                # TODO: ability to disable.
+ is_enabled = is_installed + addon_module_name = None + + if enabled_only and (not is_enabled): + continue + + item_version = item_remote["version"] + if item_local is None: + item_local_version = None + is_outdated = False + else: + item_local_version = item_local["version"] + is_outdated = item_local_version != item_version + + if updates_only: + if not is_outdated: + continue + + key = (pkg_id, repo_index) + if show_development: + mark = key in blender_extension_mark + show = key in blender_extension_show + del key + + box = layout.box() + + # Left align so the operator text isn't centered. + colsub = box.column() + row = colsub.row(align=True) + # row.label + if show: + props = row.operator("bl_pkg.pkg_show_clear", text="", icon='DISCLOSURE_TRI_DOWN', emboss=False) + else: + props = row.operator("bl_pkg.pkg_show_set", text="", icon='DISCLOSURE_TRI_RIGHT', emboss=False) + props.pkg_id = pkg_id + props.repo_index = repo_index + del props + + if is_installed: + if is_addon: + row.operator( + "preferences.addon_disable" if is_enabled else "preferences.addon_enable", + icon='CHECKBOX_HLT' if is_enabled else 'CHECKBOX_DEHLT', + text="", + emboss=False, + ).module = addon_module_name + elif is_theme: + props = row.operator( + "bl_pkg.extension_theme_disable" if is_enabled else "bl_pkg.extension_theme_enable", + icon='CHECKBOX_HLT' if is_enabled else 'CHECKBOX_DEHLT', + text="", + emboss=False, + ) + props.repo_index = repo_index + props.pkg_id = pkg_id + del props + else: + # Use a place-holder checkbox icon to avoid odd text alignment when mixing with installed add-ons. + # Non add-ons have no concept of "enabled" right now, use installed. + row.operator( + "bl_pkg.extension_disable", + text="", + icon='CHECKBOX_HLT', + emboss=False, + ) + else: + # Not installed, always placeholder. + row.operator("bl_pkg.extensions_enable_not_installed", text="", icon='CHECKBOX_DEHLT', emboss=False) + + if show_development: + if mark: + props = row.operator("bl_pkg.pkg_mark_clear", text="", icon='RADIOBUT_ON', emboss=False) + else: + props = row.operator("bl_pkg.pkg_mark_set", text="", icon='RADIOBUT_OFF', emboss=False) + props.pkg_id = pkg_id + props.repo_index = repo_index + del props + + sub = row.row() + sub.active = is_enabled + sub.label(text=item_remote["name"]) + del sub + + row_right = row.row() + row_right.alignment = 'RIGHT' + + if has_remote: + if is_installed: + # Include uninstall below. + if is_outdated: + props = row_right.operator("bl_pkg.pkg_install", text="Update") + props.repo_index = repo_index + props.pkg_id = pkg_id + del props + else: + # Right space for alignment with the button. + row_right.label(text="Installed ") + row_right.active = False + else: + props = row_right.operator("bl_pkg.pkg_install", text="Install") + props.repo_index = repo_index + props.pkg_id = pkg_id + del props + else: + # Right space for alignment with the button. + row_right.label(text="Installed ") + row_right.active = False + + if show: + split = box.split(factor=0.15) + col_a = split.column() + col_b = split.column() + + col_a.label(text="Description:") + # The full description may be multiple lines (not yet supported by Blender's UI). + col_b.label(text=item_remote["tagline"]) + + if is_installed: + col_a.label(text="Path:") + col_b.label(text=os.path.join(repos_all[repo_index].directory, pkg_id), translate=False) + + # Remove the maintainers email while it's not private, showing prominently + # could cause maintainers to get direct emails instead of issue tracking systems. 
+ col_a.label(text="Maintainer:") + col_b.label(text=item_remote["maintainer"].split("<", 1)[0].rstrip(), translate=False) + + col_a.label(text="License:") + col_b.label(text=license_info_to_text(item_remote["license"])) + + col_a.label(text="Version:") + if is_outdated: + col_b.label(text="{:s} ({:s} available)".format(item_local_version, item_version)) + else: + col_b.label(text=item_version) + + if has_remote: + col_a.label(text="Size:") + col_b.label(text=size_as_fmt_string(item_remote["archive_size"])) + + if not filter_by_type: + col_a.label(text="Type:") + col_b.label(text=item_remote["type"]) + + if len(repos_all) > 1: + col_a.label(text="Repository:") + col_b.label(text=repos_all[repo_index].name) + + if value := item_remote.get("website"): + col_a.label(text="Internet:") + # Use half size button, for legacy add-ons there are two, here there is one + # however one large button looks silly, so use a half size still. + col_b.split(factor=0.5).operator("wm.url_open", text="Website", icon='HELP').url = value + del value + + # Note that we could allow removing extensions from non-remote extension repos + # although this is destructive, so don't enable this right now. + if is_installed: + rowsub = col_b.row() + rowsub.alignment = 'RIGHT' + props = rowsub.operator("bl_pkg.pkg_uninstall", text="Uninstall") + props.repo_index = repo_index + props.pkg_id = pkg_id + del props, rowsub + + # Show addon user preferences. + if is_enabled and is_addon: + if (addon_preferences := used_addon_module_name_map[addon_module_name].preferences) is not None: + USERPREF_PT_addons.draw_addon_preferences(layout, context, addon_preferences) + + if show_addons and show_legacy_addons: + extensions_panel_draw_legacy_addons( + layout, + context, + search_lower=search_lower, + enabled_only=enabled_only, + installed_only=installed_only, + used_addon_module_name_map=used_addon_module_name_map, + ) + + # Finally show any errors in a single panel which can be dismissed. + display_errors.errors_curr = errors_on_draw + if errors_on_draw: + display_errors.draw(layout_topmost) + + +class USERPREF_PT_extensions_bl_pkg_filter(Panel): + bl_label = "Extensions Filter" + + bl_space_type = 'TOPBAR' # dummy. 
+    bl_region_type = 'HEADER'
+    bl_ui_units_x = 13
+
+    def draw(self, context):
+        layout = self.layout
+
+        wm = context.window_manager
+
+        col = layout.column(heading="Show Only")
+        col.use_property_split = True
+        col.prop(wm, "extension_enabled_only", text="Enabled Extensions")
+        col.prop(wm, "extension_updates_only", text="Updates Available")
+        sub = col.column()
+        sub.active = (not wm.extension_enabled_only) and (not wm.extension_updates_only)
+        sub.prop(wm, "extension_installed_only", text="Installed Extensions")
+
+        col = layout.column(heading="Show")
+        col.use_property_split = True
+        sub = col.column()
+        sub.active = (not wm.extension_updates_only)
+        sub.prop(wm, "extension_show_legacy_addons", text="Legacy Add-ons")
+
+
+class USERPREF_MT_extensions_bl_pkg_settings(Menu):
+    bl_label = "Extension Settings"
+
+    def draw(self, context):
+        layout = self.layout
+
+        prefs = context.preferences
+
+        addon_prefs = prefs.addons[__package__].preferences
+
+        layout.operator("bl_pkg.repo_sync_all", text="Check for Updates", icon='FILE_REFRESH')
+
+        layout.separator()
+
+        layout.operator("bl_pkg.pkg_upgrade_all", text="Install Available Updates", icon='IMPORT')
+        layout.operator("bl_pkg.pkg_install_files", text="Install from Disk")
+        layout.operator("preferences.addon_install", text="Install Legacy Add-on")
+
+        if prefs.experimental.use_extension_utils:
+            layout.separator()
+
+            layout.prop(addon_prefs, "show_development_reports")
+
+            layout.separator()
+
+            # We might want to expose this for all users; its purpose is to refresh
+            # after changes have been made to the repos outside of Blender.
+            # It's disputable if this is a common case.
+            layout.operator("preferences.addon_refresh", text="Refresh (file-system)", icon='FILE_REFRESH')
+            layout.separator()
+
+            layout.operator("bl_pkg.pkg_install_marked", text="Install Marked", icon='IMPORT')
+            layout.operator("bl_pkg.pkg_uninstall_marked", text="Uninstall Marked", icon='X')
+            layout.operator("bl_pkg.obsolete_marked")
+
+            layout.separator()
+
+            layout.operator("bl_pkg.repo_lock")
+            layout.operator("bl_pkg.repo_unlock")
+
+
+def extensions_panel_draw(panel, context):
+    prefs = context.preferences
+
+    if not prefs.experimental.use_extension_repos:
+        # Unexpected, the extensions feature is disabled but this add-on is still enabled.
+        # In this case don't show the UI as it is confusing.
+        return
+
+    from .bl_extension_ops import (
+        blender_filter_by_type_map,
+    )
+
+    addon_prefs = prefs.addons[__package__].preferences
+
+    show_development = prefs.experimental.use_extension_utils
+    show_development_reports = show_development and addon_prefs.show_development_reports
+
+    wm = context.window_manager
+    layout = panel.layout
+
+    row = layout.split(factor=0.5)
+    row_a = row.row()
+    row_a.prop(wm, "extension_search", text="", icon='VIEWZOOM')
+    row_b = row.row(align=True)
+    row_b.prop(wm, "extension_type", text="")
+    row_b.popover("USERPREF_PT_extensions_bl_pkg_filter", text="", icon='FILTER')
+
+    row_b.separator()
+    row_b.popover("USERPREF_PT_extensions_repos", text="Repositories")
+
+    row_b.separator()
+    row_b.menu("USERPREF_MT_extensions_bl_pkg_settings", text="", icon='DOWNARROW_HLT')
+    del row, row_a, row_b
+
+    if show_development_reports:
+        show_status = bool(repo_status_text.log)
+    else:
+        # Only show if running and there is progress to display.
+ show_status = bool(repo_status_text.log) and repo_status_text.running + if show_status: + show_status = False + for ty, msg in repo_status_text.log: + if ty == 'PROGRESS': + show_status = True + + if show_status: + box = layout.box() + # Don't clip longer names. + row = box.split(factor=0.9, align=True) + if repo_status_text.running: + row.label(text=repo_status_text.title + "...", icon='INFO') + else: + row.label(text=repo_status_text.title, icon='INFO') + if show_development_reports: + rowsub = row.row(align=True) + rowsub.alignment = 'RIGHT' + rowsub.operator("bl_pkg.pkg_status_clear", text="", icon='X', emboss=False) + boxsub = box.box() + for ty, msg in repo_status_text.log: + if ty == 'STATUS': + boxsub.label(text=msg) + elif ty == 'PROGRESS': + msg_str, progress_unit, progress, progress_range = msg + if progress <= progress_range: + boxsub.progress( + factor=progress / progress_range, + text="{:s}, {:s}".format( + sizes_as_percentage_string(progress, progress_range), + msg_str, + ), + ) + elif progress_unit == 'BYTE': + boxsub.progress(factor=0.0, text="{:s}, {:s}".format(msg_str, size_as_fmt_string(progress))) + else: + # We might want to support other types. + boxsub.progress(factor=0.0, text="{:s}, {:d}".format(msg_str, progress)) + else: + boxsub.label(text="{:s}: {:s}".format(ty, msg)) + + # Hide when running. + if repo_status_text.running: + return + + if not prefs.extensions.use_online_access_handled: + extensions_panel_draw_online_extensions_request_impl(panel, context) + + extensions_panel_draw_impl( + panel, + context, + wm.extension_search.lower(), + blender_filter_by_type_map[wm.extension_type], + wm.extension_enabled_only, + wm.extension_updates_only, + wm.extension_installed_only, + wm.extension_show_legacy_addons, + show_development, + ) + + +classes = ( + # Pop-overs. + USERPREF_PT_extensions_bl_pkg_filter, + USERPREF_MT_extensions_bl_pkg_settings, +) + + +def register(): + USERPREF_PT_addons.append(extensions_panel_draw) + + for cls in classes: + bpy.utils.register_class(cls) + + +def unregister(): + USERPREF_PT_addons.remove(extensions_panel_draw) + + for cls in reversed(classes): + bpy.utils.unregister_class(cls) diff --git a/scripts/addons_core/bl_pkg/bl_extension_utils.py b/scripts/addons_core/bl_pkg/bl_extension_utils.py new file mode 100644 index 00000000000..fbf9f3d992e --- /dev/null +++ b/scripts/addons_core/bl_pkg/bl_extension_utils.py @@ -0,0 +1,1305 @@ +# SPDX-FileCopyrightText: 2023 Blender Foundation +# +# SPDX-License-Identifier: GPL-2.0-or-later + +""" +Non-blocking access to package management. + +- No ``bpy`` module use. +""" + +__all__ = ( + # Public Repository Actions. + "repo_sync", + "repo_upgrade", + "repo_listing", + + # Public Package Actions. + "pkg_install_files", + "pkg_install", + "pkg_uninstall", + + "pkg_make_obsolete_for_testing", + + "dummy_progress", + + # Public Stand-Alone Utilities. + "pkg_theme_file_list", + "file_mtime_or_none", + + # Public API. + "json_from_filepath", + "toml_from_filepath", + "json_to_filepath", + + "pkg_manifest_dict_is_valid_or_error", + "pkg_manifest_dict_from_file_or_error", + "pkg_manifest_archive_url_abs_from_remote_url", + + "CommandBatch", + "RepoCacheStore", + + # Directory Lock. 
+ "RepoLock", + "RepoLockContext", +) + +import json +import os +import sys +import signal +import stat +import subprocess +import time +import tomllib + + +from typing import ( + Any, + Callable, + Generator, + IO, + List, + Optional, + Dict, + NamedTuple, + Sequence, + Set, + Tuple, + Union, +) + +BASE_DIR = os.path.abspath(os.path.dirname(__file__)) + +BLENDER_EXT_CMD = ( + # When run from within Blender, it will point to Blender's local Python binary. + sys.executable, + os.path.normpath(os.path.join(BASE_DIR, "cli", "blender_ext.py")), +) + +# This directory is in the local repository. +REPO_LOCAL_PRIVATE_DIR = ".blender_ext" +# Locate inside `REPO_LOCAL_PRIVATE_DIR`. +REPO_LOCAL_PRIVATE_LOCK = "bl_ext_repo.lock" + +PKG_REPO_LIST_FILENAME = "bl_ext_repo.json" +PKG_MANIFEST_FILENAME_TOML = "blender_manifest.toml" +PKG_EXT = ".zip" + +# Add this to the local JSON file. +REPO_LOCAL_JSON = os.path.join(REPO_LOCAL_PRIVATE_DIR, PKG_REPO_LIST_FILENAME) + +# An item we communicate back to Blender. +InfoItem = Tuple[str, Any] +InfoItemSeq = Sequence[InfoItem] + +COMPLETE_ITEM = ('DONE', "") + +# Time to wait when there is no output, avoid 0 as it causes high CPU usage. +IDLE_WAIT_ON_READ = 0.05 +# IDLE_WAIT_ON_READ = 0.2 + + +# ----------------------------------------------------------------------------- +# Internal Functions. +# + +if sys.platform == "win32": + # See: https://stackoverflow.com/a/35052424/432509 + def file_handle_make_non_blocking(file_handle: IO[bytes]) -> None: + # Constant could define globally but avoid polluting the name-space + # thanks to: https://stackoverflow.com/questions/34504970 + import msvcrt + from ctypes import ( + POINTER, + WinError, + byref, + windll, + wintypes, + ) + from ctypes.wintypes import ( + BOOL, + DWORD, + HANDLE, + ) + + LPDWORD = POINTER(DWORD) + + PIPE_NOWAIT = wintypes.DWORD(0x00000001) + + # Set non-blocking. + SetNamedPipeHandleState = windll.kernel32.SetNamedPipeHandleState + SetNamedPipeHandleState.argtypes = [HANDLE, LPDWORD, LPDWORD, LPDWORD] + SetNamedPipeHandleState.restype = BOOL + os_handle = msvcrt.get_osfhandle(file_handle.fileno()) + res = windll.kernel32.SetNamedPipeHandleState(os_handle, byref(PIPE_NOWAIT), None, None) + if res == 0: + print(WinError()) + + def file_handle_non_blocking_is_error_blocking(ex: BaseException) -> bool: + if not isinstance(ex, OSError): + return False + from ctypes import GetLastError + ERROR_NO_DATA = 232 + # This is sometimes zero, `ex.args == (22, "Invalid argument")` + # This could be checked but for now ignore all zero errors. + return (GetLastError() in {0, ERROR_NO_DATA}) + +else: + def file_handle_make_non_blocking(file_handle: IO[bytes]) -> None: + import fcntl + # Get current `file_handle` flags. + flags = fcntl.fcntl(file_handle.fileno(), fcntl.F_GETFL) + fcntl.fcntl(file_handle, fcntl.F_SETFL, flags | os.O_NONBLOCK) + + def file_handle_non_blocking_is_error_blocking(ex: BaseException) -> bool: + if not isinstance(ex, BlockingIOError): + return False + return True + + +def file_mtime_or_none(filepath: str) -> Optional[int]: + try: + # For some reason `mypy` thinks this is a float. + return int(os.stat(filepath)[stat.ST_MTIME]) + except FileNotFoundError: + return None + + +def scandir_with_demoted_errors(path: str) -> Generator[os.DirEntry[str], None, None]: + try: + for entry in os.scandir(path): + yield entry + except BaseException as ex: + print("Error: scandir", ex) + + +# ----------------------------------------------------------------------------- +# Call JSON. 
+# + +def non_blocking_call(cmd: Sequence[str]) -> subprocess.Popen[bytes]: + # pylint: disable-next=consider-using-with + ps = subprocess.Popen(cmd, stdout=subprocess.PIPE) + stdout = ps.stdout + assert stdout is not None + # Needed so whatever is available can be read (without waiting). + file_handle_make_non_blocking(stdout) + return ps + + +def command_output_from_json_0( + args: Sequence[str], + use_idle: bool, +) -> Generator[InfoItemSeq, bool, None]: + cmd = [*BLENDER_EXT_CMD, *args, "--output-type=JSON_0"] + ps = non_blocking_call(cmd) + stdout = ps.stdout + assert stdout is not None + chunk_list = [] + request_exit_signal_sent = False + + while True: + # It's possible this is multiple chunks. + try: + chunk = stdout.read() + except BaseException as ex: + if not file_handle_non_blocking_is_error_blocking(ex): + raise ex + chunk = b'' + + json_messages = [] + + if not chunk: + if ps.poll() is not None: + break + if use_idle: + time.sleep(IDLE_WAIT_ON_READ) + elif (chunk_zero_index := chunk.find(b'\0')) == -1: + chunk_list.append(chunk) + else: + # Extract contiguous data from `chunk_list`. + chunk_list.append(chunk[:chunk_zero_index]) + + json_bytes_list = [b''.join(chunk_list)] + chunk_list.clear() + + # There may be data afterwards, even whole chunks. + if chunk_zero_index + 1 != len(chunk): + chunk = chunk[chunk_zero_index + 1:] + # Add whole chunks. + while (chunk_zero_index := chunk.find(b'\0')) != -1: + json_bytes_list.append(chunk[:chunk_zero_index]) + chunk = chunk[chunk_zero_index + 1:] + if chunk: + chunk_list.append(chunk) + + request_exit = False + + for json_bytes in json_bytes_list: + json_data = json.loads(json_bytes.decode("utf-8")) + + assert len(json_data) == 2 + assert isinstance(json_data[0], str) + + json_messages.append((json_data[0], json_data[1])) + + # Yield even when `json_messages`, otherwise this generator can block. + # It also means a request to exit might not be responded to soon enough. + request_exit = yield json_messages + if request_exit and not request_exit_signal_sent: + ps.send_signal(signal.SIGINT) + request_exit_signal_sent = True + + +# ----------------------------------------------------------------------------- +# Internal Functions. +# + + +def repositories_validate_or_errors(repos: Sequence[str]) -> Optional[InfoItemSeq]: + return None + + +# ----------------------------------------------------------------------------- +# Public Stand-Alone Utilities +# + +def pkg_theme_file_list(directory: str, pkg_idname: str) -> Tuple[str, List[str]]: + theme_dir = os.path.join(directory, pkg_idname) + theme_files = [ + filename for entry in os.scandir(theme_dir) + if ((not entry.is_dir()) and + (not (filename := entry.name).startswith(".")) and + filename.lower().endswith(".xml")) + ] + theme_files.sort() + return theme_dir, theme_files + + +# ----------------------------------------------------------------------------- +# Public Repository Actions +# + +def repo_sync( + *, + directory: str, + remote_url: str, + online_user_agent: str, + use_idle: bool, + force_exit_ok: bool = False, + extension_override: str = "", +) -> Generator[InfoItemSeq, None, None]: + """ + Implementation: + ``bpy.ops.ext.repo_sync(directory)``. 
+ """ + yield from command_output_from_json_0([ + "sync", + "--local-dir", directory, + "--remote-url", remote_url, + "--online-user-agent", online_user_agent, + *(("--force-exit-ok",) if force_exit_ok else ()), + *(("--extension-override", extension_override) if extension_override else ()), + ], use_idle=use_idle) + yield [COMPLETE_ITEM] + + +def repo_upgrade( + *, + directory: str, + remote_url: str, + online_user_agent: str, + use_idle: bool, +) -> Generator[InfoItemSeq, None, None]: + """ + Implementation: + ``bpy.ops.ext.repo_upgrade(directory)``. + """ + yield from command_output_from_json_0([ + "upgrade", + "--local-dir", directory, + "--remote-url", remote_url, + "--online-user-agent", online_user_agent, + ], use_idle=use_idle) + yield [COMPLETE_ITEM] + + +def repo_listing( + *, + repos: Sequence[str], +) -> Generator[InfoItemSeq, None, None]: + """ + Implementation: + ``bpy.ops.ext.repo_listing(directory)``. + """ + if result := repositories_validate_or_errors(repos): + yield result + return + + yield [COMPLETE_ITEM] + + +# ----------------------------------------------------------------------------- +# Public Package Actions +# + +def pkg_install_files( + *, + directory: str, + files: Sequence[str], + use_idle: bool, +) -> Generator[InfoItemSeq, None, None]: + """ + Implementation: + ``bpy.ops.ext.pkg_install_files(directory, files)``. + """ + yield from command_output_from_json_0([ + "install-files", *files, + "--local-dir", directory, + ], use_idle=use_idle) + yield [COMPLETE_ITEM] + + +def pkg_install( + *, + directory: str, + remote_url: str, + pkg_id_sequence: Sequence[str], + online_user_agent: str, + use_cache: bool, + use_idle: bool, +) -> Generator[InfoItemSeq, None, None]: + """ + Implementation: + ``bpy.ops.ext.pkg_install(directory, pkg_id)``. + """ + yield from command_output_from_json_0([ + "install", ",".join(pkg_id_sequence), + "--local-dir", directory, + "--remote-url", remote_url, + "--online-user-agent", online_user_agent, + "--local-cache", str(int(use_cache)), + ], use_idle=use_idle) + yield [COMPLETE_ITEM] + + +def pkg_uninstall( + *, + directory: str, + pkg_id_sequence: Sequence[str], + use_idle: bool, +) -> Generator[InfoItemSeq, None, None]: + """ + Implementation: + ``bpy.ops.ext.pkg_uninstall(directory, pkg_id)``. + """ + yield from command_output_from_json_0([ + "uninstall", ",".join(pkg_id_sequence), + "--local-dir", directory, + ], use_idle=use_idle) + yield [COMPLETE_ITEM] + + +# ----------------------------------------------------------------------------- +# Public Demo Actions +# + +def dummy_progress( + *, + use_idle: bool, +) -> Generator[InfoItemSeq, bool, None]: + """ + Implementation: + ``bpy.ops.ext.dummy_progress()``. 
+ """ + yield from command_output_from_json_0(["dummy-progress", "--time-duration=1.0"], use_idle=use_idle) + yield [COMPLETE_ITEM] + + +# ----------------------------------------------------------------------------- +# Public (non-command-line-wrapping) functions +# + +def json_from_filepath(filepath_json: str) -> Optional[Dict[str, Any]]: + if os.path.exists(filepath_json): + with open(filepath_json, "r", encoding="utf-8") as fh: + result = json.loads(fh.read()) + assert isinstance(result, dict) + return result + return None + + +def toml_from_filepath(filepath_json: str) -> Optional[Dict[str, Any]]: + if os.path.exists(filepath_json): + with open(filepath_json, "r", encoding="utf-8") as fh: + return tomllib.loads(fh.read()) + return None + + +def json_to_filepath(filepath_json: str, data: Any) -> None: + with open(filepath_json, "w", encoding="utf-8") as fh: + fh.write(json.dumps(data)) + + +def pkg_make_obsolete_for_testing(local_dir: str, pkg_id: str) -> None: + import re + filepath = os.path.join(local_dir, pkg_id, PKG_MANIFEST_FILENAME_TOML) + # Weak! use basic matching to replace the version, not nice but OK as a debugging option. + with open(filepath, "r", encoding="utf-8") as fh: + data = fh.read() + + def key_replace(match: re.Match[str]) -> str: + return "version = \"0.0.0\"" + + data = re.sub(r"^\s*version\s*=\s*\"[^\"]+\"", key_replace, data, flags=re.MULTILINE) + with open(filepath, "w", encoding="utf-8") as fh: + fh.write(data) + + +def pkg_manifest_dict_is_valid_or_error( + data: Dict[str, Any], + from_repo: bool, + strict: bool, +) -> Optional[str]: + # Exception! In in general `cli` shouldn't be considered a Python module, + # it's validation function is handy to reuse. + from .cli.blender_ext import pkg_manifest_from_dict_and_validate + assert "id" in data + result = pkg_manifest_from_dict_and_validate(data, from_repo=from_repo, strict=strict) + if isinstance(result, str): + return result + return None + + +def pkg_manifest_dict_from_file_or_error( + filepath: str, +) -> Union[Dict[str, Any], str]: + from .cli.blender_ext import pkg_manifest_from_archive_and_validate + result = pkg_manifest_from_archive_and_validate(filepath) + if isinstance(result, str): + return result + # Else convert the named-tuple into a dictionary. + result_dict = result._asdict() + assert isinstance(result_dict, dict) + return result_dict + + +def pkg_manifest_archive_url_abs_from_remote_url(remote_url: str, archive_url: str) -> str: + if archive_url.startswith("./"): + if ( + len(remote_url) > len(PKG_REPO_LIST_FILENAME) and + remote_url.endswith(PKG_REPO_LIST_FILENAME) and + (remote_url[-(len(PKG_REPO_LIST_FILENAME) + 1)] in {"\\", "/"}) + ): + # The URL contains the JSON name, strip this off before adding the package name. + archive_url = remote_url[:-len(PKG_REPO_LIST_FILENAME)] + archive_url[2:] + elif remote_url.startswith(("http://", "https://", "file://")): + # Simply add to the URL. + archive_url = remote_url.rstrip("/") + archive_url[1:] + else: + # Handle as a regular path. + archive_url = os.path.join(remote_url, archive_url[2:]) + return archive_url + + +def pkg_repo_cache_clear(local_dir: str) -> None: + local_cache_dir = os.path.join(local_dir, ".blender_ext", "cache") + if not os.path.isdir(local_cache_dir): + return + + for entry in scandir_with_demoted_errors(local_cache_dir): + if entry.is_dir(follow_symlinks=False): + continue + if not entry.name.endswith(PKG_EXT): + continue + + # Should never fail unless the file-system has permissions issues or corruption. 
+ try: + os.unlink(entry.path) + except BaseException as ex: + print("Error: unlink", ex) + + +# ----------------------------------------------------------------------------- +# Public Command Pool (non-command-line wrapper) +# + +InfoItemCallable = Callable[[], Generator[InfoItemSeq, bool, None]] + + +class CommandBatchItem: + __slots__ = ( + "fn_with_args", + "fn_iter", + "status", + "has_error", + "has_warning", + "msg_log", + "msg_log_len_last", + + "msg_type", + "msg_info", + ) + + STATUS_NOT_YET_STARTED = 0 + STATUS_RUNNING = 1 + STATUS_COMPLETE = 2 + + def __init__(self, fn_with_args: InfoItemCallable): + self.fn_with_args = fn_with_args + self.fn_iter: Optional[Generator[InfoItemSeq, bool, None]] = None + self.status = CommandBatchItem.STATUS_NOT_YET_STARTED + self.has_error = False + self.has_warning = False + self.msg_log: List[Tuple[str, Any]] = [] + self.msg_log_len_last = 0 + self.msg_type = "" + self.msg_info = "" + + def invoke(self) -> Generator[InfoItemSeq, bool, None]: + return self.fn_with_args() + + +class CommandBatch_ExecNonBlockingResult(NamedTuple): + # A message list for each command, aligned to `CommandBatchItem._batch`. + messages: Tuple[List[Tuple[str, str]], ...] + # When true, the status of all commands is `CommandBatchItem.STATUS_COMPLETE`. + all_complete: bool + # When true, `calc_status_data` will return a different result. + status_data_changed: bool + + +class CommandBatch_StatusFlag(NamedTuple): + flag: int + failure_count: int + count: int + + +class CommandBatch: + __slots__ = ( + "title", + + "_batch", + "_request_exit", + "_log_added_since_accessed", + ) + + def __init__( + self, + *, + title: str, + batch: Sequence[InfoItemCallable], + ): + self.title = title + self._batch = [CommandBatchItem(fn_with_args) for fn_with_args in batch] + self._request_exit = False + self._log_added_since_accessed = True + + def _exec_blocking_single( + self, + report_fn: Callable[[str, str], None], + request_exit_fn: Callable[[], bool], + ) -> bool: + for cmd in self._batch: + assert cmd.fn_iter is None + cmd.fn_iter = cmd.invoke() + request_exit: Optional[bool] = None + while True: + try: + # Request `request_exit` starts of as None, then it's a boolean. + json_messages = cmd.fn_iter.send(request_exit) # type: ignore + except StopIteration: + break + + for ty, msg in json_messages: + report_fn(ty, msg) + + if request_exit is None: + request_exit = False + + if request_exit is True: + break + if request_exit is None: + return True + return request_exit + + def _exec_blocking_multi( + self, + *, + report_fn: Callable[[str, str], None], + request_exit_fn: Callable[[], bool], + ) -> bool: + # TODO, concurrent execution. + return self._exec_blocking_single(report_fn, request_exit_fn) + + def exec_blocking( + self, + report_fn: Callable[[str, str], None], + request_exit_fn: Callable[[], bool], + concurrent: bool, + ) -> bool: + # Blocking execution & finish. + if concurrent: + return self._exec_blocking_multi( + report_fn=report_fn, + request_exit_fn=request_exit_fn, + ) + return self._exec_blocking_single(report_fn, request_exit_fn) + + def exec_non_blocking( + self, + *, + request_exit: bool, + ) -> CommandBatch_ExecNonBlockingResult: + """ + Return the result of running multiple commands. + """ + command_output: Tuple[List[Tuple[str, str]], ...] 
= tuple([] for _ in range(len(self._batch))) + + if request_exit: + self._request_exit = True + + status_data_changed = False + + complete_count = 0 + for cmd_index in reversed(range(len(self._batch))): + cmd = self._batch[cmd_index] + if cmd.status == CommandBatchItem.STATUS_COMPLETE: + complete_count += 1 + continue + + send_arg: Optional[bool] = self._request_exit + + # First time initialization. + if cmd.fn_iter is None: + cmd.fn_iter = cmd.invoke() + cmd.status = CommandBatchItem.STATUS_RUNNING + status_data_changed = True + send_arg = None + + try: + json_messages = cmd.fn_iter.send(send_arg) # type: ignore + except StopIteration: + # FIXME: This should not happen, we should get a "DONE" instead. + cmd.status = CommandBatchItem.STATUS_COMPLETE + complete_count += 1 + status_data_changed = True + continue + + if json_messages: + for ty, msg in json_messages: + self._log_added_since_accessed = True + + cmd.msg_type = ty + cmd.msg_info = msg + if ty == 'DONE': + assert msg == "" + cmd.status = CommandBatchItem.STATUS_COMPLETE + complete_count += 1 + status_data_changed = True + break + + command_output[cmd_index].append((ty, msg)) + if ty != 'PROGRESS': + if ty == 'ERROR': + if not cmd.has_error: + cmd.has_error = True + status_data_changed = True + elif ty == 'WARNING': + if not cmd.has_warning: + cmd.has_warning = True + status_data_changed = True + cmd.msg_log.append((ty, msg)) + + # Check if all are complete. + assert complete_count == len([cmd for cmd in self._batch if cmd.status == CommandBatchItem.STATUS_COMPLETE]) + all_complete = (complete_count == len(self._batch)) + return CommandBatch_ExecNonBlockingResult( + messages=command_output, + all_complete=all_complete, + status_data_changed=status_data_changed, + ) + + def calc_status_string(self) -> List[str]: + return [ + "{:s}: {:s}".format(cmd.msg_type, cmd.msg_info) + for cmd in self._batch if (cmd.msg_type or cmd.msg_info) + ] + + def calc_status_data(self) -> CommandBatch_StatusFlag: + """ + A single string for all commands + """ + status_flag = 0 + failure_count = 0 + for cmd in self._batch: + status_flag |= 1 << cmd.status + if cmd.has_error or cmd.has_warning: + failure_count += 1 + return CommandBatch_StatusFlag( + flag=status_flag, + failure_count=failure_count, + count=len(self._batch), + ) + + @staticmethod + def calc_status_text_icon_from_data(status_data: CommandBatch_StatusFlag, update_count: int) -> Tuple[str, str]: + # Generate a nice UI string for a status-bar & splash screen (must be short). + # + # NOTE: this is (arguably) UI logic, it's just nice to have it here + # as it avoids using low-level flags externally. + # + # FIXME: this text assumed a "sync" operation. + if status_data.failure_count == 0: + fail_text = "" + elif status_data.failure_count == status_data.count: + fail_text = ", failed" + else: + fail_text = ", some actions failed" + + if status_data.flag == 1 << CommandBatchItem.STATUS_NOT_YET_STARTED: + return "Starting Extension Updates{:s}".format(fail_text), 'SORTTIME' + if status_data.flag == 1 << CommandBatchItem.STATUS_COMPLETE: + if update_count > 0: + # NOTE: the UI design in #120612 has the number of extensions available in icon. + # Include in the text as this is not yet supported. 
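The status data packs each command's state into a small bit-mask (one bit per CommandBatchItem status constant), which is what lets the text/icon lookup distinguish "all complete" from "some still running" with simple equality and bitwise tests. A small illustration, assuming the class constants defined above:

    # STATUS_NOT_YET_STARTED == 0, STATUS_RUNNING == 1, STATUS_COMPLETE == 2.
    flag = (1 << CommandBatchItem.STATUS_RUNNING) | (1 << CommandBatchItem.STATUS_COMPLETE)
    assert flag == 0b110
    assert flag != 1 << CommandBatchItem.STATUS_COMPLETE      # Not everything is done yet.
    assert flag & (1 << CommandBatchItem.STATUS_RUNNING)      # At least one command is still running.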
+ return "Extensions Updates Available ({:d}){:s}".format(update_count, fail_text), 'INTERNET' + return "All Extensions Up-to-date{:s}".format(fail_text), 'CHECKMARK' + if status_data.flag & 1 << CommandBatchItem.STATUS_RUNNING: + return "Checking for Extension Updates{:s}".format(fail_text), 'SORTTIME' + + # Should never reach this line! + return "Internal error, unknown state!{:s}".format(fail_text), 'ERROR' + + def calc_status_log_or_none(self) -> Optional[List[Tuple[str, str]]]: + """ + Return the log or None if there were no changes since the last call. + """ + if self._log_added_since_accessed is False: + return None + self._log_added_since_accessed = False + + return [ + (ty, msg) + for cmd in self._batch + for ty, msg in (cmd.msg_log + ([(cmd.msg_type, cmd.msg_info)] if cmd.msg_type == 'PROGRESS' else [])) + ] + + def calc_status_log_since_last_request_or_none(self) -> Optional[List[List[Tuple[str, str]]]]: + """ + Return a list of new errors per command or None when none are found. + """ + result: List[List[Tuple[str, str]]] = [[] for _ in range(len(self._batch))] + found = False + for cmd_index, cmd in enumerate(self._batch): + msg_log_len = len(cmd.msg_log) + if cmd.msg_log_len_last == msg_log_len: + continue + assert cmd.msg_log_len_last < msg_log_len + result[cmd_index] = cmd.msg_log[cmd.msg_log_len_last:] + cmd.msg_log_len_last = len(cmd.msg_log) + found = True + + return result if found else None + + +# ----------------------------------------------------------------------------- +# Public Repo Cache (non-command-line wrapper) +# + +class _RepoCacheEntry: + __slots__ = ( + "directory", + "remote_url", + + "_pkg_manifest_local", + "_pkg_manifest_remote", + "_pkg_manifest_remote_mtime", + "_pkg_manifest_remote_has_warning" + ) + + def __init__(self, directory: str, remote_url: str) -> None: + assert directory != "" + self.directory = directory + self.remote_url = remote_url + # Manifest data per package loaded from the packages local JSON. + self._pkg_manifest_local: Optional[Dict[str, Dict[str, Any]]] = None + self._pkg_manifest_remote: Optional[Dict[str, Dict[str, Any]]] = None + self._pkg_manifest_remote_mtime = 0 + # Avoid many noisy prints. + self._pkg_manifest_remote_has_warning = False + + def _json_data_ensure( + self, + *, + error_fn: Callable[[BaseException], None], + check_files: bool = False, + ignore_missing: bool = False, + ) -> Any: + if self._pkg_manifest_remote is not None: + if check_files: + self._json_data_refresh(error_fn=error_fn) + return self._pkg_manifest_remote + + filepath_json = os.path.join(self.directory, REPO_LOCAL_JSON) + + try: + self._pkg_manifest_remote = json_from_filepath(filepath_json) + except BaseException as ex: + self._pkg_manifest_remote = None + error_fn(ex) + + self._pkg_manifest_local = None + if self._pkg_manifest_remote is not None: + json_mtime = file_mtime_or_none(filepath_json) + assert json_mtime is not None + self._pkg_manifest_remote_mtime = json_mtime + self._pkg_manifest_local = None + self._pkg_manifest_remote_has_warning = False + else: + if not ignore_missing: + # NOTE: this warning will occur when setting up a new repository. + # It could be removed but it's also useful to know when the JSON is missing. 
+ if self.remote_url: + if not self._pkg_manifest_remote_has_warning: + print("Repository file:", filepath_json, "not found, sync required!") + self._pkg_manifest_remote_has_warning = True + + return self._pkg_manifest_remote + + def _json_data_refresh_from_toml( + self, + *, + error_fn: Callable[[BaseException], None], + force: bool = False, + ) -> None: + assert self.remote_url == "" + # Since there is no remote repo the ID name is defined by the directory name only. + local_json_data = self.pkg_manifest_from_local_ensure(error_fn=error_fn) + if local_json_data is None: + return + + filepath_json = os.path.join(self.directory, REPO_LOCAL_JSON) + + # We might want to adjust where this happens, create the directory here + # because this could be a fresh repo might not have been initialized until now. + directory = os.path.dirname(filepath_json) + try: + # A symbolic-link that's followed (good), if it exists and is a file an error is raised here and returned. + if not os.path.isdir(directory): + os.makedirs(directory, exist_ok=True) + except BaseException as ex: + error_fn(ex) + return + del directory + + with open(filepath_json, "w", encoding="utf-8") as fh: + # Indent because it can be useful to check this file if there are any issues. + + # Begin: transform to list with ID's in item. + # TODO: this transform can probably be removed and the internal format can change + # to use the same structure as the actual JSON. + local_json_data_compat = { + "version": "v1", + "blocklist": [], + "data": [ + {"id": pkg_idname, **value} + for pkg_idname, value in local_json_data.items() + ], + } + # End: compatibility change. + + fh.write(json.dumps(local_json_data_compat, indent=2)) + + def _json_data_refresh( + self, + *, + error_fn: Callable[[BaseException], None], + force: bool = False, + ) -> None: + if force or (self._pkg_manifest_remote is None) or (self._pkg_manifest_remote_mtime == 0): + self._pkg_manifest_remote = None + self._pkg_manifest_remote_mtime = 0 + self._pkg_manifest_local = None + + # Detect a local-only repository, there is no server to sync with + # so generate the JSON from the TOML files. + # While redundant this avoids having support multiple code-paths for local-only/remote repos. + if self.remote_url == "": + self._json_data_refresh_from_toml(error_fn=error_fn, force=force) + + filepath_json = os.path.join(self.directory, REPO_LOCAL_JSON) + mtime_test = file_mtime_or_none(filepath_json) + if self._pkg_manifest_remote is not None: + # TODO: check the time of every installed package. + if mtime_test == self._pkg_manifest_remote_mtime: + return + + try: + self._pkg_manifest_remote = json_from_filepath(filepath_json) + except BaseException as ex: + self._pkg_manifest_remote = None + error_fn(ex) + + self._pkg_manifest_local = None + if self._pkg_manifest_remote is not None: + json_mtime = file_mtime_or_none(filepath_json) + assert json_mtime is not None + self._pkg_manifest_remote_mtime = json_mtime + + def pkg_manifest_from_local_ensure( + self, + *, + error_fn: Callable[[BaseException], None], + ignore_missing: bool = False, + ) -> Optional[Dict[str, Dict[str, Any]]]: + # Important for local-only repositories (where the directory name defines the ID). 
+ has_remote = self.remote_url != "" + + if self._pkg_manifest_local is None: + self._json_data_ensure( + ignore_missing=ignore_missing, + error_fn=error_fn, + ) + pkg_manifest_local = {} + try: + dir_entries = os.scandir(self.directory) + except BaseException as ex: + dir_entries = None + error_fn(ex) + + for entry in (dir_entries if dir_entries is not None else ()): + # Only check directories. + if not entry.is_dir(follow_symlinks=True): + continue + + filename = entry.name + + # Simply ignore these paths without any warnings (accounts for `.git`, `__pycache__`, etc). + if filename.startswith((".", "_")): + continue + + # Report any paths that cannot be used. + if not filename.isidentifier(): + error_fn(Exception("\"{:s}\" is not a supported module name, skipping".format( + os.path.join(self.directory, filename) + ))) + continue + + filepath_toml = os.path.join(self.directory, filename, PKG_MANIFEST_FILENAME_TOML) + try: + item_local = toml_from_filepath(filepath_toml) + except BaseException as ex: + item_local = None + error_fn(ex) + + if item_local is None: + continue + + pkg_idname = item_local["id"] + if has_remote: + # This should never happen, the user may have manually renamed a directory. + if pkg_idname != filename: + print("Skipping package with inconsistent name: \"{:s}\" mismatch \"{:s}\"".format( + filename, + pkg_idname, + )) + continue + else: + pkg_idname = filename + + # Validate so local-only packages with invalid manifests aren't used. + if (error_str := pkg_manifest_dict_is_valid_or_error(item_local, from_repo=False, strict=False)): + error_fn(Exception(error_str)) + continue + + pkg_manifest_local[pkg_idname] = item_local + self._pkg_manifest_local = pkg_manifest_local + return self._pkg_manifest_local + + def pkg_manifest_from_remote_ensure( + self, + *, + error_fn: Callable[[BaseException], None], + ignore_missing: bool = False, + ) -> Optional[Dict[str, Dict[str, Any]]]: + if self._pkg_manifest_remote is None: + self._json_data_ensure( + ignore_missing=ignore_missing, + error_fn=error_fn, + ) + return self._pkg_manifest_remote + + def force_local_refresh(self) -> None: + self._pkg_manifest_local = None + + +class RepoCacheStore: + __slots__ = ( + "_repos", + "_is_init", + ) + + def __init__(self) -> None: + self._repos: List[_RepoCacheEntry] = [] + self._is_init = False + + def is_init(self) -> bool: + return self._is_init + + def refresh_from_repos( + self, *, + repos: List[Tuple[str, str]], + force: bool = False, + ) -> None: + """ + Initialize or update repositories. 
+ """ + repos_prev = {} + if not force: + for repo_entry in self._repos: + repos_prev[repo_entry.directory, repo_entry.remote_url] = repo_entry + self._repos.clear() + + for directory, remote_url in repos: + repo_entry_test = repos_prev.get((directory, remote_url)) + if repo_entry_test is None: + repo_entry_test = _RepoCacheEntry(directory, remote_url) + self._repos.append(repo_entry_test) + self._is_init = True + + def refresh_remote_from_directory( + self, + directory: str, + *, + error_fn: Callable[[BaseException], None], + force: bool = False, + ) -> None: + for repo_entry in self._repos: + if directory == repo_entry.directory: + repo_entry._json_data_refresh(force=force, error_fn=error_fn) + return + raise ValueError("Directory {:s} not a known repo".format(directory)) + + def refresh_local_from_directory( + self, + directory: str, + *, + error_fn: Callable[[BaseException], None], + ignore_missing: bool = False, + directory_subset: Optional[Set[str]] = None, + ) -> Optional[Dict[str, Dict[str, Any]]]: + for repo_entry in self._repos: + if directory == repo_entry.directory: + # Force refresh. + repo_entry.force_local_refresh() + return repo_entry.pkg_manifest_from_local_ensure( + ignore_missing=ignore_missing, + error_fn=error_fn, + ) + raise ValueError("Directory {:s} not a known repo".format(directory)) + + def pkg_manifest_from_remote_ensure( + self, + *, + error_fn: Callable[[BaseException], None], + check_files: bool = False, + ignore_missing: bool = False, + directory_subset: Optional[Set[str]] = None, + ) -> Generator[Optional[Dict[str, Dict[str, Any]]], None, None]: + for repo_entry in self._repos: + if directory_subset is not None: + if repo_entry.directory not in directory_subset: + continue + + json_data = repo_entry._json_data_ensure( + check_files=check_files, + ignore_missing=ignore_missing, + error_fn=error_fn, + ) + if json_data is None: + # The repository may be fresh, not yet initialized. + yield None + else: + pkg_manifest_remote = {} + # "data" should always exist, it's not the purpose of this function to fully validate though. + json_items = json_data.get("data") + if json_items is None: + error_fn(ValueError("JSON was missing \"data\" key")) + yield None + else: + for item_remote in json_items: + # TODO(@ideasman42): we may want to include the "id", as part of moving to a new format + # the "id" used not to be part of each item so users of this API assume it's not. + # The `item_remote` could be used in-place however that needs further testing. + item_remove_copy = item_remote.copy() + pkg_idname = item_remove_copy.pop("id") + pkg_manifest_remote[pkg_idname] = item_remove_copy + yield pkg_manifest_remote + + def pkg_manifest_from_local_ensure( + self, + *, + error_fn: Callable[[BaseException], None], + check_files: bool = False, + directory_subset: Optional[Set[str]] = None, + ) -> Generator[Optional[Dict[str, Dict[str, Any]]], None, None]: + for repo_entry in self._repos: + if directory_subset is not None: + if repo_entry.directory not in directory_subset: + continue + if check_files: + repo_entry.force_local_refresh() + yield repo_entry.pkg_manifest_from_local_ensure(error_fn=error_fn) + + def clear(self) -> None: + self._repos.clear() + self._is_init = False + + +# ----------------------------------------------------------------------------- +# Public Repo Lock +# + + +class RepoLock: + """ + Lock multiple repositories, one or all may fail, + it's up to the caller to check. + + Access via the ``RepoLockContext`` where possible to avoid the lock being left held. 
+ """ + __slots__ = ( + "_repo_directories", + "_repo_lock_files", + "_cookie", + "_held", + ) + + def __init__(self, *, repo_directories: Sequence[str], cookie: str): + """ + :arg repo_directories: + Directories to attempt to lock. + :arg cookie: + A path which is used as a reference. + It must point to a path that exists. + When a lock exists, check if the cookie path exists, if it doesn't, allow acquiring the lock. + """ + self._repo_directories = tuple(repo_directories) + self._repo_lock_files: List[Tuple[str, str]] = [] + self._held = False + self._cookie = cookie + + def __del__(self) -> None: + if not self._held: + return + sys.stderr.write("{:s}: freed without releasing lock!".format(type(self).__name__)) + + @staticmethod + def _is_locked_with_stale_cookie_removal(local_lock_file: str, cookie: str) -> Optional[str]: + if os.path.exists(local_lock_file): + try: + with open(local_lock_file, "r", encoding="utf8") as fh: + data = fh.read() + except BaseException as ex: + return "lock file could not be read: {:s}".format(str(ex)) + + # The lock is held. + if os.path.exists(data): + if data == cookie: + return "lock is already held by this session" + return "lock is held by other session: {:s}".format(data) + + # The lock is held (but stale), remove it. + try: + os.remove(local_lock_file) + except BaseException as ex: + return "lock file could not be removed: {:s}".format(str(ex)) + return None + + def acquire(self) -> Dict[str, Optional[str]]: + """ + Return directories and the lock status, + with None if locking succeeded. + """ + if self._held: + raise Exception("acquire(): called with an existing lock!") + if not os.path.exists(self._cookie): + raise Exception("acquire(): cookie doesn't exist! (when it should)") + + # Assume all succeed. + result: Dict[str, Optional[str]] = {directory: None for directory in self._repo_directories} + for directory in self._repo_directories: + local_private_dir = os.path.join(directory, REPO_LOCAL_PRIVATE_DIR) + + # This most likely exists, create if it doesn't. + if not os.path.isdir(local_private_dir): + os.makedirs(local_private_dir) + + local_lock_file = os.path.join(local_private_dir, REPO_LOCAL_PRIVATE_LOCK) + # Attempt to get the lock, kick out stale locks. + if (lock_msg := self._is_locked_with_stale_cookie_removal(local_lock_file, self._cookie)) is not None: + result[directory] = "Lock exists: {:s}".format(lock_msg) + continue + try: + with open(local_lock_file, "w", encoding="utf8") as fh: + fh.write(self._cookie) + except BaseException as ex: + result[directory] = "Lock could not be created: {:s}".format(str(ex)) + # Remove if it was created (but failed to write)... disk-full? + try: + os.remove(local_lock_file) + except BaseException: + pass + continue + + # Success, the file is locked. + self._repo_lock_files.append((directory, local_lock_file)) + self._held = True + return result + + def release(self) -> Dict[str, Optional[str]]: + # NOTE: lots of error checks here, mostly to give insights in the very unlikely case this fails. + if not self._held: + raise Exception("release(): called without a lock!") + + result: Dict[str, Optional[str]] = {directory: None for directory in self._repo_directories} + for directory, local_lock_file in self._repo_lock_files: + if not os.path.exists(local_lock_file): + result[directory] = "release(): lock missing when expected, continuing." 
+ continue + try: + with open(local_lock_file, "r", encoding="utf8") as fh: + data = fh.read() + except BaseException as ex: + result[directory] = "release(): lock file could not be read: {:s}".format(str(ex)) + continue + # Owned by another application, this shouldn't happen. + if data != self._cookie: + result[directory] = "release(): lock was unexpectedly stolen by another program: {:s}".format(data) + continue + + # This is our lock file, we're allowed to remove it! + try: + os.remove(local_lock_file) + except BaseException as ex: + result[directory] = "release(): failed to remove file {!r}".format(ex) + + self._held = False + return result + + +class RepoLockContext: + __slots__ = ( + "_repo_lock", + ) + + def __init__(self, *, repo_directories: Sequence[str], cookie: str): + self._repo_lock = RepoLock(repo_directories=repo_directories, cookie=cookie) + + def __enter__(self) -> Dict[str, Optional[str]]: + return self._repo_lock.acquire() + + def __exit__(self, _ty: Any, _value: Any, _traceback: Any) -> None: + self._repo_lock.release() diff --git a/scripts/addons_core/bl_pkg/cli/blender_ext.py b/scripts/addons_core/bl_pkg/cli/blender_ext.py new file mode 100755 index 00000000000..888edd075fc --- /dev/null +++ b/scripts/addons_core/bl_pkg/cli/blender_ext.py @@ -0,0 +1,2923 @@ +#!/usr/bin/env python +# SPDX-FileCopyrightText: 2023 Blender Foundation +# +# SPDX-License-Identifier: GPL-2.0-or-later + +""" +Command for managing Blender extensions. +""" + + +import argparse +import contextlib +import hashlib # for SHA1 check-summing files. +import io +import json +import os +import re +import shutil +import signal # Override `Ctrl-C`. +import sys +import tomllib +import urllib.error # For `URLError`. +import urllib.parse # For `urljoin`. +import urllib.request # For accessing remote `https://` paths. +import zipfile + + +from typing import ( + Any, + Dict, + Generator, + IO, + Optional, + Sequence, + List, + Set, + Tuple, + Callable, + NamedTuple, + Union, +) + +ArgsSubparseFn = Callable[["argparse._SubParsersAction[argparse.ArgumentParser]"], None] + +REQUEST_EXIT = False + +# When set, ignore broken pipe exceptions (these occur when the calling processes is closed). +FORCE_EXIT_OK = False + +# Expect the remote URL to contain JSON (don't append the JSON name to the path). +# File-system still append the expected JSON filename. +REMOTE_REPO_HAS_JSON_IMPLIED = True + + +def signal_handler_sigint(_sig: int, _frame: Any) -> None: + # pylint: disable-next=global-statement + global REQUEST_EXIT + REQUEST_EXIT = True + + +signal.signal(signal.SIGINT, signal_handler_sigint) + + +# A primitive type that can be communicated via message passing. +PrimType = Union[int, str] +PrimTypeOrSeq = Union[PrimType, Sequence[PrimType]] + +MessageFn = Callable[[str, PrimTypeOrSeq], bool] + +VERSION = "0.1" + +PKG_EXT = ".zip" +# PKG_JSON_INFO = "bl_ext_repo.json" + +PKG_REPO_LIST_FILENAME = "bl_ext_repo.json" + +# Only for building. +PKG_MANIFEST_FILENAME_TOML = "blender_manifest.toml" + +# This directory is in the local repository. +REPO_LOCAL_PRIVATE_DIR = ".blender_ext" + +MESSAGE_TYPES = {'STATUS', 'PROGRESS', 'WARN', 'ERROR', 'PATH', 'DONE'} + +RE_MANIFEST_SEMVER = re.compile( + r'^' + r'(?P0|[1-9]\d*)\.' + r'(?P0|[1-9]\d*)\.' + r'(?P0|[1-9]\d*)' + r'(?:-(?P(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?' + r'(?:\+(?P[0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$' +) + +# Ensure names (for example), don't contain control characters. 
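RE_MANIFEST_SEMVER implements the usual semantic-versioning grammar (major, minor and patch numbers with optional pre-release and build-metadata suffixes). A few illustrative matches, shown only to document the intent of the pattern:

    assert RE_MANIFEST_SEMVER.match("1.2.3") is not None
    assert RE_MANIFEST_SEMVER.match("1.2.3-alpha.1+build.5") is not None
    assert RE_MANIFEST_SEMVER.match("1.2") is None      # The patch number is required.
    assert RE_MANIFEST_SEMVER.match("v1.2.3") is None   # No "v" prefix.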
+RE_CONTROL_CHARS = re.compile(r'[\x00-\x1f\x7f-\x9f]') + +# Progress updates are displayed after each chunk of this size is downloaded. +# Small values add unnecessary overhead showing progress, large values will make +# progress not update often enough. +# +# Note that this could be dynamic although it's not a priority. +# +# 16kb to be responsive even on slow connections. +CHUNK_SIZE_DEFAULT = 1 << 14 + +# Standard out may be communicating with a parent process, +# arbitrary prints are NOT acceptable. + + +# pylint: disable-next=redefined-builtin +def print(*args: Any, **kw: Dict[str, Any]) -> None: + raise Exception("Illegal print(*({!r}), **{{{!r}}})".format(args, kw)) + + +def debug_stack_trace_to_file() -> None: + """ + Debugging. + """ + import inspect + stack = inspect.stack(context=1) + with open("/tmp/out.txt", "w") as fh: + for frame_info in stack[1:]: + fh.write("{:s}:{:d}: {:s}\n".format( + frame_info.filename, + frame_info.lineno, + frame_info.function, + )) + + +def message_done(msg_fn: MessageFn) -> bool: + """ + Print a non-fatal warning. + """ + return msg_fn("DONE", "") + + +def message_warn(msg_fn: MessageFn, s: str) -> bool: + """ + Print a non-fatal warning. + """ + return msg_fn("WARN", s) + + +def message_error(msg_fn: MessageFn, s: str) -> bool: + """ + Print a fatal error. + """ + return msg_fn("ERROR", s) + + +def message_status(msg_fn: MessageFn, s: str) -> bool: + """ + Print a status message. + """ + return msg_fn("STATUS", s) + + +def message_path(msg_fn: MessageFn, s: str) -> bool: + """ + Print a path. + """ + return msg_fn("PATH", s) + + +def message_progress(msg_fn: MessageFn, s: str, progress: int, progress_range: int, unit: str) -> bool: + """ + Print a progress update. + """ + assert unit == 'BYTE' + return msg_fn("PROGRESS", (s, unit, progress, progress_range)) + + +def force_exit_ok_enable() -> None: + global FORCE_EXIT_OK + FORCE_EXIT_OK = True + # Without this, some errors are printed on exit. + sys.unraisablehook = lambda _ex: None + + +# ----------------------------------------------------------------------------- +# Generic Functions + +def read_with_timeout(fh: IO[bytes], size: int, *, timeout_in_seconds: float) -> Optional[bytes]: + # TODO: implement timeout (TimeoutError). + return fh.read(size) + + +class CleanupPathsContext: + __slots__ = ( + "files", + "directories", + ) + + def __init__(self, *, files: Sequence[str], directories: Sequence[str]) -> None: + self.files = files + self.directories = directories + + def __enter__(self) -> "CleanupPathsContext": + return self + + def __exit__(self, _ty: Any, _value: Any, _traceback: Any) -> None: + for f in self.files: + if not os.path.exists(f): + continue + try: + os.unlink(f) + except Exception: + pass + + for d in self.directories: + if not os.path.exists(d): + continue + try: + shutil.rmtree(d) + except Exception: + pass + + +# ----------------------------------------------------------------------------- +# Generic Functions + +class PkgRepoData(NamedTuple): + version: str + blocklist: List[str] + data: List[Dict[str, Any]] + + +class PkgManifest(NamedTuple): + """Package Information.""" + schema_version: str + id: str + name: str + tagline: str + version: str + type: str + maintainer: str + license: List[str] + blender_version_min: str + + # Optional (set all defaults). 
+ blender_version_max: Optional[str] = None + website: Optional[str] = None + copyright: Optional[List[str]] = None + permissions: Optional[List[str]] = None + tags: Optional[List[str]] = None + wheels: Optional[List[str]] = None + + +class PkgManifest_Archive(NamedTuple): + """Package Information with archive information.""" + # NOTE: no support for default values (unlike `PkgManifest`). + manifest: PkgManifest + archive_size: int + archive_hash: str + archive_url: str + + +# ----------------------------------------------------------------------------- +# Generic Functions + +def random_acii_lines(*, seed: Union[int, str], width: int) -> Generator[str, None, None]: + """ + Generate random ASCII text [A-Za-z0-9]. + Intended not to compress well, it's possible to simulate downloading a large package. + """ + import random + import string + + chars_init = string.ascii_letters + string.digits + chars = chars_init + while len(chars) < width: + chars = chars + chars_init + + r = random.Random(seed) + chars_list = list(chars) + while True: + r.shuffle(chars_list) + yield "".join(chars_list[:width]) + + +def sha256_from_file(filepath: str, block_size: int = 1 << 20, hash_prefix: bool = False) -> Tuple[int, str]: + """ + Returns an arbitrary sized unique ASCII string based on the file contents. + (exact hashing method may change). + """ + with open(filepath, 'rb') as fh: + size = 0 + sha256 = hashlib.new('sha256') + while True: + data = fh.read(block_size) + if not data: + break + sha256.update(data) + size += len(data) + # Skip the `0x`. + return size, ("sha256:" + sha256.hexdigest()) if hash_prefix else sha256.hexdigest() + + +def scandir_recursive_impl( + base_path: str, + path: str, + *, + filter_fn: Callable[[str], bool], +) -> Generator[Tuple[str, str], None, None]: + """Recursively yield DirEntry objects for given directory.""" + for entry in os.scandir(path): + if entry.is_symlink(): + continue + + entry_path = entry.path + entry_path_relateive = os.path.relpath(entry_path, base_path) + + if not filter_fn(entry_path_relateive): + continue + + if entry.is_dir(): + yield from scandir_recursive_impl( + base_path, + entry_path, + filter_fn=filter_fn, + ) + elif entry.is_file(): + yield entry_path, entry_path_relateive + + +def scandir_recursive( + path: str, + filter_fn: Callable[[str], bool], +) -> Generator[Tuple[str, str], None, None]: + yield from scandir_recursive_impl(path, path, filter_fn=filter_fn) + + +def filepath_skip_compress(filepath: str) -> bool: + """ + Return true when this file shouldn't be compressed while archiving. + Speeds up archive creation, especially for large ``*.whl`` files. + """ + # NOTE: for now use simple extension check, we could check the magic number too. + return filepath.lower().endswith(( + # Python wheels. + ".whl", + # Archives (exclude historic formats: `*.arj`, `*.lha` ... etc). + ".bz2", + ".gz", + ".lz4", + ".lzma", + ".rar", + ".xz", + ".zip", + ".zst", + # TAR combinations. + ".tbz2", + ".tgz", + ".txz", + ".tzst", + )) + + +def pkg_manifest_from_dict_and_validate_impl( + data: Dict[Any, Any], + *, + from_repo: bool, + all_errors: bool, + strict: bool, +) -> Union[PkgManifest, List[str]]: + error_list = [] + # Validate the dictionary. 
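The construction below leans on NamedTuple introspection: ``PkgManifest._fields`` lists every key in declaration order and ``PkgManifest._field_defaults`` supplies values for the optional ones, so required and optional fields never need to be listed twice. For example:

    assert PkgManifest._fields[0] == "schema_version"
    assert "id" not in PkgManifest._field_defaults                 # Required, no default.
    assert "blender_version_max" in PkgManifest._field_defaults    # Optional, defaults to None.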
+ if all_errors: + if (x := pkg_manifest_is_valid_or_error_all(data, from_repo=from_repo, strict=strict)) is not None: + error_list.extend(x) + else: + if (error_msg := pkg_manifest_is_valid_or_error(data, from_repo=from_repo, strict=strict)) is not None: + error_list.append(error_msg) + if not all_errors: + return error_list + + values: List[str] = [] + for key in PkgManifest._fields: + val = data.get(key, ...) + if val is ...: + val = PkgManifest._field_defaults.get(key, ...) + # `pkg_manifest_is_valid_or_error{_all}` will have caught this, assert all the same. + assert val is not ... + values.append(val) + + kw_args: Dict[str, Any] = dict(zip(PkgManifest._fields, values, strict=True)) + manifest = PkgManifest(**kw_args) + + if error_list: + assert all_errors + return error_list + + # There could be other validation, leave these as-is. + return manifest + + +def pkg_manifest_from_dict_and_validate( + data: Dict[Any, Any], + from_repo: bool, + strict: bool, +) -> Union[PkgManifest, str]: + manifest = pkg_manifest_from_dict_and_validate_impl(data, from_repo=from_repo, all_errors=False, strict=strict) + if isinstance(manifest, list): + return manifest[0] + return manifest + + +def pkg_manifest_from_dict_and_validate_all_errros( + data: Dict[Any, Any], + from_repo: bool, + strict: bool, +) -> Union[PkgManifest, List[str]]: + """ + Validate the manifest and return all errors. + """ + return pkg_manifest_from_dict_and_validate_impl(data, from_repo=from_repo, all_errors=True, strict=strict) + + +def pkg_manifest_archive_from_dict_and_validate( + data: Dict[Any, Any], + strict: bool, +) -> Union[PkgManifest_Archive, str]: + manifest = pkg_manifest_from_dict_and_validate(data, from_repo=True, strict=strict) + if isinstance(manifest, str): + return manifest + + assert isinstance(manifest, PkgManifest) + return PkgManifest_Archive( + manifest=manifest, + archive_size=data["archive_size"], + # Repositories that use their own hash generation may use capitals, + # ensure always lowercase for comparison (hashes generated here are always lowercase). + archive_hash=data["archive_hash"].lower(), + archive_url=data["archive_url"], + ) + + +def pkg_manifest_from_toml_and_validate_all_errors( + filepath: str, + strict: bool, +) -> Union[PkgManifest, List[str]]: + """ + This function is responsible for not letting invalid manifest from creating packages with ID names + or versions that would not properly install. + + The caller is expected to use exception handling and forward any errors to the user. + """ + try: + with open(filepath, "rb") as fh: + data = tomllib.load(fh) + except Exception as ex: + return [str(ex)] + + return pkg_manifest_from_dict_and_validate_all_errros(data, from_repo=False, strict=strict) + + +def pkg_zipfile_detect_subdir_or_none( + zip_fh: zipfile.ZipFile, +) -> Optional[str]: + if PKG_MANIFEST_FILENAME_TOML in zip_fh.NameToInfo: + return "" + # Support one directory containing the expected TOML. + # ZIP's always use "/" (not platform specific). + test_suffix = "/" + PKG_MANIFEST_FILENAME_TOML + + base_dir = None + for filename in zip_fh.NameToInfo.keys(): + if filename.startswith("."): + continue + if not filename.endswith(test_suffix): + continue + # Only a single directory (for sanity sake). + if filename.find("/", len(filename) - len(test_suffix)) == -1: + continue + + # Multiple packages in a single archive, bail out as this is not a supported scenario. 
+ if base_dir is not None: + base_dir = None + break + + # Don't break in case there are multiple, in that case this function should return None. + base_dir = filename[:-len(PKG_MANIFEST_FILENAME_TOML)] + + return base_dir + + +def pkg_manifest_from_zipfile_and_validate_impl( + zip_fh: zipfile.ZipFile, + archive_subdir: str, + all_errors: bool, + strict: bool, +) -> Union[PkgManifest, List[str]]: + """ + Validate the manifest and return all errors. + """ + # `archive_subdir` from `pkg_zipfile_detect_subdir_or_none`. + assert archive_subdir == "" or archive_subdir.endswith("/") + + try: + file_content = zip_fh.read(archive_subdir + PKG_MANIFEST_FILENAME_TOML) + except KeyError: + # TODO: check if there is a nicer way to handle this? + # From a quick look there doesn't seem to be a good way + # to do this using public methods. + file_content = None + + if file_content is None: + return ["Archive does not contain a manifest"] + + manifest_dict = toml_from_bytes(file_content) + assert isinstance(manifest_dict, dict) + + # TODO: forward actual error. + if manifest_dict is None: + return ["Archive does not contain a manifest"] + return pkg_manifest_from_dict_and_validate_impl( + manifest_dict, + from_repo=False, + all_errors=all_errors, + strict=strict, + ) + + +def pkg_manifest_from_zipfile_and_validate( + zip_fh: zipfile.ZipFile, + archive_subdir: str, + strict: bool, +) -> Union[PkgManifest, str]: + manifest = pkg_manifest_from_zipfile_and_validate_impl( + zip_fh, + archive_subdir, + all_errors=False, + strict=strict, + ) + if isinstance(manifest, list): + return manifest[0] + return manifest + + +def pkg_manifest_from_zipfile_and_validate_all_errors( + zip_fh: zipfile.ZipFile, + archive_subdir: str, + strict: bool, +) -> Union[PkgManifest, List[str]]: + return pkg_manifest_from_zipfile_and_validate_impl( + zip_fh, + archive_subdir, + all_errors=True, + strict=strict, + ) + + +def pkg_manifest_from_archive_and_validate( + filepath: str, + strict: bool, +) -> Union[PkgManifest, str]: + try: + zip_fh_context = zipfile.ZipFile(filepath, mode="r") + except BaseException as ex: + return "Error extracting archive \"{:s}\"".format(str(ex)) + + with contextlib.closing(zip_fh_context) as zip_fh: + if (archive_subdir := pkg_zipfile_detect_subdir_or_none(zip_fh)) is None: + return "Archive has no manifest: \"{:s}\"".format(PKG_MANIFEST_FILENAME_TOML) + return pkg_manifest_from_zipfile_and_validate(zip_fh, archive_subdir, strict=strict) + + +def remote_url_get(url: str) -> str: + if REMOTE_REPO_HAS_JSON_IMPLIED: + return url + return urllib.parse.urljoin(url, PKG_REPO_LIST_FILENAME) + + +# ----------------------------------------------------------------------------- +# ZipFile Helpers + +def zipfile_make_root_directory( + zip_fh: zipfile.ZipFile, + root_dir: str, +) -> None: + """ + Make ``root_dir`` the new root of this ``zip_fh``, remove all other files. + """ + # WARNING: this works but it's not pretty, + # alternative solutions involve duplicating too much of ZipFile's internal logic. + assert root_dir.endswith("/") + filelist = zip_fh.filelist + filelist_copy = filelist[:] + filelist.clear() + for member in filelist_copy: + filename = member.filename + if not filename.startswith(root_dir): + continue + # Ensure the path is not _ony_ the directory (can happen for some ZIP files). 
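This rewrites the archive's member list in place so that, for example, ``my_pkg/blender_manifest.toml`` extracts as ``blender_manifest.toml`` while members outside ``my_pkg/`` are dropped. A sketch of how it can be combined with the sub-directory detection above (paths are illustrative):

    with zipfile.ZipFile("/tmp/my_pkg-0.1.0.zip", mode="r") as zip_fh:
        subdir = pkg_zipfile_detect_subdir_or_none(zip_fh)
        if subdir:  # e.g. "my_pkg/"
            zipfile_make_root_directory(zip_fh, subdir)
        # Extract using the rewritten ZipInfo members.
        for member in zip_fh.infolist():
            zip_fh.extract(member, "/tmp/my_pkg_unpacked")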
+ if not (filename := filename[len(root_dir):]): + continue + + member.filename = filename + filelist.append(member) + + +# ----------------------------------------------------------------------------- +# URL Downloading + +# Originally based on `urllib.request.urlretrieve`. + + +def url_retrieve_to_data_iter( + url: str, + *, + data: Optional[Any] = None, + headers: Dict[str, str], + chunk_size: int, + timeout_in_seconds: float, +) -> Generator[Tuple[bytes, int, Any], None, None]: + """ + Retrieve a URL into a temporary location on disk. + + Requires a URL argument. If a filename is passed, it is used as + the temporary file location. The reporthook argument should be + a callable that accepts a block number, a read size, and the + total file size of the URL target. The data argument should be + valid URL encoded data. + + If a filename is passed and the URL points to a local resource, + the result is a copy from local file to new file. + + Returns a tuple containing the path to the newly created + data file as well as the resulting HTTPMessage object. + """ + from urllib.error import ContentTooShortError + from urllib.request import urlopen + + request = urllib.request.Request( + url, + data=data, + headers=headers, + ) + + with contextlib.closing(urlopen(request, timeout=timeout_in_seconds)) as fp: + response_headers = fp.info() + + size = -1 + read = 0 + if "content-length" in response_headers: + size = int(response_headers["Content-Length"]) + + yield (b'', size, response_headers) + + if timeout_in_seconds == -1.0: + while True: + block = fp.read(chunk_size) + if not block: + break + read += len(block) + yield (block, size, response_headers) + else: + while True: + block = read_with_timeout(fp, chunk_size, timeout_in_seconds=timeout_in_seconds) + if not block: + break + read += len(block) + yield (block, size, response_headers) + + if size >= 0 and read < size: + raise ContentTooShortError( + "retrieval incomplete: got only %i out of %i bytes" % (read, size), + response_headers, + ) + + +def url_retrieve_to_filepath_iter( + url: str, + filepath: str, + *, + headers: Dict[str, str], + data: Optional[Any] = None, + chunk_size: int, + timeout_in_seconds: float, +) -> Generator[Tuple[int, int, Any], None, None]: + # Handle temporary file setup. + with open(filepath, 'wb') as fh_output: + for block, size, response_headers in url_retrieve_to_data_iter( + url, + headers=headers, + data=data, + chunk_size=chunk_size, + timeout_in_seconds=timeout_in_seconds, + ): + fh_output.write(block) + yield (len(block), size, response_headers) + + +def filepath_retrieve_to_filepath_iter( + filepath_src: str, + filepath: str, + *, + chunk_size: int, + timeout_in_seconds: float, +) -> Generator[Tuple[int, int], None, None]: + # TODO: `timeout_in_seconds`. + # Handle temporary file setup. 
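Callers drive these generators to stream a download to disk while reporting progress after every chunk; a minimal sketch (URL and paths are illustrative):

    read_total = 0
    for read, size, _headers in url_retrieve_to_filepath_iter(
        "https://extensions.example.org/my_pkg-0.1.0.zip",
        "/tmp/my_pkg-0.1.0.zip",
        headers={},
        chunk_size=CHUNK_SIZE_DEFAULT,
        timeout_in_seconds=10.0,
    ):
        read_total += read
        if size > 0:
            pass  # Report `read_total` out of `size` bytes, e.g. via message_progress().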
+ with open(filepath_src, 'rb') as fh_input: + size = os.fstat(fh_input.fileno()).st_size + with open(filepath, 'wb') as fh_output: + while (block := fh_input.read(chunk_size)): + fh_output.write(block) + yield (len(block), size) + + +def url_retrieve_to_data_iter_or_filesystem( + path: str, + is_filesystem: bool, + headers: Dict[str, str], + chunk_size: int, + timeout_in_seconds: float, +) -> Generator[bytes, None, None]: + if is_filesystem: + with open(path, "rb") as fh_source: + while (block := fh_source.read(chunk_size)): + yield block + else: + for ( + block, + _size, + _response_headers, + ) in url_retrieve_to_data_iter( + path, + headers=headers, + chunk_size=chunk_size, + timeout_in_seconds=timeout_in_seconds, + ): + yield block + + +def url_retrieve_to_filepath_iter_or_filesystem( + path: str, + filepath: str, + is_filesystem: bool, + headers: Dict[str, str], + chunk_size: int, + timeout_in_seconds: float, +) -> Generator[Tuple[int, int], None, None]: + if is_filesystem: + yield from filepath_retrieve_to_filepath_iter( + path, + filepath, + chunk_size=chunk_size, + timeout_in_seconds=timeout_in_seconds, + ) + else: + for (read, size, _response_headers) in url_retrieve_to_filepath_iter( + path, + filepath, + headers=headers, + chunk_size=chunk_size, + timeout_in_seconds=timeout_in_seconds, + ): + yield (read, size) + + +def pkg_idname_is_valid_or_error(pkg_idname: str) -> Optional[str]: + if not pkg_idname.isidentifier(): + return "Not a valid identifier" + if "__" in pkg_idname: + return "Only single separators are supported" + if pkg_idname.startswith("_"): + return "Names must not start with a \"_\"" + if pkg_idname.endswith("_"): + return "Names must not end with a \"_\"" + return None + + +# ----------------------------------------------------------------------------- +# Manifest Validation (Generic Callbacks) +# +# NOTE: regarding the `strict` argument, this was added because we may want to tighten +# guidelines without causing existing repositories to fail. +# +# Strict is used: +# - When building packages. +# - When validating packages from the command line. +# +# However manifests from severs that don't adhere to strict rules are not prevented from loading. 
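Every validator that follows takes the field value plus the ``strict`` flag and returns ``None`` when the value is acceptable, or a short error fragment otherwise. A minimal sketch of that convention (the field and function name here are hypothetical):

    def pkg_manifest_validate_field_example(value: str, strict: bool) -> Optional[str]:
        if not value.strip():
            return "a non-empty string expected"
        if strict and len(value) > 64:
            return "a value no longer than 64 characters expected, found {:d}".format(len(value))
        return None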
+ +def pkg_manifest_validate_field_nop( + value: Any, + strict: bool, +) -> Optional[str]: + _ = strict, value + return None + + +def pkg_manifest_validate_field_any_non_empty_string( + value: str, + strict: bool, +) -> Optional[str]: + _ = strict + if not value.strip(): + return "A non-empty string expected" + return None + + +def pkg_manifest_validate_field_any_non_empty_string_stripped_no_control_chars( + value: str, + strict: bool, +) -> Optional[str]: + _ = strict + value_strip = value.strip() + if not value_strip: + return "a non-empty string expected" + if value != value_strip: + return "text without leading/trailing white space expected" + for _ in RE_CONTROL_CHARS.finditer(value): + return "text without any control characters expected" + return None + + +def pkg_manifest_validate_field_any_list_of_non_empty_strings(value: List[Any], strict: bool) -> Optional[str]: + _ = strict + for i, tag in enumerate(value): + if not isinstance(tag, str): + return "at index {:d} must be a string not a {:s}".format(i, str(type(tag))) + if not tag.strip(): + return "at index {:d} must be a non-empty string".format(i) + return None + + +def pkg_manifest_validate_field_any_non_empty_list_of_non_empty_strings( + value: List[Any], + strict: bool, +) -> Optional[str]: + if not value: + return "list may not be empty" + + return pkg_manifest_validate_field_any_list_of_non_empty_strings(value, strict) + + +def pkg_manifest_validate_field_any_version( + value: str, + strict: bool, +) -> Optional[str]: + _ = strict + if not RE_MANIFEST_SEMVER.match(value): + return "to be a semantic-version, found {!r}".format(value) + return None + + +def pkg_manifest_validate_field_any_version_primitive( + value: str, + strict: bool, +) -> Optional[str]: + _ = strict + # Parse simple `1.2.3`, `1.2` & `1` numbers. + for number in value.split("."): + if not number.isdigit(): + return "must be numbers separated by single \".\" characters, found \"{:s}\"".format(value) + return None + + +def pkg_manifest_validate_field_any_version_primitive_or_empty( + value: str, + strict: bool, +) -> Optional[str]: + if value: + return pkg_manifest_validate_field_any_version_primitive(value, strict) + return None + +# ----------------------------------------------------------------------------- +# Manifest Validation (Specific Callbacks) + + +def pkg_manifest_validate_field_idname(value: str, strict: bool) -> Optional[str]: + _ = strict + return pkg_idname_is_valid_or_error(value) + + +def pkg_manifest_validate_field_type(value: str, strict: bool) -> Optional[str]: + _ = strict + # NOTE: add "keymap" in the future. + value_expected = {"add-on", "theme"} + if value not in value_expected: + return "Expected to be one of [{:s}], found {!r}".format(", ".join(value_expected), value) + return None + + +def pkg_manifest_validate_field_tagline(value: str, strict: bool) -> Optional[str]: + if strict: + if (error := pkg_manifest_validate_field_any_non_empty_string_stripped_no_control_chars(value, strict)) is not None: + return error + + # Additional requirements. + if len(value) > 64: + return "a value no longer than 64 characters expected, found {:d}".format(len(value)) + # As we don't have a reliable (unicode aware) punctuation check, just check the last character is alpha/numeric. + if value[-1].isalnum(): + pass # OK. + elif value[-1] in {")", "]", "}"}: + pass # Allow closing brackets (sometimes used to mention formats). 
+ else: + return "alpha-numeric suffix expected, the string must not end with punctuation" + else: + if (error := pkg_manifest_validate_field_any_non_empty_string(value, strict)) is not None: + return error + + return None + + +def pkg_manifest_validate_field_wheels( + value: List[Any], + strict: bool, +) -> Optional[str]: + if (error := pkg_manifest_validate_field_any_list_of_non_empty_strings(value, strict)) is not None: + return error + # Enforce naming spec: + # https://packaging.python.org/en/latest/specifications/binary-distribution-format/#file-name-convention + # This also defines the name spec: + filename_spec = "{distribution}-{version}(-{build tag})?-{python tag}-{abi tag}-{platform tag}.whl" + + for wheel in value: + if "\\" in wheel: + return "wheel paths must use forward slashes, found {!r}".format(wheel) + + wheel_filename = os.path.basename(wheel) + if not wheel_filename.lower().endswith(".whl"): + return "wheel paths must end with \".whl\", found {!r}".format(wheel) + + wheel_filename_split = wheel_filename.split("-") + if not (5 <= len(wheel_filename_split) <= 6): + return "wheel filename must follow the spec \"{:s}\", found {!r}".format(filename_spec, wheel_filename) + + return None + + +def pkg_manifest_validate_field_archive_size( + value: int, + strict: bool, +) -> Optional[str]: + _ = strict + if value <= 0: + return "to be a positive integer, found {!r}".format(value) + return None + + +def pkg_manifest_validate_field_archive_hash( + value: str, + strict: bool, +) -> Optional[str]: + _ = strict + import string + # Expect: `sha256:{HASH}`. + # In the future we may support multiple hash types. + value_hash_type, value_sep, x_val_hash_data = value.partition(":") + if not value_sep: + return "Must have a \":\" separator {!r}".format(value) + del value_sep + if value_hash_type == "sha256": + if len(x_val_hash_data) != 64 or x_val_hash_data.strip(string.hexdigits): + return "Must be 64 hex-digits, found {!r}".format(value) + else: + return "Must use a prefix in [\"sha256\"], found {!r}".format(value_hash_type) + return None + + +# Keep in sync with `PkgManifest`. +# key, type, check_fn. +pkg_manifest_known_keys_and_types: Tuple[ + Tuple[str, type, Callable[[Any, bool], Optional[str]]], + ..., +] = ( + ("id", str, pkg_manifest_validate_field_idname), + ("schema_version", str, pkg_manifest_validate_field_any_version), + ("name", str, pkg_manifest_validate_field_any_non_empty_string_stripped_no_control_chars), + ("tagline", str, pkg_manifest_validate_field_tagline), + ("version", str, pkg_manifest_validate_field_any_version), + ("type", str, pkg_manifest_validate_field_type), + ("maintainer", str, pkg_manifest_validate_field_any_non_empty_string_stripped_no_control_chars), + ("license", list, pkg_manifest_validate_field_any_non_empty_list_of_non_empty_strings), + ("blender_version_min", str, pkg_manifest_validate_field_any_version_primitive), + + # Optional. + ("blender_version_max", str, pkg_manifest_validate_field_any_version_primitive_or_empty), + ("website", str, pkg_manifest_validate_field_any_non_empty_string_stripped_no_control_chars), + ("copyright", list, pkg_manifest_validate_field_any_non_empty_list_of_non_empty_strings), + ("permissions", list, pkg_manifest_validate_field_any_list_of_non_empty_strings), + ("tags", list, pkg_manifest_validate_field_any_non_empty_list_of_non_empty_strings), + ("wheels", list, pkg_manifest_validate_field_wheels), +) + +# Keep in sync with `PkgManifest_Archive`. 
+pkg_manifest_known_keys_and_types_from_repo: Tuple[ + Tuple[str, type, Callable[[Any, bool], Optional[str]]], + ..., +] = ( + ("archive_size", int, pkg_manifest_validate_field_archive_size), + ("archive_hash", str, pkg_manifest_validate_field_archive_hash), + ("archive_url", str, pkg_manifest_validate_field_nop), +) + + +# ----------------------------------------------------------------------------- +# Manifest Validation + +def pkg_manifest_is_valid_or_error_impl( + data: Dict[str, Any], + *, + from_repo: bool, + all_errors: bool, + strict: bool, +) -> Optional[List[str]]: + if not isinstance(data, dict): + return ["Expected value to be a dict, not a {!r}".format(type(data))] + + assert len(pkg_manifest_known_keys_and_types) == len(PkgManifest._fields) + # -1 because the manifest is an item. + assert len(pkg_manifest_known_keys_and_types_from_repo) == len(PkgManifest_Archive._fields) - 1 + + error_list = [] + + value_extract: Dict[str, Optional[object]] = {} + for known_types in ( + (pkg_manifest_known_keys_and_types, pkg_manifest_known_keys_and_types_from_repo) if from_repo else + (pkg_manifest_known_keys_and_types, ) + ): + for x_key, x_ty, x_check_fn in known_types: + is_default_value = False + x_val = data.get(x_key, ...) + if x_val is ...: + x_val = PkgManifest._field_defaults.get(x_key, ...) + if from_repo: + if x_val is ...: + x_val = PkgManifest_Archive._field_defaults.get(x_key, ...) + if x_val is ...: + error_list.append("missing \"{:s}\"".format(x_key)) + if not all_errors: + return error_list + else: + is_default_value = True + value_extract[x_key] = x_val + continue + + # When the default value is None, skip all type checks. + if not (is_default_value and x_val is None): + if x_ty is None: + pass + elif isinstance(x_val, x_ty): + pass + else: + error_list.append("\"{:s}\" must be a {:s}, not a {:s}".format( + x_key, + x_ty.__name__, + type(x_val).__name__, + )) + if not all_errors: + return error_list + continue + + if (error_msg := x_check_fn(x_val, strict)) is not None: + error_list.append("key \"{:s}\" invalid: {:s}".format(x_key, error_msg)) + if not all_errors: + return error_list + continue + + value_extract[x_key] = x_val + + if error_list: + assert all_errors + return error_list + + return None + + +def pkg_manifest_is_valid_or_error( + data: Dict[str, Any], + *, + from_repo: bool, + strict: bool, +) -> Optional[str]: + error_list = pkg_manifest_is_valid_or_error_impl( + data, + from_repo=from_repo, + all_errors=False, + strict=strict, + ) + if isinstance(error_list, list): + return error_list[0] + return None + + +def pkg_manifest_is_valid_or_error_all( + data: Dict[str, Any], + *, + from_repo: bool, + strict: bool, +) -> Optional[List[str]]: + return pkg_manifest_is_valid_or_error_impl( + data, + from_repo=from_repo, + all_errors=True, + strict=strict, + ) + + +# ----------------------------------------------------------------------------- +# Standalone Utilities + + +def url_request_headers_create(*, accept_json: bool, user_agent: str) -> Dict[str, str]: + headers = {} + if accept_json: + # Default for JSON requests this allows top-level URL's to be used. + headers["Accept"] = "application/json" + + if user_agent: + # Typically: `Blender/4.2.0 (Linux x84_64; cycle=alpha)`. 
+ headers["User-Agent"] = user_agent + return headers + + +def repo_json_is_valid_or_error(filepath: str) -> Optional[str]: + if not os.path.exists(filepath): + return "File missing: " + filepath + + try: + with open(filepath, "r", encoding="utf-8") as fh: + result = json.load(fh) + except BaseException as ex: + return str(ex) + + if not isinstance(result, dict): + return "Expected a dictionary, not a {!r}".format(type(result)) + + if (value := result.get("version")) is None: + return "Expected a \"version\" key which was not found" + if not isinstance(value, str): + return "Expected \"version\" value to be a version string" + + if (value := result.get("blocklist")) is not None: + if not isinstance(value, list): + return "Expected \"blocklist\" to be a list, not a {:s}".format(str(type(value))) + for item in value: + if isinstance(item, str): + continue + return "Expected \"blocklist\" to be a list of strings, found {:s}".format(str(type(item))) + + if (value := result.get("data")) is None: + return "Expected a \"data\" key which was not found" + if not isinstance(value, list): + return "Expected \"data\" value to be a list" + + for i, item in enumerate(value): + + if (pkg_idname := item.get("id")) is None: + return "Expected item at index {:d} to have an \"id\"".format(i) + + if not isinstance(pkg_idname, str): + return "Expected item at index {:d} to have a string id, not a {!r}".format(i, type(pkg_idname)) + + if (error_msg := pkg_idname_is_valid_or_error(pkg_idname)) is not None: + return "Expected key at index {:d} to be an identifier, \"{:s}\" failed: {:s}".format( + i, pkg_idname, error_msg, + ) + + if (error_msg := pkg_manifest_is_valid_or_error(item, from_repo=True, strict=False)) is not None: + return "Error at index {:d}: {:s}".format(i, error_msg) + + return None + + +def pkg_manifest_toml_is_valid_or_error(filepath: str, strict: bool) -> Tuple[Optional[str], Dict[str, Any]]: + if not os.path.exists(filepath): + return "File missing: " + filepath, {} + + try: + with open(filepath, "rb") as fh: + result = tomllib.load(fh) + except BaseException as ex: + return str(ex), {} + + error = pkg_manifest_is_valid_or_error(result, from_repo=False, strict=strict) + if error is not None: + return error, {} + return None, result + + +def toml_from_bytes(data: bytes) -> Optional[Dict[str, Any]]: + result = tomllib.loads(data.decode('utf-8')) + assert isinstance(result, dict) + return result + + +def toml_from_filepath(filepath: str) -> Optional[Dict[str, Any]]: + with open(filepath, "rb") as fh: + data = fh.read() + result = toml_from_bytes(data) + return result + + +def repo_local_private_dir(*, local_dir: str) -> str: + """ + Ensure the repos hidden directory exists. + """ + return os.path.join(local_dir, REPO_LOCAL_PRIVATE_DIR) + + +def repo_local_private_dir_ensure(*, local_dir: str) -> str: + """ + Ensure the repos hidden directory exists. + """ + local_private_dir = repo_local_private_dir(local_dir=local_dir) + if not os.path.isdir(local_private_dir): + os.mkdir(local_private_dir) + return local_private_dir + + +def repo_local_private_dir_ensure_with_subdir(*, local_dir: str, subdir: str) -> str: + """ + Return a local directory used to cache package downloads. 
+ """ + local_private_subdir = os.path.join(repo_local_private_dir_ensure(local_dir=local_dir), subdir) + if not os.path.isdir(local_private_subdir): + os.mkdir(local_private_subdir) + return local_private_subdir + + +def repo_sync_from_remote( + *, + msg_fn: MessageFn, + remote_url: str, + local_dir: str, + online_user_agent: str, + timeout_in_seconds: float, + extension_override: str, +) -> bool: + """ + Load package information into the local path. + """ + request_exit = False + request_exit |= message_status(msg_fn, "Sync repo: {:s}".format(remote_url)) + if request_exit: + return False + + is_repo_filesystem = repo_is_filesystem(remote_url=remote_url) + if is_repo_filesystem: + remote_json_path = os.path.join(remote_url, PKG_REPO_LIST_FILENAME) + else: + remote_json_path = remote_url_get(remote_url) + + local_private_dir = repo_local_private_dir_ensure(local_dir=local_dir) + local_json_path = os.path.join(local_private_dir, PKG_REPO_LIST_FILENAME) + local_json_path_temp = local_json_path + "@" + + assert extension_override != "@" + if extension_override: + local_json_path = local_json_path + extension_override + + if os.path.exists(local_json_path_temp): + os.unlink(local_json_path_temp) + + with CleanupPathsContext(files=(local_json_path_temp,), directories=()): + # TODO: time-out. + request_exit |= message_status(msg_fn, "Sync downloading remote data") + if request_exit: + return False + + # No progress for file copying, assume local file system is fast enough. + # `shutil.copyfile(remote_json_path, local_json_path_temp)`. + try: + read_total = 0 + for (read, size) in url_retrieve_to_filepath_iter_or_filesystem( + remote_json_path, + local_json_path_temp, + is_filesystem=is_repo_filesystem, + headers=url_request_headers_create(accept_json=True, user_agent=online_user_agent), + chunk_size=CHUNK_SIZE_DEFAULT, + timeout_in_seconds=timeout_in_seconds, + ): + request_exit |= message_progress(msg_fn, "Downloading...", read_total, size, 'BYTE') + if request_exit: + break + read_total += read + del read_total + + except FileNotFoundError as ex: + message_error(msg_fn, "sync: file-not-found ({:s}) reading {!r}!".format(str(ex), remote_url)) + return False + except TimeoutError as ex: + message_error(msg_fn, "sync: timeout ({:s}) reading {!r}!".format(str(ex), remote_url)) + return False + except urllib.error.URLError as ex: + message_error(msg_fn, "sync: URL error ({:s}) reading {!r}!".format(str(ex), remote_url)) + return False + except BaseException as ex: + message_error(msg_fn, "sync: unexpected error ({:s}) reading {!r}!".format(str(ex), remote_url)) + return False + + if request_exit: + return False + + error_msg = repo_json_is_valid_or_error(local_json_path_temp) + if error_msg is not None: + message_error(msg_fn, "sync: invalid manifest ({:s}) reading {!r}!".format(error_msg, remote_url)) + return False + del error_msg + + request_exit |= message_status(msg_fn, "Sync complete: {:s}".format(remote_url)) + if request_exit: + return False + + if os.path.exists(local_json_path): + os.unlink(local_json_path) + + # If this is a valid JSON, overwrite the existing file. + os.rename(local_json_path_temp, local_json_path) + + if extension_override: + request_exit |= message_path(msg_fn, os.path.relpath(local_json_path, local_dir)) + + return True + + +def repo_pkginfo_from_local(*, local_dir: str) -> Optional[Dict[str, Any]]: + """ + Load package cache. 
+ """ + local_private_dir = repo_local_private_dir(local_dir=local_dir) + local_json_path = os.path.join(local_private_dir, PKG_REPO_LIST_FILENAME) + if not os.path.exists(local_json_path): + return None + + with open(local_json_path, "r", encoding="utf-8") as fh: + result = json.load(fh) + assert isinstance(result, dict) + + return result + + +def pkg_repo_dat_from_json(json_data: Dict[str, Any]) -> PkgRepoData: + result_new = PkgRepoData( + version=json_data.get("version", "v1"), + blocklist=json_data.get("blocklist", []), + data=json_data.get("data", []), + ) + return result_new + + +def repo_pkginfo_from_local_with_idname_as_key(*, local_dir: str) -> Optional[PkgRepoData]: + result = repo_pkginfo_from_local(local_dir=local_dir) + if result is None: + return None + return pkg_repo_dat_from_json(result) + + +def repo_is_filesystem(*, remote_url: str) -> bool: + if remote_url.startswith(("https://", "http://")): + return False + return True + + +# ----------------------------------------------------------------------------- +# Generate Argument Handlers + +def arg_handle_int_as_bool(value: str) -> bool: + result = int(value) + if result not in {0, 1}: + raise argparse.ArgumentTypeError("Expected a 0 or 1") + return bool(result) + + +def arg_handle_str_as_package_names(value: str) -> Sequence[str]: + result = value.split(",") + for pkg_idname in result: + if (error_msg := pkg_idname_is_valid_or_error(pkg_idname)) is not None: + raise argparse.ArgumentTypeError("Invalid name \"{:s}\". {:s}".format(pkg_idname, error_msg)) + return result + + +# ----------------------------------------------------------------------------- +# Generate Repository + + +def generic_arg_package_list_positional(subparse: argparse.ArgumentParser) -> None: + subparse.add_argument( + dest="packages", + type=str, + help=( + "The packages to operate on (separated by ``,`` without spaces)." + ), + ) + + +def generic_arg_file_list_positional(subparse: argparse.ArgumentParser) -> None: + subparse.add_argument( + dest="files", + type=str, + nargs="+", + help=( + "The files to operate on (one or more arguments)." + ), + ) + + +def generic_arg_repo_dir(subparse: argparse.ArgumentParser) -> None: + subparse.add_argument( + "--repo-dir", + dest="repo_dir", + type=str, + help=( + "The remote repository directory." + ), + required=True, + ) + + +def generic_arg_remote_url(subparse: argparse.ArgumentParser) -> None: + subparse.add_argument( + "--remote-url", + dest="remote_url", + type=str, + help=( + "The remote repository URL." + ), + required=True, + ) + + +def generic_arg_local_dir(subparse: argparse.ArgumentParser) -> None: + subparse.add_argument( + "--local-dir", + dest="local_dir", + type=str, + help=( + "The local checkout." + ), + required=True, + ) + + +# Only for authoring. 
+def generic_arg_package_source_path_positional(subparse: argparse.ArgumentParser) -> None: + subparse.add_argument( + dest="source_path", + type=str, + nargs="?", + default=".", + metavar="SOURCE_PATH", + help=( + "The package source path " + "(either directory containing package files or the package archive).\n" + "This path must containing a ``{:s}`` manifest.\n" + "\n" + "The current directory ``.`` is default.".format(PKG_MANIFEST_FILENAME_TOML) + ), + ) + + +def generic_arg_package_source_dir(subparse: argparse.ArgumentParser) -> None: + subparse.add_argument( + "--source-dir", + dest="source_dir", + type=str, + help=( + "The package source directory containing a ``{:s}`` manifest.".format(PKG_MANIFEST_FILENAME_TOML) + ), + default=".", + ) + + +def generic_arg_package_output_dir(subparse: argparse.ArgumentParser) -> None: + subparse.add_argument( + "--output-dir", + dest="output_dir", + type=str, + help=( + "The package output directory." + ), + default=".", + ) + + +def generic_arg_package_output_filepath(subparse: argparse.ArgumentParser) -> None: + subparse.add_argument( + "--output-filepath", + dest="output_filepath", + type=str, + help=( + "The package output filepath (should include a ``{:s}`` extension).".format(PKG_EXT) + ), + default=".", + ) + + +def generic_arg_output_type(subparse: argparse.ArgumentParser) -> None: + subparse.add_argument( + "--output-type", + dest="output_type", + type=str, + choices=('TEXT', 'JSON', 'JSON_0'), + default='TEXT', + help=( + "The output type:\n" + "\n" + "- TEXT: Plain text.\n" + "- JSON: Separated by new-lines.\n" + "- JSON_0: Separated null bytes.\n" + ), + required=False, + ) + + +def generic_arg_local_cache(subparse: argparse.ArgumentParser) -> None: + subparse.add_argument( + "--local-cache", + dest="local_cache", + type=arg_handle_int_as_bool, + help=( + "Use local cache, when disabled packages always download from remote." + ), + default=True, + required=False, + ) + + +def generic_arg_online_user_agent(subparse: argparse.ArgumentParser) -> None: + subparse.add_argument( + "--online-user-agent", + dest="online_user_agent", + type=str, + help=( + "Use user-agent used for making web requests. " + "Some web sites will reject requests when unset." + ), + default="", + required=False, + ) + + +def generic_arg_timeout(subparse: argparse.ArgumentParser) -> None: + subparse.add_argument( + "--timeout", + dest="timeout", + type=float, + help=( + "Timeout when reading from remote location." + ), + default=10.0, + required=False, + ) + + +def generic_arg_ignore_broken_pipe(subparse: argparse.ArgumentParser) -> None: + subparse.add_argument( + "--force-exit-ok", + dest="force_exit_ok", + action="store_true", + default=False, + help=( + "Silently ignore broken pipe, use when the caller may disconnect." + ), + ) + + +def generic_arg_extension_override(subparse: argparse.ArgumentParser) -> None: + subparse.add_argument( + "--extension-override", + dest="extension_override", + type=str, + help=( + "Use a non-standard extension. " + "When a non-empty string, this extension is appended to paths written to. " + "This allows the actual repository file to be left untouched so it can be replaced " + "by the caller which can handle locking the repository." 
+ ), + default="", + required=False, + ) + + +class subcmd_server: + + def __new__(cls) -> Any: + raise RuntimeError("{:s} should not be instantiated".format(cls)) + + @staticmethod + def generate( + msg_fn: MessageFn, + *, + repo_dir: str, + ) -> bool: + + is_repo_filesystem = repo_is_filesystem(remote_url=repo_dir) + if not is_repo_filesystem: + message_error(msg_fn, "Directory: {!r} must be local!".format(repo_dir)) + return False + + if not os.path.isdir(repo_dir): + message_error(msg_fn, "Directory: {!r} not found!".format(repo_dir)) + return False + + repo_data_idname_unique: Set[str] = set() + repo_data: List[Dict[str, Any]] = [] + # Write package meta-data into each directory. + repo_gen_dict = { + "version": "1", + "blocklist": [], + "data": repo_data, + } + for entry in os.scandir(repo_dir): + if not entry.name.endswith(PKG_EXT): + continue + + # Harmless, but skip directories. + if entry.is_dir(): + message_warn(msg_fn, "found unexpected directory {!r}".format(entry.name)) + continue + + filename = entry.name + filepath = os.path.join(repo_dir, filename) + manifest = pkg_manifest_from_archive_and_validate(filepath, strict=False) + if isinstance(manifest, str): + message_warn(msg_fn, "archive validation failed {!r}, error: {:s}".format(filepath, manifest)) + continue + manifest_dict = manifest._asdict() + + repo_data_idname_unique_len = len(repo_data_idname_unique) + repo_data_idname_unique.add(manifest_dict["id"]) + if len(repo_data_idname_unique) == repo_data_idname_unique_len: + message_warn(msg_fn, "archive found with duplicate id {!r}, {!r}".format(manifest_dict["id"], filepath)) + continue + + # Call all optional keys so the JSON never contains `null` items. + for key, value in list(manifest_dict.items()): + if value is None: + del manifest_dict[key] + + # These are added, ensure they don't exist. + has_key_error = False + for key in ("archive_url", "archive_size", "archive_hash"): + if key not in manifest_dict: + continue + message_warn( + msg_fn, + "malformed meta-data from {!r}, contains key it shouldn't: {:s}".format(filepath, key), + ) + has_key_error = True + if has_key_error: + continue + + # A relative URL. + manifest_dict["archive_url"] = "./" + filename + + # Add archive variables, see: `PkgManifest_Archive`. + ( + manifest_dict["archive_size"], + manifest_dict["archive_hash"], + ) = sha256_from_file(filepath, hash_prefix=True) + + repo_data.append(manifest_dict) + + filepath_repo_json = os.path.join(repo_dir, PKG_REPO_LIST_FILENAME) + + with open(filepath_repo_json, "w", encoding="utf-8") as fh: + json.dump(repo_gen_dict, fh, indent=2) + message_status(msg_fn, "found {:d} packages.".format(len(repo_data))) + + return True + + +class subcmd_client: + + def __new__(cls) -> Any: + raise RuntimeError("{:s} should not be instantiated".format(cls)) + + @staticmethod + def list_packages( + msg_fn: MessageFn, + remote_url: str, + online_user_agent: str, + timeout_in_seconds: float, + ) -> bool: + is_repo_filesystem = repo_is_filesystem(remote_url=remote_url) + if is_repo_filesystem: + if not os.path.isdir(remote_url): + message_error(msg_fn, "Directory: {!r} not found!".format(remote_url)) + return False + + if is_repo_filesystem: + filepath_repo_json = os.path.join(remote_url, PKG_REPO_LIST_FILENAME) + if not os.path.exists(filepath_repo_json): + message_error(msg_fn, "File: {!r} not found!".format(filepath_repo_json)) + return False + else: + filepath_repo_json = remote_url_get(remote_url) + + # TODO: validate JSON content. 
+ try: + result = io.BytesIO() + for block in url_retrieve_to_data_iter_or_filesystem( + filepath_repo_json, + is_filesystem=is_repo_filesystem, + headers=url_request_headers_create(accept_json=True, user_agent=online_user_agent), + chunk_size=CHUNK_SIZE_DEFAULT, + timeout_in_seconds=timeout_in_seconds, + ): + result.write(block) + + except FileNotFoundError as ex: + message_error(msg_fn, "list: file-not-found ({:s}) reading {!r}!".format(str(ex), remote_url)) + return False + except TimeoutError as ex: + message_error(msg_fn, "list: timeout ({:s}) reading {!r}!".format(str(ex), remote_url)) + return False + except urllib.error.URLError as ex: + message_error(msg_fn, "list: URL error ({:s}) reading {!r}!".format(str(ex), remote_url)) + return False + except BaseException as ex: + message_error(msg_fn, "list: unexpected error ({:s}) reading {!r}!".format(str(ex), remote_url)) + return False + + result_str = result.getvalue().decode("utf-8") + del result + + repo_gen_dict = pkg_repo_dat_from_json(json.loads(result_str)) + + items: List[Dict[str, Any]] = repo_gen_dict.data + items.sort(key=lambda elem: elem.get("id", "")) + + request_exit = False + for elem in items: + request_exit |= message_status( + msg_fn, + "{:s}({:s}): {:s}".format(elem.get("id"), elem.get("version"), elem.get("name")), + ) + if request_exit: + return False + + return True + + @staticmethod + def sync( + msg_fn: MessageFn, + *, + remote_url: str, + local_dir: str, + online_user_agent: str, + timeout_in_seconds: float, + force_exit_ok: bool, + extension_override: str, + ) -> bool: + if force_exit_ok: + force_exit_ok_enable() + + success = repo_sync_from_remote( + msg_fn=msg_fn, + remote_url=remote_url, + local_dir=local_dir, + online_user_agent=online_user_agent, + timeout_in_seconds=timeout_in_seconds, + extension_override=extension_override, + ) + return success + + @staticmethod + def _install_package_from_file_impl( + msg_fn: MessageFn, + *, + local_dir: str, + filepath_archive: str, + manifest_compare: Optional[PkgManifest], + ) -> bool: + # Implement installing a package to a repository. + # Used for installing from local cache as well as installing a local package from a file. + + # Remove `filepath_local_pkg_temp` if this block exits. + directories_to_clean: List[str] = [] + with CleanupPathsContext(files=(), directories=directories_to_clean): + try: + zip_fh_context = zipfile.ZipFile(filepath_archive, mode="r") + except BaseException as ex: + message_warn( + msg_fn, + "Error extracting archive: {:s}".format(str(ex)), + ) + return False + + with contextlib.closing(zip_fh_context) as zip_fh: + archive_subdir = pkg_zipfile_detect_subdir_or_none(zip_fh) + if archive_subdir is None: + message_warn( + msg_fn, + "Missing manifest from: {:s}".format(filepath_archive), + ) + return False + + manifest = pkg_manifest_from_zipfile_and_validate(zip_fh, archive_subdir, strict=False) + if isinstance(manifest, str): + message_warn( + msg_fn, + "Error loading manifest from: {:s}".format(manifest), + ) + return False + + if manifest_compare is not None: + # The archive ID name must match the server name, + # otherwise the package will install but not be able to collate + # the installed package with the remote ID. 
+ if manifest_compare.id != manifest.id: + message_warn( + msg_fn, + "Package ID mismatch (remote: \"{:s}\", archive: \"{:s}\")".format( + manifest_compare.id, + manifest.id, + ) + ) + return False + if manifest_compare.version != manifest.version: + message_warn( + msg_fn, + "Package version mismatch (remote: \"{:s}\", archive: \"{:s}\")".format( + manifest_compare.version, + manifest.version, + ) + ) + return False + + # We have the cache, extract it to a directory. + # This will be a directory. + filepath_local_pkg = os.path.join(local_dir, manifest.id) + + # First extract into a temporary directory, validate the package is not corrupt, + # then move the package to it's expected location. + filepath_local_pkg_temp = filepath_local_pkg + "@" + + # It's unlikely this exist, nevertheless if it does - it must be removed. + if os.path.isdir(filepath_local_pkg_temp): + shutil.rmtree(filepath_local_pkg_temp) + + directories_to_clean.append(filepath_local_pkg_temp) + + if archive_subdir: + zipfile_make_root_directory(zip_fh, archive_subdir) + del archive_subdir + + try: + for member in zip_fh.infolist(): + zip_fh.extract(member, filepath_local_pkg_temp) + except BaseException as ex: + message_warn( + msg_fn, + "Failed to extract files for \"{:s}\": {:s}".format(manifest.id, str(ex)), + ) + return False + + is_reinstall = False + if os.path.isdir(filepath_local_pkg): + shutil.rmtree(filepath_local_pkg) + is_reinstall = True + + os.rename(filepath_local_pkg_temp, filepath_local_pkg) + directories_to_clean.remove(filepath_local_pkg_temp) + + if is_reinstall: + message_status(msg_fn, "Re-Installed \"{:s}\"".format(manifest.id)) + else: + message_status(msg_fn, "Installed \"{:s}\"".format(manifest.id)) + + return True + + @staticmethod + def install_packages_from_files( + msg_fn: MessageFn, + *, + local_dir: str, + package_files: Sequence[str], + ) -> bool: + if not os.path.exists(local_dir): + message_error(msg_fn, "destination directory \"{:s}\" does not exist".format(local_dir)) + return False + + # This is a simple file extraction, the main difference is that it validates the manifest before installing. + directories_to_clean: List[str] = [] + with CleanupPathsContext(files=(), directories=directories_to_clean): + for filepath_archive in package_files: + if not subcmd_client._install_package_from_file_impl( + msg_fn, + local_dir=local_dir, + filepath_archive=filepath_archive, + # There is no manifest from the repository, leave this unset. + manifest_compare=None, + ): + # The package failed to install. + continue + + return True + + @staticmethod + def install_packages( + msg_fn: MessageFn, + *, + remote_url: str, + local_dir: str, + local_cache: bool, + packages: Sequence[str], + online_user_agent: str, + timeout_in_seconds: float, + ) -> bool: + # Extract... + is_repo_filesystem = repo_is_filesystem(remote_url=remote_url) + pkg_repo_data = repo_pkginfo_from_local_with_idname_as_key(local_dir=local_dir) + if pkg_repo_data is None: + # TODO: raise warning. + return False + + # Most likely this doesn't have duplicates,but any errors procured by duplicates + # are likely to be obtuse enough that it's better to guarantee there are none. + packages = tuple(sorted(set(packages))) + + # Ensure a private directory so a local cache can be created. + local_cache_dir = repo_local_private_dir_ensure_with_subdir(local_dir=local_dir, subdir="cache") + + # TODO: this could be optimized to only lookup known ID's. 
+ json_data_pkg_info_map: Dict[str, Dict[str, Any]] = { + pkg_info["id"]: pkg_info for pkg_info in pkg_repo_data.data + } + + has_error = False + packages_info: List[PkgManifest_Archive] = [] + for pkg_idname in packages: + pkg_info = json_data_pkg_info_map.get(pkg_idname) + if pkg_info is None: + message_error(msg_fn, "Package \"{:s}\", not found".format(pkg_idname)) + has_error = True + continue + manifest_archive = pkg_manifest_archive_from_dict_and_validate(pkg_info, strict=False) + if isinstance(manifest_archive, str): + message_error(msg_fn, "Package malformed meta-data for \"{:s}\", error: {:s}".format( + pkg_idname, + manifest_archive, + )) + has_error = True + continue + + packages_info.append(manifest_archive) + + if has_error: + return False + del has_error + + request_exit = False + + # Ensure all cache is cleared (when `local_cache` is disabled) no matter the cause of exiting. + files_to_clean: List[str] = [] + with CleanupPathsContext(files=files_to_clean, directories=()): + for manifest_archive in packages_info: + pkg_idname = manifest_archive.manifest.id + # Archive name. + archive_size_expected = manifest_archive.archive_size + archive_hash_expected = manifest_archive.archive_hash + pkg_archive_url = manifest_archive.archive_url + + # Local path. + filepath_local_cache_archive = os.path.join(local_cache_dir, pkg_idname + PKG_EXT) + + if not local_cache: + files_to_clean.append(filepath_local_cache_archive) + + # Remote path. + if pkg_archive_url.startswith("./"): + if is_repo_filesystem: + filepath_remote_archive = os.path.join(remote_url, pkg_archive_url[2:]) + else: + if REMOTE_REPO_HAS_JSON_IMPLIED: + # TODO: use `urllib.parse.urlsplit(..)`. + # NOTE: strip the path until the directory. + # Convert: `https://foo.bar/bl_ext_repo.json` -> https://foo.bar/ARCHIVE_NAME + filepath_remote_archive = urllib.parse.urljoin( + remote_url.rpartition("/")[0], + pkg_archive_url[2:], + ) + else: + filepath_remote_archive = urllib.parse.urljoin(remote_url, pkg_archive_url[2:]) + is_pkg_filesystem = is_repo_filesystem + else: + filepath_remote_archive = pkg_archive_url + is_pkg_filesystem = repo_is_filesystem(remote_url=pkg_archive_url) + + # Check if the cache should be used. + found = False + if os.path.exists(filepath_local_cache_archive): + if ( + local_cache and ( + archive_size_expected, + archive_hash_expected, + ) == sha256_from_file(filepath_local_cache_archive, hash_prefix=True) + ): + found = True + else: + os.unlink(filepath_local_cache_archive) + + if not found: + # Create `filepath_local_cache_archive`. + filename_archive_size_test = 0 + sha256 = hashlib.new('sha256') + + # NOTE(@ideasman42): There is more logic in the try/except block than I'd like. + # Refactoring could be done to avoid that but it ends up making logic difficult to follow. 
+ try: + with open(filepath_local_cache_archive, "wb") as fh_cache: + for block in url_retrieve_to_data_iter_or_filesystem( + filepath_remote_archive, + is_filesystem=is_pkg_filesystem, + headers=url_request_headers_create(accept_json=False, user_agent=online_user_agent), + chunk_size=CHUNK_SIZE_DEFAULT, + timeout_in_seconds=timeout_in_seconds, + ): + request_exit |= message_progress( + msg_fn, + "Downloading \"{:s}\"".format(pkg_idname), + filename_archive_size_test, + archive_size_expected, + 'BYTE', + ) + if request_exit: + break + fh_cache.write(block) + sha256.update(block) + filename_archive_size_test += len(block) + + except FileNotFoundError as ex: + message_error( + msg_fn, + "install: file-not-found ({:s}) reading {!r}!".format(str(ex), filepath_remote_archive), + ) + return False + except TimeoutError as ex: + message_error( + msg_fn, + "install: timeout ({:s}) reading {!r}!".format(str(ex), filepath_remote_archive), + ) + return False + except urllib.error.URLError as ex: + message_error( + msg_fn, + "install: URL error ({:s}) reading {!r}!".format(str(ex), filepath_remote_archive), + ) + return False + except BaseException as ex: + message_error( + msg_fn, + "install: unexpected error ({:s}) reading {!r}!".format(str(ex), filepath_remote_archive), + ) + return False + + if request_exit: + return False + + # Validate: + if filename_archive_size_test != archive_size_expected: + message_warn(msg_fn, "Archive size mismatch \"{:s}\", expected {:d}, was {:d}".format( + pkg_idname, + archive_size_expected, + filename_archive_size_test, + )) + return False + filename_archive_hash_test = "sha256:" + sha256.hexdigest() + if filename_archive_hash_test != archive_hash_expected: + message_warn(msg_fn, "Archive checksum mismatch \"{:s}\", expected {:s}, was {:s}".format( + pkg_idname, + archive_hash_expected, + filename_archive_hash_test, + )) + return False + del filename_archive_size_test + del filename_archive_hash_test + del found + del filepath_local_cache_archive + + # All packages have been downloaded, install them. + for manifest_archive in packages_info: + filepath_local_cache_archive = os.path.join(local_cache_dir, manifest_archive.manifest.id + PKG_EXT) + + if not subcmd_client._install_package_from_file_impl( + msg_fn, + local_dir=local_dir, + filepath_archive=filepath_local_cache_archive, + manifest_compare=manifest_archive.manifest, + ): + # The package failed to install. + continue + + return True + + @staticmethod + def uninstall_packages( + msg_fn: MessageFn, + *, + local_dir: str, + packages: Sequence[str], + ) -> bool: + if not os.path.isdir(local_dir): + message_error(msg_fn, "Missing local \"{:s}\"".format(local_dir)) + return False + + # Most likely this doesn't have duplicates,but any errors procured by duplicates + # are likely to be obtuse enough that it's better to guarantee there are none. + packages = tuple(sorted(set(packages))) + + packages_valid = [] + + error = False + for pkg_idname in packages: + # As this simply removes the directories right now, + # validate this path cannot be used for an unexpected outcome, + # or using `../../` to remove directories that shouldn't. + if (pkg_idname in {"", ".", ".."}) or ("\\" in pkg_idname or "/" in pkg_idname): + message_error(msg_fn, "Package name invalid \"{:s}\"".format(pkg_idname)) + error = True + continue + + # This will be a directory. 
+ filepath_local_pkg = os.path.join(local_dir, pkg_idname) + if not os.path.isdir(filepath_local_pkg): + message_error(msg_fn, "Package not found \"{:s}\"".format(pkg_idname)) + error = True + continue + + packages_valid.append(pkg_idname) + del filepath_local_pkg + + if error: + return False + + # Ensure a private directory so a local cache can be created. + # TODO: don't create (it's only accessed for file removal). + local_cache_dir = repo_local_private_dir_ensure_with_subdir(local_dir=local_dir, subdir="cache") + + files_to_clean: List[str] = [] + with CleanupPathsContext(files=files_to_clean, directories=()): + for pkg_idname in packages_valid: + filepath_local_pkg = os.path.join(local_dir, pkg_idname) + try: + shutil.rmtree(filepath_local_pkg) + except BaseException as ex: + message_error(msg_fn, "Failure to remove \"{:s}\" with error ({:s})".format(pkg_idname, str(ex))) + continue + + message_status(msg_fn, "Removed \"{:s}\"".format(pkg_idname)) + + filepath_local_cache_archive = os.path.join(local_cache_dir, pkg_idname + PKG_EXT) + if os.path.exists(filepath_local_cache_archive): + files_to_clean.append(filepath_local_cache_archive) + + return True + + +class subcmd_author: + + @staticmethod + def build( + msg_fn: MessageFn, + *, + pkg_source_dir: str, + pkg_output_dir: str, + pkg_output_filepath: str, + ) -> bool: + if not os.path.isdir(pkg_source_dir): + message_error(msg_fn, "Missing local \"{:s}\"".format(pkg_source_dir)) + return False + + if pkg_output_dir != "." and pkg_output_filepath != ".": + message_error(msg_fn, "Both output directory & output filepath set, set one or the other") + return False + + pkg_manifest_filepath = os.path.join(pkg_source_dir, PKG_MANIFEST_FILENAME_TOML) + + if not os.path.exists(pkg_manifest_filepath): + message_error(msg_fn, "File \"{:s}\" not found!".format(pkg_manifest_filepath)) + return False + + manifest = pkg_manifest_from_toml_and_validate_all_errors(pkg_manifest_filepath, strict=True) + if isinstance(manifest, list): + for error_msg in manifest: + message_error(msg_fn, "Error parsing TOML \"{:s}\" {:s}".format(pkg_manifest_filepath, error_msg)) + return False + + pkg_filename = manifest.id + PKG_EXT + + if pkg_output_filepath != ".": + outfile = pkg_output_filepath + else: + outfile = os.path.join(pkg_output_dir, pkg_filename) + + outfile_temp = outfile + "@" + + filenames_root_exclude = { + pkg_filename, + # It's possible a temporary file exists from a previous run which was not cleaned up. + # Although in general this should be cleaned up - power failure etc may mean it exists. + pkg_filename + "@", + # This is added, converted from the TOML. + PKG_REPO_LIST_FILENAME, + + # We could exclude the manifest: `PKG_MANIFEST_FILENAME_TOML` + # but it's now used so a generation step isn't needed. + } + + request_exit = False + + request_exit |= message_status(msg_fn, "Building {:s}".format(pkg_filename)) + if request_exit: + return False + + with CleanupPathsContext(files=(outfile_temp,), directories=()): + try: + zip_fh_context = zipfile.ZipFile(outfile_temp, 'w', zipfile.ZIP_LZMA) + except BaseException as ex: + message_status(msg_fn, "Error creating archive \"{:s}\"".format(str(ex))) + return False + + with contextlib.closing(zip_fh_context) as zip_fh: + for filepath_abs, filepath_rel in scandir_recursive( + pkg_source_dir, + # Be more advanced in the future, for now ignore dot-files (`.git`) .. etc. 
+ filter_fn=lambda x: not x.startswith(".") + ): + if filepath_rel in filenames_root_exclude: + continue + + # Handy for testing that sub-directories: + # zip_fh.write(filepath_abs, manifest.id + "/" + filepath_rel) + compress_type = zipfile.ZIP_STORED if filepath_skip_compress(filepath_abs) else None + try: + zip_fh.write(filepath_abs, filepath_rel, compress_type=compress_type) + except BaseException as ex: + message_status(msg_fn, "Error adding to archive \"{:s}\"".format(str(ex))) + return False + + request_exit |= message_status(msg_fn, "complete") + if request_exit: + return False + + if os.path.exists(outfile): + os.unlink(outfile) + os.rename(outfile_temp, outfile) + + message_status(msg_fn, "created \"{:s}\", {:d}".format(outfile, os.path.getsize(outfile))) + return True + + @staticmethod + def _validate_directory( + msg_fn: MessageFn, + *, + pkg_source_dir: str, + ) -> bool: + pkg_manifest_filepath = os.path.join(pkg_source_dir, PKG_MANIFEST_FILENAME_TOML) + + if not os.path.exists(pkg_manifest_filepath): + message_error(msg_fn, "Error, file \"{:s}\" not found!".format(pkg_manifest_filepath)) + return False + + # Demote errors to status as the function of this action is to check the manifest is stable. + manifest = pkg_manifest_from_toml_and_validate_all_errors(pkg_manifest_filepath, strict=True) + if isinstance(manifest, list): + message_status(msg_fn, "Error parsing TOML \"{:s}\"".format(pkg_manifest_filepath)) + for error_msg in manifest: + message_status(msg_fn, error_msg) + return False + + expected_files = [] + if manifest.type == "add-on": + expected_files.append("__init__.py") + ok = True + for filepath in expected_files: + if not os.path.exists(os.path.join(pkg_source_dir, filepath)): + message_status(msg_fn, "Error, file missing from {:s}: \"{:s}\"".format( + manifest.type, + filepath, + )) + ok = False + if not ok: + return False + + message_status(msg_fn, "Success parsing TOML in \"{:s}\"".format(pkg_source_dir)) + return True + + @staticmethod + def _validate_archive( + msg_fn: MessageFn, + *, + pkg_source_archive: str, + ) -> bool: + # NOTE(@ideasman42): having `_validate_directory` & `_validate_archive` + # use separate code-paths isn't ideal in some respects however currently the difference + # doesn't cause a lot of duplication. + # + # Operate on the archive directly because: + # - Validating the manifest *does* use shared logic. + # - It's faster for large archives or archives with a large number of files + # which will litter the file-system. + # - There is already a validation function which is used before installing an archive. + # + # If it's ever causes too much code-duplication we can always + # extract the archive into a temporary directory and run validation there. + + try: + zip_fh_context = zipfile.ZipFile(pkg_source_archive, mode="r") + except BaseException as ex: + message_status(msg_fn, "Error extracting archive \"{:s}\"".format(str(ex))) + return False + + with contextlib.closing(zip_fh_context) as zip_fh: + if (archive_subdir := pkg_zipfile_detect_subdir_or_none(zip_fh)) is None: + message_status(msg_fn, "Error, archive has no manifest: \"{:s}\"".format(PKG_MANIFEST_FILENAME_TOML)) + return False + # Demote errors to status as the function of this action is to check the manifest is stable. 
+ manifest = pkg_manifest_from_zipfile_and_validate_all_errors(zip_fh, archive_subdir, strict=True) + if isinstance(manifest, list): + message_status(msg_fn, "Error parsing TOML in \"{:s}\"".format(pkg_source_archive)) + for error_msg in manifest: + message_status(msg_fn, error_msg) + return False + + # NOTE: this is arguably *not* manifest validation, the check could be refactored out. + # Currently we always want to check both and it's useful to do that while the informatio + expected_files = [] + if manifest.type == "add-on": + if archive_subdir: + assert archive_subdir.endswith("/") + expected_files.append(archive_subdir + "__init__.py") + else: + expected_files.append("__init__.py") + ok = True + for filepath in expected_files: + if zip_fh.NameToInfo.get(filepath) is None: + message_status(msg_fn, "Error, file missing from {:s}: \"{:s}\"".format( + manifest.type, + filepath, + )) + ok = False + if not ok: + return False + + message_status(msg_fn, "Success parsing TOML in \"{:s}\"".format(pkg_source_archive)) + return True + + @staticmethod + def validate( + msg_fn: MessageFn, + *, + source_path: str, + ) -> bool: + if os.path.isdir(source_path): + result = subcmd_author._validate_directory(msg_fn, pkg_source_dir=source_path) + else: + result = subcmd_author._validate_archive(msg_fn, pkg_source_archive=source_path) + return result + + +class subcmd_dummy: + + @staticmethod + def repo( + msg_fn: MessageFn, + *, + repo_dir: str, + package_names: Sequence[str], + ) -> bool: + + def msg_fn_no_done(ty: str, data: PrimTypeOrSeq) -> bool: + if ty == 'DONE': + return False + return msg_fn(ty, data) + + # Ensure package names are valid. + package_names = tuple(set(package_names)) + for pkg_idname in package_names: + if (error_msg := pkg_idname_is_valid_or_error(pkg_idname)) is None: + continue + message_error( + msg_fn, + "key \"id\", \"{:s}\" doesn't match expected format, \"{:s}\"".format(pkg_idname, error_msg), + ) + return False + + if not repo_is_filesystem(remote_url=repo_dir): + message_error(msg_fn, "Generating a repository on a remote path is not supported") + return False + + # Unlike most other commands, create the repo_dir it doesn't already exist. + if not os.path.exists(repo_dir): + try: + os.makedirs(repo_dir) + except BaseException as ex: + message_error(msg_fn, "Failed to create \"{:s}\" with error: {!r}".format(repo_dir, ex)) + return False + + import tempfile + for pkg_idname in package_names: + with tempfile.TemporaryDirectory(suffix="pkg-repo") as temp_dir_source: + pkg_src_dir = os.path.join(temp_dir_source, pkg_idname) + os.makedirs(pkg_src_dir) + pkg_name = pkg_idname.replace("_", " ").title() + with open(os.path.join(pkg_src_dir, PKG_MANIFEST_FILENAME_TOML), "w", encoding="utf-8") as fh: + fh.write("""# Example\n""") + fh.write("""schema_version = "1.0.0"\n""") + fh.write("""id = "{:s}"\n""".format(pkg_idname)) + fh.write("""name = "{:s}"\n""".format(pkg_name)) + fh.write("""type = "add-on"\n""") + fh.write("""tags = ["UV"]\n""") + fh.write("""maintainer = "Maintainer Name "\n""") + fh.write("""license = ["SPDX:GPL-2.0-or-later"]\n""") + fh.write("""version = "1.0.0"\n""") + fh.write("""tagline = "This is a tagline"\n""") + fh.write("""blender_version_min = "0.0.0"\n""") + + with open(os.path.join(pkg_src_dir, "__init__.py"), "w", encoding="utf-8") as fh: + fh.write(""" +def register(): + print("Register:", __name__) + +def unregister(): + print("Unregister:", __name__) +""") + + fh.write("""# Dummy.\n""") + # Generate some random ASCII-data. 
+ for i, line in enumerate(random_acii_lines(seed=pkg_idname, width=80)): + if i > 1000: + break + fh.write("# {:s}\n".format(line)) + + # Write a sub-directory (check this is working). + docs = os.path.join(pkg_src_dir, "docs") + os.makedirs(docs) + with open(os.path.join(docs, "readme.txt"), "w", encoding="utf-8") as fh: + fh.write("Example readme.") + + # `{cmd} build --pkg-source-dir {pkg_src_dir} --pkg-output-dir {repo_dir}`. + if not subcmd_author.build( + msg_fn_no_done, + pkg_source_dir=pkg_src_dir, + pkg_output_dir=repo_dir, + pkg_output_filepath=".", + ): + # Error running command. + return False + + # `{cmd} server-generate --repo-dir {repo_dir}`. + if not subcmd_server.generate( + msg_fn_no_done, + repo_dir=repo_dir, + ): + # Error running command. + return False + + message_done(msg_fn) + return True + + @staticmethod + def progress( + msg_fn: MessageFn, + *, + time_duration: float, + time_delay: float, + ) -> bool: + import time + request_exit = False + time_start = time.time() if (time_duration > 0.0) else 0.0 + size_beg = 0 + size_end = 100 + while time_duration == 0.0 or (time.time() - time_start < time_duration): + request_exit |= message_progress(msg_fn, "Demo", size_beg, size_end, 'BYTE') + if request_exit: + break + size_beg += 1 + if size_beg > size_end: + size_beg = 0 + time.sleep(time_delay) + if request_exit: + message_done(msg_fn) + return False + + message_done(msg_fn) + return True + + +# ----------------------------------------------------------------------------- +# Server Manipulating Actions + + +def argparse_create_server_generate( + subparsers: "argparse._SubParsersAction[argparse.ArgumentParser]", + args_internal: bool, +) -> None: + subparse = subparsers.add_parser( + "server-generate", + help="Create a listing from all packages.", + description=( + "Generate a listing of all packages stored in a directory.\n" + "This can be used to host packages which only requires static-file hosting." 
+ ), + formatter_class=argparse.RawTextHelpFormatter, + ) + + generic_arg_repo_dir(subparse) + if args_internal: + generic_arg_output_type(subparse) + + subparse.set_defaults( + func=lambda args: subcmd_server.generate( + msg_fn_from_args(args), + repo_dir=args.repo_dir, + ), + ) + + +# ----------------------------------------------------------------------------- +# Client Queries + +def argparse_create_client_list(subparsers: "argparse._SubParsersAction[argparse.ArgumentParser]") -> None: + subparse = subparsers.add_parser( + "list", + help="List all available packages.", + description="List all available packages.", + formatter_class=argparse.RawTextHelpFormatter, + ) + + generic_arg_remote_url(subparse) + generic_arg_local_dir(subparse) + generic_arg_online_user_agent(subparse) + + generic_arg_output_type(subparse) + generic_arg_timeout(subparse) + + subparse.set_defaults( + func=lambda args: subcmd_client.list_packages( + msg_fn_from_args(args), + args.remote_url, + online_user_agent=args.online_user_agent, + timeout_in_seconds=args.timeout, + ), + ) + + +# ----------------------------------------------------------------------------- +# Client Manipulating Actions + +def argparse_create_client_sync(subparsers: "argparse._SubParsersAction[argparse.ArgumentParser]") -> None: + subparse = subparsers.add_parser( + "sync", + help="Refresh from the remote repository.", + description="Refresh from remote repository (sync).", + formatter_class=argparse.RawTextHelpFormatter, + ) + + generic_arg_remote_url(subparse) + generic_arg_local_dir(subparse) + generic_arg_online_user_agent(subparse) + + generic_arg_output_type(subparse) + generic_arg_timeout(subparse) + generic_arg_ignore_broken_pipe(subparse) + generic_arg_extension_override(subparse) + + subparse.set_defaults( + func=lambda args: subcmd_client.sync( + msg_fn_from_args(args), + remote_url=args.remote_url, + local_dir=args.local_dir, + online_user_agent=args.online_user_agent, + timeout_in_seconds=args.timeout, + force_exit_ok=args.force_exit_ok, + extension_override=args.extension_override, + ), + ) + + +def argparse_create_client_install_files(subparsers: "argparse._SubParsersAction[argparse.ArgumentParser]") -> None: + subparse = subparsers.add_parser( + "install-files", + help="Install package from the file-system.", + description="Install packages from the file-system.", + formatter_class=argparse.RawTextHelpFormatter, + ) + generic_arg_file_list_positional(subparse) + + generic_arg_local_dir(subparse) + generic_arg_output_type(subparse) + + subparse.set_defaults( + func=lambda args: subcmd_client.install_packages_from_files( + msg_fn_from_args(args), + local_dir=args.local_dir, + package_files=args.files, + ), + ) + + +def argparse_create_client_install(subparsers: "argparse._SubParsersAction[argparse.ArgumentParser]") -> None: + subparse = subparsers.add_parser( + "install", + help="Install package.", + description="Install the package.", + formatter_class=argparse.RawTextHelpFormatter, + ) + generic_arg_package_list_positional(subparse) + + generic_arg_remote_url(subparse) + generic_arg_local_dir(subparse) + generic_arg_local_cache(subparse) + generic_arg_online_user_agent(subparse) + + generic_arg_output_type(subparse) + generic_arg_timeout(subparse) + + subparse.set_defaults( + func=lambda args: subcmd_client.install_packages( + msg_fn_from_args(args), + remote_url=args.remote_url, + local_dir=args.local_dir, + local_cache=args.local_cache, + packages=args.packages.split(","), + online_user_agent=args.online_user_agent, + 
timeout_in_seconds=args.timeout, + ), + ) + + +def argparse_create_client_uninstall(subparsers: "argparse._SubParsersAction[argparse.ArgumentParser]") -> None: + subparse = subparsers.add_parser( + "uninstall", + help="Uninstall a package.", + description="Uninstall the package.", + formatter_class=argparse.RawTextHelpFormatter, + ) + generic_arg_package_list_positional(subparse) + + generic_arg_local_dir(subparse) + generic_arg_output_type(subparse) + + subparse.set_defaults( + func=lambda args: subcmd_client.uninstall_packages( + msg_fn_from_args(args), + local_dir=args.local_dir, + packages=args.packages.split(","), + ), + ) + + +# ----------------------------------------------------------------------------- +# Authoring Actions + +def argparse_create_author_build( + subparsers: "argparse._SubParsersAction[argparse.ArgumentParser]", + args_internal: bool, +) -> None: + subparse = subparsers.add_parser( + "build", + help="Build a package.", + description="Build a package in the current directory.", + formatter_class=argparse.RawTextHelpFormatter, + ) + + generic_arg_package_source_dir(subparse) + generic_arg_package_output_dir(subparse) + generic_arg_package_output_filepath(subparse) + + if args_internal: + generic_arg_output_type(subparse) + + subparse.set_defaults( + func=lambda args: subcmd_author.build( + msg_fn_from_args(args), + pkg_source_dir=args.source_dir, + pkg_output_dir=args.output_dir, + pkg_output_filepath=args.output_filepath, + ), + ) + + +def argparse_create_author_validate( + subparsers: "argparse._SubParsersAction[argparse.ArgumentParser]", + args_internal: bool, +) -> None: + subparse = subparsers.add_parser( + "validate", + help="Validate a package.", + description="Validate the package meta-data in the current directory.", + formatter_class=argparse.RawTextHelpFormatter, + ) + generic_arg_package_source_path_positional(subparse) + + if args_internal: + generic_arg_output_type(subparse) + + subparse.set_defaults( + func=lambda args: subcmd_author.validate( + msg_fn_from_args(args), + source_path=args.source_path, + ), + ) + + +# ----------------------------------------------------------------------------- +# Dummy Repo + + +def argparse_create_dummy_repo(subparsers: "argparse._SubParsersAction[argparse.ArgumentParser]") -> None: + subparse = subparsers.add_parser( + "dummy-repo", + help="Create a dummy repository.", + description="Create a dummy repository, intended for testing.", + formatter_class=argparse.RawTextHelpFormatter, + ) + + subparse.add_argument( + "--package-names", + dest="package_names", + type=arg_handle_str_as_package_names, + help=( + "Comma separated list of package names to create (no-spaces)." + ), + required=True, + ) + + generic_arg_output_type(subparse) + generic_arg_repo_dir(subparse) + + subparse.set_defaults( + func=lambda args: subcmd_dummy.repo( + msg_fn_from_args(args), + repo_dir=args.repo_dir, + package_names=args.package_names, + ), + ) + +# ----------------------------------------------------------------------------- +# Dummy Output + + +def argparse_create_dummy_progress(subparsers: "argparse._SubParsersAction[argparse.ArgumentParser]") -> None: + subparse = subparsers.add_parser( + "dummy-progress", + help="Dummy progress output.", + description="Demo output.", + formatter_class=argparse.RawTextHelpFormatter, + ) + + subparse.add_argument( + "--time-duration", + dest="time_duration", + type=float, + help=( + "How long to run the demo for (zero to run forever)." 
+ ), + default=0.0, + ) + subparse.add_argument( + "--time-delay", + dest="time_delay", + type=float, + help=( + "Delay between updates." + ), + default=0.05, + ) + + generic_arg_output_type(subparse) + + subparse.set_defaults( + func=lambda args: subcmd_dummy.progress( + msg_fn_from_args(args), + time_duration=args.time_duration, + time_delay=args.time_delay, + ), + ) + + +def argparse_create( + args_internal: bool = True, + args_extra_subcommands_fn: Optional[ArgsSubparseFn] = None, + prog: Optional[str] = None, +) -> argparse.ArgumentParser: + + parser = argparse.ArgumentParser( + prog=prog or "blender_ext", + description=__doc__, + formatter_class=argparse.RawTextHelpFormatter, + ) + + subparsers = parser.add_subparsers( + title="subcommands", + description="", + help="", + ) + + argparse_create_server_generate(subparsers, args_internal) + + if args_internal: + # Queries. + argparse_create_client_list(subparsers) + + # Manipulating Actions. + argparse_create_client_sync(subparsers) + argparse_create_client_install_files(subparsers) + argparse_create_client_install(subparsers) + argparse_create_client_uninstall(subparsers) + + # Dummy commands. + argparse_create_dummy_repo(subparsers) + argparse_create_dummy_progress(subparsers) + + # Authoring Commands. + argparse_create_author_build(subparsers, args_internal) + argparse_create_author_validate(subparsers, args_internal) + + if args_extra_subcommands_fn is not None: + args_extra_subcommands_fn(subparsers) + + return parser + + +# ----------------------------------------------------------------------------- +# Message Printing Functions + +# Follows `MessageFn` convention. +def msg_print_text(ty: str, data: PrimTypeOrSeq) -> bool: + assert ty in MESSAGE_TYPES + + if isinstance(data, (list, tuple)): + data_str = ", ".join(str(x) for x in data) + else: + data_str = str(data) + + # Don't prefix status as it's noise for users. + if ty == 'STATUS': + sys.stdout.write("{:s}\n".format(data_str)) + else: + sys.stdout.write("{:s}: {:s}\n".format(ty, data_str)) + + return REQUEST_EXIT + + +def msg_print_json_impl(ty: str, data: PrimTypeOrSeq) -> bool: + assert ty in MESSAGE_TYPES + sys.stdout.write(json.dumps([ty, data])) + return REQUEST_EXIT + + +def msg_print_json(ty: str, data: PrimTypeOrSeq) -> bool: + msg_print_json_impl(ty, data) + sys.stdout.write("\n") + sys.stdout.flush() + return REQUEST_EXIT + + +def msg_print_json_0(ty: str, data: PrimTypeOrSeq) -> bool: + msg_print_json_impl(ty, data) + sys.stdout.write("\0") + sys.stdout.flush() + return REQUEST_EXIT + + +def msg_fn_from_args(args: argparse.Namespace) -> MessageFn: + # Will be None when running form Blender. + output_type = getattr(args, "output_type", 'TEXT') + + match output_type: + case 'JSON': + return msg_print_json + case 'JSON_0': + return msg_print_json_0 + case 'TEXT': + return msg_print_text + + raise Exception("Unknown output!") + + +def main( + argv: Optional[List[str]] = None, + args_internal: bool = True, + args_extra_subcommands_fn: Optional[ArgsSubparseFn] = None, + prog: Optional[str] = None, +) -> int: + + # Needed on WIN32 which doesn't default to `utf-8`. + for fh in (sys.stdout, sys.stderr): + # While this is typically the case, is only guaranteed to be `TextIO` so check `reconfigure` is available. 
+        if not isinstance(fh, io.TextIOWrapper):
+            continue
+        if fh.encoding.lower().partition(":")[0] == "utf-8":
+            continue
+        fh.reconfigure(encoding="utf-8")
+
+    if "--version" in sys.argv:
+        sys.stdout.write("{:s}\n".format(VERSION))
+        return 0
+
+    parser = argparse_create(
+        args_internal=args_internal,
+        args_extra_subcommands_fn=args_extra_subcommands_fn,
+        prog=prog,
+    )
+    args = parser.parse_args(argv)
+    # Call the sub-parser's callback.
+    if not hasattr(args, "func"):
+        parser.print_help()
+        return 0
+
+    result = args.func(args)
+    assert isinstance(result, bool)
+    return 0 if result else 1
+
+
+if __name__ == "__main__":
+    try:
+        sys.exit(main())
+    except BrokenPipeError as ex:
+        # When called for notifications, a broken pipe may occur if the caller closes soon after activating.
+        # In this case a broken pipe is expected and not something we want to avoid.
+        # This must only ever be set if canceling will *not* leave the repository in a corrupt state.
+        if not FORCE_EXIT_OK:
+            raise ex
diff --git a/scripts/addons_core/bl_pkg/example_extension/AUTHORS b/scripts/addons_core/bl_pkg/example_extension/AUTHORS
new file mode 100644
index 00000000000..d9da561a33e
--- /dev/null
+++ b/scripts/addons_core/bl_pkg/example_extension/AUTHORS
@@ -0,0 +1,5 @@
+# Authors
+
+Campbell Barton
+Sergey Sharybin
+
diff --git a/scripts/addons_core/bl_pkg/example_extension/__init__.py b/scripts/addons_core/bl_pkg/example_extension/__init__.py
new file mode 100644
index 00000000000..6cff06cebf5
--- /dev/null
+++ b/scripts/addons_core/bl_pkg/example_extension/__init__.py
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+def register():
+    print("Register", __package__)
+
+
+def unregister():
+    print("UnRegister", __package__)
diff --git a/scripts/addons_core/bl_pkg/example_extension/blender_manifest.toml b/scripts/addons_core/bl_pkg/example_extension/blender_manifest.toml
new file mode 100644
index 00000000000..4b33ba168b2
--- /dev/null
+++ b/scripts/addons_core/bl_pkg/example_extension/blender_manifest.toml
@@ -0,0 +1,17 @@
+# Example
+schema_version = "1.0.0"
+
+id = "my_example_package"
+name = "Test Package"
+tagline = "Single line description"
+type = "add-on"
+tags = ["UV", "Modeling"]
+version = "0.2.0"
+blender_version_min = "2.80.0"
+maintainer = "Maintainer Name"
+license = [
+  "SPDX:CC-0"
+]
+copyright = [
+  "Developer Name"
+]
diff --git a/scripts/addons_core/bl_pkg/readme.rst b/scripts/addons_core/bl_pkg/readme.rst
new file mode 100644
index 00000000000..003ddd6fde6
--- /dev/null
+++ b/scripts/addons_core/bl_pkg/readme.rst
@@ -0,0 +1,135 @@
+
+##################
+Blender Extensions
+##################
+
+Directory Layout
+================
+
+``./blender_addon/bl_pkg/cli/``
+   The stand-alone command line utility to manage extensions.
+
+``./blender_addon/bl_pkg/``
+   The Blender add-on which wraps the command line utility
+   (abstracts details of interacting with the package manager & repositories).
+
+``./tests/``
+   Automated tests.
+
+   To run the tests via the ``Makefile``:
+
+   Test the command line application.
+
+   .. code-block::
+
+      make test PYTHON_BIN=/path/to/bin/python3.11
+
+   If your system Python is v3.11 or newer you may omit ``PYTHON_BIN``.
+
+   .. code-block::
+
+      make test_blender BLENDER_BIN=/path/to/blender
+
+
+GUI
+===
+
+The GUI is a work in progress; currently it is made to work with an unmodified Blender 4.1.
+
+- Link ``blender_addon/bl_pkg`` into your add-ons directory.
+- Enable the Blender Extensions add-on from Blender.
+- Enable the Blender Extensions checkbox in the add-ons preferences (this is a temporary location).
+- Repositories can be added/removed from the "Files" section in the preferences.
+
+
+Hacking
+=======
+
+Some useful hints.
+
+When developing the command line interface, the tests can be set up to run on file-change:
+
+.. code-block::
+
+   make watch_test
+
+To run the Blender tests:
+
+.. code-block::
+
+   make watch_test_blender BLENDER_BIN=/path/to/blender
+
+How to Set Up a Test Environment
+================================
+
+Most of the options here use the command-line tool. For a comprehensive list of commands check the help:
+
+.. code-block::
+
+   ./blender_addon/bl_pkg/cli/blender_ext.py --help
+
+
+Dummy server
+------------
+
+The simplest way to get started is to create a dummy repository.
+
+.. code-block::
+
+   ./blender_addon/bl_pkg/cli/blender_ext.py dummy-repo \
+      --repo-dir=/path/to/host/my/repo/files \
+      --package-names="blue,red,green,purple,orange"
+
+This will populate the directory specified as ``--repo-dir`` with dummy extension packages (``.zip``),
+and an index (``bl_ext_repo.json``).
+
+
+Setup an Extensions Repository
+==============================
+
+First you need to create a package for each individual extension:
+
+- Go to the directory of the extension you want to package.
+- Create a ``blender_manifest.toml`` file with your configuration.
+- Run the command ``blender_ext.py build``.
+
+You can look at an example of a dummy extension in the ``example_extension`` directory.
+
+.. code-block::
+
+   cd ./example_extension
+   ../blender_addon/bl_pkg/cli/blender_ext.py build
+
+This will create a ``my_example_package.zip`` (as specified in the ``.toml`` file).
+
+Now you can move all your ``*.zip`` packages to wherever they will be hosted on the server.
+The final step is to create an index file to serve all your packages.
+
+.. code-block::
+
+   mkdir -p /path/to/host/my/repo/files
+   cp ./example_extension/my_example_package.zip /path/to/host/my/repo/files
+   ./blender_addon/bl_pkg/cli/blender_ext.py server-generate --repo-dir /path/to/host/my/repo/files
+
+This will generate a new file ``bl_ext_repo.json`` in your repository directory.
+This file is to be used as the entry point to your remote server.
+
+Alternatively, if you are testing locally,
+you can use the directory containing this file as the ``Remote Path`` of your Extensions Repository.
+
+
+.. This section could go elsewhere, for now there is only a single note.
+
+Requirement: Add-Ons
+====================
+
+Add-ons packaged as extensions must use relative imports when importing their own sub-modules.
+This is a requirement of Python module name-spacing.
+
+
+Requirement: Blender 4.2
+========================
+
+This add-on requires a yet-to-be-released version of Blender.
+
+You can download a `daily build `__ of Blender 4.2 for testing and development purposes.
diff --git a/scripts/addons_core/bl_pkg/tests/modules/http_server_context.py b/scripts/addons_core/bl_pkg/tests/modules/http_server_context.py
new file mode 100644
index 00000000000..0a59ae4c445
--- /dev/null
+++ b/scripts/addons_core/bl_pkg/tests/modules/http_server_context.py
@@ -0,0 +1,103 @@
+# SPDX-FileCopyrightText: 2023 Blender Foundation
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+"""
+Starts up a web server pointed at a local directory for the purpose of simulating online access.
+With basic options for PORT/path & verbosity (so tests aren't too noisy).
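+
+A minimal usage sketch (the directory & port below are illustrative values only)::
+
+    with HTTPServerContext(directory="/path/to/static/files", port=8010):
+        ...  # The directory is served at "http://localhost:8010/" until the block exits.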
+""" +__all__ = ( + "HTTPServerContext", +) + +import socketserver +import http.server +import threading + +from typing import ( + Any, +) + + +class HTTPServerContext: + __slots__ = ( + "_directory", + "_port", + "_http_thread", + "_http_server", + "_wait_tries", + "_wait_delay", + "_verbose", + ) + + class _TestServer(socketserver.TCPServer): + allow_reuse_address = True + + @staticmethod + def _is_port_in_use(port: int) -> bool: + import socket + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + return s.connect_ex(("localhost", port)) == 0 + + @staticmethod + def _test_handler_factory(directory: str, verbose: bool = False) -> type: + class TestHandler(http.server.SimpleHTTPRequestHandler): + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, directory=directory, **kwargs) + # Suppress messages by overriding the function. + if not verbose: + def log_message(self, *_args: Any, **_kw: Any) -> None: + pass + return TestHandler + + def __init__( + self, + directory: str, + port: int, + *, + verbose: bool = False, + wait_delay: float = 0.0, + wait_tries: int = 0, + ) -> None: + self._directory = directory + self._port = port + self._wait_delay = wait_delay + self._wait_tries = wait_tries + self._verbose = verbose + + # Members `_http_thread` & `_http_server` are set when entering the context. + + def __enter__(self) -> None: + + if self._wait_tries: + import time + for _ in range(self._wait_tries): + if not HTTPServerContext._is_port_in_use(self._port): + break + + print("Waiting...") + time.sleep(self._wait_delay) + + http_server = HTTPServerContext._TestServer( + ("", self._port), + HTTPServerContext._test_handler_factory( + self._directory, + verbose=self._verbose, + ), + ) + + # Use a thread so as not to block. + http_thread = threading.Thread(target=http_server.serve_forever) + http_thread.daemon = True + http_thread.start() + + self._http_thread = http_thread + self._http_server = http_server + + def __exit__(self, _type: Any, _value: Any, traceback: Any) -> None: + # Needed on WIN32, otherwise exit causes an `OSError`. + self._http_server.shutdown() + + self._http_server.server_close() + del self._http_server + del self._http_thread diff --git a/scripts/addons_core/bl_pkg/tests/modules/python_wheel_generate.py b/scripts/addons_core/bl_pkg/tests/modules/python_wheel_generate.py new file mode 100644 index 00000000000..d9bbc388c14 --- /dev/null +++ b/scripts/addons_core/bl_pkg/tests/modules/python_wheel_generate.py @@ -0,0 +1,153 @@ +# SPDX-FileCopyrightText: 2023 Blender Foundation +# +# SPDX-License-Identifier: GPL-2.0-or-later + +""" +This module generates a Python wheel (*.whl) for the purpose of running tests. 
+""" +__all__ = ( + "generate_from_file_data", + "generate_from_source", +) + +import os +import subprocess +import sys +import tempfile + +from typing import ( + Callable, + Dict, + List, + Tuple, +) + + +def _contents_to_filesystem( + contents: Dict[str, bytes], + directory: str, +) -> None: + swap_slash = os.sep == "\\" + for key, value in contents.items(): + path = key.replace("/", "\\") if swap_slash else key + path_full = os.path.join(directory, path) + path_base = os.path.dirname(path_full) + os.makedirs(path_base, exist_ok=True) + + with ( + open(path_full, "wb") if isinstance(value, bytes) else + open(path_full, "w", encoding="utf-8") + ) as fh: + fh.write(value) + + +def search_impl(directory: str, fn: Callable[[os.DirEntry[str]], bool], result: List[str]) -> None: + for entry in os.scandir(directory): + if entry.is_dir(): + search_impl(entry.path, fn, result) + if fn(entry): + result.append(entry.path) + + +def search(directory: str, fn: Callable[[os.DirEntry[str]], bool]) -> List[str]: + result: List[str] = [] + search_impl(directory, fn, result) + return result + + +def generate_from_file_data( + *, + module_name: str, + version: str, + package_contents: Dict[str, bytes], +) -> Tuple[str, bytes]: + """ + :arg package_contents: + The package contents. + - The key is a path. + - The value is file contents. + + Return filename & data. + """ + + setup_contents: Dict[str, bytes] = { + "setup.py": """ +from setuptools import setup + +setup() +""".encode("utf-8"), + "pyproject.toml": """ +[build-system] +requires = ["setuptools >= 61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "{:s}" +version = "{:s}" +dependencies = [] + +requires-python = ">=3.11" +authors = [ + {{name = "Developer Name", email = "name@example.com"}}, +] +maintainers = [ + {{name = "Developer Name", email = "name@example.com"}} +] +description = "Dummy description." +keywords = ["egg", "bacon", "sausage", "tomatoes", "Lobster Thermidor"] +classifiers = [ + "Development Status :: 4 - Beta", + "Programming Language :: Python" +] +""".format(module_name, version).encode("utf-8"), + } + + with tempfile.TemporaryDirectory() as temp_dir: + _contents_to_filesystem(package_contents, temp_dir) + _contents_to_filesystem(setup_contents, temp_dir) + + output = subprocess.run( + [sys.executable, "setup.py", "bdist_wheel"], + cwd=temp_dir, + stderr=subprocess.PIPE, + stdout=subprocess.PIPE, + ) + + result = search(temp_dir, lambda entry: entry.name.endswith(".whl")) + if len(result) != 1: + print(output) + raise Exception("failed to create wheel!") + + with open(result[0], 'rb') as fh: + data = fh.read() + + filename = os.path.basename(result[0]) + + return filename, data + + +def generate_from_source( + *, + module_name: str, + version: str, + source: str, +) -> Tuple[str, bytes]: + """ + Return filename & data. 
+ """ + return generate_from_file_data( + module_name=module_name, + version=version, + package_contents={ + "{:s}/__init__.py".format(module_name): source.encode("utf-8"), + }, + ) + + +if __name__ == "__main__": + filename, data = generate_from_source( + module_name="blender_example_module", + version="0.0.1", + source="print(\"Hello World\")" + ) + print(filename, len(data)) diff --git a/scripts/addons_core/bl_pkg/tests/test_blender.py b/scripts/addons_core/bl_pkg/tests/test_blender.py new file mode 100644 index 00000000000..db416828aaf --- /dev/null +++ b/scripts/addons_core/bl_pkg/tests/test_blender.py @@ -0,0 +1,199 @@ +# SPDX-FileCopyrightText: 2023 Blender Foundation +# +# SPDX-License-Identifier: GPL-2.0-or-later + +""" +Test with command: + make test +""" + +# NOTE: +# Currently this sets up an environment and runs commands. + +# High level tests, run operators which manage a repository, ensure they work as expected. +# This tests Blender's integration for all areas except for the interactive GUI... for now +# perhaps this is supported in the future. + + +# Start a web server, connect blender to it, then setup new repos and install extensions. + + +import json +import os +import subprocess +import sys +import tempfile + +from typing import ( + Any, + Sequence, + Tuple, +) + +BASE_DIR = os.path.abspath(os.path.dirname(__file__)) + +CMD = ( + sys.executable, + os.path.normpath(os.path.join(BASE_DIR, "..", "cli", "blender_ext.py")), +) + +# Simulate communicating with a web-server. +USE_HTTP = os.environ.get("USE_HTTP", "0") != "0" +HTTP_PORT = 8002 + +VERBOSE = os.environ.get("VERBOSE", "0") != "0" + +sys.path.append(os.path.join(BASE_DIR, "modules")) +from http_server_context import HTTPServerContext # noqa: E402 + + +PKG_REPO_LIST_FILENAME = "bl_ext_repo.json" + +# Use an in-memory temp, when available. +TEMP_PREFIX = tempfile.gettempdir() +if os.path.exists("/ramcache/tmp"): + TEMP_PREFIX = "/ramcache/tmp" + +# Useful for debugging, when blank create dynamically. 
+TEMP_DIR_SOURCE = os.path.join(TEMP_PREFIX, "blender_app_ext_source") +TEMP_DIR_REMOTE = os.path.join(TEMP_PREFIX, "blender_app_ext_remote") +TEMP_DIR_LOCAL = os.path.join(TEMP_PREFIX, "blender_app_ext_local") + +if TEMP_DIR_SOURCE and not os.path.isdir(TEMP_DIR_SOURCE): + os.makedirs(TEMP_DIR_SOURCE) +if TEMP_DIR_LOCAL and not os.path.isdir(TEMP_DIR_LOCAL): + os.makedirs(TEMP_DIR_LOCAL) +if TEMP_DIR_REMOTE and not os.path.isdir(TEMP_DIR_REMOTE): + os.makedirs(TEMP_DIR_REMOTE) + + +# ----------------------------------------------------------------------------- +# Generic Functions + +def command_output_from_json_0(args: Sequence[str]) -> Sequence[Tuple[str, Any]]: + result = [] + for json_bytes in subprocess.check_output( + [*CMD, *args, "--output-type=JSON_0"], + ).split(b'\0'): + if not json_bytes: + continue + json_str = json_bytes.decode("utf-8") + json_data = json.loads(json_str) + assert len(json_data) == 2 + assert isinstance(json_data[0], str) + result.append((json_data[0], json_data[1])) + + return result + + +def ensure_script_directory(script_directory_to_add: str) -> None: + import bpy # type: ignore + script_directories = bpy.context.preferences.filepaths.script_directories + script_dir_empty = None + for script_dir in script_directories: + dir_test = script_dir.directory + if dir_test == script_directory_to_add: + return + if not dir_test: + script_dir_empty = script_dir + + if not script_dir_empty: + bpy.ops.preferences.script_directory_add() + script_dir_empty = script_directories[-1] + + script_dir_empty.directory = script_directory_to_add + + if script_directory_to_add not in sys.path: + sys.path.append(script_directory_to_add) + + +def blender_test_run(temp_dir_local: str) -> None: + import bpy + import addon_utils # type: ignore + + preferences = bpy.context.preferences + + preferences.view.show_developer_ui = True + preferences.experimental.use_extension_repos = True + + addon_dir = os.path.normpath(os.path.join(BASE_DIR, "..", "blender_addon")) + + ensure_script_directory(addon_dir) + + addon_utils.enable("bl_pkg") + + print("BEG*********************", dir(bpy.ops.bl_pkg)) + + # NOTE: it's assumed the URL will expand to JSON, example: + # http://extensions.local:8111/add-ons/?format=json + # This is not supported by the test server so the file name needs to be added. + remote_url = "http://localhost:{:d}/{:s}".format(HTTP_PORT, PKG_REPO_LIST_FILENAME) + + repo = preferences.extensions.repos.new( + name="My Test", + module="my_repo", + custom_directory=temp_dir_local, + remote_url=remote_url, + ) + + bpy.ops.bl_pkg.dummy_progress() + + bpy.ops.bl_pkg.repo_sync( + repo_directory=temp_dir_local, + ) + + bpy.ops.bl_pkg.pkg_install( + repo_directory=temp_dir_local, + pkg_id="blue", + ) + + bpy.ops.bl_pkg.pkg_uninstall( + repo_directory=temp_dir_local, + pkg_id="blue", + ) + + preferences.extensions.repos.remove(repo) + + print("END*********************") + + +def main() -> None: + package_names = ( + "blue", + "red", + "green", + "purple", + "orange", + ) + with tempfile.TemporaryDirectory(dir=TEMP_DIR_REMOTE) as temp_dir_remote: + # Populate repository from source. + for msg in command_output_from_json_0([ + "dummy-repo", + "--repo-dir", temp_dir_remote, + "--package-names", ",".join(package_names) + ]): + print(msg) + + with HTTPServerContext( + directory=temp_dir_remote, + port=HTTP_PORT, + # Avoid error when running tests quickly, + # sometimes the port isn't available yet. + wait_tries=10, + wait_delay=0.05, + ): + # Where we will put the files. 
+ with tempfile.TemporaryDirectory() as temp_dir_local: + blender_test_run(temp_dir_local) + + with open(os.path.join(temp_dir_remote, PKG_REPO_LIST_FILENAME), 'r', encoding="utf-8") as fh: + print(fh.read()) + + # If we want to copy out these. + # print(temp_dir_remote) + # import time + # time.sleep(540) + + +if __name__ == "__main__": + main() diff --git a/scripts/addons_core/bl_pkg/tests/test_cli.py b/scripts/addons_core/bl_pkg/tests/test_cli.py new file mode 100644 index 00000000000..8c03f95141d --- /dev/null +++ b/scripts/addons_core/bl_pkg/tests/test_cli.py @@ -0,0 +1,420 @@ +# SPDX-FileCopyrightText: 2023 Blender Foundation +# +# SPDX-License-Identifier: GPL-2.0-or-later + +""" +Test with command: + make test_blender BLENDER_BIN=$PWD/../../../blender.bin +""" + +import json +import os +import shutil +import subprocess +import sys +import tempfile +import unittest + +import unittest.util + +from typing import ( + Any, + Sequence, + Dict, + NamedTuple, + Optional, + Set, + Tuple, +) + +# For more useful output that isn't clipped. +unittest.util._MAX_LENGTH = 10_000 + +IS_WIN32 = sys.platform == "win32" + +# See the variable with the same name in `blender_ext.py`. +REMOTE_REPO_HAS_JSON_IMPLIED = True + +PKG_EXT = ".zip" + +# PKG_REPO_LIST_FILENAME = "bl_ext_repo.json" +PKG_MANIFEST_FILENAME = "bl_ext_pkg_manifest.json" + +PKG_MANIFEST_FILENAME_TOML = "blender_manifest.toml" + +# Use an in-memory temp, when available. +TEMP_PREFIX = tempfile.gettempdir() +if os.path.exists("/ramcache/tmp"): + TEMP_PREFIX = "/ramcache/tmp" + +TEMP_DIR_REMOTE = os.path.join(TEMP_PREFIX, "bl_ext_remote") +TEMP_DIR_LOCAL = os.path.join(TEMP_PREFIX, "bl_ext_local") + +if TEMP_DIR_LOCAL and not os.path.isdir(TEMP_DIR_LOCAL): + os.makedirs(TEMP_DIR_LOCAL) +if TEMP_DIR_REMOTE and not os.path.isdir(TEMP_DIR_REMOTE): + os.makedirs(TEMP_DIR_REMOTE) + + +BASE_DIR = os.path.abspath(os.path.dirname(__file__)) +# PYTHON_CMD = sys.executable + +CMD = ( + sys.executable, + os.path.normpath(os.path.join(BASE_DIR, "..", "cli", "blender_ext.py")), +) + +# Simulate communicating with a web-server. +USE_HTTP = os.environ.get("USE_HTTP", "0") != "0" +HTTP_PORT = 8001 + +VERBOSE = os.environ.get("VERBOSE", "0") != "0" + +sys.path.append(os.path.join(BASE_DIR, "modules")) +from http_server_context import HTTPServerContext # noqa: E402 + +STATUS_NON_ERROR = {'STATUS', 'PROGRESS'} + + +# ----------------------------------------------------------------------------- +# Generic Utilities +# + + +def rmdir_contents(directory: str) -> None: + """ + Remove all directory contents without removing the directory. + """ + for entry in os.scandir(directory): + filepath = os.path.join(directory, entry.name) + if entry.is_dir(): + shutil.rmtree(filepath) + else: + os.unlink(filepath) + + +# ----------------------------------------------------------------------------- +# HTTP Server (simulate remote access) +# + +# ----------------------------------------------------------------------------- +# Generate Repository +# + + +def my_create_package(dirpath: str, filename: str, *, metadata: Dict[str, Any], files: Dict[str, bytes]) -> None: + """ + Create a package using the command line interface. + """ + assert filename.endswith(PKG_EXT) + outfile = os.path.join(dirpath, filename) + + # NOTE: use the command line packaging utility to ensure 1:1 behavior with actual packages. 
+ metadata_copy = metadata.copy() + + with tempfile.TemporaryDirectory() as temp_dir_pkg: + temp_dir_pkg_manifest_toml = os.path.join(temp_dir_pkg, PKG_MANIFEST_FILENAME_TOML) + with open(temp_dir_pkg_manifest_toml, "wb") as fh: + # NOTE: escaping is not supported, this is primitive TOML writing for tests. + data = "".join(( + """# Example\n""", + """schema_version = "{:s}"\n""".format(metadata_copy.pop("schema_version")), + """id = "{:s}"\n""".format(metadata_copy.pop("id")), + """name = "{:s}"\n""".format(metadata_copy.pop("name")), + """tagline = "{:s}"\n""".format(metadata_copy.pop("tagline")), + """version = "{:s}"\n""".format(metadata_copy.pop("version")), + """type = "{:s}"\n""".format(metadata_copy.pop("type")), + """tags = [{:s}]\n""".format(", ".join("\"{:s}\"".format(v) for v in metadata_copy.pop("tags"))), + """blender_version_min = "{:s}"\n""".format(metadata_copy.pop("blender_version_min")), + """maintainer = "{:s}"\n""".format(metadata_copy.pop("maintainer")), + """license = [{:s}]\n""".format(", ".join("\"{:s}\"".format(v) for v in metadata_copy.pop("license"))), + )).encode('utf-8') + fh.write(data) + + if metadata_copy: + raise Exception("Unexpected mata-data: {!r}".format(metadata_copy)) + + for filename_iter, data in files.items(): + with open(os.path.join(temp_dir_pkg, filename_iter), "wb") as fh: + fh.write(data) + + output_json = command_output_from_json_0( + [ + "build", + "--source-dir", temp_dir_pkg, + "--output-filepath", outfile, + ], + exclude_types={"PROGRESS"}, + ) + + output_json_error = command_output_filter_exclude( + output_json, + exclude_types=STATUS_NON_ERROR, + ) + + if output_json_error: + raise Exception("Creating a package produced some error output: {!r}".format(output_json_error)) + + +class PkgTemplate(NamedTuple): + """Data need to create a package for testing.""" + idname: str + name: str + version: str + + +def my_generate_repo( + dirpath: str, + *, + templates: Sequence[PkgTemplate], +) -> None: + for template in templates: + my_create_package( + dirpath, template.idname + PKG_EXT, + metadata={ + "schema_version": "1.0.0", + "id": template.idname, + "name": template.name, + "tagline": """This package has a tagline""", + "version": template.version, + "type": "add-on", + "tags": ["UV", "Modeling"], + "blender_version_min": "0.0.0", + "maintainer": "Some Developer", + "license": ["SPDX:GPL-2.0-or-later"], + }, + files={ + "__init__.py": b"# This is a script\n", + }, + ) + + +def command_output_filter_include( + output_json: Sequence[Tuple[str, Any]], + include_types: Set[str], +) -> Sequence[Tuple[str, Any]]: + return [(a, b) for a, b in output_json if a in include_types] + + +def command_output_filter_exclude( + output_json: Sequence[Tuple[str, Any]], + exclude_types: Set[str], +) -> Sequence[Tuple[str, Any]]: + return [(a, b) for a, b in output_json if a not in exclude_types] + + +def command_output( + args: Sequence[str], + expected_returncode: int = 0, +) -> str: + proc = subprocess.run( + [*CMD, *args], + stdout=subprocess.PIPE, + check=expected_returncode == 0, + ) + if proc.returncode != expected_returncode: + raise subprocess.CalledProcessError(proc.returncode, proc.args, output=proc.stdout, stderr=proc.stderr) + result = proc.stdout.decode("utf-8") + if IS_WIN32: + result = result.replace("\r\n", "\n") + return result + + +def command_output_from_json_0( + args: Sequence[str], + *, + exclude_types: Optional[Set[str]] = None, + expected_returncode: int = 0, +) -> Sequence[Tuple[str, Any]]: + result = [] + + proc = subprocess.run( + 
[*CMD, *args, "--output-type=JSON_0"], + stdout=subprocess.PIPE, + check=expected_returncode == 0, + ) + if proc.returncode != expected_returncode: + raise subprocess.CalledProcessError(proc.returncode, proc.args, output=proc.stdout, stderr=proc.stderr) + for json_bytes in proc.stdout.split(b'\0'): + if not json_bytes: + continue + json_str = json_bytes.decode("utf-8") + json_data = json.loads(json_str) + assert len(json_data) == 2 + assert isinstance(json_data[0], str) + if (exclude_types is not None) and (json_data[0] in exclude_types): + continue + result.append((json_data[0], json_data[1])) + + return result + + +class TestCLI(unittest.TestCase): + + def test_version(self) -> None: + self.assertEqual(command_output(["--version"]), "0.1\n") + + +class TestCLI_WithRepo(unittest.TestCase): + dirpath = "" + dirpath_url = "" + + @classmethod + def setUpClass(cls) -> None: + if TEMP_DIR_REMOTE: + cls.dirpath = TEMP_DIR_REMOTE + if os.path.isdir(cls.dirpath): + # pylint: disable-next=using-constant-test + if False: + shutil.rmtree(cls.dirpath) + os.makedirs(TEMP_DIR_REMOTE) + else: + # Empty the path without removing it, + # handy so a developer can remain in the directory. + rmdir_contents(TEMP_DIR_REMOTE) + else: + os.makedirs(TEMP_DIR_REMOTE) + else: + cls.dirpath = tempfile.mkdtemp(prefix="bl_ext_") + + my_generate_repo( + cls.dirpath, + templates=( + PkgTemplate(idname="foo_bar", name="Foo Bar", version="1.0.5"), + PkgTemplate(idname="another_package", name="Another Package", version="1.5.2"), + PkgTemplate(idname="test_package", name="Test Package", version="1.5.2"), + ), + ) + + if USE_HTTP: + if REMOTE_REPO_HAS_JSON_IMPLIED: + cls.dirpath_url = "http://localhost:{:d}/bl_ext_repo.json".format(HTTP_PORT) + else: + cls.dirpath_url = "http://localhost:{:d}".format(HTTP_PORT) + else: + cls.dirpath_url = cls.dirpath + + @classmethod + def tearDownClass(cls) -> None: + if not TEMP_DIR_REMOTE: + shutil.rmtree(cls.dirpath) + del cls.dirpath + del cls.dirpath_url + + def test_version(self) -> None: + self.assertEqual(command_output(["--version"]), "0.1\n") + + def test_server_generate(self) -> None: + output = command_output(["server-generate", "--repo-dir", self.dirpath]) + self.assertEqual(output, "found 3 packages.\n") + + def test_client_list(self) -> None: + # TODO: only run once. + self.test_server_generate() + + output = command_output(["list", "--remote-url", self.dirpath_url, "--local-dir", ""]) + self.assertEqual( + output, ( + "another_package(1.5.2): Another Package\n" + "foo_bar(1.0.5): Foo Bar\n" + "test_package(1.5.2): Test Package\n" + ) + ) + del output + + # TODO, figure out how to split JSON & TEXT output tests, this test just checks JSON is working at all. + output_json = command_output_from_json_0( + ["list", "--remote-url", self.dirpath_url, "--local-dir", ""], + exclude_types={"PROGRESS"}, + ) + self.assertEqual( + output_json, [ + ("STATUS", "another_package(1.5.2): Another Package"), + ("STATUS", "foo_bar(1.0.5): Foo Bar"), + ("STATUS", "test_package(1.5.2): Test Package"), + ] + ) + + def test_client_install_and_uninstall(self) -> None: + with tempfile.TemporaryDirectory(dir=TEMP_DIR_LOCAL) as temp_dir_local: + # TODO: only run once. 
+ self.test_server_generate() + + output_json = command_output_from_json_0([ + "sync", + "--remote-url", self.dirpath_url, + "--local-dir", temp_dir_local, + ], exclude_types={"PROGRESS"}) + self.assertEqual( + output_json, [ + ('STATUS', 'Sync repo: ' + self.dirpath_url), + ('STATUS', 'Sync downloading remote data'), + ('STATUS', 'Sync complete: ' + self.dirpath_url), + ] + ) + + # Install. + output_json = command_output_from_json_0( + [ + "install", "another_package", + "--remote-url", self.dirpath_url, + "--local-dir", temp_dir_local, + ], + exclude_types={"PROGRESS"}, + ) + self.assertEqual( + output_json, [ + ("STATUS", "Installed \"another_package\"") + ] + ) + self.assertTrue(os.path.isdir(os.path.join(temp_dir_local, "another_package"))) + + # Re-Install. + output_json = command_output_from_json_0( + [ + "install", "another_package", + "--remote-url", self.dirpath_url, + "--local-dir", temp_dir_local, + ], + exclude_types={"PROGRESS"}, + ) + self.assertEqual( + output_json, [ + ("STATUS", "Re-Installed \"another_package\"") + ] + ) + self.assertTrue(os.path.isdir(os.path.join(temp_dir_local, "another_package"))) + + # Uninstall (not found). + output_json = command_output_from_json_0( + [ + "uninstall", "another_package_", + "--local-dir", temp_dir_local, + ], + expected_returncode=1, + ) + self.assertEqual( + output_json, [ + ("ERROR", "Package not found \"another_package_\"") + ] + ) + + # Uninstall. + output_json = command_output_from_json_0([ + "uninstall", "another_package", + "--local-dir", temp_dir_local, + ]) + self.assertEqual( + output_json, [ + ("STATUS", "Removed \"another_package\"") + ] + ) + self.assertFalse(os.path.isdir(os.path.join(temp_dir_local, "another_package"))) + + +if __name__ == "__main__": + if USE_HTTP: + with HTTPServerContext(directory=TEMP_DIR_REMOTE, port=HTTP_PORT): + unittest.main() + else: + unittest.main() diff --git a/scripts/addons_core/bl_pkg/tests/test_cli_blender.py b/scripts/addons_core/bl_pkg/tests/test_cli_blender.py new file mode 100644 index 00000000000..06c3be5f310 --- /dev/null +++ b/scripts/addons_core/bl_pkg/tests/test_cli_blender.py @@ -0,0 +1,515 @@ +# SPDX-FileCopyrightText: 2023 Blender Foundation +# +# SPDX-License-Identifier: GPL-2.0-or-later + +""" +This test emulates running packaging commands with Blender via the command line. + +This also happens to test packages with ``*.whl``. + +Command to run this test: + make test_cli_blender BLENDER_BIN=$PWD/../../../blender.bin +""" + +import os +import shlex +import shutil +import subprocess +import sys +import tempfile +import time +import unittest + +from typing import ( + Dict, + Sequence, + Tuple, +) + + +PKG_MANIFEST_FILENAME_TOML = "blender_manifest.toml" + +VERBOSE_CMD = False + + +BLENDER_BIN = os.environ.get("BLENDER_BIN") +if BLENDER_BIN is None: + raise Exception("BLENDER_BIN: environment variable not defined") + + +# Arguments to ensure extensions are enabled (currently it's an experimental feature). +BLENDER_ENABLE_EXTENSION_ARGS = [ + "--python-exit-code", "1", + # Code begin/end text because of Blender's chatty reporting of version and that it quit. 
+ "--python-expr", '''\ +from bpy import context +context.preferences.view.show_developer_ui = True +context.preferences.experimental.use_extension_repos = True +''', +] + +BASE_DIR = os.path.abspath(os.path.dirname(__file__)) +sys.path.append(os.path.join(BASE_DIR, "modules")) +import python_wheel_generate # noqa: E402 + + +CMD = ( + sys.executable, + os.path.normpath(os.path.join(BASE_DIR, "..", "cli", "blender_ext.py")), +) + +# Write the command to a script, use so it's possible to manually run commands outside of the test environment. +TEMP_COMMAND_OUTPUT = "" # os.path.join(tempfile.gettempdir(), "blender_test.sh") + +# Handy when developing test so the paths can be manually inspected. +USE_PAUSE_BEFORE_EXIT = False + + +# ----------------------------------------------------------------------------- +# Utility Functions + +def pause_until_keyboard_interrupt() -> None: + print("Waiting for keyboard interrupt...") + try: + time.sleep(10_000) + except KeyboardInterrupt: + pass + print("Exiting!") + + +def contents_to_filesystem( + contents: Dict[str, bytes], + directory: str, +) -> None: + swap_slash = os.sep == "\\" + for key, value in contents.items(): + path = key.replace("/", "\\") if swap_slash else key + path_full = os.path.join(directory, path) + path_base = os.path.dirname(path_full) + os.makedirs(path_base, exist_ok=True) + + with ( + open(path_full, "wb") if isinstance(value, bytes) else + open(path_full, "w", encoding="utf-8") + ) as fh: + fh.write(value) + + +def create_package( + pkg_src_dir: str, + pkg_idname: str, + wheel_module_name: str, + wheel_module_version: str, +) -> None: + pkg_name = pkg_idname.replace("_", " ").title() + + wheel_filename, wheel_filedata = python_wheel_generate.generate_from_source( + module_name=wheel_module_name, + version=wheel_module_version, + source=( + "__version__ = {!r}\n" + "print(\"The wheel has been found\")\n" + ).format(wheel_module_version), + ) + + wheel_dir = os.path.join(pkg_src_dir, "wheels") + os.makedirs(wheel_dir, exist_ok=True) + path = os.path.join(wheel_dir, wheel_filename) + with open(path, "wb") as fh: + fh.write(wheel_filedata) + + with open(os.path.join(pkg_src_dir, PKG_MANIFEST_FILENAME_TOML), "w", encoding="utf-8") as fh: + fh.write('''# Example\n''') + fh.write('''schema_version = "1.0.0"\n''') + fh.write('''id = "{:s}"\n'''.format(pkg_idname)) + fh.write('''name = "{:s}"\n'''.format(pkg_name)) + fh.write('''type = "add-on"\n''') + fh.write('''tags = ["UV"]\n''') + fh.write('''maintainer = "Maintainer Name "\n''') + fh.write('''license = ["SPDX:GPL-2.0-or-later"]\n''') + fh.write('''version = "1.0.0"\n''') + fh.write('''tagline = "This is a tagline"\n''') + fh.write('''blender_version_min = "0.0.0"\n''') + fh.write('''\n''') + fh.write('''wheels = ["./wheels/{:s}"]\n'''.format(wheel_filename)) + + with open(os.path.join(pkg_src_dir, "__init__.py"), "w", encoding="utf-8") as fh: + fh.write(( + '''import {:s}\n''' + '''def register():\n''' + ''' print("Register success:", __name__)\n''' + '''\n''' + '''def unregister():\n''' + ''' print("Unregister success:", __name__)\n''' + ).format(wheel_module_name)) + + +def run_blender( + args: Sequence[str], + force_script_and_pause: bool = False, +) -> Tuple[int, str, str]: + """ + :arg force_script_and_pause: + When true, write out a shell script and wait, + this lets the developer run the command manually which is useful as the temporary directories + are removed once the test finished. + """ + assert BLENDER_BIN is not None + cmd: Tuple[str, ...] 
= ( + BLENDER_BIN, + # Needed while extensions is experimental. + *BLENDER_ENABLE_EXTENSION_ARGS, + *args, + ) + cwd = TEMP_DIR_LOCAL + + if VERBOSE_CMD: + print(shlex.join(cmd)) + + env_overlay = { + "TMPDIR": TEMP_DIR_TMPDIR, + "BLENDER_USER_RESOURCES": TEMP_DIR_BLENDER_USER, + # Needed for ASAN builds. + "ASAN_OPTIONS": "log_path={:s}:exitcode=0:{:s}".format( + # Needed so the `stdout` & `stderr` aren't mixed in with ASAN messages. + os.path.join(TEMP_DIR_TMPDIR, "blender_asan.txt"), + # Support using existing configuration (if set). + os.environ.get("ASAN_OPTIONS", ""), + ), + } + + if force_script_and_pause: + temp_command_output = os.path.join(tempfile.gettempdir(), "blender_test.sh") + else: + temp_command_output = TEMP_COMMAND_OUTPUT + + if temp_command_output: + with open(temp_command_output, "w", encoding="utf-8") as fh: + fh.write("#!/usr/bin/env bash\n") + for k, v in env_overlay.items(): + fh.write("export {:s}={:s}\n".format(k, shlex.quote(v))) + fh.write("\n") + + fh.write("cd {:s}\n\n".format(shlex.quote(cwd))) + + for i, v in enumerate(cmd): + if i != 0: + fh.write(" ") + fh.write(shlex.quote(v)) + if i + 1 != len(cmd): + fh.write(" \\\n") + fh.write("\n\n") + + if force_script_and_pause: + print("Written:", temp_command_output) + time.sleep(10_000) + + output = subprocess.run( + cmd, + cwd=cwd, + env={ + **os.environ, + **env_overlay, + }, + stderr=subprocess.PIPE, + stdout=subprocess.PIPE, + ) + stdout = output.stdout.decode("utf-8") + stderr = output.stderr.decode("utf-8") + + if VERBOSE_CMD: + print(stdout) + print(stderr) + + return ( + output.returncode, + stdout, + stderr, + ) + + +def run_blender_no_errors( + args: Sequence[str], + force_script_and_pause: bool = False, +) -> str: + returncode, stdout, stderr = run_blender(args, force_script_and_pause=force_script_and_pause) + if returncode != 0: + if stdout: + sys.stdout.write("STDOUT:\n") + sys.stdout.write(stdout + "\n") + if stderr: + sys.stdout.write("STDERR:\n") + sys.stdout.write(stderr + "\n") + raise Exception("Expected zero returncode, got {:d}".format(returncode)) + if stderr: + raise Exception("Expected empty stderr, got {:s}".format(stderr)) + return stdout + + +def run_blender_extensions( + args: Sequence[str], + force_script_and_pause: bool = False, +) -> Tuple[int, str, str]: + return run_blender(("--command", "extension", *args,), force_script_and_pause=force_script_and_pause) + + +def run_blender_extensions_no_errors( + args: Sequence[str], + force_script_and_pause: bool = False, +) -> str: + return run_blender_no_errors(("--command", "extension", *args,), force_script_and_pause=force_script_and_pause) + + +# Initialized from `main()`. +TEMP_DIR_BLENDER_USER = "" +TEMP_DIR_REMOTE = "" +TEMP_DIR_LOCAL = "" +# Don't leave temporary files in TMP: `/tmp` (since it's only cleared on restart). +# Instead, have a test-local temporary directly which is removed when the test finishes. +TEMP_DIR_TMPDIR = "" + +user_dirs: Tuple[str, ...] = ( + "config", + "datafiles", + "extensions", + "scripts", +) + + +class TestWithTempBlenderUser_MixIn(unittest.TestCase): + + @classmethod + def setUpClass(cls) -> None: + for dirname in user_dirs: + os.makedirs(os.path.join(TEMP_DIR_BLENDER_USER, dirname), exist_ok=True) + + @classmethod + def tearDownClass(cls) -> None: + for dirname in user_dirs: + shutil.rmtree(os.path.join(TEMP_DIR_BLENDER_USER, dirname)) + + +class TestSimple(TestWithTempBlenderUser_MixIn, unittest.TestCase): + + # Internal utilities. 
+ def _build_package( + self, + *, + pkg_idname: str, + wheel_module_name: str, + wheel_module_version: str, + ) -> None: + pkg_output_filepath = os.path.join(TEMP_DIR_REMOTE, pkg_idname + ".zip") + with tempfile.TemporaryDirectory() as package_build_dir: + create_package( + package_build_dir, + pkg_idname=pkg_idname, + wheel_module_name=wheel_module_name, + wheel_module_version=wheel_module_version, + ) + stdout = run_blender_extensions_no_errors(( + "build", + "--source-dir", package_build_dir, + "--output-filepath", pkg_output_filepath, + )) + self.assertEqual( + stdout, + ( + "Building {:s}.zip\n" + "complete\n" + "created \"{:s}\", {:d}\n" + ).format(pkg_idname, pkg_output_filepath, os.path.getsize(pkg_output_filepath)), + ) + + def test_simple_package(self) -> None: + """ + Create a simple package and install it. + """ + + repo_id = "test_repo_module_name" + + stdout = run_blender_extensions_no_errors(( + "repo-add", + "--name", "MyTestRepo", + "--directory", TEMP_DIR_LOCAL, + "--url", TEMP_DIR_REMOTE, + # A bit odd, this argument avoids running so many commands to setup a test. + "--clear-all", + repo_id, + )) + self.assertEqual(stdout, "Info: Preferences saved\n") + + wheel_module_name = "my_custom_wheel" + + # Create a package contents. + pkg_idname = "my_test_pkg" + self._build_package( + pkg_idname=pkg_idname, + wheel_module_name=wheel_module_name, + wheel_module_version="1.0.1", + ) + + # Generate the repository. + stdout = run_blender_extensions_no_errors(( + "server-generate", + "--repo-dir", TEMP_DIR_REMOTE, + )) + self.assertEqual(stdout, "found 1 packages.\n") + + stdout = run_blender_extensions_no_errors(( + "sync", + )) + self.assertEqual(stdout.rstrip("\n").split("\n")[-1], "STATUS Sync complete: {:s}".format(TEMP_DIR_REMOTE)) + + # Install the package into Blender. + + stdout = run_blender_extensions_no_errors(("repo-list",)) + self.assertEqual( + stdout, + ( + '''test_repo_module_name:\n''' + ''' name: "MyTestRepo"\n''' + ''' directory: "{:s}"\n''' + ''' url: "{:s}"\n''' + ).format(TEMP_DIR_LOCAL, TEMP_DIR_REMOTE)) + + stdout = run_blender_extensions_no_errors(("list",)) + self.assertEqual( + stdout, + ( + '''Repository: "MyTestRepo" (id=test_repo_module_name)\n''' + ''' my_test_pkg: "My Test Pkg", This is a tagline\n''' + ) + ) + + stdout = run_blender_extensions_no_errors(("install", pkg_idname, "--enable")) + self.assertEqual( + [line for line in stdout.split("\n") if line.startswith("STATUS ")][0], + "STATUS Installed \"my_test_pkg\"" + ) + + # TODO: validate the installation works - that the package does something non-trivial when Blender starts. + + stdout = run_blender_extensions_no_errors(("remove", pkg_idname)) + self.assertEqual( + [line for line in stdout.split("\n") if line.startswith("STATUS ")][0], + "STATUS Removed \"my_test_pkg\"" + ) + + returncode, _, _ = run_blender(( + "-b", + "--python-expr", + # Return an `exitcode` of 64 if the module exists. + # The module should not exist (and return a zero error code). + ( + '''import sys\n''' + '''try:\n''' + ''' import {:s}\n''' + ''' code = 32\n''' + '''except ModuleNotFoundError:\n''' + ''' code = 64\n''' + '''sys.exit(code)\n''' + ).format(wheel_module_name) + )) + self.assertEqual(returncode, 64) + + # Ensure packages that including conflicting dependencies use the newest wheel. + packages_to_install = ["my_test_pkg"] + # This is the maximum wheel version. + packages_wheel_version_max = "4.0.1" + # Create a package contents (with a different wheel version). 
+ for pkg_idname, wheel_module_version in ( + ("my_test_pkg_a", "2.0.1"), + ("my_test_pkg_b", packages_wheel_version_max), + ("my_test_pkg_c", "3.0.1"), + ): + packages_to_install.append(pkg_idname) + self._build_package( + pkg_idname=pkg_idname, + wheel_module_name=wheel_module_name, + wheel_module_version=wheel_module_version, + ) + + # Generate the repository. + stdout = run_blender_extensions_no_errors(( + "server-generate", + "--repo-dir", TEMP_DIR_REMOTE, + )) + self.assertEqual(stdout, "found 4 packages.\n") + + stdout = run_blender_extensions_no_errors(( + "sync", + )) + self.assertEqual(stdout.rstrip("\n").split("\n")[-1], "STATUS Sync complete: {:s}".format(TEMP_DIR_REMOTE)) + + # Install. + + stdout = run_blender_extensions_no_errors(("install", ",".join(packages_to_install), "--enable")) + self.assertEqual( + tuple([line for line in stdout.split("\n") if line.startswith("STATUS ")]), + ( + '''STATUS Installed "my_test_pkg"''', + '''STATUS Installed "my_test_pkg_a"''', + '''STATUS Installed "my_test_pkg_b"''', + '''STATUS Installed "my_test_pkg_c"''', + ) + ) + + returncode, stdout, stderr = run_blender(( + "-b", + "--python-expr", + # Return an `exitcode` of 64 if the module exists. + # The module should not exist (and return a zero error code). + ( + '''import sys\n''' + '''try:\n''' + ''' import {:s}\n''' + ''' found = True\n''' + '''except ModuleNotFoundError:\n''' + ''' found = False\n''' + '''if found:\n''' + ''' if {:s}.__version__ == "{:s}":\n''' + ''' sys.exit(64) # Success!\n''' + ''' else:\n''' + ''' sys.exit(32)\n''' + '''else:\n''' + ''' sys.exit(16)\n''' + ).format(wheel_module_name, wheel_module_name, packages_wheel_version_max), + )) + + self.assertEqual(returncode, 64) + + if USE_PAUSE_BEFORE_EXIT: + print(TEMP_DIR_REMOTE) + print(TEMP_DIR_BLENDER_USER) + pause_until_keyboard_interrupt() + + +def main() -> None: + global TEMP_DIR_BLENDER_USER + global TEMP_DIR_REMOTE + global TEMP_DIR_LOCAL + global TEMP_DIR_TMPDIR + + with tempfile.TemporaryDirectory() as temp_prefix: + TEMP_DIR_BLENDER_USER = os.path.join(temp_prefix, "bl_ext_blender") + TEMP_DIR_REMOTE = os.path.join(temp_prefix, "bl_ext_remote") + TEMP_DIR_LOCAL = os.path.join(temp_prefix, "bl_ext_local") + TEMP_DIR_TMPDIR = os.path.join(temp_prefix, "tmp") + + for directory in ( + TEMP_DIR_BLENDER_USER, + TEMP_DIR_REMOTE, + TEMP_DIR_LOCAL, + TEMP_DIR_TMPDIR, + ): + os.makedirs(directory, exist_ok=True) + + for dirname in user_dirs: + os.makedirs(os.path.join(TEMP_DIR_BLENDER_USER, dirname), exist_ok=True) + + unittest.main() + + +if __name__ == "__main__": + main() diff --git a/scripts/addons_core/bl_pkg/wheel_manager.py b/scripts/addons_core/bl_pkg/wheel_manager.py new file mode 100644 index 00000000000..fe509364a67 --- /dev/null +++ b/scripts/addons_core/bl_pkg/wheel_manager.py @@ -0,0 +1,380 @@ +# SPDX-FileCopyrightText: 2024 Blender Foundation +# +# SPDX-License-Identifier: GPL-2.0-or-later + +# Ref: https://peps.python.org/pep-0491/ +# Deferred but seems to include valid info for existing wheels. + +""" +This module takes wheels and applies them to a "managed" destination directory. +""" + +__all__ = ( + "apply_action" +) + +import os +import re +import shutil +import zipfile + +from typing import ( + Dict, + List, + Optional, + Set, + Tuple, +) + +WheelSource = Tuple[ + # Key - doesn't matter what this is... it's just a handle. + str, + # A list of absolute wheel file-paths. 
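+    # For example (hypothetical values only):
+    #   ("my_extension_id", ["/path/to/extensions/my_extension/wheels/pip-24.0-py3-none-any.whl"])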
+ List[str], +] + + +def _read_records_csv(filepath: str) -> List[List[str]]: + import csv + with open(filepath, encoding="utf8", errors="surrogateescape") as fh: + return list(csv.reader(fh.read().splitlines())) + + +def _wheels_from_dir(dirpath: str) -> Tuple[ + # The key is: + # wheel_id + # The values are: + # Top level directories. + Dict[str, List[str]], + # Unknown path.s + List[str], +]: + result: Dict[str, List[str]] = {} + paths_unused: Set[str] = set() + + if not os.path.exists(dirpath): + return result, list(paths_unused) + + for entry in os.scandir(dirpath): + name = entry.name + paths_unused.add(name) + if not entry.is_dir(): + continue + # TODO: is this part of the spec? + name = entry.name + if not name.endswith("-info"): + continue + filepath_record = os.path.join(entry.path, "RECORD") + if not os.path.exists(filepath_record): + continue + + record_rows = _read_records_csv(filepath_record) + + # Build top-level paths. + toplevel_paths_set: Set[str] = set() + for row in record_rows: + if not row: + continue + path_text = row[0] + # Ensure paths separator is compatible. + path_text = path_text.replace("\\", "/") + # Ensure double slashes don't cause issues or "/./" doesn't complicate checking the head of the path. + path_split = [ + elem for elem in path_text.split("/") + if elem not in {"", "."} + ] + if not path_split: + continue + # These wont have been extracted. + if path_split[0] in {"..", name}: + continue + + toplevel_paths_set.add(path_split[0]) + + result[name] = list(sorted(toplevel_paths_set)) + del toplevel_paths_set + + for wheel_name, toplevel_paths in result.items(): + paths_unused.discard(wheel_name) + for name in toplevel_paths: + paths_unused.discard(name) + + paths_unused_list = list(sorted(paths_unused)) + + return result, paths_unused_list + + +def _wheel_info_dir_from_zip(filepath_wheel: str) -> Optional[Tuple[str, List[str]]]: + """ + Return: + - The "*-info" directory name which contains meta-data. + - The top-level path list (excluding ".."). + """ + dir_info = "" + toplevel_paths: Set[str] = set() + + with zipfile.ZipFile(filepath_wheel, mode="r") as zip_fh: + # This file will always exist. + for filepath_rel in zip_fh.namelist(): + path_split = [ + elem for elem in filepath_rel.split("/") + if elem not in {"", "."} + ] + if not path_split: + continue + if path_split[0] == "..": + continue + + if len(path_split) == 2: + if path_split[1].upper() == "RECORD": + if path_split[0].endswith("-info"): + dir_info = path_split[0] + + toplevel_paths.add(path_split[0]) + + if dir_info == "": + return None + toplevel_paths.discard(dir_info) + toplevel_paths_list = list(sorted(toplevel_paths)) + return dir_info, toplevel_paths_list + + +def _rmtree_safe(dir_remove: str, expected_root: str) -> None: + if not dir_remove.startswith(expected_root): + raise Exception("Expected prefix not found") + shutil.rmtree(dir_remove) + + +def _zipfile_extractall_safe( + zip_fh: zipfile.ZipFile, + path: str, + path_restrict: str, +) -> None: + """ + A version of ``ZipFile.extractall`` that wont write to paths outside ``path_restrict``. + + Avoids writing this: + ``zip_fh.extractall(zip_fh, path)`` + """ + sep = os.sep + path_restrict = path_restrict.rstrip(sep) + if sep == "\\": + path_restrict = path_restrict.rstrip("/") + path_restrict_with_slash = path_restrict + sep + + # Strip is probably not needed (only if multiple slashes exist). + path_prefix = path[len(path_restrict_with_slash):].lstrip(sep) + # Switch slashes forward. 
+ if sep == "\\": + path_prefix = path_prefix.replace("\\", "/").rstrip("/") + "/" + else: + path_prefix = path_prefix + "/" + + path_restrict_with_slash = path_restrict + sep + assert len(path) >= len(path_restrict_with_slash) + if not path.startswith(path_restrict_with_slash): + raise Exception("Expected the restricted directory to start with ") + + for member in zip_fh.infolist(): + filename_orig = member.filename + member.filename = path_prefix + filename_orig + # This isn't likely to happen so accept a noisy print here. + # If this ends up happening more often, it could be suppressed. + # (although this hints at bigger problems because we might be excluding necessary files). + if os.path.normpath(member.filename).startswith(".." + sep): + print("Skipping path:", member.filename, "that escapes:", path_restrict) + continue + zip_fh.extract(member, path_restrict) + member.filename = filename_orig + + +WHEEL_VERSION_RE = re.compile(r"(\d+)?(?:\.(\d+))?(?:\.(\d+))") + + +def wheel_version_from_filename_for_cmp( + filename: str, +) -> Tuple[int, int, int, str]: + """ + Extract the version number for comparison. + Note that this only handled the first 3 numbers, + the trailing text is compared as a string which is not technically correct + however this is not a priority to support since scripts should only be including stable releases, + so comparing the first 3 numbers is sufficient. The trailing string is just a tie breaker in the + unlikely event it differs. + + If supporting the full spec, comparing: "1.1.dev6" with "1.1.6rc6" for e.g. + we could support this doesn't seem especially important as extensions should use major releases. + """ + filename_split = filename.split("-") + if len(filename_split) >= 2: + version = filename.split("-")[1] + if (version_match := WHEEL_VERSION_RE.match(version)) is not None: + groups = version_match.groups() + # print(groups) + return ( + int(groups[0]) if groups[0] is not None else 0, + int(groups[1]) if groups[1] is not None else 0, + int(groups[2]) if groups[2] is not None else 0, + version[version_match.end():], + ) + return (0, 0, 0, "") + + +def wheel_list_deduplicate_as_skip_set( + wheel_list: List[WheelSource], +) -> Set[str]: + """ + Return all wheel paths to skip. + """ + wheels_to_skip: Set[str] = set() + all_wheels: Set[str] = { + filepath + for _, wheels in wheel_list + for filepath in wheels + } + + # NOTE: this is not optimized. + # Probably speed is never an issue here, but this could be sped up. + + # Keep a map from the base name to the "best" wheel, + # the other wheels get added to `wheels_to_skip` to be ignored. + all_wheels_by_base: Dict[str, str] = {} + + for wheel in all_wheels: + wheel_filename = os.path.basename(wheel) + wheel_base = wheel_filename.partition("-")[0] + + wheel_exists = all_wheels_by_base.get(wheel_base) + if wheel_exists is None: + all_wheels_by_base[wheel_base] = wheel + continue + + wheel_exists_filename = os.path.basename(wheel_exists) + if wheel_exists_filename == wheel_filename: + # Should never happen because they are converted into a set before looping. + assert wheel_exists != wheel + # The same wheel is used in two different locations, use a tie breaker for predictability + # although the result should be the same. 
+ if wheel_exists_filename < wheel_filename: + all_wheels_by_base[wheel_base] = wheel + wheels_to_skip.add(wheel_exists) + else: + wheels_to_skip.add(wheel) + else: + wheel_version = wheel_version_from_filename_for_cmp(wheel_filename) + wheel_exists_version = wheel_version_from_filename_for_cmp(wheel_exists_filename) + if ( + (wheel_exists_version < wheel_version) or + # Tie breaker for predictability. + ((wheel_exists_version == wheel_version) and (wheel_exists_filename < wheel_filename)) + ): + all_wheels_by_base[wheel_base] = wheel + wheels_to_skip.add(wheel_exists) + else: + wheels_to_skip.add(wheel) + + return wheels_to_skip + + +def apply_action( + *, + local_dir: str, + local_dir_site_packages: str, + wheel_list: List[WheelSource], +) -> None: + """ + :arg local_dir: + The location wheels are stored. + Typically: ``~/.config/blender/4.2/extensions/.local``. + + WARNING: files under this directory may be removed. + :arg local_dir_site_packages: + The path which wheels are extracted into. + Typically: ``~/.config/blender/4.2/extensions/.local/lib/python3.11/site-packages``. + """ + debug = False + + # NOTE: we could avoid scanning the wheel directories however: + # Recursively removing all paths on the users system can be considered relatively risky + # even if this is located in a known location under the users home directory - better avoid. + # So build a list of wheel paths and only remove the unused paths from this list. + wheels_installed, paths_unknown = _wheels_from_dir(local_dir_site_packages) + + # Wheels and their top level directories (which would be installed). + wheels_packages: Dict[str, List[str]] = {} + + # Map the wheel ID to path. + wheels_dir_info_to_filepath_map: Dict[str, str] = {} + + # NOTE(@ideasman42): the wheels skip-set only de-duplicates at the level of the base-name of the wheels filename. + # So the wheel file-paths: + # - `pip-24.0-py3-none-any.whl` + # - `pip-22.1-py2-none-any.whl` + # Will both extract the *base* name `pip`, de-duplicating by skipping the wheels with an older version number. + # This is not fool-proof, because it is possible files inside the `.whl` conflict upon extraction. + # In practice I consider this fairly unlikely because: + # - Practically all wheels extract to their top-level module names. + # - Modules are mainly downloaded from the Python package index. + # + # Having two modules conflict is possible but this is an issue outside of Blender, + # as it's most likely quite rare and generally avoided with unique module names, + # this is not considered a problem to "solve" at the moment. + # + # The one exception to this assumption is any extensions that bundle `.whl` files that aren't + # available on the Python package index. In this case naming collisions are more likely. + # This probably needs to be handled on a policy level - if the `.whl` author also maintains + # the extension they can in all likelihood make the module a sub-module of the extension + # without the need to use `.whl` files. + wheels_to_skip = wheel_list_deduplicate_as_skip_set(wheel_list) + + for key, wheels in wheel_list: + for wheel in wheels: + if wheel in wheels_to_skip: + continue + if (wheel_info := _wheel_info_dir_from_zip(wheel)) is None: + continue + dir_info, toplevel_paths_list = wheel_info + wheels_packages[dir_info] = toplevel_paths_list + + wheels_dir_info_to_filepath_map[dir_info] = wheel + + # Now there is two sets of packages, the ones we need and the ones we have. 
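+    # For example (hypothetical names): a `foo-1.0.dist-info` entry present in `wheels_installed`
+    # but absent from `wheels_packages` has its files removed in the "Clear" step below, while an
+    # entry only in `wheels_packages` has its wheel extracted in the "Setup" step further down.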
+ + # ----- + # Clear + + # First remove installed packages no longer needed: + for dir_info, toplevel_paths_list in wheels_installed.items(): + if dir_info in wheels_packages: + continue + + # Remove installed packages which aren't needed any longer. + for filepath_rel in (dir_info, *toplevel_paths_list): + filepath_abs = os.path.join(local_dir_site_packages, filepath_rel) + if not os.path.exists(filepath_abs): + continue + + if debug: + print("removing wheel:", filepath_rel) + + if os.path.isdir(filepath_abs): + _rmtree_safe(filepath_abs, local_dir) + else: + os.remove(filepath_abs) + + # ----- + # Setup + + # Install packages that need to be installed: + for dir_info, toplevel_paths_list in wheels_packages.items(): + if dir_info in wheels_installed: + continue + + if debug: + for filepath_rel in toplevel_paths_list: + print("adding wheel:", filepath_rel) + filepath = wheels_dir_info_to_filepath_map[dir_info] + # `ZipFile.extractall` is needed because some wheels contain paths that point to parent directories. + # Handle this *safely* by allowing extracting to parent directories but limit this to the `local_dir`. + with zipfile.ZipFile(filepath, mode="r") as zip_fh: + _zipfile_extractall_safe(zip_fh, local_dir_site_packages, local_dir) diff --git a/scripts/addons_core/copy_global_transform.py b/scripts/addons_core/copy_global_transform.py new file mode 100644 index 00000000000..0f36d064940 --- /dev/null +++ b/scripts/addons_core/copy_global_transform.py @@ -0,0 +1,1100 @@ +# SPDX-FileCopyrightText: 2021-2023 Blender Foundation +# +# SPDX-License-Identifier: GPL-2.0-or-later + +""" +Copy Global Transform + +Simple add-on for copying world-space transforms. + +It's called "global" to avoid confusion with the Blender World data-block. +""" + +bl_info = { + "name": "Copy Global Transform", + "author": "Sybren A. Stüvel", + "version": (3, 0), + "blender": (4, 2, 0), + "location": "N-panel in the 3D Viewport", + "category": "Animation", + "support": 'OFFICIAL', + "doc_url": "{BLENDER_MANUAL_URL}/addons/animation/copy_global_transform.html", + "tracker_url": "https://projects.blender.org/blender/blender-addons/issues", +} + +import ast +import abc +import contextlib +from typing import Iterable, Optional, Union, Any, TypeAlias, Iterator + +import bpy +from bpy.types import Context, Object, Operator, Panel, PoseBone, UILayout, FCurve, Camera, FModifierStepped +from mathutils import Matrix + + +_axis_enum_items = [ + ("x", "X", "", 1), + ("y", "Y", "", 2), + ("z", "Z", "", 3), +] + + +class AutoKeying: + """Auto-keying support. + + Based on Rigify code by Alexander Gavrilov. + """ + + # Use AutoKeying.keytype() or Authkeying.options() context to change those. + _keytype = 'KEYFRAME' + _force_autokey = False # Allow use without the user activating auto-keying. 
+ _use_loc = True + _use_rot = True + _use_scale = True + + @classmethod + @contextlib.contextmanager + def keytype(cls, the_keytype: str) -> Iterator[None]: + """Context manager to set the key type that's inserted.""" + default_keytype = cls._keytype + try: + cls._keytype = the_keytype + yield + finally: + cls._keytype = default_keytype + + @classmethod + @contextlib.contextmanager + def options(cls, *, keytype="", use_loc=True, use_rot=True, use_scale=True, force_autokey=False) -> Iterator[None]: + """Context manager to set various options.""" + default_keytype = cls._keytype + default_use_loc = cls._use_loc + default_use_rot = cls._use_rot + default_use_scale = cls._use_scale + default_force_autokey = cls._force_autokey + try: + cls._keytype = keytype + cls._use_loc = use_loc + cls._use_rot = use_rot + cls._use_scale = use_scale + cls._force_autokey = force_autokey + yield + finally: + cls._keytype = default_keytype + cls._use_loc = default_use_loc + cls._use_rot = default_use_rot + cls._use_scale = default_use_scale + cls._force_autokey = default_force_autokey + + @classmethod + def keying_options(cls, context: Context) -> set[str]: + """Retrieve the general keyframing options from user preferences.""" + + prefs = context.preferences + ts = context.scene.tool_settings + options = set() + + if prefs.edit.use_visual_keying: + options.add('INSERTKEY_VISUAL') + if prefs.edit.use_keyframe_insert_needed: + options.add('INSERTKEY_NEEDED') + if ts.use_keyframe_cycle_aware: + options.add('INSERTKEY_CYCLE_AWARE') + return options + + @classmethod + def autokeying_options(cls, context: Context) -> Optional[set[str]]: + """Retrieve the Auto Keyframe options, or None if disabled.""" + + ts = context.scene.tool_settings + + if not (cls._force_autokey or ts.use_keyframe_insert_auto): + return None + + if ts.use_keyframe_insert_keyingset: + # No support for keying sets (yet). + return None + + prefs = context.preferences + options = cls.keying_options(context) + + if prefs.edit.use_keyframe_insert_available: + options.add('INSERTKEY_AVAILABLE') + if ts.auto_keying_mode == 'REPLACE_KEYS': + options.add('INSERTKEY_REPLACE') + return options + + @staticmethod + def get_4d_rotlock(bone: PoseBone) -> Iterable[bool]: + "Retrieve the lock status for 4D rotation." + if bone.lock_rotations_4d: + return [bone.lock_rotation_w, *bone.lock_rotation] + else: + return [all(bone.lock_rotation)] * 4 + + @classmethod + def keyframe_channels( + cls, + target: Union[Object, PoseBone], + options: set[str], + data_path: str, + group: str, + locks: Iterable[bool], + ) -> None: + if all(locks): + return + + if not any(locks): + target.keyframe_insert(data_path, group=group, options=options, keytype=cls._keytype) + return + + for index, lock in enumerate(locks): + if lock: + continue + target.keyframe_insert(data_path, index=index, group=group, options=options, keytype=cls._keytype) + + @classmethod + def key_transformation( + cls, + target: Union[Object, PoseBone], + options: set[str], + ) -> None: + """Keyframe transformation properties, avoiding keying locked channels.""" + + is_bone = isinstance(target, PoseBone) + if is_bone: + group = target.name + else: + group = "Object Transforms" + + def keyframe(data_path: str, locks: Iterable[bool]) -> None: + try: + cls.keyframe_channels(target, options, data_path, group, locks) + except RuntimeError: + # These are expected when "Insert Available" is turned on, and + # these curves are not available. 
+ pass + + if cls._use_loc and not (is_bone and target.bone.use_connect): + keyframe("location", target.lock_location) + + if cls._use_rot: + if target.rotation_mode == 'QUATERNION': + keyframe("rotation_quaternion", cls.get_4d_rotlock(target)) + elif target.rotation_mode == 'AXIS_ANGLE': + keyframe("rotation_axis_angle", cls.get_4d_rotlock(target)) + else: + keyframe("rotation_euler", target.lock_rotation) + + if cls._use_scale: + keyframe("scale", target.lock_scale) + + @classmethod + def autokey_transformation(cls, context: Context, target: Union[Object, PoseBone]) -> None: + """Auto-key transformation properties.""" + + options = cls.autokeying_options(context) + if options is None: + return + cls.key_transformation(target, options) + + +def get_matrix(context: Context) -> Matrix: + bone = context.active_pose_bone + if bone: + # Convert matrix to world space + arm = context.active_object + mat = arm.matrix_world @ bone.matrix + else: + mat = context.active_object.matrix_world + + return mat + + +def set_matrix(context: Context, mat: Matrix) -> None: + bone = context.active_pose_bone + if bone: + # Convert matrix to local space + arm_eval = context.active_object.evaluated_get(context.view_layer.depsgraph) + bone.matrix = arm_eval.matrix_world.inverted() @ mat + AutoKeying.autokey_transformation(context, bone) + else: + context.active_object.matrix_world = mat + AutoKeying.autokey_transformation(context, context.active_object) + + +def _selected_keyframes(context: Context) -> list[float]: + """Return the list of frame numbers that have a selected key. + + Only keys on the active bone/object are considered. + """ + bone = context.active_pose_bone + if bone: + return _selected_keyframes_for_bone(context.active_object, bone) + return _selected_keyframes_for_object(context.active_object) + + +def _selected_keyframes_for_bone(object: Object, bone: PoseBone) -> list[float]: + """Return the list of frame numbers that have a selected key. + + Only keys on the given pose bone are considered. + """ + name = bpy.utils.escape_identifier(bone.name) + return _selected_keyframes_in_action(object, f'pose.bones["{name}"].') + + +def _selected_keyframes_for_object(object: Object) -> list[float]: + """Return the list of frame numbers that have a selected key. + + Only keys on the given object are considered. + """ + return _selected_keyframes_in_action(object, "") + + +def _selected_keyframes_in_action(object: Object, rna_path_prefix: str) -> list[float]: + """Return the list of frame numbers that have a selected key. + + Only keys on the given object's Action on FCurves starting with rna_path_prefix are considered. + """ + + action = object.animation_data and object.animation_data.action + if action is None: + return [] + + keyframes = set() + for fcurve in action.fcurves: + if not fcurve.data_path.startswith(rna_path_prefix): + continue + + for kp in fcurve.keyframe_points: + if not kp.select_control_point: + continue + keyframes.add(kp.co.x) + return sorted(keyframes) + + +def _copy_matrix_to_clipboard(window_manager: bpy.types.WindowManager, matrix: Matrix) -> None: + rows = [f" {tuple(row)!r}," for row in matrix] + as_string = "\n".join(rows) + window_manager.clipboard = f"Matrix((\n{as_string}\n))" + + +class OBJECT_OT_copy_global_transform(Operator): + bl_idname = "object.copy_global_transform" + bl_label = "Copy Global Transform" + bl_description = ( + "Copies the matrix of the currently active object or pose bone to the clipboard. 
Uses world-space matrices" + ) + # This operator cannot be un-done because it manipulates data outside Blender. + bl_options = {'REGISTER'} + + @classmethod + def poll(cls, context: Context) -> bool: + return bool(context.active_pose_bone) or bool(context.active_object) + + def execute(self, context: Context) -> set[str]: + mat = get_matrix(context) + _copy_matrix_to_clipboard(context.window_manager, mat) + return {'FINISHED'} + + +def _get_relative_ob(context: Context) -> Optional[Object]: + """Get the 'relative' object. + + This is the object that's configured, or if that's empty, the active scene camera. + """ + rel_ob = context.scene.addon_copy_global_transform_relative_ob + return rel_ob or context.scene.camera + + +class OBJECT_OT_copy_relative_transform(Operator): + bl_idname = "object.copy_relative_transform" + bl_label = "Copy Relative Transform" + bl_description = "Copies the matrix of the currently active object or pose bone to the clipboard. " \ + "Uses matrices relative to a specific object or the active scene camera" + # This operator cannot be un-done because it manipulates data outside Blender. + bl_options = {'REGISTER'} + + @classmethod + def poll(cls, context: Context) -> bool: + rel_ob = _get_relative_ob(context) + if not rel_ob: + return False + return bool(context.active_pose_bone) or bool(context.active_object) + + def execute(self, context: Context) -> set[str]: + rel_ob = _get_relative_ob(context) + mat = rel_ob.matrix_world.inverted() @ get_matrix(context) + _copy_matrix_to_clipboard(context.window_manager, mat) + return {'FINISHED'} + + +class UnableToMirrorError(Exception): + """Raised when mirroring is enabled but no mirror object/bone is set.""" + + +class OBJECT_OT_paste_transform(Operator): + bl_idname = "object.paste_transform" + bl_label = "Paste Global Transform" + bl_description = ( + "Pastes the matrix from the clipboard to the currently active pose bone or object. Uses world-space matrices" + ) + bl_options = {'REGISTER', 'UNDO'} + + _method_items = [ + ( + 'CURRENT', + "Current Transform", + "Paste onto the current values only, only manipulating the animation data if auto-keying is enabled", + ), + ( + 'EXISTING_KEYS', + "Selected Keys", + "Paste onto frames that have a selected key, potentially creating new keys on those frames", + ), + ( + 'BAKE', + "Bake on Key Range", + "Paste onto all frames between the first and last selected key, creating new keyframes if necessary", + ), + ] + method: bpy.props.EnumProperty( # type: ignore + items=_method_items, + name="Paste Method", + description="Update the current transform, selected keyframes, or even create new keys", + ) + bake_step: bpy.props.IntProperty( # type: ignore + name="Frame Step", + description="Only used for baking. 
Step=1 creates a key on every frame, step=2 bakes on 2s, etc",
+        min=1,
+        soft_min=1,
+        soft_max=5,
+    )
+
+    use_mirror: bpy.props.BoolProperty(  # type: ignore
+        name="Mirror Transform",
+        description="When pasting, mirror the transform relative to a specific object or bone",
+        default=False,
+    )
+
+    mirror_axis_loc: bpy.props.EnumProperty(  # type: ignore
+        items=_axis_enum_items,
+        name="Location Axis",
+        description="Coordinate axis used to mirror the location part of the transform",
+        default='x',
+    )
+    mirror_axis_rot: bpy.props.EnumProperty(  # type: ignore
+        items=_axis_enum_items,
+        name="Rotation Axis",
+        description="Coordinate axis used to mirror the rotation part of the transform",
+        default='z',
+    )
+
+    use_relative: bpy.props.BoolProperty(  # type: ignore
+        name="Use Relative Paste",
+        description="When pasting, assume the pasted matrix is relative to another object (set in the user interface)",
+        default=False,
+    )
+
+    @classmethod
+    def poll(cls, context: Context) -> bool:
+        if not context.active_pose_bone and not context.active_object:
+            cls.poll_message_set("Select an object or pose bone")
+            return False
+
+        clipboard = context.window_manager.clipboard.strip()
+        if not (clipboard.startswith("Matrix(") or clipboard.startswith("<Matrix 4x4")):
+            cls.poll_message_set("Clipboard does not contain a valid matrix")
+            return False
+        return True
+
+    @staticmethod
+    def parse_print_m4(value: str) -> Optional[Matrix]:
+        """Parse output from Blender's print_m4() function.
+
+        Expects four lines of space-separated floats.
+        """
+
+        lines = value.strip().splitlines()
+        if len(lines) != 4:
+            return None
+
+        floats = tuple(tuple(float(item) for item in line.split()) for line in lines)
+        return Matrix(floats)
+
+    @staticmethod
+    def parse_repr_m4(value: str) -> Optional[Matrix]:
+        """Four lines of (a, b, c, d) floats."""
+
+        lines = value.strip().splitlines()
+        if len(lines) != 4:
+            return None
+
+        floats = tuple(tuple(float(item.strip()) for item in line.strip()[1:-1].split(',')) for line in lines)
+        return Matrix(floats)
+
+    def execute(self, context: Context) -> set[str]:
+        clipboard = context.window_manager.clipboard.strip()
+        if clipboard.startswith("Matrix"):
+            mat = Matrix(ast.literal_eval(clipboard[6:]))
+        elif clipboard.startswith("<Matrix 4x4"):
+            # Strip the "<Matrix 4x4 " prefix and the trailing ">" of the repr() form.
+            mat = self.parse_repr_m4(clipboard[12:-1])
+        else:
+            mat = self.parse_print_m4(clipboard)
+
+        if mat is None:
+            self.report({'ERROR'}, "Clipboard does not contain a valid matrix")
+            return {'CANCELLED'}
+
+        try:
+            mat = self._preprocess_matrix(context, mat)
+        except UnableToMirrorError:
+            self.report({'ERROR'}, "Unable to mirror, no mirror object or bone configured")
+            return {'CANCELLED'}
+
+        applicator = {
+            'CURRENT': self._paste_current,
+            'EXISTING_KEYS': self._paste_existing_keys,
+            'BAKE': self._paste_bake,
+        }[self.method]
+        return applicator(context, mat)
+
+    def _preprocess_matrix(self, context: Context, matrix: Matrix) -> Matrix:
+        matrix = self._relative_to_world(context, matrix)
+
+        if self.use_mirror:
+            matrix = self._mirror_matrix(context, matrix)
+        return matrix
+
+    def _relative_to_world(self, context: Context, matrix: Matrix) -> Matrix:
+        if not self.use_relative:
+            return matrix
+
+        rel_ob = _get_relative_ob(context)
+        if not rel_ob:
+            return matrix
+
+        rel_ob_eval = rel_ob.evaluated_get(context.view_layer.depsgraph)
+        return rel_ob_eval.matrix_world @ matrix
+
+    def _mirror_matrix(self, context: Context, matrix: Matrix) -> Matrix:
+        mirror_ob = context.scene.addon_copy_global_transform_mirror_ob
+        mirror_bone = context.scene.addon_copy_global_transform_mirror_bone
+
+        # No mirror object means "current armature object".
+ ctx_ob = context.object + if not mirror_ob and mirror_bone and ctx_ob and ctx_ob.type == 'ARMATURE': + mirror_ob = ctx_ob + + if not mirror_ob: + raise UnableToMirrorError() + + if mirror_ob.type == 'ARMATURE' and mirror_bone: + return self._mirror_over_bone(matrix, mirror_ob, mirror_bone) + return self._mirror_over_ob(matrix, mirror_ob) + + def _mirror_over_ob(self, matrix: Matrix, mirror_ob: bpy.types.Object) -> Matrix: + mirror_matrix = mirror_ob.matrix_world + return self._mirror_over_matrix(matrix, mirror_matrix) + + def _mirror_over_bone(self, matrix: Matrix, mirror_ob: bpy.types.Object, mirror_bone_name: str) -> Matrix: + bone = mirror_ob.pose.bones[mirror_bone_name] + mirror_matrix = mirror_ob.matrix_world @ bone.matrix + return self._mirror_over_matrix(matrix, mirror_matrix) + + def _mirror_over_matrix(self, matrix: Matrix, mirror_matrix: Matrix) -> Matrix: + # Compute the matrix in the space of the mirror matrix: + mat_local = mirror_matrix.inverted() @ matrix + + # Decompose the matrix, as we don't want to touch the scale. This + # operator should only mirror the translation and rotation components. + trans, rot_q, scale = mat_local.decompose() + + # Mirror the translation component: + axis_index = ord(self.mirror_axis_loc) - ord('x') + trans[axis_index] *= -1 + + # Flip the rotation, and use a rotation order that applies the to-be-flipped axes first. + match self.mirror_axis_rot: + case 'x': + rot_e = rot_q.to_euler('XYZ') + rot_e.x *= -1 # Flip the requested rotation axis. + rot_e.y *= -1 # Also flip the bone roll. + case 'y': + rot_e = rot_q.to_euler('YZX') + rot_e.y *= -1 # Flip the requested rotation axis. + rot_e.z *= -1 # Also flip another axis? Not sure how to handle this one. + case 'z': + rot_e = rot_q.to_euler('ZYX') + rot_e.z *= -1 # Flip the requested rotation axis. + rot_e.y *= -1 # Also flip the bone roll. + + # Recompose the local matrix: + mat_local = Matrix.LocRotScale(trans, rot_e, scale) + + # Go back to world space: + mirrored_world = mirror_matrix @ mat_local + return mirrored_world + + @staticmethod + def _paste_current(context: Context, matrix: Matrix) -> set[str]: + set_matrix(context, matrix) + return {'FINISHED'} + + def _paste_existing_keys(self, context: Context, matrix: Matrix) -> set[str]: + if not context.scene.tool_settings.use_keyframe_insert_auto: + self.report({'ERROR'}, "This mode requires auto-keying to work properly") + return {'CANCELLED'} + + frame_numbers = _selected_keyframes(context) + if not frame_numbers: + self.report({'WARNING'}, "No selected frames found") + return {'CANCELLED'} + + self._paste_on_frames(context, frame_numbers, matrix) + return {'FINISHED'} + + def _paste_bake(self, context: Context, matrix: Matrix) -> set[str]: + if not context.scene.tool_settings.use_keyframe_insert_auto: + self.report({'ERROR'}, "This mode requires auto-keying to work properly") + return {'CANCELLED'} + + bake_step = max(1, self.bake_step) + # Put the clamped bake step back into RNA for the redo panel. 
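+        # For example, with frame_start=1, frame_end=10 and bake_step=3 the range
+        # computed below is range(1, 13, 3), so keys are pasted on frames 1, 4, 7 and 10.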
+ self.bake_step = bake_step + + frame_start, frame_end = self._determine_bake_range(context) + frame_range = range(round(frame_start), round(frame_end) + bake_step, bake_step) + self._paste_on_frames(context, frame_range, matrix) + return {'FINISHED'} + + def _determine_bake_range(self, context: Context) -> tuple[float, float]: + frame_numbers = _selected_keyframes(context) + if frame_numbers: + # Note that these could be the same frame, if len(frame_numbers) == 1: + return frame_numbers[0], frame_numbers[-1] + + if context.scene.use_preview_range: + self.report({'INFO'}, "No selected keys, pasting over preview range") + return context.scene.frame_preview_start, context.scene.frame_preview_end + + self.report({'INFO'}, "No selected keys, pasting over scene range") + return context.scene.frame_start, context.scene.frame_end + + def _paste_on_frames(self, context: Context, frame_numbers: Iterable[float], matrix: Matrix) -> None: + current_frame = context.scene.frame_current_final + try: + for frame in frame_numbers: + context.scene.frame_set(int(frame), subframe=frame % 1.0) + set_matrix(context, matrix) + finally: + context.scene.frame_set(int(current_frame), subframe=current_frame % 1.0) + + +# Mapping from frame number to the dominant key type. +# GENERATED is the only recessive key type, others are dominant. +KeyInfo: TypeAlias = dict[float, str] + + +class Transformable(metaclass=abc.ABCMeta): + """Interface for a bone or an object.""" + + def __init__(self) -> None: + self._key_info_cache: Optional[KeyInfo] = None + + @abc.abstractmethod + def matrix_world(self) -> Matrix: + pass + + @abc.abstractmethod + def set_matrix_world(self, context: Context, matrix: Matrix) -> None: + pass + + @abc.abstractmethod + def _my_fcurves(self) -> Iterable[bpy.types.FCurve]: + pass + + def key_info(self) -> KeyInfo: + if self._key_info_cache is not None: + return self._key_info_cache + + keyinfo: KeyInfo = {} + for fcurve in self._my_fcurves(): + for kp in fcurve.keyframe_points: + frame = kp.co.x + if kp.type == 'GENERATED' and frame in keyinfo: + # Don't bother overwriting other key types. 
+ continue + keyinfo[frame] = kp.type + + self._key_info_cache = keyinfo + return keyinfo + + def remove_keys_of_type(self, key_type: str, *, frame_start=float("-inf"), frame_end=float("inf")) -> None: + self._key_info_cache = None + + for fcurve in self._my_fcurves(): + to_remove = [ + kp for kp in fcurve.keyframe_points if kp.type == key_type and (frame_start <= kp.co.x <= frame_end) + ] + for kp in reversed(to_remove): + fcurve.keyframe_points.remove(kp, fast=True) + fcurve.keyframe_points.handles_recalc() + + +class TransformableObject(Transformable): + object: Object + + def __init__(self, object: Object) -> None: + super().__init__() + self.object = object + + def matrix_world(self) -> Matrix: + return self.object.matrix_world + + def set_matrix_world(self, context: Context, matrix: Matrix) -> None: + self.object.matrix_world = matrix + AutoKeying.autokey_transformation(context, self.object) + + def __hash__(self) -> int: + return hash(self.object.as_pointer()) + + def _my_fcurves(self) -> Iterable[bpy.types.FCurve]: + action = self._action() + if not action: + return + yield from action.fcurves + + def _action(self) -> Optional[bpy.types.Action]: + adt = self.object.animation_data + return adt and adt.action + + +class TransformableBone(Transformable): + arm_object: Object + pose_bone: PoseBone + + def __init__(self, pose_bone: PoseBone) -> None: + super().__init__() + self.arm_object = pose_bone.id_data + self.pose_bone = pose_bone + + def matrix_world(self) -> Matrix: + mat = self.arm_object.matrix_world @ self.pose_bone.matrix + return mat + + def set_matrix_world(self, context: Context, matrix: Matrix) -> None: + # Convert matrix to armature-local space + arm_eval = self.arm_object.evaluated_get(context.view_layer.depsgraph) + self.pose_bone.matrix = arm_eval.matrix_world.inverted() @ matrix + AutoKeying.autokey_transformation(context, self.pose_bone) + + def __hash__(self) -> int: + return hash(self.pose_bone.as_pointer()) + + def _my_fcurves(self) -> Iterable[bpy.types.FCurve]: + action = self._action() + if not action: + return + + rna_prefix = f"{self.pose_bone.path_from_id()}." 
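+        # `path_from_id()` returns e.g. 'pose.bones["hand.L"]', so with the trailing "."
+        # the prefix matches only this bone's own channels such as
+        # 'pose.bones["hand.L"].location', not those of other bones or of the object itself.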
+ for fcurve in action.fcurves: + if fcurve.data_path.startswith(rna_prefix): + yield fcurve + + def _action(self) -> Optional[bpy.types.Action]: + adt = self.arm_object.animation_data + return adt and adt.action + + +class FixToCameraCommon: + """Common functionality for the Fix To Scene Camera operator + its 'delete' button.""" + + keytype = 'GENERATED' + + # Operator method stubs to avoid PyLance/MyPy errors: + @classmethod + def poll_message_set(cls, message: str) -> None: + raise NotImplementedError() + + def report(self, level: set[str], message: str) -> None: + raise NotImplementedError() + + # Implement in subclass: + def _execute(self, context: Context, transformables: list[Transformable]) -> None: + raise NotImplementedError() + + @classmethod + def poll(cls, context: Context) -> bool: + if not context.active_pose_bone and not context.active_object: + cls.poll_message_set("Select an object or pose bone") + return False + if context.mode not in {'POSE', 'OBJECT'}: + cls.poll_message_set("Switch to Pose or Object mode") + return False + if not context.scene.camera: + cls.poll_message_set("The Scene needs a camera") + return False + return True + + def execute(self, context: Context) -> set[str]: + match context.mode: + case 'OBJECT': + transformables = self._transformable_objects(context) + case 'POSE': + transformables = self._transformable_pbones(context) + case mode: + self.report({'ERROR'}, 'Unsupported mode: %r' % mode) + return {'CANCELLED'} + + restore_frame = context.scene.frame_current + try: + self._execute(context, transformables) + finally: + context.scene.frame_set(restore_frame) + return {'FINISHED'} + + def _transformable_objects(self, context: Context) -> list[Transformable]: + return [TransformableObject(object=ob) for ob in context.selected_editable_objects] + + def _transformable_pbones(self, context: Context) -> list[Transformable]: + return [TransformableBone(pose_bone=bone) for bone in context.selected_pose_bones] + + +class OBJECT_OT_fix_to_camera(Operator, FixToCameraCommon): + bl_idname = "object.fix_to_camera" + bl_label = "Fix to Scene Camera" + bl_description = "Generate new keys to fix the selected object/bone to the camera on unkeyed frames" + bl_options = {'REGISTER', 'UNDO'} + + use_loc: bpy.props.BoolProperty( # type: ignore + name="Location", + description="Create Location keys when fixing to the scene camera", + default=True, + ) + use_rot: bpy.props.BoolProperty( # type: ignore + name="Rotation", + description="Create Rotation keys when fixing to the scene camera", + default=True, + ) + use_scale: bpy.props.BoolProperty( # type: ignore + name="Scale", + description="Create Scale keys when fixing to the scene camera", + default=True, + ) + + def _get_matrices(self, camera: Camera, transformables: list[Transformable]) -> dict[Transformable, Matrix]: + camera_mat_inv = camera.matrix_world.inverted() + return {t: camera_mat_inv @ t.matrix_world() for t in transformables} + + def _execute(self, context: Context, transformables: list[Transformable]) -> None: + depsgraph = context.view_layer.depsgraph + scene = context.scene + + scene.frame_set(scene.frame_start) + camera_eval = scene.camera.evaluated_get(depsgraph) + last_camera_name = scene.camera.name + matrices = self._get_matrices(camera_eval, transformables) + + if scene.use_preview_range: + frame_start = scene.frame_preview_start + frame_end = scene.frame_preview_end + else: + frame_start = scene.frame_start + frame_end = scene.frame_end + + with AutoKeying.options( + keytype=self.keytype, + 
use_loc=self.use_loc, + use_rot=self.use_rot, + use_scale=self.use_scale, + force_autokey=True, + ): + for frame in range(frame_start, frame_end + scene.frame_step, scene.frame_step): + scene.frame_set(frame) + + camera_eval = scene.camera.evaluated_get(depsgraph) + cam_matrix_world = camera_eval.matrix_world + camera_mat_inv = cam_matrix_world.inverted() + + if scene.camera.name != last_camera_name: + # The scene camera changed, so the previous + # relative-to-camera matrices can no longer be used. + matrices = self._get_matrices(camera_eval, transformables) + last_camera_name = scene.camera.name + + for t, camera_rel_matrix in matrices.items(): + key_info = t.key_info() + key_type = key_info.get(frame, "") + if key_type not in {self.keytype, ""}: + # Manually set key, remember the current camera-relative matrix. + matrices[t] = camera_mat_inv @ t.matrix_world() + continue + + # No key, or a generated one. Overwrite it with a new transform. + t.set_matrix_world(context, cam_matrix_world @ camera_rel_matrix) + + +class OBJECT_OT_delete_fix_to_camera_keys(Operator, FixToCameraCommon): + bl_idname = "object.delete_fix_to_camera_keys" + bl_label = "Delete Generated Keys" + bl_description = "Delete all keys that were generated by the 'Fix to Scene Camera' operator" + bl_options = {'REGISTER', 'UNDO'} + + def _execute(self, context: Context, transformables: list[Transformable]) -> None: + scene = context.scene + if scene.use_preview_range: + frame_start = scene.frame_preview_start + frame_end = scene.frame_preview_end + else: + frame_start = scene.frame_start + frame_end = scene.frame_end + + for t in transformables: + t.remove_keys_of_type(self.keytype, frame_start=frame_start, frame_end=frame_end) + + +class PanelMixin: + bl_space_type = 'VIEW_3D' + bl_region_type = 'UI' + bl_category = "Animation" + + +class VIEW3D_PT_copy_global_transform(PanelMixin, Panel): + bl_label = "Global Transform" + + def draw(self, context: Context) -> None: + layout = self.layout + scene = context.scene + + # No need to put "Global Transform" in the operator text, given that it's already in the panel title. 
+ layout.operator("object.copy_global_transform", text="Copy", icon='COPYDOWN') + + paste_col = layout.column(align=True) + + paste_row = paste_col.row(align=True) + paste_props = paste_row.operator("object.paste_transform", text="Paste", icon='PASTEDOWN') + paste_props.method = 'CURRENT' + paste_props.use_mirror = False + paste_props = paste_row.operator("object.paste_transform", text="Mirrored", icon='PASTEFLIPDOWN') + paste_props.method = 'CURRENT' + paste_props.use_mirror = True + + wants_autokey_col = paste_col.column(align=False) + has_autokey = scene.tool_settings.use_keyframe_insert_auto + wants_autokey_col.enabled = has_autokey + if not has_autokey: + wants_autokey_col.label(text="These require auto-key:") + + paste_col = wants_autokey_col.column(align=True) + paste_col.operator( + "object.paste_transform", + text="Paste to Selected Keys", + icon='PASTEDOWN', + ).method = 'EXISTING_KEYS' + paste_col.operator( + "object.paste_transform", + text="Paste and Bake", + icon='PASTEDOWN', + ).method = 'BAKE' + + +class VIEW3D_PT_copy_global_transform_fix_to_camera(PanelMixin, Panel): + bl_label = "Fix to Camera" + bl_parent_id = "VIEW3D_PT_copy_global_transform" + + def draw(self, context: Context) -> None: + layout = self.layout + scene = context.scene + + # Fix to Scene Camera: + layout.use_property_split = True + props_box = layout.column(heading="Fix", align=True) + props_box.prop(scene, "addon_copy_global_transform_fix_cam_use_loc", text="Location") + props_box.prop(scene, "addon_copy_global_transform_fix_cam_use_rot", text="Rotation") + props_box.prop(scene, "addon_copy_global_transform_fix_cam_use_scale", text="Scale") + + row = layout.row(align=True) + props = row.operator("object.fix_to_camera") + props.use_loc = scene.addon_copy_global_transform_fix_cam_use_loc + props.use_rot = scene.addon_copy_global_transform_fix_cam_use_rot + props.use_scale = scene.addon_copy_global_transform_fix_cam_use_scale + row.operator("object.delete_fix_to_camera_keys", text="", icon='TRASH') + + +class VIEW3D_PT_copy_global_transform_mirror(PanelMixin, Panel): + bl_label = "Mirror Options" + bl_parent_id = "VIEW3D_PT_copy_global_transform" + + def draw(self, context: Context) -> None: + layout = self.layout + scene = context.scene + layout.prop(scene, 'addon_copy_global_transform_mirror_ob', text="Object") + + mirror_ob = scene.addon_copy_global_transform_mirror_ob + if mirror_ob is None: + # No explicit mirror object means "the current armature", so then the bone name should be editable. 
+ if context.object and context.object.type == 'ARMATURE': + self._bone_search(layout, scene, context.object) + else: + self._bone_entry(layout, scene) + elif mirror_ob.type == 'ARMATURE': + self._bone_search(layout, scene, mirror_ob) + + def _bone_search(self, layout: UILayout, scene: bpy.types.Scene, armature_ob: bpy.types.Object) -> None: + """Search within the bones of the given armature.""" + assert armature_ob and armature_ob.type == 'ARMATURE' + + layout.prop_search( + scene, + "addon_copy_global_transform_mirror_bone", + armature_ob.data, + "edit_bones" if armature_ob.mode == 'EDIT' else "bones", + text="Bone", + ) + + def _bone_entry(self, layout: UILayout, scene: bpy.types.Scene) -> None: + """Allow manual entry of a bone name.""" + layout.prop(scene, "addon_copy_global_transform_mirror_bone", text="Bone") + + +class VIEW3D_PT_copy_global_transform_relative(PanelMixin, Panel): + bl_label = "Relative" + bl_parent_id = "VIEW3D_PT_copy_global_transform" + + def draw(self, context: Context) -> None: + layout = self.layout + scene = context.scene + + # Copy/Paste relative to some object: + copy_paste_sub = layout.column(align=False) + has_relative_ob = bool(_get_relative_ob(context)) + copy_paste_sub.label(text="Work Relative to some Object") + copy_paste_sub.prop(scene, 'addon_copy_global_transform_relative_ob', text="Object") + if not scene.addon_copy_global_transform_relative_ob: + copy_paste_sub.label(text="Using Active Scene Camera") + + button_sub = copy_paste_sub.row(align=True) + button_sub.enabled = has_relative_ob + button_sub.operator("object.copy_relative_transform", text="Copy", icon='COPYDOWN') + + paste_props = button_sub.operator("object.paste_transform", text="Paste", icon='PASTEDOWN') + paste_props.method = 'CURRENT' + paste_props.use_mirror = False + paste_props.use_relative = True + + # It is unknown whether this combination of options is in any way + # sensible or usable, and of so, in which order the mirroring and + # relative'ing-to should happen. That's why, for now, it's disabled. + # + # paste_props = paste_row.operator("object.paste_transform", text="Mirrored", icon='PASTEFLIPDOWN') + # paste_props.method = 'CURRENT' + # paste_props.use_mirror = True + # paste_props.use_relative = True + + +# Messagebus subscription to monitor changes & refresh panels. +_msgbus_owner = object() + + +def _refresh_3d_panels(): + refresh_area_types = {'VIEW_3D'} + for win in bpy.context.window_manager.windows: + for area in win.screen.areas: + if area.type not in refresh_area_types: + continue + area.tag_redraw() + + +classes = ( + OBJECT_OT_copy_global_transform, + OBJECT_OT_copy_relative_transform, + OBJECT_OT_paste_transform, + OBJECT_OT_fix_to_camera, + OBJECT_OT_delete_fix_to_camera_keys, + VIEW3D_PT_copy_global_transform, + VIEW3D_PT_copy_global_transform_mirror, + VIEW3D_PT_copy_global_transform_fix_to_camera, + VIEW3D_PT_copy_global_transform_relative, +) +_register, _unregister = bpy.utils.register_classes_factory(classes) + + +def _register_message_bus() -> None: + bpy.msgbus.subscribe_rna( + key=(bpy.types.ToolSettings, "use_keyframe_insert_auto"), + owner=_msgbus_owner, + args=(), + notify=_refresh_3d_panels, + options={'PERSISTENT'}, + ) + + +def _unregister_message_bus() -> None: + bpy.msgbus.clear_by_owner(_msgbus_owner) + + +@bpy.app.handlers.persistent # type: ignore +def _on_blendfile_load_post(none: Any, other_none: Any) -> None: + # The parameters are required, but both are None. 
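+    # Message-bus subscriptions are not kept when another blend-file is loaded,
+    # so this persistent handler subscribes again after every load.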
+ _register_message_bus() + + +def register(): + _register() + bpy.app.handlers.load_post.append(_on_blendfile_load_post) + + # The mirror object & bone name are stored on the scene, and not on the + # operator. This makes it possible to set up the operator for use in a + # certain scene, while keeping hotkey assignments working as usual. + # + # The goal is to allow hotkeys for "copy", "paste", and "paste mirrored", + # while keeping the other choices in a more global place. + bpy.types.Scene.addon_copy_global_transform_mirror_ob = bpy.props.PointerProperty( + type=bpy.types.Object, + name="Mirror Object", + description="Object to mirror over. Leave empty and name a bone to always mirror " + "over that bone of the active armature", + ) + bpy.types.Scene.addon_copy_global_transform_mirror_bone = bpy.props.StringProperty( + name="Mirror Bone", + description="Bone to use for the mirroring", + ) + bpy.types.Scene.addon_copy_global_transform_relative_ob = bpy.props.PointerProperty( + type=bpy.types.Object, + name="Relative Object", + description="Object to which matrices are made relative", + ) + + bpy.types.Scene.addon_copy_global_transform_fix_cam_use_loc = bpy.props.BoolProperty( + name="Fix Camera: Use Location", + description="Create Location keys when fixing to the scene camera", + default=True, + options=set(), # Remove ANIMATABLE default option. + ) + bpy.types.Scene.addon_copy_global_transform_fix_cam_use_rot = bpy.props.BoolProperty( + name="Fix Camera: Use Rotation", + description="Create Rotation keys when fixing to the scene camera", + default=True, + options=set(), # Remove ANIMATABLE default option. + ) + bpy.types.Scene.addon_copy_global_transform_fix_cam_use_scale = bpy.props.BoolProperty( + name="Fix Camera: Use Scale", + description="Create Scale keys when fixing to the scene camera", + default=True, + options=set(), # Remove ANIMATABLE default option. + ) + + +def unregister(): + _unregister() + _unregister_message_bus() + bpy.app.handlers.load_post.remove(_on_blendfile_load_post) + + del bpy.types.Scene.addon_copy_global_transform_mirror_ob + del bpy.types.Scene.addon_copy_global_transform_mirror_bone + del bpy.types.Scene.addon_copy_global_transform_relative_ob + + del bpy.types.Scene.addon_copy_global_transform_fix_cam_use_loc + del bpy.types.Scene.addon_copy_global_transform_fix_cam_use_rot + del bpy.types.Scene.addon_copy_global_transform_fix_cam_use_scale diff --git a/scripts/addons_core/hydra_storm/__init__.py b/scripts/addons_core/hydra_storm/__init__.py new file mode 100644 index 00000000000..11479f85bc4 --- /dev/null +++ b/scripts/addons_core/hydra_storm/__init__.py @@ -0,0 +1,33 @@ +# SPDX-FileCopyrightText: 2011-2022 Blender Foundation +# +# SPDX-License-Identifier: Apache-2.0 + +bl_info = { + "name": "Hydra Storm render engine", + "author": "AMD", + "version": (1, 0, 0), + "blender": (4, 0, 0), + "description": "USD's high performance rasterizing renderer", + "tracker_url": "", + "doc_url": "", + "community": "", + "downloads": "", + "main_web": "", + "support": 'OFFICIAL', + "category": "Render" +} + + +from . 
import engine, properties, ui + + +def register(): + engine.register() + properties.register() + ui.register() + + +def unregister(): + ui.unregister() + properties.unregister() + engine.unregister() diff --git a/scripts/addons_core/hydra_storm/engine.py b/scripts/addons_core/hydra_storm/engine.py new file mode 100644 index 00000000000..d1d798308ee --- /dev/null +++ b/scripts/addons_core/hydra_storm/engine.py @@ -0,0 +1,47 @@ +# SPDX-FileCopyrightText: 2011-2022 Blender Foundation +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy + + +class StormHydraRenderEngine(bpy.types.HydraRenderEngine): + bl_idname = 'HYDRA_STORM' + bl_label = "Hydra Storm" + bl_info = "USD's high performance rasterizing renderer" + + bl_use_preview = True + bl_use_gpu_context = True + bl_use_materialx = True + + bl_delegate_id = 'HdStormRendererPlugin' + + def get_render_settings(self, engine_type): + settings = bpy.context.scene.hydra_storm.viewport if engine_type == 'VIEWPORT' else \ + bpy.context.scene.hydra_storm.final + result = { + 'enableTinyPrimCulling': settings.use_tiny_prim_culling, + 'maxLights': settings.max_lights, + 'volumeRaymarchingStepSize': settings.volume_raymarching_step_size, + 'volumeRaymarchingStepSizeLighting': settings.volume_raymarching_step_size_lighting, + 'volumeMaxTextureMemoryPerField': settings.volume_max_texture_memory_per_field, + } + + if engine_type != 'VIEWPORT': + result |= { + 'aovToken:Combined': "color", + 'aovToken:Depth': "depth", + } + + return result + + def update_render_passes(self, scene, render_layer): + if render_layer.use_pass_combined: + self.register_pass(scene, render_layer, 'Combined', 4, 'RGBA', 'COLOR') + if render_layer.use_pass_z: + self.register_pass(scene, render_layer, 'Depth', 1, 'Z', 'VALUE') + + +register, unregister = bpy.utils.register_classes_factory(( + StormHydraRenderEngine, +)) diff --git a/scripts/addons_core/hydra_storm/properties.py b/scripts/addons_core/hydra_storm/properties.py new file mode 100644 index 00000000000..997dbb741e5 --- /dev/null +++ b/scripts/addons_core/hydra_storm/properties.py @@ -0,0 +1,62 @@ +# SPDX-FileCopyrightText: 2011-2022 Blender Foundation +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy + + +class Properties(bpy.types.PropertyGroup): + type = None + + @classmethod + def register(cls): + cls.type.hydra_storm = bpy.props.PointerProperty( + name="Hydra Storm", + description="Hydra Storm properties", + type=cls, + ) + + @classmethod + def unregister(cls): + del cls.type.hydra_storm + + +class RenderProperties(bpy.types.PropertyGroup): + max_lights: bpy.props.IntProperty( + name="Max Lights", + description="Limit maximum number of lights", + default=16, min=0, max=16, + ) + use_tiny_prim_culling: bpy.props.BoolProperty( + name="Tiny Prim Culling", + description="Hide small geometry primitives to improve performance", + default=False, + ) + volume_raymarching_step_size: bpy.props.FloatProperty( + name="Volume Raymarching Step Size", + description="Step size when raymarching volume", + default=1.0, + ) + volume_raymarching_step_size_lighting: bpy.props.FloatProperty( + name="Volume Raymarching Step Size Lighting", + description="Step size when raymarching volume for lighting computation", + default=10.0, + ) + volume_max_texture_memory_per_field: bpy.props.FloatProperty( + name="Max Texture Memory Per Field", + description="Maximum memory for a volume field texture in Mb (unless overridden by field prim)", + default=128.0, + ) + + +class SceneProperties(Properties): + type = bpy.types.Scene + + final: 
bpy.props.PointerProperty(type=RenderProperties) + viewport: bpy.props.PointerProperty(type=RenderProperties) + + +register, unregister = bpy.utils.register_classes_factory(( + RenderProperties, + SceneProperties, +)) diff --git a/scripts/addons_core/hydra_storm/ui.py b/scripts/addons_core/hydra_storm/ui.py new file mode 100644 index 00000000000..2ebe9b081aa --- /dev/null +++ b/scripts/addons_core/hydra_storm/ui.py @@ -0,0 +1,259 @@ +# SPDX-FileCopyrightText: 2011-2022 Blender Foundation +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy + +from .engine import StormHydraRenderEngine + + +class Panel(bpy.types.Panel): + bl_space_type = 'PROPERTIES' + bl_region_type = 'WINDOW' + bl_context = 'render' + COMPAT_ENGINES = {StormHydraRenderEngine.bl_idname} + + @classmethod + def poll(cls, context): + return context.engine in cls.COMPAT_ENGINES + + +# +# Quality render settings +# +class STORM_HYDRA_RENDER_PT_quality(Panel): + bl_label = "Quality" + + def draw(self, layout): + pass + + +class STORM_HYDRA_RENDER_PT_quality_viewport(Panel): + bl_label = "Viewport" + bl_parent_id = "STORM_HYDRA_RENDER_PT_quality" + + def draw(self, context): + layout = self.layout + layout.use_property_split = True + layout.use_property_decorate = False + + settings = context.scene.hydra_storm.viewport + layout.prop(settings, 'max_lights') + layout.prop(settings, 'use_tiny_prim_culling') + + +class STORM_HYDRA_RENDER_PT_quality_render(Panel): + bl_label = "Render" + bl_parent_id = "STORM_HYDRA_RENDER_PT_quality" + + def draw(self, context): + layout = self.layout + layout.use_property_split = True + layout.use_property_decorate = False + + settings = context.scene.hydra_storm.final + layout.prop(settings, 'max_lights') + layout.prop(settings, 'use_tiny_prim_culling') + + +# +# Volume render settings +# +class STORM_HYDRA_RENDER_PT_volumes(Panel): + bl_label = "Volumes" + bl_options = {'DEFAULT_CLOSED'} + + def draw(self, layout): + pass + + +class STORM_HYDRA_RENDER_PT_volumes_viewport(Panel): + bl_label = "Viewport" + bl_parent_id = "STORM_HYDRA_RENDER_PT_volumes" + + def draw(self, context): + layout = self.layout + layout.use_property_split = True + layout.use_property_decorate = False + + settings = context.scene.hydra_storm.viewport + + col = layout.column(align=True) + col.prop(settings, "volume_raymarching_step_size", text="Step Size") + col.prop(settings, "volume_raymarching_step_size_lighting", text="Step Size Lightning") + col.prop(settings, "volume_max_texture_memory_per_field") + + +class STORM_HYDRA_RENDER_PT_volumes_render(Panel): + bl_label = "Render" + bl_parent_id = "STORM_HYDRA_RENDER_PT_volumes" + + def draw(self, context): + layout = self.layout + layout.use_property_split = True + layout.use_property_decorate = False + + settings = context.scene.hydra_storm.final + + col = layout.column(align=True) + col.prop(settings, "volume_raymarching_step_size", text="Step Size") + col.prop(settings, "volume_raymarching_step_size_lighting", text="Step Size Lightning") + col.prop(settings, "volume_max_texture_memory_per_field") + + +# +# Film settings +# +class STORM_HYDRA_RENDER_PT_film(Panel): + bl_label = "Film" + bl_options = {'DEFAULT_CLOSED'} + + def draw(self, context): + layout = self.layout + layout.use_property_split = True + layout.use_property_decorate = False + + layout.prop(context.scene.render, "film_transparent", text="Transparent Background") + + +# +# View layer settings +# +class STORM_HYDRA_RENDER_PT_passes(Panel): + bl_label = "Passes" + bl_context = "view_layer" + + def draw(self, 
context): + pass + + +class STORM_HYDRA_RENDER_PT_passes_data(Panel): + bl_label = "Data" + bl_context = "view_layer" + bl_parent_id = "STORM_HYDRA_RENDER_PT_passes" + + def draw(self, context): + layout = self.layout + layout.use_property_split = True + layout.use_property_decorate = False + + view_layer = context.view_layer + + col = layout.column(heading="Include", align=True) + col.prop(view_layer, "use_pass_combined") + col.prop(view_layer, "use_pass_z") + + +# +# Light settings +# +class STORM_HYDRA_LIGHT_PT_light(Panel): + """Physical light sources""" + bl_label = "Light" + bl_context = 'data' + + @classmethod + def poll(cls, context): + return super().poll(context) and context.light + + def draw(self, context): + layout = self.layout + + light = context.light + + layout.prop(light, "type", expand=True) + + layout.use_property_split = True + layout.use_property_decorate = False + + main_col = layout.column() + + main_col.prop(light, "color") + main_col.prop(light, "energy") + main_col.separator() + + if light.type == 'POINT': + row = main_col.row(align=True) + row.prop(light, "shadow_soft_size", text="Radius") + + elif light.type == 'SPOT': + col = main_col.column(align=True) + col.prop(light, 'spot_size', slider=True) + col.prop(light, 'spot_blend', slider=True) + + main_col.prop(light, 'show_cone') + + elif light.type == 'SUN': + main_col.prop(light, "angle") + + elif light.type == 'AREA': + main_col.prop(light, "shape", text="Shape") + sub = main_col.column(align=True) + + if light.shape in {'SQUARE', 'DISK'}: + sub.prop(light, "size") + elif light.shape in {'RECTANGLE', 'ELLIPSE'}: + sub.prop(light, "size", text="Size X") + sub.prop(light, "size_y", text="Y") + + else: + main_col.prop(light, 'size') + + +register_classes, unregister_classes = bpy.utils.register_classes_factory(( + STORM_HYDRA_RENDER_PT_quality, + STORM_HYDRA_RENDER_PT_quality_viewport, + STORM_HYDRA_RENDER_PT_quality_render, + STORM_HYDRA_RENDER_PT_volumes, + STORM_HYDRA_RENDER_PT_volumes_viewport, + STORM_HYDRA_RENDER_PT_volumes_render, + STORM_HYDRA_RENDER_PT_film, + STORM_HYDRA_LIGHT_PT_light, + STORM_HYDRA_RENDER_PT_passes, + STORM_HYDRA_RENDER_PT_passes_data, +)) + + +def get_panels(): + # Follow the Cycles model of excluding panels we don't want. 
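+    # Every registered Panel subclass compatible with 'BLENDER_RENDER' is yielded,
+    # except the ones excluded below, plus a hand-picked set of EEVEE material panels.
+    # register()/unregister() then add/remove this engine's bl_idname in each panel's
+    # COMPAT_ENGINES, so the stock UI panels also show up for Hydra Storm.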
+ exclude_panels = { + 'RENDER_PT_stamp', + 'DATA_PT_light', + 'DATA_PT_spot', + 'NODE_DATA_PT_light', + 'DATA_PT_falloff_curve', + 'RENDER_PT_post_processing', + 'RENDER_PT_simplify', + 'SCENE_PT_audio', + 'RENDER_PT_freestyle' + } + include_eevee_panels = { + 'MATERIAL_PT_preview', + 'EEVEE_MATERIAL_PT_context_material', + 'EEVEE_MATERIAL_PT_surface', + 'EEVEE_MATERIAL_PT_volume', + 'EEVEE_MATERIAL_PT_settings', + 'EEVEE_WORLD_PT_surface', + } + + for panel_cls in bpy.types.Panel.__subclasses__(): + if hasattr(panel_cls, 'COMPAT_ENGINES') and ( + ('BLENDER_RENDER' in panel_cls.COMPAT_ENGINES and panel_cls.__name__ not in exclude_panels) or + ('BLENDER_EEVEE' in panel_cls.COMPAT_ENGINES and panel_cls.__name__ in include_eevee_panels) + ): + yield panel_cls + + +def register(): + register_classes() + + for panel_cls in get_panels(): + panel_cls.COMPAT_ENGINES.add(StormHydraRenderEngine.bl_idname) + + +def unregister(): + unregister_classes() + + for panel_cls in get_panels(): + if StormHydraRenderEngine.bl_idname in panel_cls.COMPAT_ENGINES: + panel_cls.COMPAT_ENGINES.remove(StormHydraRenderEngine.bl_idname) diff --git a/scripts/addons_core/io_anim_bvh/__init__.py b/scripts/addons_core/io_anim_bvh/__init__.py new file mode 100644 index 00000000000..918d8d8ed1d --- /dev/null +++ b/scripts/addons_core/io_anim_bvh/__init__.py @@ -0,0 +1,388 @@ +# SPDX-FileCopyrightText: 2011-2022 Blender Foundation +# +# SPDX-License-Identifier: GPL-2.0-or-later + +bl_info = { + "name": "BioVision Motion Capture (BVH) format", + "author": "Campbell Barton", + "version": (1, 0, 1), + "blender": (2, 81, 6), + "location": "File > Import-Export", + "description": "Import-Export BVH from armature objects", + "warning": "", + "doc_url": "{BLENDER_MANUAL_URL}/addons/import_export/anim_bvh.html", + "support": 'OFFICIAL', + "category": "Import-Export", +} + +if "bpy" in locals(): + import importlib + if "import_bvh" in locals(): + importlib.reload(import_bvh) + if "export_bvh" in locals(): + importlib.reload(export_bvh) + +import bpy +from bpy.props import ( + StringProperty, + FloatProperty, + IntProperty, + BoolProperty, + EnumProperty, +) +from bpy_extras.io_utils import ( + ImportHelper, + ExportHelper, + orientation_helper, + axis_conversion, +) + + +@orientation_helper(axis_forward='-Z', axis_up='Y') +class ImportBVH(bpy.types.Operator, ImportHelper): + """Load a BVH motion capture file""" + bl_idname = "import_anim.bvh" + bl_label = "Import BVH" + bl_options = {'REGISTER', 'UNDO'} + + filename_ext = ".bvh" + filter_glob: StringProperty(default="*.bvh", options={'HIDDEN'}) + + target: EnumProperty( + items=( + ('ARMATURE', "Armature", ""), + ('OBJECT', "Object", ""), + ), + name="Target", + description="Import target type", + default='ARMATURE', + ) + global_scale: FloatProperty( + name="Scale", + description="Scale the BVH by this value", + min=0.0001, max=1000000.0, + soft_min=0.001, soft_max=100.0, + default=1.0, + ) + frame_start: IntProperty( + name="Start Frame", + description="Starting frame for the animation", + default=1, + ) + use_fps_scale: BoolProperty( + name="Scale FPS", + description=( + "Scale the framerate from the BVH to the current scenes, " + "otherwise each BVH frame maps directly to a Blender frame" + ), + default=False, + ) + update_scene_fps: BoolProperty( + name="Update Scene FPS", + description=( + "Set the scene framerate to that of the BVH file (note that this " + "nullifies the 'Scale FPS' option, as the scale will be 1:1)" + ), + default=False, + ) + update_scene_duration: 
BoolProperty( + name="Update Scene Duration", + description="Extend the scene's duration to the BVH duration (never shortens the scene)", + default=False, + ) + use_cyclic: BoolProperty( + name="Loop", + description="Loop the animation playback", + default=False, + ) + rotate_mode: EnumProperty( + name="Rotation", + description="Rotation conversion", + items=( + ('QUATERNION', "Quaternion", + "Convert rotations to quaternions"), + ('NATIVE', "Euler (Native)", + "Use the rotation order defined in the BVH file"), + ('XYZ', "Euler (XYZ)", "Convert rotations to euler XYZ"), + ('XZY', "Euler (XZY)", "Convert rotations to euler XZY"), + ('YXZ', "Euler (YXZ)", "Convert rotations to euler YXZ"), + ('YZX', "Euler (YZX)", "Convert rotations to euler YZX"), + ('ZXY', "Euler (ZXY)", "Convert rotations to euler ZXY"), + ('ZYX', "Euler (ZYX)", "Convert rotations to euler ZYX"), + ), + default='NATIVE', + ) + + def execute(self, context): + keywords = self.as_keywords( + ignore=( + "axis_forward", + "axis_up", + "filter_glob", + ) + ) + global_matrix = axis_conversion( + from_forward=self.axis_forward, + from_up=self.axis_up, + ).to_4x4() + + keywords["global_matrix"] = global_matrix + + from . import import_bvh + return import_bvh.load(context, report=self.report, **keywords) + + def draw(self, context): + pass + + +class BVH_PT_import_main(bpy.types.Panel): + bl_space_type = 'FILE_BROWSER' + bl_region_type = 'TOOL_PROPS' + bl_label = "" + bl_parent_id = "FILE_PT_operator" + bl_options = {'HIDE_HEADER'} + + @classmethod + def poll(cls, context): + sfile = context.space_data + operator = sfile.active_operator + + return operator.bl_idname == "IMPORT_ANIM_OT_bvh" + + def draw(self, context): + layout = self.layout + layout.use_property_split = True + layout.use_property_decorate = False # No animation. + + sfile = context.space_data + operator = sfile.active_operator + + layout.prop(operator, "target") + + +class BVH_PT_import_transform(bpy.types.Panel): + bl_space_type = 'FILE_BROWSER' + bl_region_type = 'TOOL_PROPS' + bl_label = "Transform" + bl_parent_id = "FILE_PT_operator" + + @classmethod + def poll(cls, context): + sfile = context.space_data + operator = sfile.active_operator + + return operator.bl_idname == "IMPORT_ANIM_OT_bvh" + + def draw(self, context): + layout = self.layout + layout.use_property_split = True + layout.use_property_decorate = False # No animation. + + sfile = context.space_data + operator = sfile.active_operator + + layout.prop(operator, "global_scale") + layout.prop(operator, "rotate_mode") + layout.prop(operator, "axis_forward") + layout.prop(operator, "axis_up") + + +class BVH_PT_import_animation(bpy.types.Panel): + bl_space_type = 'FILE_BROWSER' + bl_region_type = 'TOOL_PROPS' + bl_label = "Animation" + bl_parent_id = "FILE_PT_operator" + + @classmethod + def poll(cls, context): + sfile = context.space_data + operator = sfile.active_operator + + return operator.bl_idname == "IMPORT_ANIM_OT_bvh" + + def draw(self, context): + layout = self.layout + layout.use_property_split = True + layout.use_property_decorate = False # No animation. 
+ + sfile = context.space_data + operator = sfile.active_operator + + layout.prop(operator, "frame_start") + layout.prop(operator, "use_fps_scale") + layout.prop(operator, "use_cyclic") + + layout.prop(operator, "update_scene_fps") + layout.prop(operator, "update_scene_duration") + + +class ExportBVH(bpy.types.Operator, ExportHelper): + """Save a BVH motion capture file from an armature""" + bl_idname = "export_anim.bvh" + bl_label = "Export BVH" + + filename_ext = ".bvh" + filter_glob: StringProperty( + default="*.bvh", + options={'HIDDEN'}, + ) + + global_scale: FloatProperty( + name="Scale", + description="Scale the BVH by this value", + min=0.0001, max=1000000.0, + soft_min=0.001, soft_max=100.0, + default=1.0, + ) + frame_start: IntProperty( + name="Start Frame", + description="Starting frame to export", + default=0, + ) + frame_end: IntProperty( + name="End Frame", + description="End frame to export", + default=0, + ) + rotate_mode: EnumProperty( + name="Rotation", + description="Rotation conversion", + items=( + ('NATIVE', "Euler (Native)", + "Use the rotation order defined in the BVH file"), + ('XYZ', "Euler (XYZ)", "Convert rotations to euler XYZ"), + ('XZY', "Euler (XZY)", "Convert rotations to euler XZY"), + ('YXZ', "Euler (YXZ)", "Convert rotations to euler YXZ"), + ('YZX', "Euler (YZX)", "Convert rotations to euler YZX"), + ('ZXY', "Euler (ZXY)", "Convert rotations to euler ZXY"), + ('ZYX', "Euler (ZYX)", "Convert rotations to euler ZYX"), + ), + default='NATIVE', + ) + root_transform_only: BoolProperty( + name="Root Translation Only", + description="Only write out translation channels for the root bone", + default=False, + ) + + @classmethod + def poll(cls, context): + obj = context.object + return obj and obj.type == 'ARMATURE' + + def invoke(self, context, event): + self.frame_start = context.scene.frame_start + self.frame_end = context.scene.frame_end + + return super().invoke(context, event) + + def execute(self, context): + if self.frame_start == 0 and self.frame_end == 0: + self.frame_start = context.scene.frame_start + self.frame_end = context.scene.frame_end + + keywords = self.as_keywords( + ignore=( + "axis_forward", + "axis_up", + "check_existing", + "filter_glob", + ) + ) + + from . import export_bvh + return export_bvh.save(context, **keywords) + + def draw(self, context): + pass + + +class BVH_PT_export_transform(bpy.types.Panel): + bl_space_type = 'FILE_BROWSER' + bl_region_type = 'TOOL_PROPS' + bl_label = "Transform" + bl_parent_id = "FILE_PT_operator" + + @classmethod + def poll(cls, context): + sfile = context.space_data + operator = sfile.active_operator + + return operator.bl_idname == "EXPORT_ANIM_OT_bvh" + + def draw(self, context): + layout = self.layout + layout.use_property_split = True + layout.use_property_decorate = False # No animation. + + sfile = context.space_data + operator = sfile.active_operator + + layout.prop(operator, "global_scale") + layout.prop(operator, "rotate_mode") + layout.prop(operator, "root_transform_only") + + +class BVH_PT_export_animation(bpy.types.Panel): + bl_space_type = 'FILE_BROWSER' + bl_region_type = 'TOOL_PROPS' + bl_label = "Animation" + bl_parent_id = "FILE_PT_operator" + + @classmethod + def poll(cls, context): + sfile = context.space_data + operator = sfile.active_operator + + return operator.bl_idname == "EXPORT_ANIM_OT_bvh" + + def draw(self, context): + layout = self.layout + layout.use_property_split = True + layout.use_property_decorate = False # No animation. 
+ + sfile = context.space_data + operator = sfile.active_operator + + col = layout.column(align=True) + col.prop(operator, "frame_start", text="Frame Start") + col.prop(operator, "frame_end", text="End") + + +def menu_func_import(self, context): + self.layout.operator(ImportBVH.bl_idname, text="Motion Capture (.bvh)") + + +def menu_func_export(self, context): + self.layout.operator(ExportBVH.bl_idname, text="Motion Capture (.bvh)") + + +classes = ( + ImportBVH, + BVH_PT_import_main, + BVH_PT_import_transform, + BVH_PT_import_animation, + ExportBVH, + BVH_PT_export_transform, + BVH_PT_export_animation, +) + + +def register(): + for cls in classes: + bpy.utils.register_class(cls) + + bpy.types.TOPBAR_MT_file_import.append(menu_func_import) + bpy.types.TOPBAR_MT_file_export.append(menu_func_export) + + +def unregister(): + for cls in classes: + bpy.utils.unregister_class(cls) + + bpy.types.TOPBAR_MT_file_import.remove(menu_func_import) + bpy.types.TOPBAR_MT_file_export.remove(menu_func_export) + + +if __name__ == "__main__": + register() diff --git a/scripts/addons_core/io_anim_bvh/export_bvh.py b/scripts/addons_core/io_anim_bvh/export_bvh.py new file mode 100644 index 00000000000..f2fb237c087 --- /dev/null +++ b/scripts/addons_core/io_anim_bvh/export_bvh.py @@ -0,0 +1,294 @@ +# SPDX-FileCopyrightText: 2011 Campbell Barton +# +# SPDX-License-Identifier: GPL-2.0-or-later + +import bpy + + +def write_armature( + context, + filepath, + frame_start, + frame_end, + global_scale=1.0, + rotate_mode='NATIVE', + root_transform_only=False, +): + + def ensure_rot_order(rot_order_str): + if set(rot_order_str) != {'X', 'Y', 'Z'}: + rot_order_str = "XYZ" + return rot_order_str + + from mathutils import Matrix, Euler + from math import degrees + + file = open(filepath, "w", encoding="utf8", newline="\n") + + obj = context.object + arm = obj.data + + # Build a dictionary of children. + # None for parentless + children = {None: []} + + # initialize with blank lists + for bone in arm.bones: + children[bone.name] = [] + + # keep bone order from armature, no sorting, not esspential but means + # we can maintain order from import -> export which secondlife incorrectly expects. 
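+    # After this loop `children` maps a parent bone name (or None for parentless roots)
+    # to the names of its children, in armature bone order,
+    # e.g. {None: ["hips"], "hips": ["spine"], "spine": [], ...}.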
+ for bone in arm.bones: + children[getattr(bone.parent, "name", None)].append(bone.name) + + # bone name list in the order that the bones are written + serialized_names = [] + + node_locations = {} + + file.write("HIERARCHY\n") + + def write_recursive_nodes(bone_name, indent): + my_children = children[bone_name] + + indent_str = "\t" * indent + + bone = arm.bones[bone_name] + pose_bone = obj.pose.bones[bone_name] + loc = bone.head_local + node_locations[bone_name] = loc + + if rotate_mode == "NATIVE": + rot_order_str = ensure_rot_order(pose_bone.rotation_mode) + else: + rot_order_str = rotate_mode + + # make relative if we can + if bone.parent: + loc = loc - node_locations[bone.parent.name] + + if indent: + file.write("%sJOINT %s\n" % (indent_str, bone_name)) + else: + file.write("%sROOT %s\n" % (indent_str, bone_name)) + + file.write("%s{\n" % indent_str) + file.write("%s\tOFFSET %.6f %.6f %.6f\n" % (indent_str, *(loc * global_scale))) + if (bone.use_connect or root_transform_only) and bone.parent: + file.write("%s\tCHANNELS 3 %srotation %srotation %srotation\n" % (indent_str, *rot_order_str)) + else: + file.write( + "%s\tCHANNELS 6 Xposition Yposition Zposition %srotation %srotation %srotation\n" % ( + indent_str, + *rot_order_str, + ) + ) + + if my_children: + # store the location for the children + # to get their relative offset + + # Write children + for child_bone in my_children: + serialized_names.append(child_bone) + write_recursive_nodes(child_bone, indent + 1) + + else: + # Write the bone end. + file.write("%s\tEnd Site\n" % indent_str) + file.write("%s\t{\n" % indent_str) + loc = bone.tail_local - node_locations[bone_name] + file.write("%s\t\tOFFSET %.6f %.6f %.6f\n" % (indent_str, *(loc * global_scale))) + file.write("%s\t}\n" % indent_str) + + file.write("%s}\n" % indent_str) + + if len(children[None]) == 1: + key = children[None][0] + serialized_names.append(key) + indent = 0 + + write_recursive_nodes(key, indent) + + else: + # Write a dummy parent node, with a dummy key name + # Just be sure it's not used by another bone! + i = 0 + key = "__%d" % i + while key in children: + i += 1 + key = "__%d" % i + file.write("ROOT %s\n" % key) + file.write("{\n") + file.write("\tOFFSET 0.0 0.0 0.0\n") + file.write("\tCHANNELS 0\n") # Xposition Yposition Zposition Xrotation Yrotation Zrotation + indent = 1 + + # Write children + for child_bone in children[None]: + serialized_names.append(child_bone) + write_recursive_nodes(child_bone, indent) + + file.write("}\n") + + # redefine bones as sorted by serialized_names + # so we can write motion + + class DecoratedBone: + __slots__ = ( + # Bone name, used as key in many places. + "name", + "parent", # decorated bone parent, set in a later loop + # Blender armature bone. + "rest_bone", + # Blender pose bone. + "pose_bone", + # Blender pose matrix. + "pose_mat", + # Blender rest matrix (armature space). + "rest_arm_mat", + # Blender rest matrix (local space). + "rest_local_mat", + # Pose_mat inverted. + "pose_imat", + # Rest_arm_mat inverted. + "rest_arm_imat", + # Rest_local_mat inverted. + "rest_local_imat", + # Last used euler to preserve euler compatibility in between keyframes. + "prev_euler", + # Is the bone disconnected to the parent bone? + "skip_position", + "rot_order", + "rot_order_str", + # Needed for the euler order when converting from a matrix. 
+ "rot_order_str_reverse", + ) + + _eul_order_lookup = { + 'XYZ': (0, 1, 2), + 'XZY': (0, 2, 1), + 'YXZ': (1, 0, 2), + 'YZX': (1, 2, 0), + 'ZXY': (2, 0, 1), + 'ZYX': (2, 1, 0), + } + + def __init__(self, bone_name): + self.name = bone_name + self.rest_bone = arm.bones[bone_name] + self.pose_bone = obj.pose.bones[bone_name] + + if rotate_mode == "NATIVE": + self.rot_order_str = ensure_rot_order(self.pose_bone.rotation_mode) + else: + self.rot_order_str = rotate_mode + self.rot_order_str_reverse = self.rot_order_str[::-1] + + self.rot_order = DecoratedBone._eul_order_lookup[self.rot_order_str] + + self.pose_mat = self.pose_bone.matrix + + # mat = self.rest_bone.matrix # UNUSED + self.rest_arm_mat = self.rest_bone.matrix_local + self.rest_local_mat = self.rest_bone.matrix + + # inverted mats + self.pose_imat = self.pose_mat.inverted() + self.rest_arm_imat = self.rest_arm_mat.inverted() + self.rest_local_imat = self.rest_local_mat.inverted() + + self.parent = None + self.prev_euler = Euler((0.0, 0.0, 0.0), self.rot_order_str_reverse) + self.skip_position = ((self.rest_bone.use_connect or root_transform_only) and self.rest_bone.parent) + + def update_posedata(self): + self.pose_mat = self.pose_bone.matrix + self.pose_imat = self.pose_mat.inverted() + + def __repr__(self): + if self.parent: + return "[\"%s\" child on \"%s\"]\n" % (self.name, self.parent.name) + else: + return "[\"%s\" root bone]\n" % (self.name) + + bones_decorated = [DecoratedBone(bone_name) for bone_name in serialized_names] + + # Assign parents + bones_decorated_dict = {dbone.name: dbone for dbone in bones_decorated} + for dbone in bones_decorated: + parent = dbone.rest_bone.parent + if parent: + dbone.parent = bones_decorated_dict[parent.name] + del bones_decorated_dict + # finish assigning parents + + scene = context.scene + frame_current = scene.frame_current + + file.write("MOTION\n") + file.write("Frames: %d\n" % (frame_end - frame_start + 1)) + file.write("Frame Time: %.6f\n" % (1.0 / (scene.render.fps / scene.render.fps_base))) + + for frame in range(frame_start, frame_end + 1): + scene.frame_set(frame) + + for dbone in bones_decorated: + dbone.update_posedata() + + for dbone in bones_decorated: + trans = Matrix.Translation(dbone.rest_bone.head_local) + itrans = Matrix.Translation(-dbone.rest_bone.head_local) + + if dbone.parent: + mat_final = dbone.parent.rest_arm_mat @ dbone.parent.pose_imat @ dbone.pose_mat @ dbone.rest_arm_imat + mat_final = itrans @ mat_final @ trans + loc = mat_final.to_translation() + (dbone.rest_bone.head_local - dbone.parent.rest_bone.head_local) + else: + mat_final = dbone.pose_mat @ dbone.rest_arm_imat + mat_final = itrans @ mat_final @ trans + loc = mat_final.to_translation() + dbone.rest_bone.head + + # keep eulers compatible, no jumping on interpolation. 
+ rot = mat_final.to_euler(dbone.rot_order_str_reverse, dbone.prev_euler) + + if not dbone.skip_position: + file.write("%.6f %.6f %.6f " % (loc * global_scale)[:]) + + file.write( + "%.6f %.6f %.6f " % ( + degrees(rot[dbone.rot_order[0]]), + degrees(rot[dbone.rot_order[1]]), + degrees(rot[dbone.rot_order[2]]), + ) + ) + + dbone.prev_euler = rot + + file.write("\n") + + file.close() + + scene.frame_set(frame_current) + + print("BVH Exported: %s frames:%d\n" % (filepath, frame_end - frame_start + 1)) + + +def save( + context, filepath="", + frame_start=-1, + frame_end=-1, + global_scale=1.0, + rotate_mode="NATIVE", + root_transform_only=False, +): + write_armature( + context, filepath, + frame_start=frame_start, + frame_end=frame_end, + global_scale=global_scale, + rotate_mode=rotate_mode, + root_transform_only=root_transform_only, + ) + + return {'FINISHED'} diff --git a/scripts/addons_core/io_anim_bvh/import_bvh.py b/scripts/addons_core/io_anim_bvh/import_bvh.py new file mode 100644 index 00000000000..5d1b5a1f67d --- /dev/null +++ b/scripts/addons_core/io_anim_bvh/import_bvh.py @@ -0,0 +1,783 @@ +# SPDX-FileCopyrightText: 2011 Campbell Barton +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from math import radians, ceil + +import bpy +from bpy.app.translations import pgettext_tip as tip_ +from mathutils import Vector, Euler, Matrix + + +class BVH_Node: + __slots__ = ( + # Bvh joint name. + 'name', + # BVH_Node type or None for no parent. + 'parent', + # A list of children of this type.. + 'children', + # Worldspace rest location for the head of this node. + 'rest_head_world', + # Localspace rest location for the head of this node. + 'rest_head_local', + # Worldspace rest location for the tail of this node. + 'rest_tail_world', + # Worldspace rest location for the tail of this node. + 'rest_tail_local', + # List of 6 ints, -1 for an unused channel, + # otherwise an index for the BVH motion data lines, + # loc triple then rot triple. + 'channels', + # A triple of indices as to the order rotation is applied. + # [0,1,2] is x/y/z - [None, None, None] if no rotation.. + 'rot_order', + # Same as above but a string 'XYZ' format.. + 'rot_order_str', + # A list one tuple's one for each frame: (locx, locy, locz, rotx, roty, rotz), + # euler rotation ALWAYS stored xyz order, even when native used. + 'anim_data', + # Convenience function, bool, same as: (channels[0] != -1 or channels[1] != -1 or channels[2] != -1). + 'has_loc', + # Convenience function, bool, same as: (channels[3] != -1 or channels[4] != -1 or channels[5] != -1). + 'has_rot', + # Index from the file, not strictly needed but nice to maintain order. + 'index', + # Use this for whatever you want. + 'temp', + ) + + _eul_order_lookup = { + (None, None, None): 'XYZ', # XXX Dummy one, no rotation anyway! 
+ (0, 1, 2): 'XYZ', + (0, 2, 1): 'XZY', + (1, 0, 2): 'YXZ', + (1, 2, 0): 'YZX', + (2, 0, 1): 'ZXY', + (2, 1, 0): 'ZYX', + } + + def __init__(self, name, rest_head_world, rest_head_local, parent, channels, rot_order, index): + self.name = name + self.rest_head_world = rest_head_world + self.rest_head_local = rest_head_local + self.rest_tail_world = None + self.rest_tail_local = None + self.parent = parent + self.channels = channels + self.rot_order = tuple(rot_order) + self.rot_order_str = BVH_Node._eul_order_lookup[self.rot_order] + self.index = index + + # convenience functions + self.has_loc = channels[0] != -1 or channels[1] != -1 or channels[2] != -1 + self.has_rot = channels[3] != -1 or channels[4] != -1 or channels[5] != -1 + + self.children = [] + + # List of 6 length tuples: (lx, ly, lz, rx, ry, rz) + # even if the channels aren't used they will just be zero. + self.anim_data = [(0, 0, 0, 0, 0, 0)] + + def __repr__(self): + return ( + "BVH name: '%s', rest_loc:(%.3f,%.3f,%.3f), rest_tail:(%.3f,%.3f,%.3f)" % ( + self.name, + *self.rest_head_world, + *self.rest_head_world, + ) + ) + + +def sorted_nodes(bvh_nodes): + bvh_nodes_list = list(bvh_nodes.values()) + bvh_nodes_list.sort(key=lambda bvh_node: bvh_node.index) + return bvh_nodes_list + + +def read_bvh(context, file_path, rotate_mode='XYZ', global_scale=1.0): + # File loading stuff + # Open the file for importing + file = open(file_path, 'r') + + # Separate into a list of lists, each line a list of words. + file_lines = file.readlines() + # Non standard carriage returns? + if len(file_lines) == 1: + file_lines = file_lines[0].split('\r') + + # Split by whitespace. + file_lines = [ll for ll in [l.split() for l in file_lines] if ll] + + # Create hierarchy as empties + if file_lines[0][0].lower() == 'hierarchy': + # print 'Importing the BVH Hierarchy for:', file_path + pass + else: + raise Exception("This is not a BVH file") + + bvh_nodes = {None: None} + bvh_nodes_serial = [None] + bvh_frame_count = None + bvh_frame_time = None + + channelIndex = -1 + + lineIdx = 0 # An index for the file. + while lineIdx < len(file_lines) - 1: + if file_lines[lineIdx][0].lower() in {'root', 'joint'}: + + # Join spaces into 1 word with underscores joining it. + if len(file_lines[lineIdx]) > 2: + file_lines[lineIdx][1] = '_'.join(file_lines[lineIdx][1:]) + file_lines[lineIdx] = file_lines[lineIdx][:2] + + # MAY NEED TO SUPPORT MULTIPLE ROOTS HERE! Still unsure weather multiple roots are possible? + + # Make sure the names are unique - Object names will match joint names exactly and both will be unique. + name = file_lines[lineIdx][1] + + # While unlikely, there exists a user report of duplicate joint names, see: #109399. 
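+            # Duplicates are disambiguated with a numeric suffix, e.g. a second
+            # "Head" joint would be imported as "Head.001" (the joint name is
+            # only an illustrative example).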
+ if name in bvh_nodes: + name_orig = name + name_index = 1 + while (name := "%s.%03d" % (name_orig, name_index)) in bvh_nodes: + name_index += 1 + del name_orig, name_index + + # print '%snode: %s, parent: %s' % (len(bvh_nodes_serial) * ' ', name, bvh_nodes_serial[-1]) + + lineIdx += 2 # Increment to the next line (Offset) + rest_head_local = global_scale * Vector(( + float(file_lines[lineIdx][1]), + float(file_lines[lineIdx][2]), + float(file_lines[lineIdx][3]), + )) + lineIdx += 1 # Increment to the next line (Channels) + + # newChannel[Xposition, Yposition, Zposition, Xrotation, Yrotation, Zrotation] + # newChannel references indices to the motiondata, + # if not assigned then -1 refers to the last value that will be added on loading at a value of zero, this is appended + # We'll add a zero value onto the end of the MotionDATA so this always refers to a value. + my_channel = [-1, -1, -1, -1, -1, -1] + my_rot_order = [None, None, None] + rot_count = 0 + for channel in file_lines[lineIdx][2:]: + channel = channel.lower() + channelIndex += 1 # So the index points to the right channel + if channel == 'xposition': + my_channel[0] = channelIndex + elif channel == 'yposition': + my_channel[1] = channelIndex + elif channel == 'zposition': + my_channel[2] = channelIndex + + elif channel == 'xrotation': + my_channel[3] = channelIndex + my_rot_order[rot_count] = 0 + rot_count += 1 + elif channel == 'yrotation': + my_channel[4] = channelIndex + my_rot_order[rot_count] = 1 + rot_count += 1 + elif channel == 'zrotation': + my_channel[5] = channelIndex + my_rot_order[rot_count] = 2 + rot_count += 1 + + channels = file_lines[lineIdx][2:] + + my_parent = bvh_nodes_serial[-1] # account for none + + # Apply the parents offset accumulatively + if my_parent is None: + rest_head_world = Vector(rest_head_local) + else: + rest_head_world = my_parent.rest_head_world + rest_head_local + + bvh_node = bvh_nodes[name] = BVH_Node( + name, + rest_head_world, + rest_head_local, + my_parent, + my_channel, + my_rot_order, + len(bvh_nodes) - 1, + ) + + # If we have another child then we can call ourselves a parent, else + bvh_nodes_serial.append(bvh_node) + + # Account for an end node. + # There is sometimes a name after 'End Site' but we will ignore it. + if file_lines[lineIdx][0].lower() == 'end' and file_lines[lineIdx][1].lower() == 'site': + # Increment to the next line (Offset) + lineIdx += 2 + rest_tail = global_scale * Vector(( + float(file_lines[lineIdx][1]), + float(file_lines[lineIdx][2]), + float(file_lines[lineIdx][3]), + )) + + bvh_nodes_serial[-1].rest_tail_world = bvh_nodes_serial[-1].rest_head_world + rest_tail + bvh_nodes_serial[-1].rest_tail_local = bvh_nodes_serial[-1].rest_head_local + rest_tail + + # Just so we can remove the parents in a uniform way, + # the end has kids so this is a placeholder. + bvh_nodes_serial.append(None) + + if len(file_lines[lineIdx]) == 1 and file_lines[lineIdx][0] == '}': # == ['}'] + bvh_nodes_serial.pop() # Remove the last item + + # End of the hierarchy. Begin the animation section of the file with + # the following header. + # MOTION + # Frames: n + # Frame Time: dt + if len(file_lines[lineIdx]) == 1 and file_lines[lineIdx][0].lower() == 'motion': + lineIdx += 1 # Read frame count. + if ( + len(file_lines[lineIdx]) == 2 and + file_lines[lineIdx][0].lower() == 'frames:' + ): + bvh_frame_count = int(file_lines[lineIdx][1]) + + lineIdx += 1 # Read frame rate. 
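+                # "Frame Time:" stores seconds per frame, so the effective frame
+                # rate is 1.0 / bvh_frame_time (e.g. 0.033333 is roughly 30 fps).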
+ if ( + len(file_lines[lineIdx]) == 3 and + file_lines[lineIdx][0].lower() == 'frame' and + file_lines[lineIdx][1].lower() == 'time:' + ): + bvh_frame_time = float(file_lines[lineIdx][2]) + + lineIdx += 1 # Set the cursor to the first frame + + break + + lineIdx += 1 + + # Remove the None value used for easy parent reference + del bvh_nodes[None] + # Don't use anymore + del bvh_nodes_serial + + # importing world with any order but nicer to maintain order + # second life expects it, which isn't to spec. + bvh_nodes_list = sorted_nodes(bvh_nodes) + + while lineIdx < len(file_lines): + line = file_lines[lineIdx] + for bvh_node in bvh_nodes_list: + # for bvh_node in bvh_nodes_serial: + lx = ly = lz = rx = ry = rz = 0.0 + channels = bvh_node.channels + anim_data = bvh_node.anim_data + if channels[0] != -1: + lx = global_scale * float(line[channels[0]]) + + if channels[1] != -1: + ly = global_scale * float(line[channels[1]]) + + if channels[2] != -1: + lz = global_scale * float(line[channels[2]]) + + if channels[3] != -1 or channels[4] != -1 or channels[5] != -1: + + rx = radians(float(line[channels[3]])) + ry = radians(float(line[channels[4]])) + rz = radians(float(line[channels[5]])) + + # Done importing motion data # + anim_data.append((lx, ly, lz, rx, ry, rz)) + lineIdx += 1 + + # Assign children + for bvh_node in bvh_nodes_list: + bvh_node_parent = bvh_node.parent + if bvh_node_parent: + bvh_node_parent.children.append(bvh_node) + + # Now set the tip of each bvh_node + for bvh_node in bvh_nodes_list: + + if not bvh_node.rest_tail_world: + if len(bvh_node.children) == 0: + # could just fail here, but rare BVH files have childless nodes + bvh_node.rest_tail_world = Vector(bvh_node.rest_head_world) + bvh_node.rest_tail_local = Vector(bvh_node.rest_head_local) + elif len(bvh_node.children) == 1: + bvh_node.rest_tail_world = Vector(bvh_node.children[0].rest_head_world) + bvh_node.rest_tail_local = bvh_node.rest_head_local + bvh_node.children[0].rest_head_local + else: + # allow this, see above + # if not bvh_node.children: + # raise Exception("bvh node has no end and no children. bad file") + + # Removed temp for now + rest_tail_world = Vector((0.0, 0.0, 0.0)) + rest_tail_local = Vector((0.0, 0.0, 0.0)) + for bvh_node_child in bvh_node.children: + rest_tail_world += bvh_node_child.rest_head_world + rest_tail_local += bvh_node_child.rest_head_local + + bvh_node.rest_tail_world = rest_tail_world * (1.0 / len(bvh_node.children)) + bvh_node.rest_tail_local = rest_tail_local * (1.0 / len(bvh_node.children)) + + # Make sure tail isn't the same location as the head. + if (bvh_node.rest_tail_local - bvh_node.rest_head_local).length <= 0.001 * global_scale: + print("\tzero length node found:", bvh_node.name) + bvh_node.rest_tail_local.y = bvh_node.rest_tail_local.y + global_scale / 10 + bvh_node.rest_tail_world.y = bvh_node.rest_tail_world.y + global_scale / 10 + + return bvh_nodes, bvh_frame_time, bvh_frame_count + + +def bvh_node_dict2objects(context, bvh_name, bvh_nodes, rotate_mode='NATIVE', frame_start=1, IMPORT_LOOP=False): + + if frame_start < 1: + frame_start = 1 + + scene = context.scene + for obj in scene.objects: + obj.select_set(False) + + objects = [] + + def add_ob(name): + obj = bpy.data.objects.new(name, None) + context.collection.objects.link(obj) + objects.append(obj) + obj.select_set(True) + + # nicer drawing. 
+ obj.empty_display_type = 'CUBE' + obj.empty_display_size = 0.1 + + return obj + + # Add objects + for name, bvh_node in bvh_nodes.items(): + bvh_node.temp = add_ob(name) + bvh_node.temp.rotation_mode = bvh_node.rot_order_str[::-1] + + # Parent the objects + for bvh_node in bvh_nodes.values(): + for bvh_node_child in bvh_node.children: + bvh_node_child.temp.parent = bvh_node.temp + + # Offset + for bvh_node in bvh_nodes.values(): + # Make relative to parents offset + bvh_node.temp.location = bvh_node.rest_head_local + + # Add tail objects + for name, bvh_node in bvh_nodes.items(): + if not bvh_node.children: + ob_end = add_ob(name + '_end') + ob_end.parent = bvh_node.temp + ob_end.location = bvh_node.rest_tail_world - bvh_node.rest_head_world + + for name, bvh_node in bvh_nodes.items(): + obj = bvh_node.temp + + for frame_current in range(len(bvh_node.anim_data)): + + lx, ly, lz, rx, ry, rz = bvh_node.anim_data[frame_current] + + if bvh_node.has_loc: + obj.delta_location = Vector((lx, ly, lz)) - bvh_node.rest_head_world + obj.keyframe_insert("delta_location", index=-1, frame=frame_start + frame_current) + + if bvh_node.has_rot: + obj.delta_rotation_euler = rx, ry, rz + obj.keyframe_insert("delta_rotation_euler", index=-1, frame=frame_start + frame_current) + + return objects + + +def bvh_node_dict2armature( + context, + bvh_name, + bvh_nodes, + bvh_frame_time, + rotate_mode='XYZ', + frame_start=1, + IMPORT_LOOP=False, + global_matrix=None, + use_fps_scale=False, +): + from bpy.utils import escape_identifier + + if frame_start < 1: + frame_start = 1 + + # Add the new armature, + scene = context.scene + for obj in scene.objects: + obj.select_set(False) + + arm_data = bpy.data.armatures.new(bvh_name) + arm_ob = bpy.data.objects.new(bvh_name, arm_data) + + context.collection.objects.link(arm_ob) + + arm_ob.select_set(True) + context.view_layer.objects.active = arm_ob + + bpy.ops.object.mode_set(mode='OBJECT', toggle=False) + bpy.ops.object.mode_set(mode='EDIT', toggle=False) + + bvh_nodes_list = sorted_nodes(bvh_nodes) + + # Get the average bone length for zero length bones, we may not use this. + average_bone_length = 0.0 + nonzero_count = 0 + for bvh_node in bvh_nodes_list: + l = (bvh_node.rest_head_local - bvh_node.rest_tail_local).length + if l: + average_bone_length += l + nonzero_count += 1 + + # Very rare cases all bones could be zero length??? + if not average_bone_length: + average_bone_length = 0.1 + else: + # Normal operation + average_bone_length = average_bone_length / nonzero_count + + # XXX, annoying, remove bone. + while arm_data.edit_bones: + arm_ob.edit_bones.remove(arm_data.edit_bones[-1]) + + ZERO_AREA_BONES = [] + for bvh_node in bvh_nodes_list: + + # New editbone + bone = bvh_node.temp = arm_data.edit_bones.new(bvh_node.name) + + bone.head = bvh_node.rest_head_world + bone.tail = bvh_node.rest_tail_world + + # Zero Length Bones! (an exceptional case) + if (bone.head - bone.tail).length < 0.001: + print("\tzero length bone found:", bone.name) + if bvh_node.parent: + ofs = bvh_node.parent.rest_head_local - bvh_node.parent.rest_tail_local + if ofs.length: # is our parent zero length also?? 
unlikely + bone.tail = bone.tail - ofs + else: + bone.tail.y = bone.tail.y + average_bone_length + else: + bone.tail.y = bone.tail.y + average_bone_length + + ZERO_AREA_BONES.append(bone.name) + + for bvh_node in bvh_nodes_list: + if bvh_node.parent: + # bvh_node.temp is the Editbone + + # Set the bone parent + bvh_node.temp.parent = bvh_node.parent.temp + + # Set the connection state + if ( + (not bvh_node.has_loc) and + (bvh_node.parent.temp.name not in ZERO_AREA_BONES) and + (bvh_node.parent.rest_tail_local == bvh_node.rest_head_local) + ): + bvh_node.temp.use_connect = True + + # Replace the editbone with the editbone name, + # to avoid memory errors accessing the editbone outside editmode + for bvh_node in bvh_nodes_list: + bvh_node.temp = bvh_node.temp.name + + # Now Apply the animation to the armature + + # Get armature animation data + bpy.ops.object.mode_set(mode='OBJECT', toggle=False) + + pose = arm_ob.pose + pose_bones = pose.bones + + if rotate_mode == 'NATIVE': + for bvh_node in bvh_nodes_list: + bone_name = bvh_node.temp # may not be the same name as the bvh_node, could have been shortened. + pose_bone = pose_bones[bone_name] + pose_bone.rotation_mode = bvh_node.rot_order_str + + elif rotate_mode != 'QUATERNION': + for pose_bone in pose_bones: + pose_bone.rotation_mode = rotate_mode + else: + # Quats default + pass + + context.view_layer.update() + + arm_ob.animation_data_create() + action = bpy.data.actions.new(name=bvh_name) + arm_ob.animation_data.action = action + + # Replace the bvh_node.temp (currently an editbone) + # With a tuple (pose_bone, armature_bone, bone_rest_matrix, bone_rest_matrix_inv) + num_frame = 0 + for bvh_node in bvh_nodes_list: + bone_name = bvh_node.temp # may not be the same name as the bvh_node, could have been shortened. + pose_bone = pose_bones[bone_name] + rest_bone = arm_data.bones[bone_name] + bone_rest_matrix = rest_bone.matrix_local.to_3x3() + + bone_rest_matrix_inv = Matrix(bone_rest_matrix) + bone_rest_matrix_inv.invert() + + bone_rest_matrix_inv.resize_4x4() + bone_rest_matrix.resize_4x4() + bvh_node.temp = (pose_bone, bone, bone_rest_matrix, bone_rest_matrix_inv) + + if 0 == num_frame: + num_frame = len(bvh_node.anim_data) + + # Choose to skip some frames at the beginning. Frame 0 is the rest pose + # used internally by this importer. Frame 1, by convention, is also often + # the rest pose of the skeleton exported by the motion capture system. + skip_frame = 1 + if num_frame > skip_frame: + num_frame = num_frame - skip_frame + + # Create a shared time axis for all animation curves. + time = [float(frame_start)] * num_frame + if use_fps_scale: + dt = scene.render.fps * bvh_frame_time + for frame_i in range(1, num_frame): + time[frame_i] += float(frame_i) * dt + else: + for frame_i in range(1, num_frame): + time[frame_i] += float(frame_i) + + # print("bvh_frame_time = %f, dt = %f, num_frame = %d" + # % (bvh_frame_time, dt, num_frame])) + + for i, bvh_node in enumerate(bvh_nodes_list): + pose_bone, bone, bone_rest_matrix, bone_rest_matrix_inv = bvh_node.temp + + if bvh_node.has_loc: + # Not sure if there is a way to query this or access it in the + # PoseBone structure. 
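+            # The keyed location is the offset from the bone's rest head, rotated
+            # into the bone's rest space with bone_rest_matrix_inv (see the loop
+            # that fills `location` below).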
+ data_path = 'pose.bones["%s"].location' % escape_identifier(pose_bone.name) + + location = [(0.0, 0.0, 0.0)] * num_frame + for frame_i in range(num_frame): + bvh_loc = bvh_node.anim_data[frame_i + skip_frame][:3] + + bone_translate_matrix = Matrix.Translation( + Vector(bvh_loc) - bvh_node.rest_head_local) + location[frame_i] = (bone_rest_matrix_inv @ + bone_translate_matrix).to_translation() + + # For each location x, y, z. + for axis_i in range(3): + curve = action.fcurves.new(data_path=data_path, index=axis_i, action_group=bvh_node.name) + keyframe_points = curve.keyframe_points + keyframe_points.add(num_frame) + + for frame_i in range(num_frame): + keyframe_points[frame_i].co = ( + time[frame_i], + location[frame_i][axis_i], + ) + + if bvh_node.has_rot: + data_path = None + rotate = None + + if 'QUATERNION' == rotate_mode: + rotate = [(1.0, 0.0, 0.0, 0.0)] * num_frame + data_path = ('pose.bones["%s"].rotation_quaternion' % escape_identifier(pose_bone.name)) + else: + rotate = [(0.0, 0.0, 0.0)] * num_frame + data_path = ('pose.bones["%s"].rotation_euler' % escape_identifier(pose_bone.name)) + + prev_euler = Euler((0.0, 0.0, 0.0)) + for frame_i in range(num_frame): + bvh_rot = bvh_node.anim_data[frame_i + skip_frame][3:] + + # apply rotation order and convert to XYZ + # note that the rot_order_str is reversed. + euler = Euler(bvh_rot, bvh_node.rot_order_str[::-1]) + bone_rotation_matrix = euler.to_matrix().to_4x4() + bone_rotation_matrix = ( + bone_rest_matrix_inv @ + bone_rotation_matrix @ + bone_rest_matrix + ) + + if len(rotate[frame_i]) == 4: + rotate[frame_i] = bone_rotation_matrix.to_quaternion() + else: + rotate[frame_i] = bone_rotation_matrix.to_euler( + pose_bone.rotation_mode, prev_euler) + prev_euler = rotate[frame_i] + + # For each euler angle x, y, z (or quaternion w, x, y, z). + for axis_i in range(len(rotate[0])): + curve = action.fcurves.new(data_path=data_path, index=axis_i, action_group=bvh_node.name) + keyframe_points = curve.keyframe_points + keyframe_points.add(num_frame) + + for frame_i in range(num_frame): + keyframe_points[frame_i].co = ( + time[frame_i], + rotate[frame_i][axis_i], + ) + + for cu in action.fcurves: + if IMPORT_LOOP: + pass # 2.5 doenst have cyclic now? + + for bez in cu.keyframe_points: + bez.interpolation = 'LINEAR' + + # finally apply matrix + arm_ob.matrix_world = global_matrix + bpy.ops.object.transform_apply(location=False, rotation=True, scale=False) + + return arm_ob + + +def load( + context, + filepath, + *, + target='ARMATURE', + rotate_mode='NATIVE', + global_scale=1.0, + use_cyclic=False, + frame_start=1, + global_matrix=None, + use_fps_scale=False, + update_scene_fps=False, + update_scene_duration=False, + report=print, +): + import time + t1 = time.time() + print("\tparsing bvh %r..." % filepath, end="") + + bvh_nodes, bvh_frame_time, bvh_frame_count = read_bvh( + context, filepath, + rotate_mode=rotate_mode, + global_scale=global_scale, + ) + + print("%.4f" % (time.time() - t1)) + + scene = context.scene + frame_orig = scene.frame_current + + # Broken BVH handling: guess frame rate when it is not contained in the file. + if bvh_frame_time is None: + report( + {'WARNING'}, + "The BVH file does not contain frame duration in its MOTION " + "section, assuming the BVH and Blender scene have the same " + "frame rate" + ) + bvh_frame_time = scene.render.fps_base / scene.render.fps + # No need to scale the frame rate, as they're equal now anyway. 
+ use_fps_scale = False + + if update_scene_fps: + _update_scene_fps(context, report, bvh_frame_time) + + # Now that we have a 1-to-1 mapping of Blender frames and BVH frames, there is no need + # to scale the FPS any more. It's even better not to, to prevent roundoff errors. + use_fps_scale = False + + if update_scene_duration: + _update_scene_duration(context, report, bvh_frame_count, bvh_frame_time, frame_start, use_fps_scale) + + t1 = time.time() + print("\timporting to blender...", end="") + + bvh_name = bpy.path.display_name_from_filepath(filepath) + + if target == 'ARMATURE': + bvh_node_dict2armature( + context, bvh_name, bvh_nodes, bvh_frame_time, + rotate_mode=rotate_mode, + frame_start=frame_start, + IMPORT_LOOP=use_cyclic, + global_matrix=global_matrix, + use_fps_scale=use_fps_scale, + ) + + elif target == 'OBJECT': + bvh_node_dict2objects( + context, bvh_name, bvh_nodes, + rotate_mode=rotate_mode, + frame_start=frame_start, + IMPORT_LOOP=use_cyclic, + # global_matrix=global_matrix, # TODO + ) + + else: + report({'ERROR'}, tip_("Invalid target %r (must be 'ARMATURE' or 'OBJECT')") % target) + return {'CANCELLED'} + + print('Done in %.4f\n' % (time.time() - t1)) + + context.scene.frame_set(frame_orig) + + return {'FINISHED'} + + +def _update_scene_fps(context, report, bvh_frame_time): + """Update the scene's FPS settings from the BVH, but only if the BVH contains enough info.""" + + # Broken BVH handling: prevent division by zero. + if bvh_frame_time == 0.0: + report( + {'WARNING'}, + "Unable to update scene frame rate, as the BVH file " + "contains a zero frame duration in its MOTION section", + ) + return + + scene = context.scene + scene_fps = scene.render.fps / scene.render.fps_base + new_fps = 1.0 / bvh_frame_time + + if scene.render.fps != new_fps or scene.render.fps_base != 1.0: + print("\tupdating scene FPS (was %f) to BVH FPS (%f)" % (scene_fps, new_fps)) + scene.render.fps = int(round(new_fps)) + scene.render.fps_base = scene.render.fps / new_fps + + +def _update_scene_duration( + context, report, bvh_frame_count, bvh_frame_time, frame_start, + use_fps_scale): + """Extend the scene's duration so that the BVH file fits in its entirety.""" + + if bvh_frame_count is None: + report( + {'WARNING'}, + "Unable to extend the scene duration, as the BVH file does not " + "contain the number of frames in its MOTION section", + ) + return + + # Not likely, but it can happen when a BVH is just used to store an armature. + if bvh_frame_count == 0: + return + + if use_fps_scale: + scene_fps = context.scene.render.fps / context.scene.render.fps_base + scaled_frame_count = int(ceil(bvh_frame_count * bvh_frame_time * scene_fps)) + bvh_last_frame = frame_start + scaled_frame_count + else: + bvh_last_frame = frame_start + bvh_frame_count + + # Only extend the scene, never shorten it. 
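+    # Example with illustrative numbers: 120 BVH frames at 0.01 s/frame in a
+    # 25 fps scene scale to ceil(120 * 0.01 * 25) = 30 frames, so frame_end is
+    # raised to at least frame_start + 30; without FPS scaling it is raised to
+    # at least frame_start + 120.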
+ if context.scene.frame_end < bvh_last_frame: + context.scene.frame_end = bvh_last_frame diff --git a/scripts/addons_core/io_curve_svg/__init__.py b/scripts/addons_core/io_curve_svg/__init__.py new file mode 100644 index 00000000000..3dc482f7a52 --- /dev/null +++ b/scripts/addons_core/io_curve_svg/__init__.py @@ -0,0 +1,67 @@ +# SPDX-FileCopyrightText: 2011-2022 Blender Foundation +# +# SPDX-License-Identifier: GPL-2.0-or-later + +bl_info = { + "name": "Scalable Vector Graphics (SVG) 1.1 format", + "author": "JM Soler, Sergey Sharybin", + "blender": (2, 80, 0), + "location": "File > Import > Scalable Vector Graphics (.svg)", + "description": "Import SVG as curves", + "warning": "", + "doc_url": "{BLENDER_MANUAL_URL}/addons/import_export/curve_svg.html", + "support": 'OFFICIAL', + "category": "Import-Export", +} + + +# To support reload properly, try to access a package var, +# if it's there, reload everything +if "bpy" in locals(): + import importlib + if "import_svg" in locals(): + importlib.reload(import_svg) + + +import bpy +from bpy.props import StringProperty +from bpy_extras.io_utils import ImportHelper + + +class ImportSVG(bpy.types.Operator, ImportHelper): + """Load a SVG file""" + bl_idname = "import_curve.svg" + bl_label = "Import SVG" + bl_options = {'UNDO'} + + filename_ext = ".svg" + filter_glob: StringProperty(default="*.svg", options={'HIDDEN'}) + + def execute(self, context): + from . import import_svg + + return import_svg.load(self, context, filepath=self.filepath) + + +def menu_func_import(self, context): + self.layout.operator(ImportSVG.bl_idname, + text="Scalable Vector Graphics (.svg)") + + +def register(): + bpy.utils.register_class(ImportSVG) + + bpy.types.TOPBAR_MT_file_import.append(menu_func_import) + + +def unregister(): + bpy.utils.unregister_class(ImportSVG) + + bpy.types.TOPBAR_MT_file_import.remove(menu_func_import) + +# NOTES +# - blender version is hardcoded + + +if __name__ == "__main__": + register() diff --git a/scripts/addons_core/io_curve_svg/import_svg.py b/scripts/addons_core/io_curve_svg/import_svg.py new file mode 100644 index 00000000000..fb778868a88 --- /dev/null +++ b/scripts/addons_core/io_curve_svg/import_svg.py @@ -0,0 +1,1904 @@ +# SPDX-FileCopyrightText: 2004-2009 JM Soler +# SPDX-FileCopyrightText: 2011-2022 Blender Foundation +# +# SPDX-License-Identifier: GPL-2.0-or-later + +import re +import xml.dom.minidom +from math import cos, sin, tan, atan2, pi, ceil + +import bpy +from mathutils import Vector, Matrix +from bpy.app.translations import pgettext_tip as tip_ + +from . import svg_colors +from .svg_util import (units, + srgb_to_linearrgb, + check_points_equal, + parse_array_of_floats, + read_float) + +#### Common utilities #### + +SVGEmptyStyles = {'useFill': None, + 'fill': None} + + +def SVGCreateCurve(context): + """ + Create new curve object to hold splines in + """ + + cu = bpy.data.curves.new("Curve", 'CURVE') + obj = bpy.data.objects.new("Curve", cu) + + context['collection'].objects.link(obj) + + return obj + + +def SVGFinishCurve(): + """ + Finish curve creation + """ + + pass + + +def SVGFlipHandle(x, y, x1, y1): + """ + Flip handle around base point + """ + + x = x + (x - x1) + y = y + (y - y1) + + return x, y + + +def SVGParseCoord(coord, size): + """ + Parse coordinate component to common basis + + Needed to handle coordinates set in cm, mm, inches. 
+ """ + + token, last_char = read_float(coord) + val = float(token) + unit = coord[last_char:].strip() # strip() in case there is a space + + if unit == '%': + return float(size) / 100.0 * val + return val * units[unit] + + +def SVGRectFromNode(node, context): + """ + Get display rectangle from node + """ + + w = context['rect'][0] + h = context['rect'][1] + + if node.getAttribute('viewBox'): + viewBox = node.getAttribute('viewBox').replace(',', ' ').split() + w = SVGParseCoord(viewBox[2], w) + h = SVGParseCoord(viewBox[3], h) + else: + if node.getAttribute('width'): + w = SVGParseCoord(node.getAttribute('width'), w) + + if node.getAttribute('height'): + h = SVGParseCoord(node.getAttribute('height'), h) + + return (w, h) + + +def SVGMatrixFromNode(node, context): + """ + Get transformation matrix from given node + """ + + tagName = node.tagName.lower() + tags = ['svg:svg', 'svg:use', 'svg:symbol'] + + if tagName not in tags and 'svg:' + tagName not in tags: + return Matrix() + + rect = context['rect'] + has_user_coordinate = (len(context['rects']) > 1) + + m = Matrix() + x = SVGParseCoord(node.getAttribute('x') or '0', rect[0]) + y = SVGParseCoord(node.getAttribute('y') or '0', rect[1]) + w = SVGParseCoord(node.getAttribute('width') or str(rect[0]), rect[0]) + h = SVGParseCoord(node.getAttribute('height') or str(rect[1]), rect[1]) + + m = Matrix.Translation(Vector((x, y, 0.0))) + if has_user_coordinate: + if rect[0] != 0 and rect[1] != 0: + m = m @ Matrix.Scale(w / rect[0], 4, Vector((1.0, 0.0, 0.0))) + m = m @ Matrix.Scale(h / rect[1], 4, Vector((0.0, 1.0, 0.0))) + + if node.getAttribute('viewBox'): + viewBox = node.getAttribute('viewBox').replace(',', ' ').split() + vx = SVGParseCoord(viewBox[0], w) + vy = SVGParseCoord(viewBox[1], h) + vw = SVGParseCoord(viewBox[2], w) + vh = SVGParseCoord(viewBox[3], h) + + if vw == 0 or vh == 0: + return m + + if has_user_coordinate or (w != 0 and h != 0): + sx = w / vw + sy = h / vh + scale = min(sx, sy) + else: + scale = 1.0 + w = vw + h = vh + + tx = (w - vw * scale) / 2 + ty = (h - vh * scale) / 2 + m = m @ Matrix.Translation(Vector((tx, ty, 0.0))) + + m = m @ Matrix.Translation(Vector((-vx, -vy, 0.0))) + m = m @ Matrix.Scale(scale, 4, Vector((1.0, 0.0, 0.0))) + m = m @ Matrix.Scale(scale, 4, Vector((0.0, 1.0, 0.0))) + + return m + + +def SVGParseTransform(transform): + """ + Parse transform string and return transformation matrix + """ + + m = Matrix() + r = re.compile(r'\s*([A-z]+)\s*\((.*?)\)') + + for match in r.finditer(transform): + func = match.group(1) + params = match.group(2) + params = params.replace(',', ' ').split() + + proc = SVGTransforms.get(func) + if proc is None: + raise Exception('Unknown transform function: ' + func) + + m = m @ proc(params) + + return m + + +def SVGGetMaterial(color, context): + """ + Get material for specified color + """ + + materials = context['materials'] + rgb_re = re.compile(r'^\s*rgb\s*\(\s*(\d+)\s*,\s*(\d+)\s*,(\d+)\s*\)\s*$') + + if color in materials: + return materials[color] + + diff = None + if color.startswith('#'): + color = color[1:] + + if len(color) == 3: + color = color[0] * 2 + color[1] * 2 + color[2] * 2 + + diff = (int(color[0:2], 16), int(color[2:4], 16), int(color[4:6], 16)) + elif color in svg_colors.SVGColors: + diff = svg_colors.SVGColors[color] + elif rgb_re.match(color): + c = rgb_re.findall(color)[0] + diff = (float(c[0]), float(c[1]), float(c[2])) + else: + return None + + diffuse_color = ([x / 255.0 for x in diff]) + + if context['do_colormanage']: + diffuse_color[0] = 
srgb_to_linearrgb(diffuse_color[0]) + diffuse_color[1] = srgb_to_linearrgb(diffuse_color[1]) + diffuse_color[2] = srgb_to_linearrgb(diffuse_color[2]) + + mat = bpy.data.materials.new(name='SVGMat') + mat.diffuse_color = (*diffuse_color, 1.0) + + materials[color] = mat + + return mat + + +def SVGTransformTranslate(params): + """ + translate SVG transform command + """ + + tx = float(params[0]) + ty = float(params[1]) if len(params) > 1 else 0.0 + + return Matrix.Translation(Vector((tx, ty, 0.0))) + + +def SVGTransformMatrix(params): + """ + matrix SVG transform command + """ + + a = float(params[0]) + b = float(params[1]) + c = float(params[2]) + d = float(params[3]) + e = float(params[4]) + f = float(params[5]) + + return Matrix(((a, c, 0.0, e), + (b, d, 0.0, f), + (0, 0, 1.0, 0), + (0, 0, 0.0, 1))) + + +def SVGTransformScale(params): + """ + scale SVG transform command + """ + + sx = float(params[0]) + sy = float(params[1]) if len(params) > 1 else sx + + m = Matrix() + + m = m @ Matrix.Scale(sx, 4, Vector((1.0, 0.0, 0.0))) + m = m @ Matrix.Scale(sy, 4, Vector((0.0, 1.0, 0.0))) + + return m + + +def SVGTransformSkewY(params): + """ + skewY SVG transform command + """ + + ang = float(params[0]) * pi / 180.0 + + return Matrix(((1.0, 0.0, 0.0), + (tan(ang), 1.0, 0.0), + (0.0, 0.0, 1.0))).to_4x4() + + +def SVGTransformSkewX(params): + """ + skewX SVG transform command + """ + + ang = float(params[0]) * pi / 180.0 + + return Matrix(((1.0, tan(ang), 0.0), + (0.0, 1.0, 0.0), + (0.0, 0.0, 1.0))).to_4x4() + + +def SVGTransformRotate(params): + """ + skewX SVG transform command + """ + + ang = float(params[0]) * pi / 180.0 + cx = cy = 0.0 + + if len(params) >= 3: + cx = float(params[1]) + cy = float(params[2]) + + tm = Matrix.Translation(Vector((cx, cy, 0.0))) + rm = Matrix.Rotation(ang, 4, Vector((0.0, 0.0, 1.0))) + + return tm @ rm @ tm.inverted() + + +SVGTransforms = {'translate': SVGTransformTranslate, + 'scale': SVGTransformScale, + 'skewX': SVGTransformSkewX, + 'skewY': SVGTransformSkewY, + 'matrix': SVGTransformMatrix, + 'rotate': SVGTransformRotate} + + +def SVGParseStyles(node, context): + """ + Parse node to get different styles for displaying geometries + (materials, filling flags, etc..) 
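+    An inline "style" attribute, when present, takes priority; otherwise the
+    "fill" attribute, then the inherited parent style and finally opaque
+    black are used.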
+ """ + + styles = SVGEmptyStyles.copy() + + style = node.getAttribute('style') + if style: + elems = style.split(';') + for elem in elems: + s = elem.split(':') + + if len(s) != 2: + continue + + name = s[0].strip().lower() + val = s[1].strip() + + if name == 'fill': + val = val.lower() + if val == 'none': + styles['useFill'] = False + else: + styles['useFill'] = True + styles['fill'] = SVGGetMaterial(val, context) + + if styles['useFill'] is None: + styles['useFill'] = True + styles['fill'] = SVGGetMaterial('#000', context) + + return styles + + if styles['useFill'] is None: + fill = node.getAttribute('fill') + if fill: + fill = fill.lower() + if fill == 'none': + styles['useFill'] = False + else: + styles['useFill'] = True + styles['fill'] = SVGGetMaterial(fill, context) + + if styles['useFill'] is None and context['style']: + styles = context['style'].copy() + + if styles['useFill'] is None: + styles['useFill'] = True + styles['fill'] = SVGGetMaterial('#000', context) + + return styles + + +def id_names_from_node(node, ob): + if node.getAttribute('id'): + name = node.getAttribute('id') + ob.name = name + ob.data.name = name + +#### SVG path helpers #### + + +class SVGPathData: + """ + SVG Path data token supplier + """ + + __slots__ = ('_data', # List of tokens + '_index', # Index of current token in tokens list + '_len') # Length of tokens list + + def __init__(self, d): + """ + Initialize new path data supplier + + d - the definition of the outline of a shape + """ + + spaces = ' ,\t' + commands = {'m', 'l', 'h', 'v', 'c', 's', 'q', '', 't', 'a', 'z'} + current_command = '' + tokens = [] + + i = 0 + n = len(d) + while i < n: + c = d[i] + + if c in spaces: + pass + elif c.lower() in commands: + tokens.append(c) + current_command = c + arg_index = 1 + elif c in ['-', '.'] or c.isdigit(): + # Special case for 'a/A' commands. + # Arguments 4 and 5 are either 0 or 1 and might not + # be separated from the next argument with space or comma. + if current_command.lower() == 'a': + if arg_index % 7 in [4, 5]: + token = d[i] + last_char = i + 1 + else: + token, last_char = read_float(d, i) + else: + token, last_char = read_float(d, i) + + arg_index += 1 + tokens.append(token) + + # in most cases len(token) and (last_char - i) are the same + # but with whitespace or ',' prefix they are not. 
+ + i += (last_char - i) - 1 + + i += 1 + + self._data = tokens + self._index = 0 + self._len = len(tokens) + + def eof(self): + """ + Check if end of data reached + """ + + return self._index >= self._len + + def cur(self): + """ + Return current token + """ + + if self.eof(): + return None + + return self._data[self._index] + + def lookupNext(self): + """ + get next token without moving pointer + """ + + if self.eof(): + return None + + return self._data[self._index] + + def next(self): + """ + Return current token and go to next one + """ + + if self.eof(): + return None + + token = self._data[self._index] + self._index += 1 + + return token + + def nextCoord(self): + """ + Return coordinate created from current token and move to next token + """ + + token = self.next() + + if token is None: + return None + + return float(token) + + +class SVGPathParser: + """ + Parser of SVG path data + """ + + __slots__ = ('_data', # Path data supplird + '_point', # Current point coordinate + '_handle', # Last handle coordinate + '_splines', # List of all splies created during parsing + '_spline', # Currently handling spline + '_commands', # Hash of all supported path commands + '_use_fill', # Splines would be filled, so expected to be closed + ) + + def __init__(self, d, use_fill): + """ + Initialize path parser + + d - the definition of the outline of a shape + """ + + self._data = SVGPathData(d) + self._point = None # Current point + self._handle = None # Last handle + self._splines = [] # List of splines in path + self._spline = None # Current spline + self._use_fill = use_fill + + self._commands = {'M': self._pathMoveTo, + 'L': self._pathLineTo, + 'H': self._pathLineTo, + 'V': self._pathLineTo, + 'C': self._pathCurveToCS, + 'S': self._pathCurveToCS, + 'Q': self._pathCurveToQT, + 'T': self._pathCurveToQT, + 'A': self._pathCurveToA, + 'Z': self._pathClose, + + 'm': self._pathMoveTo, + 'l': self._pathLineTo, + 'h': self._pathLineTo, + 'v': self._pathLineTo, + 'c': self._pathCurveToCS, + 's': self._pathCurveToCS, + 'q': self._pathCurveToQT, + 't': self._pathCurveToQT, + 'a': self._pathCurveToA, + 'z': self._pathClose} + + def _getCoordPair(self, relative, point): + """ + Get next coordinate pair + """ + + x = self._data.nextCoord() + y = self._data.nextCoord() + + if relative and point is not None: + x += point[0] + y += point[1] + + return x, y + + def _appendPoint(self, x, y, handle_left=None, handle_left_type='VECTOR', + handle_right=None, handle_right_type='VECTOR'): + """ + Append point to spline + + If there's no active spline, create one and set it's first point + to current point coordinate + """ + + if self._spline is None: + self._spline = {'points': [], + 'closed': False} + + self._splines.append(self._spline) + + if len(self._spline['points']) > 0: + # Not sure about specifications, but Illustrator could create + # last point at the same position, as start point (which was + # reached by MoveTo command) to set needed handle coords. + # It's also could use last point at last position to make path + # filled. 
+ + first = self._spline['points'][0] + if check_points_equal((first['x'], first['y']), (x, y)): + if handle_left is not None: + first['handle_left'] = handle_left + first['handle_left_type'] = 'FREE' + + if handle_left_type != 'VECTOR': + first['handle_left_type'] = handle_left_type + + if self._data.eof() or self._data.lookupNext().lower() == 'm': + self._spline['closed'] = True + + return + + last = self._spline['points'][-1] + if last['handle_right_type'] == 'VECTOR' and handle_left_type == 'FREE': + last['handle_right'] = (last['x'], last['y']) + last['handle_right_type'] = 'FREE' + if last['handle_right_type'] == 'FREE' and handle_left_type == 'VECTOR': + handle_left = (x, y) + handle_left_type = 'FREE' + + point = {'x': x, + 'y': y, + + 'handle_left': handle_left, + 'handle_left_type': handle_left_type, + + 'handle_right': handle_right, + 'handle_right_type': handle_right_type} + + self._spline['points'].append(point) + + def _updateHandle(self, handle=None, handle_type=None): + """ + Update right handle of previous point when adding new point to spline + """ + + point = self._spline['points'][-1] + + if handle_type is not None: + point['handle_right_type'] = handle_type + + if handle is not None: + point['handle_right'] = handle + + def _pathMoveTo(self, code): + """ + MoveTo path command + """ + + relative = code.islower() + x, y = self._getCoordPair(relative, self._point) + + self._spline = None # Flag to start new spline + self._point = (x, y) + + cur = self._data.cur() + while cur is not None and not cur.isalpha(): + x, y = self._getCoordPair(relative, self._point) + + if self._spline is None: + self._appendPoint(self._point[0], self._point[1]) + + self._appendPoint(x, y) + + self._point = (x, y) + cur = self._data.cur() + + self._handle = None + + def _pathLineTo(self, code): + """ + LineTo path command + """ + + c = code.lower() + + cur = self._data.cur() + while cur is not None and not cur.isalpha(): + if c == 'l': + x, y = self._getCoordPair(code == 'l', self._point) + elif c == 'h': + x = self._data.nextCoord() + y = self._point[1] + else: + x = self._point[0] + y = self._data.nextCoord() + + if code == 'h': + x += self._point[0] + elif code == 'v': + y += self._point[1] + + if self._spline is None: + self._appendPoint(self._point[0], self._point[1]) + + self._appendPoint(x, y) + + self._point = (x, y) + cur = self._data.cur() + + self._handle = None + + def _pathCurveToCS(self, code): + """ + Cubic BEZIER CurveTo path command + """ + + c = code.lower() + cur = self._data.cur() + while cur is not None and not cur.isalpha(): + if c == 'c': + x1, y1 = self._getCoordPair(code.islower(), self._point) + x2, y2 = self._getCoordPair(code.islower(), self._point) + else: + if self._handle is not None: + x1, y1 = SVGFlipHandle(self._point[0], self._point[1], + self._handle[0], self._handle[1]) + else: + x1, y1 = self._point + + x2, y2 = self._getCoordPair(code.islower(), self._point) + + x, y = self._getCoordPair(code.islower(), self._point) + + if self._spline is None: + self._appendPoint(self._point[0], self._point[1], + handle_left_type='FREE', handle_left=self._point, + handle_right_type='FREE', handle_right=(x1, y1)) + else: + self._updateHandle(handle=(x1, y1), handle_type='FREE') + + self._appendPoint(x, y, + handle_left_type='FREE', handle_left=(x2, y2), + handle_right_type='FREE', handle_right=(x, y)) + + self._point = (x, y) + self._handle = (x2, y2) + cur = self._data.cur() + + def _pathCurveToQT(self, code): + """ + Quadratic BEZIER CurveTo path command + """ + + c = 
code.lower() + cur = self._data.cur() + + while cur is not None and not cur.isalpha(): + if c == 'q': + x1, y1 = self._getCoordPair(code.islower(), self._point) + else: + if self._handle is not None: + x1, y1 = SVGFlipHandle(self._point[0], self._point[1], + self._handle[0], self._handle[1]) + else: + x1, y1 = self._point + + x, y = self._getCoordPair(code.islower(), self._point) + + if not check_points_equal((x, y), self._point): + if self._spline is None: + self._appendPoint(self._point[0], self._point[1], + handle_left_type='FREE', handle_left=self._point, + handle_right_type='FREE', handle_right=self._point) + + self._appendPoint(x, y, + handle_left_type='FREE', handle_left=(x1, y1), + handle_right_type='FREE', handle_right=(x, y)) + + self._point = (x, y) + self._handle = (x1, y1) + cur = self._data.cur() + + def _calcArc(self, rx, ry, ang, fa, fs, x, y): + """ + Calc arc paths + + Copied and adopted from `paths_svg2obj.py` script for Blender 2.49: + ``Copyright (c) jm soler juillet/novembre 2004-april 2009``. + """ + + cpx = self._point[0] + cpy = self._point[1] + rx = abs(rx) + ry = abs(ry) + px = abs((cos(ang) * (cpx - x) + sin(ang) * (cpy - y)) * 0.5) ** 2.0 + py = abs((cos(ang) * (cpy - y) - sin(ang) * (cpx - x)) * 0.5) ** 2.0 + rpx = rpy = 0.0 + + if abs(rx) > 0.0: + px = px / (rx ** 2.0) + + if abs(ry) > 0.0: + rpy = py / (ry ** 2.0) + + pl = rpx + rpy + if pl > 1.0: + pl = pl ** 0.5 + rx *= pl + ry *= pl + + carx = sarx = cary = sary = 0.0 + + if abs(rx) > 0.0: + carx = cos(ang) / rx + sarx = sin(ang) / rx + + if abs(ry) > 0.0: + cary = cos(ang) / ry + sary = sin(ang) / ry + + x0 = carx * cpx + sarx * cpy + y0 = -sary * cpx + cary * cpy + x1 = carx * x + sarx * y + y1 = -sary * x + cary * y + d = (x1 - x0) * (x1 - x0) + (y1 - y0) * (y1 - y0) + + if abs(d) > 0.0: + sq = 1.0 / d - 0.25 + else: + sq = -0.25 + + if sq < 0.0: + sq = 0.0 + + sf = sq ** 0.5 + if fs == fa: + sf = -sf + + xc = 0.5 * (x0 + x1) - sf * (y1 - y0) + yc = 0.5 * (y0 + y1) + sf * (x1 - x0) + ang_0 = atan2(y0 - yc, x0 - xc) + ang_1 = atan2(y1 - yc, x1 - xc) + ang_arc = ang_1 - ang_0 + + if ang_arc < 0.0 and fs == 1: + ang_arc += 2.0 * pi + elif ang_arc > 0.0 and fs == 0: + ang_arc -= 2.0 * pi + + n_segs = int(ceil(abs(ang_arc * 2.0 / (pi * 0.5 + 0.001)))) + + if self._spline is None: + self._appendPoint(cpx, cpy, + handle_left_type='FREE', handle_left=(cpx, cpy), + handle_right_type='FREE', handle_right=(cpx, cpy)) + + for i in range(n_segs): + ang0 = ang_0 + i * ang_arc / n_segs + ang1 = ang_0 + (i + 1) * ang_arc / n_segs + ang_demi = 0.25 * (ang1 - ang0) + t = 2.66666 * sin(ang_demi) * sin(ang_demi) / sin(ang_demi * 2.0) + x1 = xc + cos(ang0) - t * sin(ang0) + y1 = yc + sin(ang0) + t * cos(ang0) + x2 = xc + cos(ang1) + y2 = yc + sin(ang1) + x3 = x2 + t * sin(ang1) + y3 = y2 - t * cos(ang1) + + coord1 = ((cos(ang) * rx) * x1 + (-sin(ang) * ry) * y1, + (sin(ang) * rx) * x1 + (cos(ang) * ry) * y1) + coord2 = ((cos(ang) * rx) * x3 + (-sin(ang) * ry) * y3, + (sin(ang) * rx) * x3 + (cos(ang) * ry) * y3) + coord3 = ((cos(ang) * rx) * x2 + (-sin(ang) * ry) * y2, + (sin(ang) * rx) * x2 + (cos(ang) * ry) * y2) + + self._updateHandle(handle=coord1, handle_type='FREE') + + self._appendPoint(coord3[0], coord3[1], + handle_left_type='FREE', handle_left=coord2, + handle_right_type='FREE', handle_right=coord3) + + def _pathCurveToA(self, code): + """ + Elliptical arc CurveTo path command + """ + + cur = self._data.cur() + + while cur is not None and not cur.isalpha(): + rx = float(self._data.next()) + ry = 
float(self._data.next()) + ang = float(self._data.next()) / 180 * pi + fa = float(self._data.next()) + fs = float(self._data.next()) + x, y = self._getCoordPair(code.islower(), self._point) + + self._calcArc(rx, ry, ang, fa, fs, x, y) + + self._point = (x, y) + self._handle = None + cur = self._data.cur() + + def _pathClose(self, code): + """ + Close path command + """ + + if self._spline: + self._spline['closed'] = True + + cv = self._spline['points'][0] + self._point = (cv['x'], cv['y']) + + def _pathCloseImplicitly(self): + """ + Close path implicitly without changing current point coordinate + """ + + if self._spline: + self._spline['closed'] = True + + def parse(self): + """ + Execute parser + """ + + closed = False + + while not self._data.eof(): + code = self._data.next() + cmd = self._commands.get(code) + + if cmd is None: + raise Exception('Unknown path command: {0}' . format(code)) + + if code in {'Z', 'z'}: + closed = True + else: + closed = False + + if code in {'M', 'm'} and self._use_fill and not closed: + self._pathCloseImplicitly() # Ensure closed before MoveTo path command + + cmd(code) + if self._use_fill and not closed: + self._pathCloseImplicitly() # Ensure closed at the end of parsing + + def getSplines(self): + """ + Get splines definitions + """ + + return self._splines + + +class SVGGeometry: + """ + Abstract SVG geometry + """ + + __slots__ = ('_node', # XML node for geometry + '_context', # Global SVG context (holds matrices stack, i.e.) + '_creating') # Flag if geometry is already creating + # for this node + # need to detect cycles for USE node + + def __init__(self, node, context): + """ + Initialize SVG geometry + """ + + self._node = node + self._context = context + self._creating = False + + if hasattr(node, 'getAttribute'): + defs = context['defines'] + + attr_id = node.getAttribute('id') + if attr_id and defs.get('#' + attr_id) is None: + defs['#' + attr_id] = self + + className = node.getAttribute('class') + if className and defs.get(className) is None: + defs[className] = self + + def _pushRect(self, rect): + """ + Push display rectangle + """ + + self._context['rects'].append(rect) + self._context['rect'] = rect + + def _popRect(self): + """ + Pop display rectangle + """ + + self._context['rects'].pop() + self._context['rect'] = self._context['rects'][-1] + + def _pushMatrix(self, matrix): + """ + Push transformation matrix + """ + + current_matrix = self._context['matrix'] + self._context['matrix_stack'].append(current_matrix) + self._context['matrix'] = current_matrix @ matrix + + def _popMatrix(self): + """ + Pop transformation matrix + """ + + old_matrix = self._context['matrix_stack'].pop() + self._context['matrix'] = old_matrix + + def _pushStyle(self, style): + """ + Push style + """ + + self._context['styles'].append(style) + self._context['style'] = style + + def _popStyle(self): + """ + Pop style + """ + + self._context['styles'].pop() + self._context['style'] = self._context['styles'][-1] + + def _transformCoord(self, point): + """ + Transform SVG-file coords + """ + + v = Vector((point[0], point[1], 0.0)) + + return self._context['matrix'] @ v + + def getNodeMatrix(self): + """ + Get transformation matrix of node + """ + + return SVGMatrixFromNode(self._node, self._context) + + def parse(self): + """ + Parse XML node to memory + """ + + pass + + def _doCreateGeom(self, instancing): + """ + Internal handler to create real geometries + """ + + pass + + def getTransformMatrix(self): + """ + Get matrix created from "transform" attribute + """ + 
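+        # The matrix returned here is combined with the inherited transform by
+        # createGeom() below, which pushes it onto the context matrix stack
+        # before creating geometry and pops it afterwards.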
+ transform = self._node.getAttribute('transform') + + if transform: + return SVGParseTransform(transform) + + return None + + def createGeom(self, instancing): + """ + Create real geometries + """ + + if self._creating: + return + + self._creating = True + + matrix = self.getTransformMatrix() + if matrix is not None: + self._pushMatrix(matrix) + + self._doCreateGeom(instancing) + + if matrix is not None: + self._popMatrix() + + self._creating = False + + +class SVGGeometryContainer(SVGGeometry): + """ + Container of SVG geometries + """ + + __slots__ = ('_geometries', # List of chold geometries + '_styles') # Styles, used for displaying + + def __init__(self, node, context): + """ + Initialize SVG geometry container + """ + + super().__init__(node, context) + + self._geometries = [] + self._styles = SVGEmptyStyles + + def parse(self): + """ + Parse XML node to memory + """ + + if type(self._node) is xml.dom.minidom.Element: + self._styles = SVGParseStyles(self._node, self._context) + + self._pushStyle(self._styles) + + for node in self._node.childNodes: + if type(node) is not xml.dom.minidom.Element: + continue + + ob = parseAbstractNode(node, self._context) + if ob is not None: + self._geometries.append(ob) + + self._popStyle() + + def _doCreateGeom(self, instancing): + """ + Create real geometries + """ + + for geom in self._geometries: + geom.createGeom(instancing) + + def getGeometries(self): + """ + Get list of parsed geometries + """ + + return self._geometries + + +class SVGGeometryPATH(SVGGeometry): + """ + SVG path geometry + """ + + __slots__ = ('_splines', # List of splines after parsing + '_styles') # Styles, used for displaying + + def __init__(self, node, context): + """ + Initialize SVG path + """ + + super().__init__(node, context) + + self._splines = [] + self._styles = SVGEmptyStyles + + def parse(self): + """ + Parse SVG path node + """ + + d = self._node.getAttribute('d') + + self._styles = SVGParseStyles(self._node, self._context) + + pathParser = SVGPathParser(d, self._styles['useFill']) + pathParser.parse() + + self._splines = pathParser.getSplines() + + def _doCreateGeom(self, instancing): + """ + Create real geometries + """ + + ob = SVGCreateCurve(self._context) + cu = ob.data + + id_names_from_node(self._node, ob) + + if self._styles['useFill']: + cu.dimensions = '2D' + cu.fill_mode = 'BOTH' + cu.materials.append(self._styles['fill']) + else: + cu.dimensions = '3D' + + for spline in self._splines: + act_spline = None + + if spline['closed'] and len(spline['points']) >= 2: + first = spline['points'][0] + last = spline['points'][-1] + if (first['handle_left_type'] == 'FREE' and + last['handle_right_type'] == 'VECTOR'): + last['handle_right_type'] = 'FREE' + last['handle_right'] = (last['x'], last['y']) + if (last['handle_right_type'] == 'FREE' and + first['handle_left_type'] == 'VECTOR'): + first['handle_left_type'] = 'FREE' + first['handle_left'] = (first['x'], first['y']) + + for point in spline['points']: + co = self._transformCoord((point['x'], point['y'])) + + if act_spline is None: + cu.splines.new('BEZIER') + + act_spline = cu.splines[-1] + act_spline.use_cyclic_u = spline['closed'] + else: + act_spline.bezier_points.add(1) + + bezt = act_spline.bezier_points[-1] + bezt.co = co + + bezt.handle_left_type = point['handle_left_type'] + if point['handle_left'] is not None: + handle = point['handle_left'] + bezt.handle_left = self._transformCoord(handle) + + bezt.handle_right_type = point['handle_right_type'] + if point['handle_right'] is not None: + handle = 
point['handle_right'] + bezt.handle_right = self._transformCoord(handle) + + SVGFinishCurve() + + +class SVGGeometryDEFS(SVGGeometryContainer): + """ + Container for referenced elements + """ + + def createGeom(self, instancing): + """ + Create real geometries + """ + + pass + + +class SVGGeometrySYMBOL(SVGGeometryContainer): + """ + Referenced element + """ + + def _doCreateGeom(self, instancing): + """ + Create real geometries + """ + + self._pushMatrix(self.getNodeMatrix()) + + super()._doCreateGeom(False) + + self._popMatrix() + + def createGeom(self, instancing): + """ + Create real geometries + """ + + if not instancing: + return + + super().createGeom(instancing) + + +class SVGGeometryG(SVGGeometryContainer): + """ + Geometry group + """ + + pass + + +class SVGGeometryUSE(SVGGeometry): + """ + User of referenced elements + """ + + def _doCreateGeom(self, instancing): + """ + Create real geometries + """ + + ref = self._node.getAttribute('xlink:href') + geom = self._context['defines'].get(ref) + + if geom is not None: + rect = SVGRectFromNode(self._node, self._context) + self._pushRect(rect) + + self._pushMatrix(self.getNodeMatrix()) + + geom.createGeom(True) + + self._popMatrix() + + self._popRect() + + +class SVGGeometryRECT(SVGGeometry): + """ + SVG rectangle + """ + + __slots__ = ('_rect', # coordinate and dimensions of rectangle + '_radius', # Rounded corner radiuses + '_styles') # Styles, used for displaying + + def __init__(self, node, context): + """ + Initialize new rectangle + """ + + super().__init__(node, context) + + self._rect = ('0', '0', '0', '0') + self._radius = ('0', '0') + self._styles = SVGEmptyStyles + + def parse(self): + """ + Parse SVG rectangle node + """ + + self._styles = SVGParseStyles(self._node, self._context) + + rect = [] + for attr in ['x', 'y', 'width', 'height']: + val = self._node.getAttribute(attr) + rect.append(val or '0') + + self._rect = (rect) + + rx = self._node.getAttribute('rx') + ry = self._node.getAttribute('ry') + + self._radius = (rx, ry) + + def _appendCorner(self, spline, coord, firstTime, rounded): + """ + Append new corner to rectangle + """ + + handle = None + if len(coord) == 3: + handle = self._transformCoord(coord[2]) + coord = (coord[0], coord[1]) + + co = self._transformCoord(coord) + + if not firstTime: + spline.bezier_points.add(1) + + bezt = spline.bezier_points[-1] + bezt.co = co + + if rounded: + if handle: + bezt.handle_left_type = 'VECTOR' + bezt.handle_right_type = 'FREE' + + bezt.handle_right = handle + else: + bezt.handle_left_type = 'FREE' + bezt.handle_right_type = 'VECTOR' + bezt.handle_left = co + + else: + bezt.handle_left_type = 'VECTOR' + bezt.handle_right_type = 'VECTOR' + + def _doCreateGeom(self, instancing): + """ + Create real geometries + """ + + # Run-time parsing -- percents would be correct only if + # parsing them now + crect = self._context['rect'] + rect = [] + + for i in range(4): + rect.append(SVGParseCoord(self._rect[i], crect[i % 2])) + + r = self._radius + rx = ry = 0.0 + + if r[0] and r[1]: + rx = min(SVGParseCoord(r[0], rect[0]), rect[2] / 2) + ry = min(SVGParseCoord(r[1], rect[1]), rect[3] / 2) + elif r[0]: + rx = min(SVGParseCoord(r[0], rect[0]), rect[2] / 2) + ry = min(rx, rect[3] / 2) + rx = ry = min(rx, ry) + elif r[1]: + ry = min(SVGParseCoord(r[1], rect[1]), rect[3] / 2) + rx = min(ry, rect[2] / 2) + rx = ry = min(rx, ry) + + radius = (rx, ry) + + # Geometry creation + ob = SVGCreateCurve(self._context) + cu = ob.data + + id_names_from_node(self._node, ob) + + if 
self._styles['useFill']: + cu.dimensions = '2D' + cu.fill_mode = 'BOTH' + cu.materials.append(self._styles['fill']) + else: + cu.dimensions = '3D' + + cu.splines.new('BEZIER') + + spline = cu.splines[-1] + spline.use_cyclic_u = True + + x, y = rect[0], rect[1] + w, h = rect[2], rect[3] + rx, ry = radius[0], radius[1] + rounded = False + + if rx or ry: + # + # 0 _______ 1 + # / \ + # / \ + # 7 2 + # | | + # | | + # 6 3 + # \ / + # \ / + # 5 _______ 4 + # + + # Optional third component -- right handle coord + coords = [(x + rx, y), + (x + w - rx, y, (x + w, y)), + (x + w, y + ry), + (x + w, y + h - ry, (x + w, y + h)), + (x + w - rx, y + h), + (x + rx, y + h, (x, y + h)), + (x, y + h - ry), + (x, y + ry, (x, y))] + + rounded = True + else: + coords = [(x, y), (x + w, y), (x + w, y + h), (x, y + h)] + + firstTime = True + for coord in coords: + self._appendCorner(spline, coord, firstTime, rounded) + firstTime = False + + SVGFinishCurve() + + +class SVGGeometryELLIPSE(SVGGeometry): + """ + SVG ellipse + """ + + __slots__ = ('_cx', # X-coordinate of center + '_cy', # Y-coordinate of center + '_rx', # X-axis radius of circle + '_ry', # Y-axis radius of circle + '_styles') # Styles, used for displaying + + def __init__(self, node, context): + """ + Initialize new ellipse + """ + + super().__init__(node, context) + + self._cx = '0.0' + self._cy = '0.0' + self._rx = '0.0' + self._ry = '0.0' + self._styles = SVGEmptyStyles + + def parse(self): + """ + Parse SVG ellipse node + """ + + self._styles = SVGParseStyles(self._node, self._context) + + self._cx = self._node.getAttribute('cx') or '0' + self._cy = self._node.getAttribute('cy') or '0' + self._rx = self._node.getAttribute('rx') or '0' + self._ry = self._node.getAttribute('ry') or '0' + + def _doCreateGeom(self, instancing): + """ + Create real geometries + """ + + # Run-time parsing -- percents would be correct only if + # parsing them now + crect = self._context['rect'] + + cx = SVGParseCoord(self._cx, crect[0]) + cy = SVGParseCoord(self._cy, crect[1]) + rx = SVGParseCoord(self._rx, crect[0]) + ry = SVGParseCoord(self._ry, crect[1]) + + if not rx or not ry: + # Automaic handles will work incorrect in this case + return + + # Create circle + ob = SVGCreateCurve(self._context) + cu = ob.data + + id_names_from_node(self._node, ob) + + if self._styles['useFill']: + cu.dimensions = '2D' + cu.fill_mode = 'BOTH' + cu.materials.append(self._styles['fill']) + else: + cu.dimensions = '3D' + + coords = [((cx - rx, cy), + (cx - rx, cy + ry * 0.552), + (cx - rx, cy - ry * 0.552)), + + ((cx, cy - ry), + (cx - rx * 0.552, cy - ry), + (cx + rx * 0.552, cy - ry)), + + ((cx + rx, cy), + (cx + rx, cy - ry * 0.552), + (cx + rx, cy + ry * 0.552)), + + ((cx, cy + ry), + (cx + rx * 0.552, cy + ry), + (cx - rx * 0.552, cy + ry))] + + spline = None + for coord in coords: + co = self._transformCoord(coord[0]) + handle_left = self._transformCoord(coord[1]) + handle_right = self._transformCoord(coord[2]) + + if spline is None: + cu.splines.new('BEZIER') + spline = cu.splines[-1] + spline.use_cyclic_u = True + else: + spline.bezier_points.add(1) + + bezt = spline.bezier_points[-1] + bezt.co = co + bezt.handle_left_type = 'FREE' + bezt.handle_right_type = 'FREE' + bezt.handle_left = handle_left + bezt.handle_right = handle_right + + SVGFinishCurve() + + +class SVGGeometryCIRCLE(SVGGeometryELLIPSE): + """ + SVG circle + """ + + def parse(self): + """ + Parse SVG circle node + """ + + self._styles = SVGParseStyles(self._node, self._context) + + self._cx = 
self._node.getAttribute('cx') or '0' + self._cy = self._node.getAttribute('cy') or '0' + + r = self._node.getAttribute('r') or '0' + self._rx = self._ry = r + + +class SVGGeometryLINE(SVGGeometry): + """ + SVG line + """ + + __slots__ = ('_x1', # X-coordinate of beginning + '_y1', # Y-coordinate of beginning + '_x2', # X-coordinate of ending + '_y2') # Y-coordinate of ending + + def __init__(self, node, context): + """ + Initialize new line + """ + + super().__init__(node, context) + + self._x1 = '0.0' + self._y1 = '0.0' + self._x2 = '0.0' + self._y2 = '0.0' + + def parse(self): + """ + Parse SVG line node + """ + + self._x1 = self._node.getAttribute('x1') or '0' + self._y1 = self._node.getAttribute('y1') or '0' + self._x2 = self._node.getAttribute('x2') or '0' + self._y2 = self._node.getAttribute('y2') or '0' + + def _doCreateGeom(self, instancing): + """ + Create real geometries + """ + + # Run-time parsing -- percents would be correct only if + # parsing them now + crect = self._context['rect'] + + x1 = SVGParseCoord(self._x1, crect[0]) + y1 = SVGParseCoord(self._y1, crect[1]) + x2 = SVGParseCoord(self._x2, crect[0]) + y2 = SVGParseCoord(self._y2, crect[1]) + + # Create cline + ob = SVGCreateCurve(self._context) + cu = ob.data + + id_names_from_node(self._node, ob) + + coords = [(x1, y1), (x2, y2)] + spline = None + + for coord in coords: + co = self._transformCoord(coord) + + if spline is None: + cu.splines.new('BEZIER') + spline = cu.splines[-1] + spline.use_cyclic_u = True + else: + spline.bezier_points.add(1) + + bezt = spline.bezier_points[-1] + bezt.co = co + bezt.handle_left_type = 'VECTOR' + bezt.handle_right_type = 'VECTOR' + + SVGFinishCurve() + + +class SVGGeometryPOLY(SVGGeometry): + """ + Abstract class for handling poly-geometries + (polylines and polygons) + """ + + __slots__ = ('_points', # Array of points for poly geometry + '_styles', # Styles, used for displaying + '_closed') # Should generated curve be closed? 
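+ # The `_closed` flag is what separates the two subclasses below: SVGGeometryPOLYGON
+ # sets it to True in its constructor, while SVGGeometryPOLYLINE leaves it False.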
+ + def __init__(self, node, context): + """ + Initialize new poly geometry + """ + + super().__init__(node, context) + + self._points = [] + self._styles = SVGEmptyStyles + self._closed = False + + def parse(self): + """ + Parse poly node + """ + + self._styles = SVGParseStyles(self._node, self._context) + + points = parse_array_of_floats(self._node.getAttribute('points')) + + prev = None + self._points = [] + + for p in points: + if prev is None: + prev = p + else: + self._points.append((prev, p)) + prev = None + + def _doCreateGeom(self, instancing): + """ + Create real geometries + """ + + ob = SVGCreateCurve(self._context) + cu = ob.data + + id_names_from_node(self._node, ob) + + if self._closed and self._styles['useFill']: + cu.dimensions = '2D' + cu.fill_mode = 'BOTH' + cu.materials.append(self._styles['fill']) + else: + cu.dimensions = '3D' + + spline = None + + for point in self._points: + co = self._transformCoord(point) + + if spline is None: + cu.splines.new('BEZIER') + spline = cu.splines[-1] + spline.use_cyclic_u = self._closed + else: + spline.bezier_points.add(1) + + bezt = spline.bezier_points[-1] + bezt.co = co + bezt.handle_left_type = 'VECTOR' + bezt.handle_right_type = 'VECTOR' + + SVGFinishCurve() + + +class SVGGeometryPOLYLINE(SVGGeometryPOLY): + """ + SVG polyline geometry + """ + + pass + + +class SVGGeometryPOLYGON(SVGGeometryPOLY): + """ + SVG polygon geometry + """ + + def __init__(self, node, context): + """ + Initialize new polygon geometry + """ + + super().__init__(node, context) + + self._closed = True + + +class SVGGeometrySVG(SVGGeometryContainer): + """ + Main geometry holder + """ + + def _doCreateGeom(self, instancing): + """ + Create real geometries + """ + + rect = SVGRectFromNode(self._node, self._context) + + matrix = self.getNodeMatrix() + + # Better SVG compatibility: match svg-document units + # with blender units + + viewbox = [] + unit = '' + + if self._node.getAttribute('height'): + raw_height = self._node.getAttribute('height') + token, last_char = read_float(raw_height) + document_height = float(token) + unit = raw_height[last_char:].strip() + + if self._node.getAttribute('viewBox'): + viewbox = parse_array_of_floats(self._node.getAttribute('viewBox')) + + if len(viewbox) == 4 and unit in ('cm', 'mm', 'in', 'pt', 'pc'): + + # convert units to BU: + unitscale = units[unit] / 90 * 1000 / 39.3701 + + # apply blender unit scale: + unitscale = unitscale / bpy.context.scene.unit_settings.scale_length + + matrix = matrix @ Matrix.Scale(unitscale, 4, Vector((1.0, 0.0, 0.0))) + matrix = matrix @ Matrix.Scale(unitscale, 4, Vector((0.0, 1.0, 0.0))) + + # match document origin with 3D space origin. 
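+ # (The import matrix mirrors the Y axis, so this shift by the viewBox height keeps
+ # the document content above the origin rather than below it.)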
+ if self._node.getAttribute('viewBox'): + viewbox = parse_array_of_floats(self._node.getAttribute('viewBox')) + matrix = matrix @ matrix.Translation([0.0, - viewbox[1] - viewbox[3], 0.0]) + + self._pushMatrix(matrix) + self._pushRect(rect) + + super()._doCreateGeom(False) + + self._popRect() + self._popMatrix() + + +class SVGLoader(SVGGeometryContainer): + """ + SVG file loader + """ + + def getTransformMatrix(self): + """ + Get matrix created from "transform" attribute + """ + + # SVG document doesn't support transform specification + # it can't even hold attributes + + return None + + def __init__(self, context, filepath, do_colormanage): + """ + Initialize SVG loader + """ + import os + + svg_name = os.path.basename(filepath) + scene = context.scene + collection = bpy.data.collections.new(name=svg_name) + scene.collection.children.link(collection) + + node = xml.dom.minidom.parse(filepath) + + m = Matrix() + m = m @ Matrix.Scale(1.0 / 90.0 * 0.3048 / 12.0, 4, Vector((1.0, 0.0, 0.0))) + m = m @ Matrix.Scale(-1.0 / 90.0 * 0.3048 / 12.0, 4, Vector((0.0, 1.0, 0.0))) + + rect = (0, 0) + + self._context = {'defines': {}, + 'rects': [rect], + 'rect': rect, + 'matrix_stack': [], + 'matrix': m, + 'materials': {}, + 'styles': [None], + 'style': None, + 'do_colormanage': do_colormanage, + 'collection': collection} + + super().__init__(node, self._context) + + +svgGeometryClasses = { + 'svg': SVGGeometrySVG, + 'path': SVGGeometryPATH, + 'defs': SVGGeometryDEFS, + 'symbol': SVGGeometrySYMBOL, + 'use': SVGGeometryUSE, + 'rect': SVGGeometryRECT, + 'ellipse': SVGGeometryELLIPSE, + 'circle': SVGGeometryCIRCLE, + 'line': SVGGeometryLINE, + 'polyline': SVGGeometryPOLYLINE, + 'polygon': SVGGeometryPOLYGON, + 'g': SVGGeometryG} + + +def parseAbstractNode(node, context): + name = node.tagName.lower() + + if name.startswith('svg:'): + name = name[4:] + + geomClass = svgGeometryClasses.get(name) + + if geomClass is not None: + ob = geomClass(node, context) + ob.parse() + + return ob + + return None + + +def load_svg(context, filepath, do_colormanage): + """ + Load specified SVG file + """ + + if bpy.ops.object.mode_set.poll(): + bpy.ops.object.mode_set(mode='OBJECT') + + loader = SVGLoader(context, filepath, do_colormanage) + loader.parse() + loader.createGeom(False) + + +def load(operator, context, filepath=""): + + # error in code should raise exceptions but loading + # non SVG files can give useful messages. + do_colormanage = context.scene.display_settings.display_device != 'NONE' + try: + load_svg(context, filepath, do_colormanage) + except (xml.parsers.expat.ExpatError, UnicodeEncodeError) as e: + import traceback + traceback.print_exc() + + operator.report({'WARNING'}, tip_("Unable to parse XML, %s:%s for file %r") % (type(e).__name__, e, filepath)) + return {'CANCELLED'} + + return {'FINISHED'} diff --git a/scripts/addons_core/io_curve_svg/svg_colors.py b/scripts/addons_core/io_curve_svg/svg_colors.py new file mode 100644 index 00000000000..a752ff21da2 --- /dev/null +++ b/scripts/addons_core/io_curve_svg/svg_colors.py @@ -0,0 +1,153 @@ +# SPDX-FileCopyrightText: 2004-2009 JM Soler +# +# SPDX-License-Identifier: GPL-2.0-or-later + +# Copied and adopted from paths_svg2obj.py script for Blender 2.49. 
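+# Named SVG 1.1 colors mapped to 8-bit (R, G, B) tuples, used by the importer to
+# resolve color keywords in styles.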
+ +SVGColors = {'aliceblue': (240, 248, 255), + 'antiquewhite': (250, 235, 215), + 'aqua': (0, 255, 255), + 'aquamarine': (127, 255, 212), + 'azure': (240, 255, 255), + 'beige': (245, 245, 220), + 'bisque': (255, 228, 196), + 'black': (0, 0, 0), + 'blanchedalmond': (255, 235, 205), + 'blue': (0, 0, 255), + 'blueviolet': (138, 43, 226), + 'brown': (165, 42, 42), + 'burlywood': (222, 184, 135), + 'cadetblue': (95, 158, 160), + 'chartreuse': (127, 255, 0), + 'chocolate': (210, 105, 30), + 'coral': (255, 127, 80), + 'cornflowerblue': (100, 149, 237), + 'cornsilk': (255, 248, 220), + 'crimson': (220, 20, 60), + 'cyan': (0, 255, 255), + 'darkblue': (0, 0, 139), + 'darkcyan': (0, 139, 139), + 'darkgoldenrod': (184, 134, 11), + 'darkgray': (169, 169, 169), + 'darkgreen': (0, 100, 0), + 'darkgrey': (169, 169, 169), + 'darkkhaki': (189, 183, 107), + 'darkmagenta': (139, 0, 139), + 'darkolivegreen': (85, 107, 47), + 'darkorange': (255, 140, 0), + 'darkorchid': (153, 50, 204), + 'darkred': (139, 0, 0), + 'darksalmon': (233, 150, 122), + 'darkseagreen': (143, 188, 143), + 'darkslateblue': (72, 61, 139), + 'darkslategray': (47, 79, 79), + 'darkslategrey': (47, 79, 79), + 'darkturquoise': (0, 206, 209), + 'darkviolet': (148, 0, 211), + 'deeppink': (255, 20, 147), + 'deepskyblue': (0, 191, 255), + 'dimgray': (105, 105, 105), + 'dimgrey': (105, 105, 105), + 'dodgerblue': (30, 144, 255), + 'firebrick': (178, 34, 34), + 'floralwhite': (255, 250, 240), + 'forestgreen': (34, 139, 34), + 'fuchsia': (255, 0, 255), + 'gainsboro': (220, 220, 220), + 'ghostwhite': (248, 248, 255), + 'gold': (255, 215, 0), + 'goldenrod': (218, 165, 32), + 'gray': (128, 128, 128), + 'grey': (128, 128, 128), + 'green': (0, 128, 0), + 'greenyellow': (173, 255, 47), + 'honeydew': (240, 255, 240), + 'hotpink': (255, 105, 180), + 'indianred': (205, 92, 92), + 'indigo': (75, 0, 130), + 'ivory': (255, 255, 240), + 'khaki': (240, 230, 140), + 'lavender': (230, 230, 250), + 'lavenderblush': (255, 240, 245), + 'lawngreen': (124, 252, 0), + 'lemonchiffon': (255, 250, 205), + 'lightblue': (173, 216, 230), + 'lightcoral': (240, 128, 128), + 'lightcyan': (224, 255, 255), + 'lightgoldenrodyellow': (250, 250, 210), + 'lightgray': (211, 211, 211), + 'lightgreen': (144, 238, 144), + 'lightgrey': (211, 211, 211), + 'lightpink': (255, 182, 193), + 'lightsalmon': (255, 160, 122), + 'lightseagreen': (32, 178, 170), + 'lightskyblue': (135, 206, 250), + 'lightslategray': (119, 136, 153), + 'lightslategrey': (119, 136, 153), + 'lightsteelblue': (176, 196, 222), + 'lightyellow': (255, 255, 224), + 'lime': (0, 255, 0), + 'limegreen': (50, 205, 50), + 'linen': (250, 240, 230), + 'magenta': (255, 0, 255), + 'maroon': (128, 0, 0), + 'mediumaquamarine': (102, 205, 170), + 'mediumblue': (0, 0, 205), + 'mediumorchid': (186, 85, 211), + 'mediumpurple': (147, 112, 219), + 'mediumseagreen': (60, 179, 113), + 'mediumslateblue': (123, 104, 238), + 'mediumspringgreen': (0, 250, 154), + 'mediumturquoise': (72, 209, 204), + 'mediumvioletred': (199, 21, 133), + 'midnightblue': (25, 25, 112), + 'mintcream': (245, 255, 250), + 'mistyrose': (255, 228, 225), + 'moccasin': (255, 228, 181), + 'navajowhite': (255, 222, 173), + 'navy': (0, 0, 128), + 'oldlace': (253, 245, 230), + 'olive': (128, 128, 0), + 'olivedrab': (107, 142, 35), + 'orange': (255, 165, 0), + 'orangered': (255, 69, 0), + 'orchid': (218, 112, 214), + 'palegoldenrod': (238, 232, 170), + 'palegreen': (152, 251, 152), + 'paleturquoise': (175, 238, 238), + 'palevioletred': (219, 112, 147), + 'papayawhip': (255, 239, 
213), + 'peachpuff': (255, 218, 185), + 'peru': (205, 133, 63), + 'pink': (255, 192, 203), + 'plum': (221, 160, 221), + 'powderblue': (176, 224, 230), + 'purple': (128, 0, 128), + 'red': (255, 0, 0), + 'rosybrown': (188, 143, 143), + 'royalblue': (65, 105, 225), + 'saddlebrown': (139, 69, 19), + 'salmon': (250, 128, 114), + 'sandybrown': (244, 164, 96), + 'seagreen': (46, 139, 87), + 'seashell': (255, 245, 238), + 'sienna': (160, 82, 45), + 'silver': (192, 192, 192), + 'skyblue': (135, 206, 235), + 'slateblue': (106, 90, 205), + 'slategray': (112, 128, 144), + 'slategrey': (112, 128, 144), + 'snow': (255, 250, 250), + 'springgreen': (0, 255, 127), + 'steelblue': (70, 130, 180), + 'tan': (210, 180, 140), + 'teal': (0, 128, 128), + 'thistle': (216, 191, 216), + 'tomato': (255, 99, 71), + 'turquoise': (64, 224, 208), + 'violet': (238, 130, 238), + 'wheat': (245, 222, 179), + 'white': (255, 255, 255), + 'whitesmoke': (245, 245, 245), + 'yellow': (255, 255, 0), + 'yellowgreen': (154, 205, 50)} diff --git a/scripts/addons_core/io_curve_svg/svg_util.py b/scripts/addons_core/io_curve_svg/svg_util.py new file mode 100644 index 00000000000..4e9e0bf0edf --- /dev/null +++ b/scripts/addons_core/io_curve_svg/svg_util.py @@ -0,0 +1,107 @@ +# SPDX-FileCopyrightText: 2010-2022 Blender Foundation +# +# SPDX-License-Identifier: GPL-2.0-or-later + +import re + + +units = {"": 1.0, + "px": 1.0, + "in": 90.0, + "mm": 90.0 / 25.4, + "cm": 90.0 / 2.54, + "pt": 1.25, + "pc": 15.0, + "em": 1.0, + "ex": 1.0, + "INVALID": 1.0, # some DocBook files contain this + } + + +def srgb_to_linearrgb(c): + if c < 0.04045: + return 0.0 if c < 0.0 else c * (1.0 / 12.92) + else: + return pow((c + 0.055) * (1.0 / 1.055), 2.4) + + +def check_points_equal(point_a, point_b): + return (abs(point_a[0] - point_b[0]) < 1e-6 and + abs(point_a[1] - point_b[1]) < 1e-6) + + +match_number = r"-?\d+(\.\d+)?([eE][-+]?\d+)?" +match_first_comma = r"^\s*(?=,)" +match_comma_pair = r",\s*(?=,)" +match_last_comma = r",\s*$" + +match_number_optional_parts = r"(-?\d+(\.\d*)?([eE][-+]?\d+)?)|(-?\.\d+([eE][-+]?\d+)?)" +re_match_number_optional_parts = re.compile(match_number_optional_parts) + +array_of_floats_pattern = f"({match_number_optional_parts})|{match_first_comma}|{match_comma_pair}|{match_last_comma}" +re_array_of_floats_pattern = re.compile(array_of_floats_pattern) + + +def parse_array_of_floats(text): + """ + Accepts comma or space separated list of floats (without units) and returns an array + of floating point values. + """ + elements = re_array_of_floats_pattern.findall(text) + return [value_to_float(v[0]) for v in elements] + + +def read_float(text: str, start_index: int = 0): + """ + Reads floating point value from a string. Parsing starts at the given index. + + Returns the value itself (as a string) and index of first character after the value. + """ + + n = len(text) + + # Skip leading whitespace characters and characters which we consider ignorable for float + # (like values separator). 
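+ # If only separators remain, fall through and return the default value "0" below.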
+ while start_index < n and (text[start_index].isspace() or text[start_index] == ','): + start_index += 1 + if start_index == n: + return "0", start_index + + text_part = text[start_index:] + match = re_match_number_optional_parts.match(text_part) + + if match is None: + raise Exception('Invalid float value near ' + text[start_index:start_index + 10]) + + token = match.group(0) + endptr = start_index + match.end(0) + + return token, endptr + + +def parse_coord(coord, size): + """ + Parse coordinate component to common basis + + Needed to handle coordinates set in cm, mm, inches. + """ + + token, last_char = read_float(coord) + val = float(token) + unit = coord[last_char:].strip() # strip() in case there is a space + + if unit == '%': + return float(size) / 100.0 * val + else: + return val * units[unit] + + return val + + +def value_to_float(value_encoded: str): + """ + A simple wrapper around float() which supports empty strings (which are converted to 0). + """ + if len(value_encoded) == 0: + return 0 + return float(value_encoded) diff --git a/scripts/addons_core/io_curve_svg/svg_util_test.py b/scripts/addons_core/io_curve_svg/svg_util_test.py new file mode 100755 index 00000000000..755ebb6f54e --- /dev/null +++ b/scripts/addons_core/io_curve_svg/svg_util_test.py @@ -0,0 +1,166 @@ +#!/usr/bin/env python3 +# SPDX-FileCopyrightText: 2019-2022 Blender Foundation +# +# SPDX-License-Identifier: GPL-2.0-or-later + +# XXX Not really nice, but that hack is needed to allow execution of that test +# from both automated CTest and by directly running the file manually. +if __name__ == '__main__': + from svg_util import (parse_array_of_floats, read_float, parse_coord,) +else: + from .svg_util import (parse_array_of_floats, read_float, parse_coord,) +import unittest + + +class ParseArrayOfFloatsTest(unittest.TestCase): + def test_empty(self): + self.assertEqual(parse_array_of_floats(""), []) + self.assertEqual(parse_array_of_floats(" "), []) + + def test_single_value(self): + self.assertEqual(parse_array_of_floats("123"), [123]) + self.assertEqual(parse_array_of_floats(" \t 123 \t"), [123]) + + def test_single_value_exponent(self): + self.assertEqual(parse_array_of_floats("12e+3"), [12000]) + self.assertEqual(parse_array_of_floats("12e-3"), [0.012]) + + def test_space_separated_values(self): + self.assertEqual(parse_array_of_floats("123 45 6 89"), + [123, 45, 6, 89]) + self.assertEqual(parse_array_of_floats(" 123 45 6 89 "), + [123, 45, 6, 89]) + + def test_comma_separated_values(self): + self.assertEqual(parse_array_of_floats("123,45,6,89"), + [123, 45, 6, 89]) + self.assertEqual(parse_array_of_floats(" 123,45,6,89 "), + [123, 45, 6, 89]) + + def test_mixed_separated_values(self): + self.assertEqual(parse_array_of_floats("123,45 6,89"), + [123, 45, 6, 89]) + self.assertEqual(parse_array_of_floats(" 123 45,6,89 "), + [123, 45, 6, 89]) + + def test_omitted_value_with_comma(self): + self.assertEqual(parse_array_of_floats("1,,3"), [1, 0, 3]) + self.assertEqual(parse_array_of_floats(",,3"), [0, 0, 3]) + + def test_sign_as_separator(self): + self.assertEqual(parse_array_of_floats("1-3"), [1, -3]) + self.assertEqual(parse_array_of_floats("1+3"), [1, 3]) + + def test_all_commas(self): + self.assertEqual(parse_array_of_floats(",,,"), [0, 0, 0, 0]) + + def test_value_with_decimal_separator(self): + self.assertEqual(parse_array_of_floats("3.5"), [3.5]) + + def test_comma_separated_values_with_decimal_separator(self): + self.assertEqual(parse_array_of_floats("2.75,8.5"), [2.75, 8.5]) + + def 
test_missing_decimal(self): + self.assertEqual(parse_array_of_floats(".92"), [0.92]) + self.assertEqual(parse_array_of_floats(".92e+1"), [9.2]) + + self.assertEqual(parse_array_of_floats("-.92"), [-0.92]) + self.assertEqual(parse_array_of_floats("-.92e+1"), [-9.2]) + + +class ReadFloatTest(unittest.TestCase): + def test_empty(self): + value, endptr = read_float("", 0) + self.assertEqual(value, "0") + self.assertEqual(endptr, 0) + + def test_empty_spaces(self): + value, endptr = read_float(" ", 0) + self.assertEqual(value, "0") + self.assertEqual(endptr, 4) + + def test_single_value(self): + value, endptr = read_float("1.2", 0) + self.assertEqual(value, "1.2") + self.assertEqual(endptr, 3) + + def test_scientific_value(self): + value, endptr = read_float("1.2e+3", 0) + self.assertEqual(value, "1.2e+3") + self.assertEqual(endptr, 6) + + def test_scientific_value_no_sign(self): + value, endptr = read_float("1.2e3", 0) + self.assertEqual(value, "1.2e3") + self.assertEqual(endptr, 5) + + def test_middle(self): + value, endptr = read_float("1.2 3.4 5.6", 3) + self.assertEqual(value, "3.4") + self.assertEqual(endptr, 8) + + def test_comma(self): + value, endptr = read_float("1.2 ,,3.4 5.6", 3) + self.assertEqual(value, "3.4") + self.assertEqual(endptr, 10) + + def test_not_a_number(self): + # TODO(sergey): Make this catch more concrete. + with self.assertRaises(Exception): + read_float("1.2eV", 3) + + def test_missing_fractional(self): + value, endptr = read_float("1.", 0) + self.assertEqual(value, "1.") + self.assertEqual(endptr, 2) + + value, endptr = read_float("2. 3", 0) + self.assertEqual(value, "2.") + self.assertEqual(endptr, 2) + + def test_missing_decimal(self): + value, endptr = read_float(".92", 0) + self.assertEqual(value, ".92") + self.assertEqual(endptr, 3) + + value, endptr = read_float("-.92", 0) + self.assertEqual(value, "-.92") + self.assertEqual(endptr, 4) + + value, endptr = read_float(".92e+3", 0) + self.assertEqual(value, ".92e+3") + self.assertEqual(endptr, 6) + + value, endptr = read_float("-.92e+3", 0) + self.assertEqual(value, "-.92e+3") + self.assertEqual(endptr, 7) + + # TODO(sergey): Make these catch more concrete. 
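+ # A lone "." or ".e+1" contains no digits at all, so read_float is expected to raise.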
+ with self.assertRaises(Exception): + read_float(".", 0) + with self.assertRaises(Exception): + read_float(".e+1", 0) + + +class ParseCoordTest(unittest.TestCase): + def test_empty(self): + self.assertEqual(parse_coord("", 200), 0) + + def test_empty_spaces(self): + self.assertEqual(parse_coord(" ", 200), 0) + + def test_no_units(self): + self.assertEqual(parse_coord("1.2", 200), 1.2) + + def test_unit_cm(self): + self.assertAlmostEqual(parse_coord("1.2cm", 200), 42.51968503937008) + + def test_unit_ex(self): + self.assertAlmostEqual(parse_coord("1.2ex", 200), 1.2) + + def test_unit_percentage(self): + self.assertEqual(parse_coord("1.2%", 200), 2.4) + + +if __name__ == '__main__': + unittest.main(verbosity=2) diff --git a/scripts/addons_core/io_mesh_uv_layout/__init__.py b/scripts/addons_core/io_mesh_uv_layout/__init__.py new file mode 100644 index 00000000000..f2b59ace3d1 --- /dev/null +++ b/scripts/addons_core/io_mesh_uv_layout/__init__.py @@ -0,0 +1,300 @@ +# SPDX-FileCopyrightText: 2011-2023 Blender Foundation +# +# SPDX-License-Identifier: GPL-2.0-or-later + +bl_info = { + "name": "UV Layout", + "author": "Campbell Barton, Matt Ebb", + "version": (1, 2, 0), + "blender": (3, 0, 0), + "location": "UV Editor > UV > Export UV Layout", + "description": "Export the UV layout as a 2D graphic", + "warning": "", + "doc_url": "{BLENDER_MANUAL_URL}/addons/import_export/mesh_uv_layout.html", + "support": 'OFFICIAL', + "category": "Import-Export", +} + + +# @todo write the wiki page + +if "bpy" in locals(): + import importlib + if "export_uv_eps" in locals(): + importlib.reload(export_uv_eps) + if "export_uv_png" in locals(): + importlib.reload(export_uv_png) + if "export_uv_svg" in locals(): + importlib.reload(export_uv_svg) + +import os +import bpy + +from bpy.app.translations import contexts as i18n_contexts + +from bpy.props import ( + StringProperty, + BoolProperty, + EnumProperty, + IntVectorProperty, + FloatProperty, +) + + +class ExportUVLayout(bpy.types.Operator): + """Export UV layout to file""" + + bl_idname = "uv.export_layout" + bl_label = "Export UV Layout" + bl_options = {'REGISTER', 'UNDO'} + + filepath: StringProperty( + subtype='FILE_PATH', + ) + export_all: BoolProperty( + name="All UVs", + description="Export all UVs in this mesh (not just visible ones)", + default=False, + ) + export_tiles: EnumProperty( + name="Export Tiles", + items=( + ('NONE', "None", + "Export only UVs in the [0, 1] range"), + ('UDIM', "UDIM", + "Export tiles in the UDIM numbering scheme: 1001 + u_tile + 10*v_tile"), + ('UV', "UVTILE", + "Export tiles in the UVTILE numbering scheme: u(u_tile + 1)_v(v_tile + 1)"), + ), + description="Choose whether to export only the [0, 1] range, or all UV tiles", + default='NONE', + ) + modified: BoolProperty( + name="Modified", + description="Exports UVs from the modified mesh", + default=False, + translation_context=i18n_contexts.id_mesh, + ) + mode: EnumProperty( + items=( + ('SVG', "Scalable Vector Graphic (.svg)", + "Export the UV layout to a vector SVG file"), + ('EPS', "Encapsulated PostScript (.eps)", + "Export the UV layout to a vector EPS file"), + ('PNG', "PNG Image (.png)", + "Export the UV layout to a bitmap image"), + ), + name="Format", + description="File format to export the UV layout to", + default='PNG', + ) + size: IntVectorProperty( + name="Size", + size=2, + default=(1024, 1024), + min=8, max=32768, + description="Dimensions of the exported file", + ) + opacity: FloatProperty( + name="Fill Opacity", + min=0.0, max=1.0, + default=0.25, + 
description="Set amount of opacity for exported UV layout", + ) + # For the file-selector. + check_existing: BoolProperty( + default=True, + options={'HIDDEN'}, + ) + + @classmethod + def poll(cls, context): + obj = context.active_object + return obj is not None and obj.type == 'MESH' and obj.data.uv_layers + + def invoke(self, context, event): + self.size = self.get_image_size(context) + self.filepath = self.get_default_file_name(context) + "." + self.mode.lower() + context.window_manager.fileselect_add(self) + return {'RUNNING_MODAL'} + + def get_default_file_name(self, context): + AMOUNT = 3 + objects = list(self.iter_objects_to_export(context)) + name = " ".join(sorted([obj.name for obj in objects[:AMOUNT]])) + if len(objects) > AMOUNT: + name += " and more" + return name + + def check(self, context): + if any(self.filepath.endswith(ext) for ext in (".png", ".eps", ".svg")): + self.filepath = self.filepath[:-4] + + ext = "." + self.mode.lower() + self.filepath = bpy.path.ensure_ext(self.filepath, ext) + return True + + def execute(self, context): + obj = context.active_object + is_editmode = (obj.mode == 'EDIT') + if is_editmode: + bpy.ops.object.mode_set(mode='OBJECT', toggle=False) + + meshes = list(self.iter_meshes_to_export(context)) + polygon_data = list(self.iter_polygon_data_to_draw(context, meshes)) + different_colors = set(color for _, color in polygon_data) + if self.modified: + depsgraph = context.evaluated_depsgraph_get() + for obj in self.iter_objects_to_export(context): + obj_eval = obj.evaluated_get(depsgraph) + obj_eval.to_mesh_clear() + + tiles = self.tiles_to_export(polygon_data) + export = self.get_exporter() + dirname, filename = os.path.split(self.filepath) + + # Strip UDIM or UV numbering, and extension + import re + name_regex = r"^(.*?)" + udim_regex = r"(?:\.[0-9]{4})?" + uv_regex = r"(?:\.u[0-9]+_v[0-9]+)?" + ext_regex = r"(?:\.png|\.eps|\.svg)?$" + if self.export_tiles == 'NONE': + match = re.match(name_regex + ext_regex, filename) + elif self.export_tiles == 'UDIM': + match = re.match(name_regex + udim_regex + ext_regex, filename) + elif self.export_tiles == 'UV': + match = re.match(name_regex + uv_regex + ext_regex, filename) + if match: + filename = match.groups()[0] + + for tile in sorted(tiles): + filepath = os.path.join(dirname, filename) + if self.export_tiles == 'UDIM': + filepath += f".{1001 + tile[0] + tile[1] * 10:04}" + elif self.export_tiles == 'UV': + filepath += f".u{tile[0] + 1}_v{tile[1] + 1}" + filepath = bpy.path.ensure_ext(filepath, "." + self.mode.lower()) + + export(filepath, tile, polygon_data, different_colors, + self.size[0], self.size[1], self.opacity) + + if is_editmode: + bpy.ops.object.mode_set(mode='EDIT', toggle=False) + + return {'FINISHED'} + + def iter_meshes_to_export(self, context): + depsgraph = context.evaluated_depsgraph_get() + for obj in self.iter_objects_to_export(context): + if self.modified: + yield obj.evaluated_get(depsgraph).to_mesh() + else: + yield obj.data + + @staticmethod + def iter_objects_to_export(context): + for obj in {*context.selected_objects, context.active_object}: + if obj.type != 'MESH': + continue + mesh = obj.data + if mesh.uv_layers.active is None: + continue + yield obj + + def tiles_to_export(self, polygon_data): + """Get a set of tiles containing UVs. + This assumes there is no UV edge crossing an otherwise empty tile. 
+ """ + if self.export_tiles == 'NONE': + return {(0, 0)} + + from math import floor + tiles = set() + for poly in polygon_data: + for uv in poly[0]: + # Ignore UVs at corners - precisely touching the right or upper edge + # of a tile should not load its right/upper neighbor as well. + # From intern/cycles/scene/attribute.cpp + u, v = uv[0], uv[1] + x, y = floor(u), floor(v) + if x > 0 and u < x + 1e-6: + x -= 1 + if y > 0 and v < y + 1e-6: + y -= 1 + if x >= 0 and y >= 0: + tiles.add((x, y)) + return tiles + + @staticmethod + def currently_image_image_editor(context): + return isinstance(context.space_data, bpy.types.SpaceImageEditor) + + def get_currently_opened_image(self, context): + if not self.currently_image_image_editor(context): + return None + return context.space_data.image + + def get_image_size(self, context): + # fallback if not in image context + image_width = self.size[0] + image_height = self.size[1] + + # get size of "active" image if some exist + image = self.get_currently_opened_image(context) + if image is not None: + width, height = image.size + if width and height: + image_width = width + image_height = height + + return image_width, image_height + + def iter_polygon_data_to_draw(self, context, meshes): + for mesh in meshes: + uv_layer = mesh.uv_layers.active.data + for polygon in mesh.polygons: + if self.export_all or polygon.select: + start = polygon.loop_start + end = start + polygon.loop_total + uvs = tuple(tuple(uv.uv) for uv in uv_layer[start:end]) + yield (uvs, self.get_polygon_color(mesh, polygon)) + + @staticmethod + def get_polygon_color(mesh, polygon, default=(0.8, 0.8, 0.8)): + if polygon.material_index < len(mesh.materials): + material = mesh.materials[polygon.material_index] + if material is not None: + return tuple(material.diffuse_color)[:3] + return default + + def get_exporter(self): + if self.mode == 'PNG': + from . import export_uv_png + return export_uv_png.export + elif self.mode == 'EPS': + from . import export_uv_eps + return export_uv_eps.export + elif self.mode == 'SVG': + from . 
import export_uv_svg + return export_uv_svg.export + else: + assert False + + +def menu_func(self, context): + self.layout.operator(ExportUVLayout.bl_idname) + + +def register(): + bpy.utils.register_class(ExportUVLayout) + bpy.types.IMAGE_MT_uvs.append(menu_func) + + +def unregister(): + bpy.utils.unregister_class(ExportUVLayout) + bpy.types.IMAGE_MT_uvs.remove(menu_func) + + +if __name__ == "__main__": + register() diff --git a/scripts/addons_core/io_mesh_uv_layout/export_uv_eps.py b/scripts/addons_core/io_mesh_uv_layout/export_uv_eps.py new file mode 100644 index 00000000000..dffb30747bc --- /dev/null +++ b/scripts/addons_core/io_mesh_uv_layout/export_uv_eps.py @@ -0,0 +1,82 @@ +# SPDX-FileCopyrightText: 2011-2022 Blender Foundation +# +# SPDX-License-Identifier: GPL-2.0-or-later + +import bpy + + +def export(filepath, tile, face_data, colors, width, height, opacity): + with open(filepath, 'w', encoding='utf-8') as file: + for text in get_file_parts(tile, face_data, colors, width, height, opacity): + file.write(text) + + +def get_file_parts(tile, face_data, colors, width, height, opacity): + yield from header(width, height) + if opacity > 0.0: + name_by_color = {} + yield from prepare_colors(colors, name_by_color) + yield from draw_colored_polygons(tile, face_data, name_by_color, width, height) + yield from draw_lines(tile, face_data, width, height) + yield from footer() + + +def header(width, height): + yield "%!PS-Adobe-3.0 EPSF-3.0\n" + yield f"%%Creator: Blender {bpy.app.version_string}\n" + yield "%%Pages: 1\n" + yield "%%Orientation: Portrait\n" + yield f"%%BoundingBox: 0 0 {width} {height}\n" + yield f"%%HiResBoundingBox: 0.0 0.0 {width:.4f} {height:.4f}\n" + yield "%%EndComments\n" + yield "%%Page: 1 1\n" + yield "0 0 translate\n" + yield "1.0 1.0 scale\n" + yield "0 0 0 setrgbcolor\n" + yield "[] 0 setdash\n" + yield "1 setlinewidth\n" + yield "1 setlinejoin\n" + yield "1 setlinecap\n" + + +def prepare_colors(colors, out_name_by_color): + for i, color in enumerate(colors): + name = f"COLOR_{i}" + yield "/%s {" % name + out_name_by_color[color] = name + + yield "gsave\n" + yield "%.3g %.3g %.3g setrgbcolor\n" % color + yield "fill\n" + yield "grestore\n" + yield "0 setgray\n" + yield "} def\n" + + +def draw_colored_polygons(tile, face_data, name_by_color, width, height): + for uvs, color in face_data: + yield from draw_polygon_path(tile, uvs, width, height) + yield "closepath\n" + yield "%s\n" % name_by_color[color] + + +def draw_lines(tile, face_data, width, height): + for uvs, _ in face_data: + yield from draw_polygon_path(tile, uvs, width, height) + yield "closepath\n" + yield "stroke\n" + + +def draw_polygon_path(tile, uvs, width, height): + yield "newpath\n" + for j, uv in enumerate(uvs): + uv_scale = ((uv[0] - tile[0]) * width, (uv[1] - tile[1]) * height) + if j == 0: + yield "%.5f %.5f moveto\n" % uv_scale + else: + yield "%.5f %.5f lineto\n" % uv_scale + + +def footer(): + yield "showpage\n" + yield "%%EOF\n" diff --git a/scripts/addons_core/io_mesh_uv_layout/export_uv_png.py b/scripts/addons_core/io_mesh_uv_layout/export_uv_png.py new file mode 100644 index 00000000000..c3db77aad6a --- /dev/null +++ b/scripts/addons_core/io_mesh_uv_layout/export_uv_png.py @@ -0,0 +1,119 @@ +# SPDX-FileCopyrightText: 2011-2023 Blender Foundation +# +# SPDX-License-Identifier: GPL-2.0-or-later + +import bpy +import gpu +from mathutils import Matrix +from mathutils.geometry import tessellate_polygon +from gpu_extras.batch import batch_for_shader + +# Use OIIO if available, else Blender for 
writing the image. +try: + import OpenImageIO as oiio +except ImportError: + oiio = None + + +def export(filepath, tile, face_data, colors, width, height, opacity): + offscreen = gpu.types.GPUOffScreen(width, height) + offscreen.bind() + + try: + fb = gpu.state.active_framebuffer_get() + fb.clear(color=(0.0, 0.0, 0.0, 0.0)) + draw_image(tile, face_data, opacity) + + pixel_data = fb.read_color(0, 0, width, height, 4, 0, 'UBYTE') + pixel_data.dimensions = width * height * 4 + save_pixels(filepath, pixel_data, width, height) + finally: + offscreen.unbind() + offscreen.free() + + +def draw_image(tile, face_data, opacity): + gpu.state.blend_set('ALPHA') + + with gpu.matrix.push_pop(): + gpu.matrix.load_matrix(get_normalize_uvs_matrix(tile)) + gpu.matrix.load_projection_matrix(Matrix.Identity(4)) + + draw_background_colors(face_data, opacity) + draw_lines(face_data) + + gpu.state.blend_set('NONE') + + +def get_normalize_uvs_matrix(tile): + '''matrix maps x and y coordinates from [0, 1] to [-1, 1]''' + matrix = Matrix.Identity(4) + matrix.col[3][0] = -1 - (tile[0] * 2) + matrix.col[3][1] = -1 - (tile[1] * 2) + matrix[0][0] = 2 + matrix[1][1] = 2 + + # OIIO writes arrays from the left-upper corner. + if oiio: + matrix.col[3][1] *= -1.0 + matrix[1][1] *= -1.0 + + return matrix + + +def draw_background_colors(face_data, opacity): + coords = [uv for uvs, _ in face_data for uv in uvs] + colors = [(*color, opacity) for uvs, color in face_data for _ in range(len(uvs))] + + indices = [] + offset = 0 + for uvs, _ in face_data: + triangles = tessellate_uvs(uvs) + indices.extend([index + offset for index in triangle] for triangle in triangles) + offset += len(uvs) + + shader = gpu.shader.from_builtin('FLAT_COLOR') + batch = batch_for_shader( + shader, 'TRIS', + {"pos": coords, "color": colors}, + indices=indices, + ) + batch.draw(shader) + + +def tessellate_uvs(uvs): + return tessellate_polygon([uvs]) + + +def draw_lines(face_data): + coords = [] + for uvs, _ in face_data: + for i in range(len(uvs)): + start = uvs[i] + end = uvs[(i + 1) % len(uvs)] + coords.append((start[0], start[1])) + coords.append((end[0], end[1])) + + shader = gpu.shader.from_builtin('POLYLINE_UNIFORM_COLOR') + shader.uniform_float("viewportSize", gpu.state.viewport_get()[2:]) + shader.uniform_float("lineWidth", 1.0) + shader.uniform_float("color", (0.0, 0.0, 0.0, 1.0)) + + batch = batch_for_shader(shader, 'LINES', {"pos": coords}) + batch.draw(shader) + + +def save_pixels(filepath, pixel_data, width, height): + if oiio: + spec = oiio.ImageSpec(width, height, 4, "uint8") + image = oiio.ImageOutput.create(filepath) + image.open(filepath, spec) + image.write_image(pixel_data) + image.close() + return + + image = bpy.data.images.new("temp", width, height, alpha=True) + image.filepath = filepath + image.pixels = [v / 255 for v in pixel_data] + image.save() + bpy.data.images.remove(image) diff --git a/scripts/addons_core/io_mesh_uv_layout/export_uv_svg.py b/scripts/addons_core/io_mesh_uv_layout/export_uv_svg.py new file mode 100644 index 00000000000..a4811ed11ff --- /dev/null +++ b/scripts/addons_core/io_mesh_uv_layout/export_uv_svg.py @@ -0,0 +1,54 @@ +# SPDX-FileCopyrightText: 2011-2022 Blender Foundation +# +# SPDX-License-Identifier: GPL-2.0-or-later + +import bpy +from os.path import basename +from xml.sax.saxutils import escape + + +def export(filepath, tile, face_data, colors, width, height, opacity): + with open(filepath, 'w', encoding='utf-8') as file: + for text in get_file_parts(tile, face_data, colors, width, height, opacity): + 
file.write(text) + + +def get_file_parts(tile, face_data, colors, width, height, opacity): + yield from header(width, height) + yield from draw_polygons(tile, face_data, width, height, opacity) + yield from footer() + + +def header(width, height): + yield '\n' + yield '\n' + yield f'\n' + desc = f"{basename(bpy.data.filepath)}, (Blender {bpy.app.version_string})" + yield f'{escape(desc)}\n' + + +def draw_polygons(tile, face_data, width, height, opacity): + for uvs, color in face_data: + fill = f'fill="{get_color_string(color)}"' + + yield '\n' + + +def get_color_string(color): + r, g, b = color + return f"rgb({round(r*255)}, {round(g*255)}, {round(b*255)})" + + +def footer(): + yield '\n' + yield '\n' diff --git a/scripts/addons_core/io_scene_fbx/__init__.py b/scripts/addons_core/io_scene_fbx/__init__.py new file mode 100644 index 00000000000..7e52a11d4ec --- /dev/null +++ b/scripts/addons_core/io_scene_fbx/__init__.py @@ -0,0 +1,733 @@ +# SPDX-FileCopyrightText: 2011-2023 Blender Foundation +# +# SPDX-License-Identifier: GPL-2.0-or-later + +bl_info = { + "name": "FBX format", + "author": "Campbell Barton, Bastien Montagne, Jens Restemeier, @Mysteryem", + "version": (5, 12, 3), + "blender": (4, 2, 0), + "location": "File > Import-Export", + "description": "FBX IO meshes, UVs, vertex colors, materials, textures, cameras, lamps and actions", + "warning": "", + "doc_url": "{BLENDER_MANUAL_URL}/addons/import_export/scene_fbx.html", + "support": 'OFFICIAL', + "category": "Import-Export", +} + + +if "bpy" in locals(): + import importlib + if "import_fbx" in locals(): + importlib.reload(import_fbx) + if "export_fbx_bin" in locals(): + importlib.reload(export_fbx_bin) + if "export_fbx" in locals(): + importlib.reload(export_fbx) + + +import bpy +from bpy.props import ( + StringProperty, + BoolProperty, + FloatProperty, + EnumProperty, + CollectionProperty, +) +from bpy_extras.io_utils import ( + ImportHelper, + ExportHelper, + orientation_helper, + path_reference_mode, + axis_conversion, + poll_file_object_drop, +) + + +@orientation_helper(axis_forward='-Z', axis_up='Y') +class ImportFBX(bpy.types.Operator, ImportHelper): + """Load a FBX file""" + bl_idname = "import_scene.fbx" + bl_label = "Import FBX" + bl_options = {'UNDO', 'PRESET'} + + directory: StringProperty() + + filename_ext = ".fbx" + filter_glob: StringProperty(default="*.fbx", options={'HIDDEN'}) + + files: CollectionProperty( + name="File Path", + type=bpy.types.OperatorFileListElement, + ) + + ui_tab: EnumProperty( + items=(('MAIN', "Main", "Main basic settings"), + ('ARMATURE', "Armatures", "Armature-related settings"), + ), + name="ui_tab", + description="Import options categories", + ) + + use_manual_orientation: BoolProperty( + name="Manual Orientation", + description="Specify orientation and scale, instead of using embedded data in FBX file", + default=False, + ) + global_scale: FloatProperty( + name="Scale", + min=0.001, max=1000.0, + default=1.0, + ) + bake_space_transform: BoolProperty( + name="Apply Transform", + description="Bake space transform into object data, avoids getting unwanted rotations to objects when " + "target space is not aligned with Blender's space " + "(WARNING! 
experimental option, use at own risk, known to be broken with armatures/animations)", + default=False, + ) + + use_custom_normals: BoolProperty( + name="Custom Normals", + description="Import custom normals, if available (otherwise Blender will recompute them)", + default=True, + ) + colors_type: EnumProperty( + name="Vertex Colors", + items=(('NONE', "None", "Do not import color attributes"), + ('SRGB', "sRGB", "Expect file colors in sRGB color space"), + ('LINEAR', "Linear", "Expect file colors in linear color space"), + ), + description="Import vertex color attributes", + default='SRGB', + ) + + use_image_search: BoolProperty( + name="Image Search", + description="Search subdirs for any associated images (WARNING: may be slow)", + default=True, + ) + + use_alpha_decals: BoolProperty( + name="Alpha Decals", + description="Treat materials with alpha as decals (no shadow casting)", + default=False, + ) + decal_offset: FloatProperty( + name="Decal Offset", + description="Displace geometry of alpha meshes", + min=0.0, max=1.0, + default=0.0, + ) + + use_anim: BoolProperty( + name="Import Animation", + description="Import FBX animation", + default=True, + ) + anim_offset: FloatProperty( + name="Animation Offset", + description="Offset to apply to animation during import, in frames", + default=1.0, + ) + + use_subsurf: BoolProperty( + name="Subdivision Data", + description="Import FBX subdivision information as subdivision surface modifiers", + default=False, + ) + + use_custom_props: BoolProperty( + name="Custom Properties", + description="Import user properties as custom properties", + default=True, + ) + use_custom_props_enum_as_string: BoolProperty( + name="Import Enums As Strings", + description="Store enumeration values as strings", + default=True, + ) + + ignore_leaf_bones: BoolProperty( + name="Ignore Leaf Bones", + description="Ignore the last bone at the end of each chain (used to mark the length of the previous bone)", + default=False, + ) + force_connect_children: BoolProperty( + name="Force Connect Children", + description="Force connection of children bones to their parent, even if their computed head/tail " + "positions do not match (can be useful with pure-joints-type armatures)", + default=False, + ) + automatic_bone_orientation: BoolProperty( + name="Automatic Bone Orientation", + description="Try to align the major bone axis with the bone children", + default=False, + ) + primary_bone_axis: EnumProperty( + name="Primary Bone Axis", + items=(('X', "X Axis", ""), + ('Y', "Y Axis", ""), + ('Z', "Z Axis", ""), + ('-X', "-X Axis", ""), + ('-Y', "-Y Axis", ""), + ('-Z', "-Z Axis", ""), + ), + default='Y', + ) + secondary_bone_axis: EnumProperty( + name="Secondary Bone Axis", + items=(('X', "X Axis", ""), + ('Y', "Y Axis", ""), + ('Z', "Z Axis", ""), + ('-X', "-X Axis", ""), + ('-Y', "-Y Axis", ""), + ('-Z', "-Z Axis", ""), + ), + default='X', + ) + + use_prepost_rot: BoolProperty( + name="Use Pre/Post Rotation", + description="Use pre/post rotation from FBX transform (you may have to disable that in some cases)", + default=True, + ) + + def draw(self, context): + layout = self.layout + layout.use_property_split = True + layout.use_property_decorate = False # No animation. + + import_panel_include(layout, self) + import_panel_transform(layout, self) + import_panel_animation(layout, self) + import_panel_armature(layout, self) + + def execute(self, context): + keywords = self.as_keywords(ignore=("filter_glob", "directory", "ui_tab", "filepath", "files")) + + from . 
import import_fbx + import os + + if self.files: + ret = {'CANCELLED'} + dirname = os.path.dirname(self.filepath) + for file in self.files: + path = os.path.join(dirname, file.name) + if import_fbx.load(self, context, filepath=path, **keywords) == {'FINISHED'}: + ret = {'FINISHED'} + return ret + else: + return import_fbx.load(self, context, filepath=self.filepath, **keywords) + + def invoke(self, context, event): + return self.invoke_popup(context) + + +def import_panel_include(layout, operator): + header, body = layout.panel("FBX_import_include", default_closed=False) + header.label(text="Include") + if body: + body.prop(operator, "use_custom_normals") + body.prop(operator, "use_subsurf") + body.prop(operator, "use_custom_props") + sub = body.row() + sub.enabled = operator.use_custom_props + sub.prop(operator, "use_custom_props_enum_as_string") + body.prop(operator, "use_image_search") + body.prop(operator, "colors_type") + + +def import_panel_transform(layout, operator): + header, body = layout.panel("FBX_import_transform", default_closed=False) + header.label(text="Transform") + if body: + body.prop(operator, "global_scale") + body.prop(operator, "decal_offset") + row = body.row() + row.prop(operator, "bake_space_transform") + row.label(text="", icon='ERROR') + body.prop(operator, "use_prepost_rot") + + import_panel_transform_orientation(body, operator) + + +def import_panel_transform_orientation(layout, operator): + header, body = layout.panel("FBX_import_transform_manual_orientation", default_closed=False) + header.use_property_split = False + header.prop(operator, "use_manual_orientation", text="") + header.label(text="Manual Orientation") + if body: + body.enabled = operator.use_manual_orientation + body.prop(operator, "axis_forward") + body.prop(operator, "axis_up") + + +def import_panel_animation(layout, operator): + header, body = layout.panel("FBX_import_animation", default_closed=True) + header.use_property_split = False + header.prop(operator, "use_anim", text="") + header.label(text="Animation") + if body: + body.enabled = operator.use_anim + body.prop(operator, "anim_offset") + + +def import_panel_armature(layout, operator): + header, body = layout.panel("FBX_import_armature", default_closed=True) + header.label(text="Armature") + if body: + body.prop(operator, "ignore_leaf_bones") + body.prop(operator, "force_connect_children"), + body.prop(operator, "automatic_bone_orientation"), + sub = body.column() + sub.enabled = not operator.automatic_bone_orientation + sub.prop(operator, "primary_bone_axis") + sub.prop(operator, "secondary_bone_axis") + + +@orientation_helper(axis_forward='-Z', axis_up='Y') +class ExportFBX(bpy.types.Operator, ExportHelper): + """Write a FBX file""" + bl_idname = "export_scene.fbx" + bl_label = "Export FBX" + bl_options = {'UNDO', 'PRESET'} + + filename_ext = ".fbx" + filter_glob: StringProperty(default="*.fbx", options={'HIDDEN'}) + + # List of operator properties, the attributes will be assigned + # to the class instance from the operator settings before calling. 
+ + use_selection: BoolProperty( + name="Selected Objects", + description="Export selected and visible objects only", + default=False, + ) + use_visible: BoolProperty( + name='Visible Objects', + description='Export visible objects only', + default=False + ) + use_active_collection: BoolProperty( + name="Active Collection", + description="Export only objects from the active collection (and its children)", + default=False, + ) + collection: StringProperty( + name="Source Collection", + description="Export only objects from this collection (and its children)", + default="", + ) + global_scale: FloatProperty( + name="Scale", + description="Scale all data (Some importers do not support scaled armatures!)", + min=0.001, max=1000.0, + soft_min=0.01, soft_max=1000.0, + default=1.0, + ) + apply_unit_scale: BoolProperty( + name="Apply Unit", + description="Take into account current Blender units settings (if unset, raw Blender Units values are used as-is)", + default=True, + ) + apply_scale_options: EnumProperty( + items=(('FBX_SCALE_NONE', "All Local", + "Apply custom scaling and units scaling to each object transformation, FBX scale remains at 1.0"), + ('FBX_SCALE_UNITS', "FBX Units Scale", + "Apply custom scaling to each object transformation, and units scaling to FBX scale"), + ('FBX_SCALE_CUSTOM', "FBX Custom Scale", + "Apply custom scaling to FBX scale, and units scaling to each object transformation"), + ('FBX_SCALE_ALL', "FBX All", + "Apply custom scaling and units scaling to FBX scale"), + ), + name="Apply Scalings", + description="How to apply custom and units scalings in generated FBX file " + "(Blender uses FBX scale to detect units on import, " + "but many other applications do not handle the same way)", + ) + + use_space_transform: BoolProperty( + name="Use Space Transform", + description="Apply global space transform to the object rotations. When disabled " + "only the axis space is written to the file and all object transforms are left as-is", + default=True, + ) + bake_space_transform: BoolProperty( + name="Apply Transform", + description="Bake space transform into object data, avoids getting unwanted rotations to objects when " + "target space is not aligned with Blender's space " + "(WARNING! experimental option, use at own risk, known to be broken with armatures/animations)", + default=False, + ) + + object_types: EnumProperty( + name="Object Types", + options={'ENUM_FLAG'}, + items=(('EMPTY', "Empty", ""), + ('CAMERA', "Camera", ""), + ('LIGHT', "Lamp", ""), + ('ARMATURE', "Armature", "WARNING: not supported in dupli/group instances"), + ('MESH', "Mesh", ""), + ('OTHER', "Other", "Other geometry types, like curve, metaball, etc. 
(converted to meshes)"), + ), + description="Which kind of object to export", + default={'EMPTY', 'CAMERA', 'LIGHT', 'ARMATURE', 'MESH', 'OTHER'}, + ) + + use_mesh_modifiers: BoolProperty( + name="Apply Modifiers", + description="Apply modifiers to mesh objects (except Armature ones) - " + "WARNING: prevents exporting shape keys", + default=True, + ) + use_mesh_modifiers_render: BoolProperty( + name="Use Modifiers Render Setting", + description="Use render settings when applying modifiers to mesh objects (DISABLED in Blender 2.8)", + default=True, + ) + mesh_smooth_type: EnumProperty( + name="Smoothing", + items=(('OFF', "Normals Only", "Export only normals instead of writing edge or face smoothing data"), + ('FACE', "Face", "Write face smoothing"), + ('EDGE', "Edge", "Write edge smoothing"), + ), + description="Export smoothing information " + "(prefer 'Normals Only' option if your target importer understand split normals)", + default='OFF', + ) + colors_type: EnumProperty( + name="Vertex Colors", + items=(('NONE', "None", "Do not export color attributes"), + ('SRGB', "sRGB", "Export colors in sRGB color space"), + ('LINEAR', "Linear", "Export colors in linear color space"), + ), + description="Export vertex color attributes", + default='SRGB', + ) + prioritize_active_color: BoolProperty( + name="Prioritize Active Color", + description="Make sure active color will be exported first. Could be important " + "since some other software can discard other color attributes besides the first one", + default=False, + ) + use_subsurf: BoolProperty( + name="Export Subdivision Surface", + description="Export the last Catmull-Rom subdivision modifier as FBX subdivision " + "(does not apply the modifier even if 'Apply Modifiers' is enabled)", + default=False, + ) + use_mesh_edges: BoolProperty( + name="Loose Edges", + description="Export loose edges (as two-vertices polygons)", + default=False, + ) + use_tspace: BoolProperty( + name="Tangent Space", + description="Add binormal and tangent vectors, together with normal they form the tangent space " + "(will only work correctly with tris/quads only meshes!)", + default=False, + ) + use_triangles: BoolProperty( + name="Triangulate Faces", + description="Convert all faces to triangles", + default=False, + ) + use_custom_props: BoolProperty( + name="Custom Properties", + description="Export custom properties", + default=False, + ) + add_leaf_bones: BoolProperty( + name="Add Leaf Bones", + description="Append a final bone to the end of each chain to specify last bone length " + "(use this when you intend to edit the armature from exported data)", + default=True # False for commit! 
+ ) + primary_bone_axis: EnumProperty( + name="Primary Bone Axis", + items=(('X', "X Axis", ""), + ('Y', "Y Axis", ""), + ('Z', "Z Axis", ""), + ('-X', "-X Axis", ""), + ('-Y', "-Y Axis", ""), + ('-Z', "-Z Axis", ""), + ), + default='Y', + ) + secondary_bone_axis: EnumProperty( + name="Secondary Bone Axis", + items=(('X', "X Axis", ""), + ('Y', "Y Axis", ""), + ('Z', "Z Axis", ""), + ('-X', "-X Axis", ""), + ('-Y', "-Y Axis", ""), + ('-Z', "-Z Axis", ""), + ), + default='X', + ) + use_armature_deform_only: BoolProperty( + name="Only Deform Bones", + description="Only write deforming bones (and non-deforming ones when they have deforming children)", + default=False, + ) + armature_nodetype: EnumProperty( + name="Armature FBXNode Type", + items=(('NULL', "Null", "'Null' FBX node, similar to Blender's Empty (default)"), + ('ROOT', "Root", "'Root' FBX node, supposed to be the root of chains of bones..."), + ('LIMBNODE', "LimbNode", "'LimbNode' FBX node, a regular joint between two bones..."), + ), + description="FBX type of node (object) used to represent Blender's armatures " + "(use the Null type unless you experience issues with the other app, " + "as other choices may not import back perfectly into Blender...)", + default='NULL', + ) + bake_anim: BoolProperty( + name="Baked Animation", + description="Export baked keyframe animation", + default=True, + ) + bake_anim_use_all_bones: BoolProperty( + name="Key All Bones", + description="Force exporting at least one key of animation for all bones " + "(needed with some target applications, like UE4)", + default=True, + ) + bake_anim_use_nla_strips: BoolProperty( + name="NLA Strips", + description="Export each non-muted NLA strip as a separated FBX's AnimStack, if any, " + "instead of global scene animation", + default=True, + ) + bake_anim_use_all_actions: BoolProperty( + name="All Actions", + description="Export each action as a separated FBX's AnimStack, instead of global scene animation " + "(note that animated objects will get all actions compatible with them, " + "others will get no animation at all)", + default=True, + ) + bake_anim_force_startend_keying: BoolProperty( + name="Force Start/End Keying", + description="Always add a keyframe at start and end of actions for animated channels", + default=True, + ) + bake_anim_step: FloatProperty( + name="Sampling Rate", + description="How often to evaluate animated values (in frames)", + min=0.01, max=100.0, + soft_min=0.1, soft_max=10.0, + default=1.0, + ) + bake_anim_simplify_factor: FloatProperty( + name="Simplify", + description="How much to simplify baked values (0.0 to disable, the higher the more simplified)", + min=0.0, max=100.0, # No simplification to up to 10% of current magnitude tolerance. + soft_min=0.0, soft_max=10.0, + default=1.0, # default: min slope: 0.005, max frame step: 10. 
+ ) + path_mode: path_reference_mode + embed_textures: BoolProperty( + name="Embed Textures", + description="Embed textures in FBX binary file (only for \"Copy\" path mode!)", + default=False, + ) + batch_mode: EnumProperty( + name="Batch Mode", + items=(('OFF', "Off", "Active scene to file"), + ('SCENE', "Scene", "Each scene as a file"), + ('COLLECTION', "Collection", + "Each collection (data-block ones) as a file, does not include content of children collections"), + ('SCENE_COLLECTION', "Scene Collections", + "Each collection (including master, non-data-block ones) of each scene as a file, " + "including content from children collections"), + ('ACTIVE_SCENE_COLLECTION', "Active Scene Collections", + "Each collection (including master, non-data-block one) of the active scene as a file, " + "including content from children collections"), + ), + ) + use_batch_own_dir: BoolProperty( + name="Batch Own Dir", + description="Create a dir for each exported file", + default=True, + ) + use_metadata: BoolProperty( + name="Use Metadata", + default=True, + options={'HIDDEN'}, + ) + + def draw(self, context): + layout = self.layout + layout.use_property_split = True + layout.use_property_decorate = False # No animation. + + # Are we inside the File browser + is_file_browser = context.space_data.type == 'FILE_BROWSER' + + export_main(layout, self, is_file_browser) + export_panel_include(layout, self, is_file_browser) + export_panel_transform(layout, self) + export_panel_geometry(layout, self) + export_panel_armature(layout, self) + export_panel_animation(layout, self) + + @property + def check_extension(self): + return self.batch_mode == 'OFF' + + def execute(self, context): + from mathutils import Matrix + if not self.filepath: + raise Exception("filepath not set") + + global_matrix = (axis_conversion(to_forward=self.axis_forward, + to_up=self.axis_up, + ).to_4x4() + if self.use_space_transform else Matrix()) + + keywords = self.as_keywords(ignore=("check_existing", + "filter_glob", + "ui_tab", + )) + + keywords["global_matrix"] = global_matrix + + from . 
import export_fbx_bin + return export_fbx_bin.save(self, context, **keywords) + + +def export_main(layout, operator, is_file_browser): + row = layout.row(align=True) + row.prop(operator, "path_mode") + sub = row.row(align=True) + sub.enabled = (operator.path_mode == 'COPY') + sub.prop(operator, "embed_textures", text="", icon='PACKAGE' if operator.embed_textures else 'UGLYPACKAGE') + if is_file_browser: + row = layout.row(align=True) + row.prop(operator, "batch_mode") + sub = row.row(align=True) + sub.prop(operator, "use_batch_own_dir", text="", icon='NEWFOLDER') + + +def export_panel_include(layout, operator, is_file_browser): + header, body = layout.panel("FBX_export_include", default_closed=False) + header.label(text="Include") + if body: + sublayout = body.column(heading="Limit to") + sublayout.enabled = (operator.batch_mode == 'OFF') + if is_file_browser: + sublayout.prop(operator, "use_selection") + sublayout.prop(operator, "use_visible") + sublayout.prop(operator, "use_active_collection") + + body.column().prop(operator, "object_types") + body.prop(operator, "use_custom_props") + + +def export_panel_transform(layout, operator): + header, body = layout.panel("FBX_export_transform", default_closed=False) + header.label(text="Transform") + if body: + body.prop(operator, "global_scale") + body.prop(operator, "apply_scale_options") + + body.prop(operator, "axis_forward") + body.prop(operator, "axis_up") + + body.prop(operator, "apply_unit_scale") + body.prop(operator, "use_space_transform") + row = body.row() + row.prop(operator, "bake_space_transform") + row.label(text="", icon='ERROR') + + +def export_panel_geometry(layout, operator): + header, body = layout.panel("FBX_export_geometry", default_closed=True) + header.label(text="Geometry") + if body: + body.prop(operator, "mesh_smooth_type") + body.prop(operator, "use_subsurf") + body.prop(operator, "use_mesh_modifiers") + #sub = body.row() + # sub.enabled = operator.use_mesh_modifiers and False # disabled in 2.8... 
+ #sub.prop(operator, "use_mesh_modifiers_render") + body.prop(operator, "use_mesh_edges") + body.prop(operator, "use_triangles") + sub = body.row() + # ~ sub.enabled = operator.mesh_smooth_type in {'OFF'} + sub.prop(operator, "use_tspace") + body.prop(operator, "colors_type") + body.prop(operator, "prioritize_active_color") + + +def export_panel_armature(layout, operator): + header, body = layout.panel("FBX_export_armature", default_closed=True) + header.label(text="Armature") + if body: + body.prop(operator, "primary_bone_axis") + body.prop(operator, "secondary_bone_axis") + body.prop(operator, "armature_nodetype") + body.prop(operator, "use_armature_deform_only") + body.prop(operator, "add_leaf_bones") + + +def export_panel_animation(layout, operator): + header, body = layout.panel("FBX_export_bake_animation", default_closed=True) + header.use_property_split = False + header.prop(operator, "bake_anim", text="") + header.label(text="Animation") + if body: + body.enabled = operator.bake_anim + body.prop(operator, "bake_anim_use_all_bones") + body.prop(operator, "bake_anim_use_nla_strips") + body.prop(operator, "bake_anim_use_all_actions") + body.prop(operator, "bake_anim_force_startend_keying") + body.prop(operator, "bake_anim_step") + body.prop(operator, "bake_anim_simplify_factor") + + +class IO_FH_fbx(bpy.types.FileHandler): + bl_idname = "IO_FH_fbx" + bl_label = "FBX" + bl_import_operator = "import_scene.fbx" + bl_export_operator = "export_scene.fbx" + bl_file_extensions = ".fbx" + + @classmethod + def poll_drop(cls, context): + return poll_file_object_drop(context) + + +def menu_func_import(self, context): + self.layout.operator(ImportFBX.bl_idname, text="FBX (.fbx)") + + +def menu_func_export(self, context): + self.layout.operator(ExportFBX.bl_idname, text="FBX (.fbx)") + + +classes = ( + ImportFBX, + ExportFBX, + IO_FH_fbx, +) + + +def register(): + for cls in classes: + bpy.utils.register_class(cls) + + bpy.types.TOPBAR_MT_file_import.append(menu_func_import) + bpy.types.TOPBAR_MT_file_export.append(menu_func_export) + + +def unregister(): + bpy.types.TOPBAR_MT_file_import.remove(menu_func_import) + bpy.types.TOPBAR_MT_file_export.remove(menu_func_export) + + for cls in classes: + bpy.utils.unregister_class(cls) + + +if __name__ == "__main__": + register() diff --git a/scripts/addons_core/io_scene_fbx/data_types.py b/scripts/addons_core/io_scene_fbx/data_types.py new file mode 100644 index 00000000000..328ba3a9c27 --- /dev/null +++ b/scripts/addons_core/io_scene_fbx/data_types.py @@ -0,0 +1,62 @@ +# SPDX-FileCopyrightText: 2006-2012 assimp team +# SPDX-FileCopyrightText: 2013 Blender Foundation +# +# SPDX-License-Identifier: GPL-2.0-or-later + +BOOL = b'B'[0] +CHAR = b'C'[0] +INT8 = b'Z'[0] +INT16 = b'Y'[0] +INT32 = b'I'[0] +INT64 = b'L'[0] +FLOAT32 = b'F'[0] +FLOAT64 = b'D'[0] +BYTES = b'R'[0] +STRING = b'S'[0] +INT32_ARRAY = b'i'[0] +INT64_ARRAY = b'l'[0] +FLOAT32_ARRAY = b'f'[0] +FLOAT64_ARRAY = b'd'[0] +BOOL_ARRAY = b'b'[0] +BYTE_ARRAY = b'c'[0] + +# Some other misc defines +# Known combinations so far - supposed meaning: A = animatable, A+ = animated, U = UserProp +# VALID_NUMBER_FLAGS = {b'A', b'A+', b'AU', b'A+U'} # Not used... + +# array types - actual length may vary (depending on underlying C implementation)! +import array + +# For now, bytes and bool are assumed always 1byte. 
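+# The Python 'array' typecodes for 32/64-bit integers and floats map to C types whose sizes are
+# platform dependent, so probe array.array(typecode).itemsize at import time and keep the
+# typecodes that are exactly 4 and 8 bytes wide, as the FBX binary format requires.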
+ARRAY_BOOL = 'b' +ARRAY_BYTE = 'B' + +ARRAY_INT32 = None +ARRAY_INT64 = None +for _t in 'ilq': + size = array.array(_t).itemsize + if size == 4: + ARRAY_INT32 = _t + elif size == 8: + ARRAY_INT64 = _t + if ARRAY_INT32 and ARRAY_INT64: + break +if not ARRAY_INT32: + raise Exception("Impossible to get a 4-bytes integer type for array!") +if not ARRAY_INT64: + raise Exception("Impossible to get an 8-bytes integer type for array!") + +ARRAY_FLOAT32 = None +ARRAY_FLOAT64 = None +for _t in 'fd': + size = array.array(_t).itemsize + if size == 4: + ARRAY_FLOAT32 = _t + elif size == 8: + ARRAY_FLOAT64 = _t + if ARRAY_FLOAT32 and ARRAY_FLOAT64: + break +if not ARRAY_FLOAT32: + raise Exception("Impossible to get a 4-bytes float type for array!") +if not ARRAY_FLOAT64: + raise Exception("Impossible to get an 8-bytes float type for array!") diff --git a/scripts/addons_core/io_scene_fbx/encode_bin.py b/scripts/addons_core/io_scene_fbx/encode_bin.py new file mode 100644 index 00000000000..a36f4e0506f --- /dev/null +++ b/scripts/addons_core/io_scene_fbx/encode_bin.py @@ -0,0 +1,434 @@ +# SPDX-FileCopyrightText: 2013 Campbell Barton +# +# SPDX-License-Identifier: GPL-2.0-or-later + +try: + from . import data_types + from .fbx_utils_threading import MultiThreadedTaskConsumer +except: + import data_types + from fbx_utils_threading import MultiThreadedTaskConsumer + +from struct import pack +from contextlib import contextmanager +import array +import numpy as np +import zlib + +_BLOCK_SENTINEL_LENGTH = ... +_BLOCK_SENTINEL_DATA = ... +_ELEM_META_FORMAT = ... +_ELEM_META_SIZE = ... +_IS_BIG_ENDIAN = (__import__("sys").byteorder != 'little') +_HEAD_MAGIC = b'Kaydara FBX Binary\x20\x20\x00\x1a\x00' + +# fbx has very strict CRC rules, all based on file timestamp +# until we figure these out, write files at a fixed time. (workaround!) + +# Assumes: CreationTime +_TIME_ID = b'1970-01-01 10:00:00:000' +_FILE_ID = b'\x28\xb3\x2a\xeb\xb6\x24\xcc\xc2\xbf\xc8\xb0\x2a\xa9\x2b\xfc\xf1' +_FOOT_ID = b'\xfa\xbc\xab\x09\xd0\xc8\xd4\x66\xb1\x76\xfb\x83\x1c\xf7\x26\x7e' + +# Awful exceptions: those "classes" of elements seem to need block sentinel even when having no children and some props. +_ELEMS_ID_ALWAYS_BLOCK_SENTINEL = {b"AnimationStack", b"AnimationLayer"} + + +class FBXElem: + __slots__ = ( + "id", + "props", + "props_type", + "elems", + + "_props_length", # combine length of props + "_end_offset", # byte offset from the start of the file. + ) + + def __init__(self, id): + assert(len(id) < 256) # length must fit in a uint8 + self.id = id + self.props = [] + self.props_type = bytearray() + self.elems = [] + self._end_offset = -1 + self._props_length = -1 + + @classmethod + @contextmanager + def enable_multithreading_cm(cls): + """Temporarily enable multithreaded array compression. + + The context manager handles starting up and shutting down the threads. + + Only exits once all the threads are done (either all tasks were completed or an error occurred and the threads + were stopped prematurely). + + Writing to a file is temporarily disabled as a safeguard.""" + # __enter__() + orig_func = cls._add_compressed_array_helper + orig_write = cls._write + + def insert_compressed_array(props, insert_at, data, length): + # zlib.compress releases the GIL, so can be multithreaded. 
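+ # A compressed FBX array property is stored as three little-endian uint32 values
+ # (element count, an encoding flag where 1 means zlib/deflate, and the compressed byte length)
+ # followed by the zlib-compressed payload; compression level 1 favors speed over ratio here.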
+ data = zlib.compress(data, 1) + comp_len = len(data) + + encoding = 1 + data = pack('<3I', length, encoding, comp_len) + data + props[insert_at] = data + + with MultiThreadedTaskConsumer.new_cpu_bound_cm(insert_compressed_array) as wrapped_func: + try: + def _add_compressed_array_helper_multi(self, data, length): + # Append a dummy value that will be replaced with the compressed array data later. + self.props.append(...) + # The index to insert the compressed array into. + insert_at = len(self.props) - 1 + # Schedule the array to be compressed on a separate thread and then inserted into the hierarchy at + # `insert_at`. + wrapped_func(self.props, insert_at, data, length) + + # As an extra safeguard, temporarily replace the `_write` function to raise an error if called. + def temp_write(*_args, **_kwargs): + raise RuntimeError("Writing is not allowed until multithreaded array compression has been disabled") + + cls._add_compressed_array_helper = _add_compressed_array_helper_multi + cls._write = temp_write + + # Return control back to the caller of __enter__(). + yield + finally: + # __exit__() + # Restore the original functions. + cls._add_compressed_array_helper = orig_func + cls._write = orig_write + # Exiting the MultiThreadedTaskConsumer context manager will wait for all scheduled tasks to complete. + + def add_bool(self, data): + assert(isinstance(data, bool)) + data = pack('?', data) + + self.props_type.append(data_types.BOOL) + self.props.append(data) + + def add_char(self, data): + assert(isinstance(data, bytes)) + assert(len(data) == 1) + data = pack('vertex-indices array by the loop->edge-index array. + t_pvi_edge_keys = t_ev_pair_view[t_lei] + + # Sort each [edge_start_n, edge_end_n] pair to get edge keys. Heapsort seems to be the fastest for this specific + # use case. + t_pvi_edge_keys.sort(axis=1, kind='heapsort') + + # Note that finding unique edge keys means that if there are multiple edges that share the same vertices (which + # shouldn't normally happen), only the first edge found in loops will be exported along with its per-edge data. + # To export separate edges that share the same vertices, fast_first_axis_unique can be replaced with np.unique + # with t_lei as the first argument, finding unique edges rather than unique edge keys. + # + # Since we want the unique values in their original order, the only part we care about is the indices of the + # first occurrence of the unique elements in t_pvi_edge_keys, so we can use our fast uniqueness helper function. + t_eli = fast_first_axis_unique(t_pvi_edge_keys, return_unique=False, return_index=True) + + # To get the indices of the elements in t_pvi_edge_keys that produce unique values, but in the original order of + # t_pvi_edge_keys, t_eli must be sorted. + # Due to loops and their edge keys tending to have a partial ordering within meshes, sorting with kind='stable' + # with radix sort tends to be faster than the default of kind='quicksort' with introsort. + t_eli.sort(kind='stable') + + # Edge index of each element in unique t_pvi_edge_keys, used to map per-edge data such as sharp and creases. + t_pvi_edge_indices = t_lei[t_eli] + + # We have to ^-1 last index of each loop. + # Ensure t_pvi is the correct number of bits before inverting. + # t_lvi may be used again later, so always create a copy to avoid modifying it in the next step. + t_pvi = t_lvi.astype(pvi_fbx_dtype) + # The index of the end of each loop is one before the index of the start of the next loop. 
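+ # FBX marks the end of each polygon by storing its last vertex index bitwise-negated
+ # (idx ^ -1 == -(idx + 1)), e.g. a quad using vertices [0, 1, 2, 3] is written as [0, 1, 2, -4];
+ # importers recover the real index by negating it again.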
+ t_pvi[t_ls[1:] - 1] ^= -1 + # The index of the end of the last loop will be the very last index. + t_pvi[-1] ^= -1 + del t_pvi_edge_keys + else: + # Should be empty, but make sure it's the correct type. + t_pvi = np.empty(0, dtype=pvi_fbx_dtype) + t_eli = np.empty(0, dtype=eli_fbx_dtype) + + # And finally we can write data! + t_pvi = astype_view_signedness(t_pvi, pvi_fbx_dtype) + t_eli = astype_view_signedness(t_eli, eli_fbx_dtype) + elem_data_single_int32_array(geom, b"PolygonVertexIndex", t_pvi) + elem_data_single_int32_array(geom, b"Edges", t_eli) + del t_pvi + del t_eli + del t_ev + del t_ev_pair_view + + # And now, layers! + + # Smoothing. + if smooth_type in {'FACE', 'EDGE'}: + ps_fbx_dtype = np.int32 + _map = b"" + if smooth_type == 'FACE': + # The FBX integer values are usually interpreted as boolean where 0 is False (sharp) and 1 is True + # (smooth). + # The values may also be used to represent smoothing group bitflags, but this does not seem well-supported. + t_ps = MESH_ATTRIBUTE_SHARP_FACE.get_ndarray(attributes) + if t_ps is not None: + # FBX sharp is False, but Blender sharp is True, so invert. + t_ps = np.logical_not(t_ps) + else: + # The mesh has no "sharp_face" attribute, so every face is smooth. + t_ps = np.ones(len(me.polygons), dtype=ps_fbx_dtype) + _map = b"ByPolygon" + else: # EDGE + _map = b"ByEdge" + if t_pvi_edge_indices.size: + # Write Edge Smoothing. + # Note edge is sharp also if it's used by more than two faces, or one of its faces is flat. + mesh_poly_nbr = len(me.polygons) + mesh_edge_nbr = len(me.edges) + mesh_loop_nbr = len(me.loops) + # t_ls and t_lei may contain extra polygons or loops added for loose edges that are not present in the + # mesh data, so create views that exclude the extra data added for loose edges. + mesh_t_ls_view = t_ls[:mesh_poly_nbr] + mesh_t_lei_view = t_lei[:mesh_loop_nbr] + + # - Get sharp edges from edges used by more than two loops (and therefore more than two faces) + e_more_than_two_faces_mask = np.bincount(mesh_t_lei_view, minlength=mesh_edge_nbr) > 2 + + # - Get sharp edges from the "sharp_edge" attribute. The attribute may not exist, in which case, there + # are no edges marked as sharp. + e_use_sharp_mask = MESH_ATTRIBUTE_SHARP_EDGE.get_ndarray(attributes) + if e_use_sharp_mask is not None: + # - Combine with edges that are sharp because they're in more than two faces + e_use_sharp_mask = np.logical_or(e_use_sharp_mask, e_more_than_two_faces_mask, out=e_use_sharp_mask) + else: + e_use_sharp_mask = e_more_than_two_faces_mask + + # - Get sharp edges from flat shaded faces + p_flat_mask = MESH_ATTRIBUTE_SHARP_FACE.get_ndarray(attributes) + if p_flat_mask is not None: + # Convert flat shaded polygons to flat shaded loops by repeating each element by the number of sides + # of that polygon. + # Polygon sides can be calculated from the element-wise difference of loop starts appended by the + # number of loops. Alternatively, polygon sides can be retrieved directly from the 'loop_total' + # attribute of polygons, but since we already have t_ls, it tends to be quicker to calculate from + # t_ls. + polygon_sides = np.diff(mesh_t_ls_view, append=mesh_loop_nbr) + p_flat_loop_mask = np.repeat(p_flat_mask, polygon_sides) + # Convert flat shaded loops to flat shaded (sharp) edge indices. + # Note that if an edge is in multiple loops that are part of flat shaded faces, its edge index will + # end up in sharp_edge_indices_from_polygons multiple times. 
+ sharp_edge_indices_from_polygons = mesh_t_lei_view[p_flat_loop_mask] + + # - Combine with edges that are sharp because a polygon they're in has flat shading + e_use_sharp_mask[sharp_edge_indices_from_polygons] = True + del sharp_edge_indices_from_polygons + del p_flat_loop_mask + del polygon_sides + del p_flat_mask + + # - Convert sharp edges to sharp edge keys (t_pvi) + ek_use_sharp_mask = e_use_sharp_mask[t_pvi_edge_indices] + + # - Sharp edges are indicated in FBX as zero (False), so invert + t_ps = np.invert(ek_use_sharp_mask, out=ek_use_sharp_mask) + del ek_use_sharp_mask + del e_use_sharp_mask + del mesh_t_lei_view + del mesh_t_ls_view + else: + t_ps = np.empty(0, dtype=ps_fbx_dtype) + t_ps = t_ps.astype(ps_fbx_dtype, copy=False) + lay_smooth = elem_data_single_int32(geom, b"LayerElementSmoothing", 0) + elem_data_single_int32(lay_smooth, b"Version", FBX_GEOMETRY_SMOOTHING_VERSION) + elem_data_single_string(lay_smooth, b"Name", b"") + elem_data_single_string(lay_smooth, b"MappingInformationType", _map) + elem_data_single_string(lay_smooth, b"ReferenceInformationType", b"Direct") + elem_data_single_int32_array(lay_smooth, b"Smoothing", t_ps) # Sight, int32 for bool... + del t_ps + del t_ls + del t_lei + + # Edge crease for subdivision + if write_crease: + ec_fbx_dtype = np.float64 + if t_pvi_edge_indices.size: + ec_bl_dtype = np.single + edge_creases = me.edge_creases + if edge_creases: + t_ec_raw = np.empty(len(me.edges), dtype=ec_bl_dtype) + edge_creases.data.foreach_get("value", t_ec_raw) + + # Convert to t_pvi edge-keys. + t_ec_ek_raw = t_ec_raw[t_pvi_edge_indices] + + # Blender squares those values before sending them to OpenSubdiv, when other software don't, + # so we need to compensate that to get similar results through FBX... + # Use the precision of the fbx dtype for the calculation since it's usually higher precision. + t_ec_ek_raw = t_ec_ek_raw.astype(ec_fbx_dtype, copy=False) + t_ec = np.square(t_ec_ek_raw, out=t_ec_ek_raw) + del t_ec_ek_raw + del t_ec_raw + else: + # todo: Blender edge creases are optional now, we may be able to avoid writing the array to FBX when + # there are no edge creases. + t_ec = np.zeros(t_pvi_edge_indices.shape, dtype=ec_fbx_dtype) + else: + t_ec = np.empty(0, dtype=ec_fbx_dtype) + + lay_crease = elem_data_single_int32(geom, b"LayerElementEdgeCrease", 0) + elem_data_single_int32(lay_crease, b"Version", FBX_GEOMETRY_CREASE_VERSION) + elem_data_single_string(lay_crease, b"Name", b"") + elem_data_single_string(lay_crease, b"MappingInformationType", b"ByEdge") + elem_data_single_string(lay_crease, b"ReferenceInformationType", b"Direct") + elem_data_single_float64_array(lay_crease, b"EdgeCrease", t_ec) + del t_ec + + # And we are done with edges! + del t_pvi_edge_indices + + # Loop normals. + tspacenumber = 0 + if write_normals: + normal_bl_dtype = np.single + normal_fbx_dtype = np.float64 + match me.normals_domain: + case 'POINT': + # All faces are smooth shaded, so we can get normals from the vertices. + normal_source = me.vertex_normals + normal_mapping = b"ByVertice" + # External software support for b"ByPolygon" normals does not seem to be as widely available as the other + # mappings. See blender/blender#117470. + # case 'FACE': + # # Either all faces or all edges are sharp, so we can get normals from the faces. + # normal_source = me.polygon_normals + # normal_mapping = b"ByPolygon" + case 'CORNER' | 'FACE': + # We have a mix of sharp/smooth edges/faces or custom split normals, so need to get normals from + # corners. 
+ normal_source = me.corner_normals + normal_mapping = b"ByPolygonVertex" + case _: + # Unreachable + raise AssertionError("Unexpected normals domain '%s'" % me.normals_domain) + # Each normal has 3 components, so the length is multiplied by 3. + t_normal = np.empty(len(normal_source) * 3, dtype=normal_bl_dtype) + normal_source.foreach_get("vector", t_normal) + t_normal = nors_transformed(t_normal, geom_mat_no, normal_fbx_dtype) + normal_idx_fbx_dtype = np.int32 + lay_nor = elem_data_single_int32(geom, b"LayerElementNormal", 0) + elem_data_single_int32(lay_nor, b"Version", FBX_GEOMETRY_NORMAL_VERSION) + elem_data_single_string(lay_nor, b"Name", b"") + elem_data_single_string(lay_nor, b"MappingInformationType", normal_mapping) + # FBX SDK documentation says that normals should use IndexToDirect. + elem_data_single_string(lay_nor, b"ReferenceInformationType", b"IndexToDirect") + + # Tuple of unique sorted normals and then the index in the unique sorted normals of each normal in t_normal. + # Since we don't care about how the normals are sorted, only that they're unique, we can use the fast unique + # helper function. + t_normal, t_normal_idx = fast_first_axis_unique(t_normal.reshape(-1, 3), return_inverse=True) + + # Convert to the type for fbx + t_normal_idx = astype_view_signedness(t_normal_idx, normal_idx_fbx_dtype) + + elem_data_single_float64_array(lay_nor, b"Normals", t_normal) + # Normal weights, no idea what it is. + # t_normal_w = np.zeros(len(t_normal), dtype=np.float64) + # elem_data_single_float64_array(lay_nor, b"NormalsW", t_normal_w) + + elem_data_single_int32_array(lay_nor, b"NormalsIndex", t_normal_idx) + + del t_normal_idx + # del t_normal_w + del t_normal + + # tspace + if scene_data.settings.use_tspace: + tspacenumber = len(me.uv_layers) + if tspacenumber: + # We can only compute tspace on tessellated meshes, need to check that here... + lt_bl_dtype = np.uintc + t_lt = np.empty(len(me.polygons), dtype=lt_bl_dtype) + me.polygons.foreach_get("loop_total", t_lt) + if (t_lt > 4).any(): + del t_lt + scene_data.settings.report( + {'WARNING'}, + tip_("Mesh '%s' has polygons with more than 4 vertices, " + "cannot compute/export tangent space for it") % me.name) + else: + del t_lt + num_loops = len(me.loops) + t_ln = np.empty(num_loops * 3, dtype=normal_bl_dtype) + # t_lnw = np.zeros(len(me.loops), dtype=np.float64) + uv_names = [uvlayer.name for uvlayer in me.uv_layers] + # Annoying, `me.calc_tangent` errors in case there is no geometry... + if num_loops > 0: + for name in uv_names: + me.calc_tangents(uvmap=name) + for idx, uvlayer in enumerate(me.uv_layers): + name = uvlayer.name + # Loop bitangents (aka binormals). + # NOTE: this is not supported by importer currently. + me.loops.foreach_get("bitangent", t_ln) + lay_nor = elem_data_single_int32(geom, b"LayerElementBinormal", idx) + elem_data_single_int32(lay_nor, b"Version", FBX_GEOMETRY_BINORMAL_VERSION) + elem_data_single_string_unicode(lay_nor, b"Name", name) + elem_data_single_string(lay_nor, b"MappingInformationType", b"ByPolygonVertex") + elem_data_single_string(lay_nor, b"ReferenceInformationType", b"Direct") + elem_data_single_float64_array(lay_nor, b"Binormals", + nors_transformed(t_ln, geom_mat_no, normal_fbx_dtype)) + # Binormal weights, no idea what it is. + # elem_data_single_float64_array(lay_nor, b"BinormalsW", t_lnw) + + # Loop tangents. + # NOTE: this is not supported by importer currently. 
+ me.loops.foreach_get("tangent", t_ln) + lay_nor = elem_data_single_int32(geom, b"LayerElementTangent", idx) + elem_data_single_int32(lay_nor, b"Version", FBX_GEOMETRY_TANGENT_VERSION) + elem_data_single_string_unicode(lay_nor, b"Name", name) + elem_data_single_string(lay_nor, b"MappingInformationType", b"ByPolygonVertex") + elem_data_single_string(lay_nor, b"ReferenceInformationType", b"Direct") + elem_data_single_float64_array(lay_nor, b"Tangents", + nors_transformed(t_ln, geom_mat_no, normal_fbx_dtype)) + # Tangent weights, no idea what it is. + # elem_data_single_float64_array(lay_nor, b"TangentsW", t_lnw) + + del t_ln + # del t_lnw + me.free_tangents() + + # Write VertexColor Layers. + colors_type = scene_data.settings.colors_type + vcolnumber = 0 if colors_type == 'NONE' else len(me.color_attributes) + if vcolnumber: + color_prop_name = "color_srgb" if colors_type == 'SRGB' else "color" + # ByteColorAttribute color also gets returned by the API as single precision float + bl_lc_dtype = np.single + fbx_lc_dtype = np.float64 + fbx_lcidx_dtype = np.int32 + + color_attributes = me.color_attributes + if scene_data.settings.prioritize_active_color: + active_color = me.color_attributes.active_color + color_attributes = sorted(color_attributes, key=lambda x: x == active_color, reverse=True) + + for colindex, collayer in enumerate(color_attributes): + is_point = collayer.domain == "POINT" + vcollen = len(me.vertices if is_point else me.loops) + # Each rgba component is flattened in the array + t_lc = np.empty(vcollen * 4, dtype=bl_lc_dtype) + collayer.data.foreach_get(color_prop_name, t_lc) + lay_vcol = elem_data_single_int32(geom, b"LayerElementColor", colindex) + elem_data_single_int32(lay_vcol, b"Version", FBX_GEOMETRY_VCOLOR_VERSION) + elem_data_single_string_unicode(lay_vcol, b"Name", collayer.name) + elem_data_single_string(lay_vcol, b"MappingInformationType", b"ByPolygonVertex") + elem_data_single_string(lay_vcol, b"ReferenceInformationType", b"IndexToDirect") + + # Use the fast uniqueness helper function since we don't care about sorting. + t_lc, col_indices = fast_first_axis_unique(t_lc.reshape(-1, 4), return_inverse=True) + + if is_point: + # for "point" domain colors, we could directly emit them + # with a "ByVertex" mapping type, but some software does not + # properly understand that. So expand to full "ByPolygonVertex" + # index map. + # Ignore loops added for loose edges. + col_indices = col_indices[t_lvi[:len(me.loops)]] + + t_lc = t_lc.astype(fbx_lc_dtype, copy=False) + col_indices = astype_view_signedness(col_indices, fbx_lcidx_dtype) + + elem_data_single_float64_array(lay_vcol, b"Colors", t_lc) + elem_data_single_int32_array(lay_vcol, b"ColorIndex", col_indices) + + del t_lc + del col_indices + + # Write UV layers. + # Note: LayerElementTexture is deprecated since FBX 2011 - luckily! + # Textures are now only related to materials, in FBX! + uvnumber = len(me.uv_layers) + if uvnumber: + luv_bl_dtype = np.single + luv_fbx_dtype = np.float64 + lv_idx_fbx_dtype = np.int32 + + t_luv = np.empty(len(me.loops) * 2, dtype=luv_bl_dtype) + # Fast view for sort-based uniqueness of pairs. + t_luv_fast_pair_view = fast_first_axis_flat(t_luv.reshape(-1, 2)) + # It must be a view of t_luv otherwise it won't update when t_luv is updated. + assert(t_luv_fast_pair_view.base is t_luv) + + # Looks like this mapping is also expected to convey UV islands (arg..... :((((( ). + # So we need to generate unique triplets (uv, vertex_idx) here, not only just based on UV values. 
+ # Ignore loops added for loose edges. + t_lvidx = t_lvi[:len(me.loops)] + + # If we were to create a combined array of (uv, vertex_idx) elements, we could find unique triplets by sorting + # that array by first sorting by the vertex_idx column and then sorting by the uv column using a stable sorting + # algorithm. + # This is exactly what we'll do, but without creating the combined array, because only the uv elements are + # included in the export and the vertex_idx column is the same for every uv layer. + + # Because the vertex_idx column is the same for every uv layer, the vertex_idx column can be sorted in advance. + # argsort gets the indices that sort the array, which are needed to be able to sort the array of uv pairs in the + # same way to create the indices that recreate the full uvs from the unique uvs. + # Loops and vertices tend to naturally have a partial ordering, which makes sorting with kind='stable' (radix + # sort) faster than the default of kind='quicksort' (introsort) in most cases. + perm_vidx = t_lvidx.argsort(kind='stable') + + # Mask and uv indices arrays will be modified and re-used by each uv layer. + unique_mask = np.empty(len(me.loops), dtype=np.bool_) + unique_mask[:1] = True + uv_indices = np.empty(len(me.loops), dtype=lv_idx_fbx_dtype) + + for uvindex, uvlayer in enumerate(me.uv_layers): + lay_uv = elem_data_single_int32(geom, b"LayerElementUV", uvindex) + elem_data_single_int32(lay_uv, b"Version", FBX_GEOMETRY_UV_VERSION) + elem_data_single_string_unicode(lay_uv, b"Name", uvlayer.name) + elem_data_single_string(lay_uv, b"MappingInformationType", b"ByPolygonVertex") + elem_data_single_string(lay_uv, b"ReferenceInformationType", b"IndexToDirect") + + uvlayer.uv.foreach_get("vector", t_luv) + + # t_luv_fast_pair_view is a view in a dtype that compares elements by individual bytes, but float types have + # separate byte representations of positive and negative zero. For uniqueness, these should be considered + # the same, so replace all -0.0 with 0.0 in advance. + t_luv[t_luv == -0.0] = 0.0 + + # These steps to create unique_uv_pairs are the same as how np.unique would find unique values by sorting a + # structured array where each element is a triplet of (uv, vertex_idx), except uv and vertex_idx are + # separate arrays here and vertex_idx has already been sorted in advance. + + # Sort according to the vertex_idx column, using the precalculated indices that sort it. + sorted_t_luv_fast = t_luv_fast_pair_view[perm_vidx] + + # Get the indices that would sort the sorted uv pairs. Stable sorting must be used to maintain the sorting + # of the vertex indices. + perm_uv_pairs = sorted_t_luv_fast.argsort(kind='stable') + # Use the indices to sort both the uv pairs and the vertex_idx columns. + perm_combined = perm_vidx[perm_uv_pairs] + sorted_vidx = t_lvidx[perm_combined] + sorted_t_luv_fast = sorted_t_luv_fast[perm_uv_pairs] + + # Create a mask where either the uv pair doesn't equal the previous value in the array, or the vertex index + # doesn't equal the previous value, these will be the unique uv-vidx triplets. + # For an imaginary triplet array: + # ... + # [(0.4, 0.2), 0] + # [(0.4, 0.2), 1] -> Unique because vertex index different from previous + # [(0.4, 0.2), 2] -> Unique because vertex index different from previous + # [(0.7, 0.6), 2] -> Unique because uv different from previous + # [(0.7, 0.6), 2] + # ... + # Output the result into unique_mask. 
+ np.logical_or(sorted_t_luv_fast[1:] != sorted_t_luv_fast[:-1], sorted_vidx[1:] != sorted_vidx[:-1], + out=unique_mask[1:]) + + # Get each uv pair marked as unique by the unique_mask and then view as the original dtype. + unique_uvs = sorted_t_luv_fast[unique_mask].view(luv_bl_dtype) + + # NaN values are considered invalid and indicate a bug somewhere else in Blender or in an addon, we want + # these bugs to be reported instead of hiding them by allowing the export to continue. + if np.isnan(unique_uvs).any(): + raise RuntimeError("UV layer %s on %r has invalid UVs containing NaN values" % (uvlayer.name, me)) + + # Convert to the type needed for fbx + unique_uvs = unique_uvs.astype(luv_fbx_dtype, copy=False) + + # Set the indices of pairs in unique_uvs that reconstruct the pairs in t_luv into uv_indices. + # uv_indices will then be the same as an inverse array returned by np.unique with return_inverse=True. + uv_indices[perm_combined] = np.cumsum(unique_mask, dtype=uv_indices.dtype) - 1 + + elem_data_single_float64_array(lay_uv, b"UV", unique_uvs) + elem_data_single_int32_array(lay_uv, b"UVIndex", uv_indices) + del unique_uvs + del sorted_t_luv_fast + del sorted_vidx + del perm_uv_pairs + del perm_combined + del uv_indices + del unique_mask + del perm_vidx + del t_lvidx + del t_luv + del t_luv_fast_pair_view + del t_lvi + + # Face's materials. + me_fbxmaterials_idx = scene_data.mesh_material_indices.get(me) + if me_fbxmaterials_idx is not None: + # We cannot use me.materials here, as this array is filled with None in case materials are linked to object... + me_blmaterials = me_obj.materials + if me_fbxmaterials_idx and me_blmaterials: + lay_ma = elem_data_single_int32(geom, b"LayerElementMaterial", 0) + elem_data_single_int32(lay_ma, b"Version", FBX_GEOMETRY_MATERIAL_VERSION) + elem_data_single_string(lay_ma, b"Name", b"") + nbr_mats = len(me_fbxmaterials_idx) + multiple_fbx_mats = nbr_mats > 1 + # If a mesh does not have more than one material its material_index attribute can be ignored. + # If a mesh has multiple materials but all its polygons are assigned to the first material, its + # material_index attribute may not exist. + t_pm = None if not multiple_fbx_mats else MESH_ATTRIBUTE_MATERIAL_INDEX.get_ndarray(attributes) + if t_pm is not None: + fbx_pm_dtype = np.int32 + + # We have to validate mat indices, and map them to FBX indices. + # Note a mat might not be in me_fbxmaterials_idx (e.g. node mats are ignored). + + # The first valid material will be used for materials out of bounds of me_blmaterials or materials not + # in me_fbxmaterials_idx. + def_me_blmaterial_idx, def_ma = next( + (i, me_fbxmaterials_idx[m]) for i, m in enumerate(me_blmaterials) if m in me_fbxmaterials_idx) + + # Set material indices that are out of bounds to the default material index + mat_idx_limit = len(me_blmaterials) + # Material indices shouldn't be negative, but they technically could be. Viewing as unsigned before + # checking for indices that are too large means that a single >= check will pick up both negative + # indices and indices that are too large. + t_pm[t_pm.view("u%i" % t_pm.itemsize) >= mat_idx_limit] = def_me_blmaterial_idx + + # Map to FBX indices. Materials not in me_fbxmaterials_idx will be set to the default material index. 
+ blmat_fbx_idx = np.fromiter((me_fbxmaterials_idx.get(m, def_ma) for m in me_blmaterials), + dtype=fbx_pm_dtype) + t_pm = blmat_fbx_idx[t_pm] + + elem_data_single_string(lay_ma, b"MappingInformationType", b"ByPolygon") + # XXX Logically, should be "Direct" reference type, since we do not have any index array, and have one + # value per polygon... + # But looks like FBX expects it to be IndexToDirect here (maybe because materials are already + # indices??? *sigh*). + elem_data_single_string(lay_ma, b"ReferenceInformationType", b"IndexToDirect") + elem_data_single_int32_array(lay_ma, b"Materials", t_pm) + else: + elem_data_single_string(lay_ma, b"MappingInformationType", b"AllSame") + elem_data_single_string(lay_ma, b"ReferenceInformationType", b"IndexToDirect") + if multiple_fbx_mats: + # There's no material_index attribute, so every material index is effectively zero. + # In the order of the mesh's materials, get the FBX index of the first material that is exported. + all_same_idx = next(me_fbxmaterials_idx[m] for m in me_blmaterials if m in me_fbxmaterials_idx) + else: + # There's only one fbx material, so the index will always be zero. + all_same_idx = 0 + elem_data_single_int32_array(lay_ma, b"Materials", [all_same_idx]) + del t_pm + + # And the "layer TOC"... + + layer = elem_data_single_int32(geom, b"Layer", 0) + elem_data_single_int32(layer, b"Version", FBX_GEOMETRY_LAYER_VERSION) + if write_normals: + lay_nor = elem_empty(layer, b"LayerElement") + elem_data_single_string(lay_nor, b"Type", b"LayerElementNormal") + elem_data_single_int32(lay_nor, b"TypedIndex", 0) + if tspacenumber: + lay_binor = elem_empty(layer, b"LayerElement") + elem_data_single_string(lay_binor, b"Type", b"LayerElementBinormal") + elem_data_single_int32(lay_binor, b"TypedIndex", 0) + lay_tan = elem_empty(layer, b"LayerElement") + elem_data_single_string(lay_tan, b"Type", b"LayerElementTangent") + elem_data_single_int32(lay_tan, b"TypedIndex", 0) + if smooth_type in {'FACE', 'EDGE'}: + lay_smooth = elem_empty(layer, b"LayerElement") + elem_data_single_string(lay_smooth, b"Type", b"LayerElementSmoothing") + elem_data_single_int32(lay_smooth, b"TypedIndex", 0) + if write_crease: + lay_smooth = elem_empty(layer, b"LayerElement") + elem_data_single_string(lay_smooth, b"Type", b"LayerElementEdgeCrease") + elem_data_single_int32(lay_smooth, b"TypedIndex", 0) + if vcolnumber: + lay_vcol = elem_empty(layer, b"LayerElement") + elem_data_single_string(lay_vcol, b"Type", b"LayerElementColor") + elem_data_single_int32(lay_vcol, b"TypedIndex", 0) + if uvnumber: + lay_uv = elem_empty(layer, b"LayerElement") + elem_data_single_string(lay_uv, b"Type", b"LayerElementUV") + elem_data_single_int32(lay_uv, b"TypedIndex", 0) + if me_fbxmaterials_idx is not None: + lay_ma = elem_empty(layer, b"LayerElement") + elem_data_single_string(lay_ma, b"Type", b"LayerElementMaterial") + elem_data_single_int32(lay_ma, b"TypedIndex", 0) + + # Add other uv and/or vcol layers... 
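+ # Every vertex color / UV / tangent-space layer beyond index 0 needs its own "Layer" element;
+ # zip_longest pads the exhausted ranges with 0 so a single loop can emit all remaining layers,
+ # and a zero index simply means there is no further layer of that element type to reference.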
+ for vcolidx, uvidx, tspaceidx in zip_longest(range(1, vcolnumber), range(1, uvnumber), range(1, tspacenumber), + fillvalue=0): + layer = elem_data_single_int32(geom, b"Layer", max(vcolidx, uvidx)) + elem_data_single_int32(layer, b"Version", FBX_GEOMETRY_LAYER_VERSION) + if vcolidx: + lay_vcol = elem_empty(layer, b"LayerElement") + elem_data_single_string(lay_vcol, b"Type", b"LayerElementColor") + elem_data_single_int32(lay_vcol, b"TypedIndex", vcolidx) + if uvidx: + lay_uv = elem_empty(layer, b"LayerElement") + elem_data_single_string(lay_uv, b"Type", b"LayerElementUV") + elem_data_single_int32(lay_uv, b"TypedIndex", uvidx) + if tspaceidx: + lay_binor = elem_empty(layer, b"LayerElement") + elem_data_single_string(lay_binor, b"Type", b"LayerElementBinormal") + elem_data_single_int32(lay_binor, b"TypedIndex", tspaceidx) + lay_tan = elem_empty(layer, b"LayerElement") + elem_data_single_string(lay_tan, b"Type", b"LayerElementTangent") + elem_data_single_int32(lay_tan, b"TypedIndex", tspaceidx) + + # Shape keys... + fbx_data_mesh_shapes_elements(root, me_obj, me, scene_data, tmpl, props) + + elem_props_template_finalize(tmpl, props) + done_meshes.add(me_key) + + +def fbx_data_material_elements(root, ma, scene_data): + """ + Write the Material data block. + """ + + ambient_color = (0.0, 0.0, 0.0) + if scene_data.data_world: + ambient_color = next(iter(scene_data.data_world.keys())).color + + ma_wrap = node_shader_utils.PrincipledBSDFWrapper(ma, is_readonly=True) + ma_key, _objs = scene_data.data_materials[ma] + ma_type = b"Phong" + + fbx_ma = elem_data_single_int64(root, b"Material", get_fbx_uuid_from_key(ma_key)) + fbx_ma.add_string(fbx_name_class(ma.name.encode(), b"Material")) + fbx_ma.add_string(b"") + + elem_data_single_int32(fbx_ma, b"Version", FBX_MATERIAL_VERSION) + # those are not yet properties, it seems... + elem_data_single_string(fbx_ma, b"ShadingModel", ma_type) + elem_data_single_int32(fbx_ma, b"MultiLayer", 0) # Should be bool... + + tmpl = elem_props_template_init(scene_data.templates, b"Material") + props = elem_properties(fbx_ma) + + elem_props_template_set(tmpl, props, "p_string", b"ShadingModel", ma_type.decode()) + elem_props_template_set(tmpl, props, "p_color", b"DiffuseColor", ma_wrap.base_color) + # Not in Principled BSDF, so assuming always 1 + elem_props_template_set(tmpl, props, "p_number", b"DiffuseFactor", 1.0) + # Principled BSDF only has an emissive color, so we assume factor to be always 1.0. + elem_props_template_set(tmpl, props, "p_color", b"EmissiveColor", ma_wrap.emission_color) + elem_props_template_set(tmpl, props, "p_number", b"EmissiveFactor", ma_wrap.emission_strength) + # Not in Principled BSDF, so assuming always 0 + elem_props_template_set(tmpl, props, "p_color", b"AmbientColor", ambient_color) + elem_props_template_set(tmpl, props, "p_number", b"AmbientFactor", 0.0) + # Sweetness... Looks like we are not the only ones to not know exactly how FBX is supposed to work (see T59850). + # According to one of its developers, Unity uses that formula to extract alpha value: + # + # alpha = 1 - TransparencyFactor + # if (alpha == 1 or alpha == 0): + # alpha = 1 - TransparentColor.r + # + # Until further info, let's assume this is correct way to do, hence the following code for TransparentColor. 
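+ # In practice: TransparencyFactor is written as (1 - alpha) and Opacity as alpha, and only for
+ # (nearly) fully opaque or fully transparent materials is TransparentColor set to (1 - alpha,) * 3
+ # so the formula above still recovers the intended alpha; e.g. alpha = 0.25 exports
+ # TransparencyFactor = 0.75 and Opacity = 0.25.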
+ if ma_wrap.alpha < 1.0e-5 or ma_wrap.alpha > (1.0 - 1.0e-5): + elem_props_template_set(tmpl, props, "p_color", b"TransparentColor", (1.0 - ma_wrap.alpha,) * 3) + else: + elem_props_template_set(tmpl, props, "p_color", b"TransparentColor", ma_wrap.base_color) + elem_props_template_set(tmpl, props, "p_number", b"TransparencyFactor", 1.0 - ma_wrap.alpha) + elem_props_template_set(tmpl, props, "p_number", b"Opacity", ma_wrap.alpha) + elem_props_template_set(tmpl, props, "p_vector_3d", b"NormalMap", (0.0, 0.0, 0.0)) + elem_props_template_set(tmpl, props, "p_double", b"BumpFactor", ma_wrap.normalmap_strength) + # Not sure about those... + """ + b"Bump": ((0.0, 0.0, 0.0), "p_vector_3d"), + b"DisplacementColor": ((0.0, 0.0, 0.0), "p_color_rgb"), + b"DisplacementFactor": (0.0, "p_double"), + """ + # TODO: use specular tint? + elem_props_template_set(tmpl, props, "p_color", b"SpecularColor", ma_wrap.base_color) + elem_props_template_set(tmpl, props, "p_number", b"SpecularFactor", ma_wrap.specular / 2.0) + # See Material template about those two! + # XXX Totally empirical conversion, trying to adapt it + # (from 0.0 - 100.0 FBX shininess range to 1.0 - 0.0 Principled BSDF range)... + shininess = (1.0 - ma_wrap.roughness) * 10 + shininess *= shininess + elem_props_template_set(tmpl, props, "p_number", b"Shininess", shininess) + elem_props_template_set(tmpl, props, "p_number", b"ShininessExponent", shininess) + elem_props_template_set(tmpl, props, "p_color", b"ReflectionColor", ma_wrap.base_color) + elem_props_template_set(tmpl, props, "p_number", b"ReflectionFactor", ma_wrap.metallic) + + elem_props_template_finalize(tmpl, props) + + # Custom properties. + if scene_data.settings.use_custom_props: + fbx_data_element_custom_properties(props, ma) + + +def _gen_vid_path(img, scene_data): + msetts = scene_data.settings.media_settings + fname_rel = bpy_extras.io_utils.path_reference(img.filepath, msetts.base_src, msetts.base_dst, msetts.path_mode, + msetts.subdir, msetts.copy_set, img.library) + fname_abs = os.path.normpath(os.path.abspath(os.path.join(msetts.base_dst, fname_rel))) + return fname_abs, fname_rel + + +def fbx_data_texture_file_elements(root, blender_tex_key, scene_data): + """ + Write the (file) Texture data block. + """ + # XXX All this is very fuzzy to me currently... + # Textures do not seem to use properties as much as they could. + # For now assuming most logical and simple stuff. + + ma, sock_name = blender_tex_key + ma_wrap = node_shader_utils.PrincipledBSDFWrapper(ma, is_readonly=True) + tex_key, _fbx_prop = scene_data.data_textures[blender_tex_key] + tex = getattr(ma_wrap, sock_name) + img = tex.image + fname_abs, fname_rel = _gen_vid_path(img, scene_data) + + fbx_tex = elem_data_single_int64(root, b"Texture", get_fbx_uuid_from_key(tex_key)) + fbx_tex.add_string(fbx_name_class(sock_name.encode(), b"Texture")) + fbx_tex.add_string(b"") + + elem_data_single_string(fbx_tex, b"Type", b"TextureVideoClip") + elem_data_single_int32(fbx_tex, b"Version", FBX_TEXTURE_VERSION) + elem_data_single_string(fbx_tex, b"TextureName", fbx_name_class(sock_name.encode(), b"Texture")) + elem_data_single_string(fbx_tex, b"Media", fbx_name_class(img.name.encode(), b"Video")) + elem_data_single_string_unicode(fbx_tex, b"FileName", fname_abs) + elem_data_single_string_unicode(fbx_tex, b"RelativeFilename", fname_rel) + + alpha_source = 0 # None + if img.alpha_mode != 'NONE': + # ~ if tex.texture.use_calculate_alpha: + # ~ alpha_source = 1 # RGBIntensity as alpha. 
+ # ~ else: + # ~ alpha_source = 2 # Black, i.e. alpha channel. + alpha_source = 2 # Black, i.e. alpha channel. + # BlendMode not useful for now, only affects layered textures afaics. + mapping = 0 # UV. + uvset = None + if tex.texcoords == 'ORCO': # XXX Others? + if tex.projection == 'FLAT': + mapping = 1 # Planar + elif tex.projection == 'CUBE': + mapping = 4 # Box + elif tex.projection == 'TUBE': + mapping = 3 # Cylindrical + elif tex.projection == 'SPHERE': + mapping = 2 # Spherical + elif tex.texcoords == 'UV': + mapping = 0 # UV + # Yuck, UVs are linked by mere names it seems... :/ + # XXX TODO how to get that now??? + # uvset = tex.uv_layer + wrap_mode = 1 # Clamp + if tex.extension == 'REPEAT': + wrap_mode = 0 # Repeat + + tmpl = elem_props_template_init(scene_data.templates, b"TextureFile") + props = elem_properties(fbx_tex) + elem_props_template_set(tmpl, props, "p_enum", b"AlphaSource", alpha_source) + elem_props_template_set(tmpl, props, "p_bool", b"PremultiplyAlpha", + img.alpha_mode in {'STRAIGHT'}) # Or is it PREMUL? + elem_props_template_set(tmpl, props, "p_enum", b"CurrentMappingType", mapping) + if uvset is not None: + elem_props_template_set(tmpl, props, "p_string", b"UVSet", uvset) + elem_props_template_set(tmpl, props, "p_enum", b"WrapModeU", wrap_mode) + elem_props_template_set(tmpl, props, "p_enum", b"WrapModeV", wrap_mode) + elem_props_template_set(tmpl, props, "p_vector_3d", b"Translation", tex.translation) + elem_props_template_set(tmpl, props, "p_vector_3d", b"Rotation", (-r for r in tex.rotation)) + elem_props_template_set(tmpl, props, "p_vector_3d", b"Scaling", + (((1.0 / s) if s != 0.0 else 1.0) for s in tex.scale)) + # UseMaterial should always be ON imho. + elem_props_template_set(tmpl, props, "p_bool", b"UseMaterial", True) + elem_props_template_set(tmpl, props, "p_bool", b"UseMipMap", False) + elem_props_template_finalize(tmpl, props) + + # No custom properties, since that's not a data-block anymore. + + +def fbx_data_video_elements(root, vid, scene_data): + """ + Write the actual image data block. + """ + msetts = scene_data.settings.media_settings + + vid_key, _texs = scene_data.data_videos[vid] + fname_abs, fname_rel = _gen_vid_path(vid, scene_data) + + fbx_vid = elem_data_single_int64(root, b"Video", get_fbx_uuid_from_key(vid_key)) + fbx_vid.add_string(fbx_name_class(vid.name.encode(), b"Video")) + fbx_vid.add_string(b"Clip") + + elem_data_single_string(fbx_vid, b"Type", b"Clip") + # XXX No Version??? + + tmpl = elem_props_template_init(scene_data.templates, b"Video") + props = elem_properties(fbx_vid) + elem_props_template_set(tmpl, props, "p_string_url", b"Path", fname_abs) + elem_props_template_finalize(tmpl, props) + + elem_data_single_int32(fbx_vid, b"UseMipMap", 0) + elem_data_single_string_unicode(fbx_vid, b"Filename", fname_abs) + elem_data_single_string_unicode(fbx_vid, b"RelativeFilename", fname_rel) + + if scene_data.settings.media_settings.embed_textures: + if vid.packed_file is not None: + # We only ever embed a given file once! + if fname_abs not in msetts.embedded_set: + elem_data_single_bytes(fbx_vid, b"Content", vid.packed_file.data) + msetts.embedded_set.add(fname_abs) + else: + filepath = bpy.path.abspath(vid.filepath) + # We only ever embed a given file once! 
+ if filepath not in msetts.embedded_set: + try: + with open(filepath, 'br') as f: + elem_data_single_bytes(fbx_vid, b"Content", f.read()) + except Exception as e: + print("WARNING: embedding file {} failed ({})".format(filepath, e)) + elem_data_single_bytes(fbx_vid, b"Content", b"") + msetts.embedded_set.add(filepath) + # Looks like we'd rather not write any 'Content' element in this case (see T44442). + # Sounds suspect, but let's try it! + # ~ else: + #~ elem_data_single_bytes(fbx_vid, b"Content", b"") + + # Blender currently has no UI for editing custom properties on Images, but the importer will import Image custom + # properties from either a Video Node or a Texture Node, preferring a Video node if one exists. We'll propagate + # these custom properties only to Video Nodes because that is most likely where they were imported from, and Texture + # Nodes are more like Blender's Shader Nodes than Images, which is what we're exporting here. + if scene_data.settings.use_custom_props: + fbx_data_element_custom_properties(props, vid) + + +def fbx_data_armature_elements(root, arm_obj, scene_data): + """ + Write: + * Bones "data" (NodeAttribute::LimbNode, contains pretty much nothing!). + * Deformers (i.e. Skin), bind between an armature and a mesh. + ** SubDeformers (i.e. Cluster), one per bone/vgroup pair. + * BindPose. + Note armature itself has no data, it is a mere "Null" Model... + """ + mat_world_arm = arm_obj.fbx_object_matrix(scene_data, global_space=True) + bones = tuple(bo_obj for bo_obj in arm_obj.bones if bo_obj in scene_data.objects) + + bone_radius_scale = 33.0 + + # Bones "data". + for bo_obj in bones: + bo = bo_obj.bdata + bo_data_key = scene_data.data_bones[bo_obj] + fbx_bo = elem_data_single_int64(root, b"NodeAttribute", get_fbx_uuid_from_key(bo_data_key)) + fbx_bo.add_string(fbx_name_class(bo.name.encode(), b"NodeAttribute")) + fbx_bo.add_string(b"LimbNode") + elem_data_single_string(fbx_bo, b"TypeFlags", b"Skeleton") + + tmpl = elem_props_template_init(scene_data.templates, b"Bone") + props = elem_properties(fbx_bo) + elem_props_template_set(tmpl, props, "p_double", b"Size", bo.head_radius * bone_radius_scale) + elem_props_template_finalize(tmpl, props) + + # Custom properties. + if scene_data.settings.use_custom_props: + fbx_data_element_custom_properties(props, bo) + + # Store Blender bone length - XXX Not much useful actually :/ + # (LimbLength can't be used because it is a scale factor 0-1 for the parent-child distance: + # http://docs.autodesk.com/FBX/2014/ENU/FBX-SDK-Documentation/cpp_ref/class_fbx_skeleton.html#a9bbe2a70f4ed82cd162620259e649f0f ) + # elem_props_set(props, "p_double", "BlenderBoneLength".encode(), (bo.tail_local - bo.head_local).length, custom=True) + + # Skin deformers and BindPoses. + # Note: we might also use Deformers for our "parent to vertex" stuff??? + deformer = scene_data.data_deformers_skin.get(arm_obj, None) + if deformer is not None: + for me, (skin_key, ob_obj, clusters) in deformer.items(): + # BindPose. + mat_world_obj, mat_world_bones = fbx_data_bindpose_element(root, ob_obj, me, scene_data, + arm_obj, mat_world_arm, bones) + + # Deformer. + fbx_skin = elem_data_single_int64(root, b"Deformer", get_fbx_uuid_from_key(skin_key)) + fbx_skin.add_string(fbx_name_class(arm_obj.name.encode(), b"Deformer")) + fbx_skin.add_string(b"Skin") + + elem_data_single_int32(fbx_skin, b"Version", FBX_DEFORMER_SKIN_VERSION) + elem_data_single_float64(fbx_skin, b"Link_DeformAcuracy", 50.0) # Only vague idea what it is... 
+ + # Pre-process vertex weights so that the vertices only need to be iterated once. + ob = ob_obj.bdata + bo_vg_idx = {bo_obj.bdata.name: ob.vertex_groups[bo_obj.bdata.name].index + for bo_obj in clusters.keys() if bo_obj.bdata.name in ob.vertex_groups} + valid_idxs = set(bo_vg_idx.values()) + vgroups = {vg.index: {} for vg in ob.vertex_groups} + for idx, v in enumerate(me.vertices): + for vg in v.groups: + if (w := vg.weight) and (vg_idx := vg.group) in valid_idxs: + vgroups[vg_idx][idx] = w + + for bo_obj, clstr_key in clusters.items(): + bo = bo_obj.bdata + # Find which vertices are affected by this bone/vgroup pair, and matching weights. + # Note we still write a cluster for bones not affecting the mesh, to get 'rest pose' data + # (the TransformBlah matrices). + vg_idx = bo_vg_idx.get(bo.name, None) + indices, weights = ((), ()) if vg_idx is None or not vgroups[vg_idx] else zip(*vgroups[vg_idx].items()) + + # Create the cluster. + fbx_clstr = elem_data_single_int64(root, b"Deformer", get_fbx_uuid_from_key(clstr_key)) + fbx_clstr.add_string(fbx_name_class(bo.name.encode(), b"SubDeformer")) + fbx_clstr.add_string(b"Cluster") + + elem_data_single_int32(fbx_clstr, b"Version", FBX_DEFORMER_CLUSTER_VERSION) + # No idea what that user data might be... + fbx_userdata = elem_data_single_string(fbx_clstr, b"UserData", b"") + fbx_userdata.add_string(b"") + if indices: + elem_data_single_int32_array(fbx_clstr, b"Indexes", indices) + elem_data_single_float64_array(fbx_clstr, b"Weights", weights) + # Transform, TransformLink and TransformAssociateModel matrices... + # They seem to be doublons of BindPose ones??? Have armature (associatemodel) in addition, though. + # WARNING! Even though official FBX API presents Transform in global space, + # **it is stored in bone space in FBX data!** See: + # http://area.autodesk.com/forum/autodesk-fbx/fbx-sdk/why-the-values-return- + # by-fbxcluster-gettransformmatrix-x-not-same-with-the-value-in-ascii-fbx-file/ + elem_data_single_float64_array( + fbx_clstr, b"Transform", matrix4_to_array( + mat_world_bones[bo_obj].inverted_safe() @ mat_world_obj)) + elem_data_single_float64_array(fbx_clstr, b"TransformLink", matrix4_to_array(mat_world_bones[bo_obj])) + elem_data_single_float64_array(fbx_clstr, b"TransformAssociateModel", matrix4_to_array(mat_world_arm)) + + +def fbx_data_leaf_bone_elements(root, scene_data): + # Write a dummy leaf bone that is used by applications to show the length of the last bone in a chain + for (node_name, _par_uuid, node_uuid, attr_uuid, matrix, hide, size) in scene_data.data_leaf_bones: + # Bone 'data'... + fbx_bo = elem_data_single_int64(root, b"NodeAttribute", attr_uuid) + fbx_bo.add_string(fbx_name_class(node_name.encode(), b"NodeAttribute")) + fbx_bo.add_string(b"LimbNode") + elem_data_single_string(fbx_bo, b"TypeFlags", b"Skeleton") + + tmpl = elem_props_template_init(scene_data.templates, b"Bone") + props = elem_properties(fbx_bo) + elem_props_template_set(tmpl, props, "p_double", b"Size", size) + elem_props_template_finalize(tmpl, props) + + # And bone object. + model = elem_data_single_int64(root, b"Model", node_uuid) + model.add_string(fbx_name_class(node_name.encode(), b"Model")) + model.add_string(b"LimbNode") + + elem_data_single_int32(model, b"Version", FBX_MODELS_VERSION) + + # Object transform info. + loc, rot, scale = matrix.decompose() + rot = rot.to_euler('XYZ') + rot = tuple(convert_rad_to_deg_iter(rot)) + + tmpl = elem_props_template_init(scene_data.templates, b"Model") + # For now add only loc/rot/scale... 
+ props = elem_properties(model) + # Generated leaf bones are obviously never animated! + elem_props_template_set(tmpl, props, "p_lcl_translation", b"Lcl Translation", loc) + elem_props_template_set(tmpl, props, "p_lcl_rotation", b"Lcl Rotation", rot) + elem_props_template_set(tmpl, props, "p_lcl_scaling", b"Lcl Scaling", scale) + elem_props_template_set(tmpl, props, "p_visibility", b"Visibility", float(not hide)) + + # Absolutely no idea what this is, but seems mandatory for validity of the file, and defaults to + # invalid -1 value... + elem_props_template_set(tmpl, props, "p_integer", b"DefaultAttributeIndex", 0) + + elem_props_template_set(tmpl, props, "p_enum", b"InheritType", 1) # RSrs + + # Those settings would obviously need to be edited in a complete version of the exporter, may depends on + # object type, etc. + elem_data_single_int32(model, b"MultiLayer", 0) + elem_data_single_int32(model, b"MultiTake", 0) + # Probably the FbxNode.EShadingMode enum. Full description in fbx_data_object_elements. + elem_data_single_char(model, b"Shading", b"\x01") + elem_data_single_string(model, b"Culling", b"CullingOff") + + elem_props_template_finalize(tmpl, props) + + +def fbx_data_object_elements(root, ob_obj, scene_data): + """ + Write the Object (Model) data blocks. + Note this "Model" can also be bone or dupli! + """ + obj_type = b"Null" # default, sort of empty... + if ob_obj.is_bone: + obj_type = b"LimbNode" + elif (ob_obj.type == 'ARMATURE'): + if scene_data.settings.armature_nodetype == 'ROOT': + obj_type = b"Root" + elif scene_data.settings.armature_nodetype == 'LIMBNODE': + obj_type = b"LimbNode" + else: # Default, preferred option... + obj_type = b"Null" + elif (ob_obj.type in BLENDER_OBJECT_TYPES_MESHLIKE): + obj_type = b"Mesh" + elif (ob_obj.type == 'LIGHT'): + obj_type = b"Light" + elif (ob_obj.type == 'CAMERA'): + obj_type = b"Camera" + model = elem_data_single_int64(root, b"Model", ob_obj.fbx_uuid) + model.add_string(fbx_name_class(ob_obj.name.encode(), b"Model")) + model.add_string(obj_type) + + elem_data_single_int32(model, b"Version", FBX_MODELS_VERSION) + + # Object transform info. + loc, rot, scale, matrix, matrix_rot = ob_obj.fbx_object_tx(scene_data) + rot = tuple(convert_rad_to_deg_iter(rot)) + + tmpl = elem_props_template_init(scene_data.templates, b"Model") + # For now add only loc/rot/scale... + props = elem_properties(model) + elem_props_template_set(tmpl, props, "p_lcl_translation", b"Lcl Translation", loc, + animatable=True, animated=((ob_obj.key, "Lcl Translation") in scene_data.animated)) + elem_props_template_set(tmpl, props, "p_lcl_rotation", b"Lcl Rotation", rot, + animatable=True, animated=((ob_obj.key, "Lcl Rotation") in scene_data.animated)) + elem_props_template_set(tmpl, props, "p_lcl_scaling", b"Lcl Scaling", scale, + animatable=True, animated=((ob_obj.key, "Lcl Scaling") in scene_data.animated)) + elem_props_template_set(tmpl, props, "p_visibility", b"Visibility", float(not ob_obj.hide)) + + # Absolutely no idea what this is, but seems mandatory for validity of the file, and defaults to + # invalid -1 value... + elem_props_template_set(tmpl, props, "p_integer", b"DefaultAttributeIndex", 0) + + elem_props_template_set(tmpl, props, "p_enum", b"InheritType", 1) # RSrs + + # Custom properties. + if scene_data.settings.use_custom_props: + # Here we want customprops from the 'pose' bone, not the 'edit' bone... 
+ bdata = ob_obj.bdata_pose_bone if ob_obj.is_bone else ob_obj.bdata + fbx_data_element_custom_properties(props, bdata) + + # Those settings would obviously need to be edited in a complete version of the exporter, may depends on + # object type, etc. + elem_data_single_int32(model, b"MultiLayer", 0) + elem_data_single_int32(model, b"MultiTake", 0) + # This is probably the FbxNode.EShadingMode enum. Not directly used by the FBX SDK, but the SDK guarantees that the + # value will be passed through from an imported file to an exported one. Common values are 'Y' and 'T'. 'U' and 'W' + # have also been seen in older FBX files. It's not clear which enum member each of these values corresponds to or if + # these values are actually application specific. Blender had been exporting this as a `True` bool for a long time + # seemingly without issue. The '\x01' char is the same value as `True` in raw bytes. + elem_data_single_char(model, b"Shading", b"\x01") + elem_data_single_string(model, b"Culling", b"CullingOff") + + if obj_type == b"Camera": + # Why, oh why are FBX cameras such a mess??? + # And WHY add camera data HERE??? Not even sure this is needed... + render = scene_data.scene.render + width = render.resolution_x * 1.0 + height = render.resolution_y * 1.0 + elem_props_template_set(tmpl, props, "p_enum", b"ResolutionMode", 0) # Don't know what it means + elem_props_template_set(tmpl, props, "p_double", b"AspectW", width) + elem_props_template_set(tmpl, props, "p_double", b"AspectH", height) + elem_props_template_set(tmpl, props, "p_bool", b"ViewFrustum", True) + elem_props_template_set(tmpl, props, "p_enum", b"BackgroundMode", 0) # Don't know what it means + elem_props_template_set(tmpl, props, "p_bool", b"ForegroundTransparent", True) + + elem_props_template_finalize(tmpl, props) + + +def fbx_data_animation_elements(root, scene_data): + """ + Write animation data. + """ + animations = scene_data.animations + if not animations: + return + + # Animation stacks. + for astack_key, alayers, alayer_key, name, f_start, f_end in animations: + astack = elem_data_single_int64(root, b"AnimationStack", get_fbx_uuid_from_key(astack_key)) + astack.add_string(fbx_name_class(name, b"AnimStack")) + astack.add_string(b"") + + astack_tmpl = elem_props_template_init(scene_data.templates, b"AnimationStack") + astack_props = elem_properties(astack) + r = scene_data.scene.render + fps = r.fps / r.fps_base + start = int(convert_sec_to_ktime(f_start / fps)) + end = int(convert_sec_to_ktime(f_end / fps)) + elem_props_template_set(astack_tmpl, astack_props, "p_timestamp", b"LocalStart", start) + elem_props_template_set(astack_tmpl, astack_props, "p_timestamp", b"LocalStop", end) + elem_props_template_set(astack_tmpl, astack_props, "p_timestamp", b"ReferenceStart", start) + elem_props_template_set(astack_tmpl, astack_props, "p_timestamp", b"ReferenceStop", end) + elem_props_template_finalize(astack_tmpl, astack_props) + + # For now, only one layer for all animations. + alayer = elem_data_single_int64(root, b"AnimationLayer", get_fbx_uuid_from_key(alayer_key)) + alayer.add_string(fbx_name_class(name, b"AnimLayer")) + alayer.add_string(b"") + + for ob_obj, (alayer_key, acurvenodes) in alayers.items(): + # Animation layer. 
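+ # (Per-object layers are left commented out just below: everything gets connected to the single stack-level layer created above, matching the "one layer per anim stack" note in the template generation.)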
+ # alayer = elem_data_single_int64(root, b"AnimationLayer", get_fbx_uuid_from_key(alayer_key)) + # alayer.add_string(fbx_name_class(ob_obj.name.encode(), b"AnimLayer")) + # alayer.add_string(b"") + + for fbx_prop, (acurvenode_key, acurves, acurvenode_name) in acurvenodes.items(): + # Animation curve node. + acurvenode = elem_data_single_int64(root, b"AnimationCurveNode", get_fbx_uuid_from_key(acurvenode_key)) + acurvenode.add_string(fbx_name_class(acurvenode_name.encode(), b"AnimCurveNode")) + acurvenode.add_string(b"") + + acn_tmpl = elem_props_template_init(scene_data.templates, b"AnimationCurveNode") + acn_props = elem_properties(acurvenode) + + for fbx_item, (acurve_key, def_value, (keys, values), _acurve_valid) in acurves.items(): + elem_props_template_set(acn_tmpl, acn_props, "p_number", fbx_item.encode(), + def_value, animatable=True) + + # Only create Animation curve if needed! + nbr_keys = len(keys) + if nbr_keys: + acurve = elem_data_single_int64(root, b"AnimationCurve", get_fbx_uuid_from_key(acurve_key)) + acurve.add_string(fbx_name_class(b"", b"AnimCurve")) + acurve.add_string(b"") + + # key attributes... + # flags... + keyattr_flags = ( + 1 << 2 | # interpolation mode, 1 = constant, 2 = linear, 3 = cubic. + 1 << 8 | # tangent mode, 8 = auto, 9 = TCB, 10 = user, 11 = generic break, + 1 << 13 | # tangent mode, 12 = generic clamp, 13 = generic time independent, + 1 << 14 | # tangent mode, 13 + 14 = generic clamp progressive. + 0, + ) + # Maybe values controlling TCB & co??? + keyattr_datafloat = (0.0, 0.0, 9.419963346924634e-30, 0.0) + + # And now, the *real* data! + elem_data_single_float64(acurve, b"Default", def_value) + elem_data_single_int32(acurve, b"KeyVer", FBX_ANIM_KEY_VERSION) + elem_data_single_int64_array(acurve, b"KeyTime", astype_view_signedness(keys, np.int64)) + elem_data_single_float32_array(acurve, b"KeyValueFloat", values.astype(np.float32, copy=False)) + elem_data_single_int32_array(acurve, b"KeyAttrFlags", keyattr_flags) + elem_data_single_float32_array(acurve, b"KeyAttrDataFloat", keyattr_datafloat) + elem_data_single_int32_array(acurve, b"KeyAttrRefCount", (nbr_keys,)) + + elem_props_template_finalize(acn_tmpl, acn_props) + + +# ##### Top-level FBX data container. ##### + +# Mapping Blender -> FBX (principled_socket_name, fbx_name). +PRINCIPLED_TEXTURE_SOCKETS_TO_FBX = ( + # ("diffuse", "diffuse", b"DiffuseFactor"), + ("base_color_texture", b"DiffuseColor"), + ("alpha_texture", b"TransparencyFactor"), # Will be inverted in fact, not much we can do really... + # ("base_color_texture", b"TransparentColor"), # Uses diffuse color in Blender! + ("emission_strength_texture", b"EmissiveFactor"), + ("emission_color_texture", b"EmissiveColor"), + # ("ambient", "ambient", b"AmbientFactor"), + # ("", "", b"AmbientColor"), # World stuff in Blender, for now ignore... + ("normalmap_texture", b"NormalMap"), + # Note: unsure about those... :/ + # ("", "", b"Bump"), + # ("", "", b"BumpFactor"), + # ("", "", b"DisplacementColor"), + # ("", "", b"DisplacementFactor"), + ("specular_texture", b"SpecularFactor"), + # ("base_color", b"SpecularColor"), # TODO: use tint? + # See Material template about those two! 
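+ # (Note the same roughness texture is mapped to both Shininess and ShininessExponent below, presumably because importers differ in which of the two they read back as glossiness.)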
+ ("roughness_texture", b"Shininess"), + ("roughness_texture", b"ShininessExponent"), + # ("mirror", "mirror", b"ReflectionColor"), + ("metallic_texture", b"ReflectionFactor"), +) + + +def fbx_skeleton_from_armature(scene, settings, arm_obj, objects, data_meshes, + data_bones, data_deformers_skin, data_empties, arm_parents): + """ + Create skeleton from armature/bones (NodeAttribute/LimbNode and Model/LimbNode), and for each deformed mesh, + create Pose/BindPose(with sub PoseNode) and Deformer/Skin(with Deformer/SubDeformer/Cluster). + Also supports "parent to bone" (simple parent to Model/LimbNode). + arm_parents is a set of tuples (armature, object) for all successful armature bindings. + """ + # We need some data for our armature 'object' too!!! + data_empties[arm_obj] = get_blender_empty_key(arm_obj.bdata) + + arm_data = arm_obj.bdata.data + bones = {} + for bo in arm_obj.bones: + if settings.use_armature_deform_only: + if bo.bdata.use_deform: + bones[bo] = True + bo_par = bo.parent + while bo_par.is_bone: + bones[bo_par] = True + bo_par = bo_par.parent + elif bo not in bones: # Do not override if already set in the loop above! + bones[bo] = False + else: + bones[bo] = True + + bones = {bo: None for bo, use in bones.items() if use} + + if not bones: + return + + data_bones.update((bo, get_blender_bone_key(arm_obj.bdata, bo.bdata)) for bo in bones) + + for ob_obj in objects: + if not ob_obj.is_deformed_by_armature(arm_obj): + continue + + # Always handled by an Armature modifier... + found = False + for mod in ob_obj.bdata.modifiers: + if mod.type not in {'ARMATURE'} or not mod.object: + continue + # We only support vertex groups binding method, not bone envelopes one! + if mod.object == arm_obj.bdata and mod.use_vertex_groups: + found = True + break + + if not found: + continue + + # Now we have a mesh using this armature. + # Note: bindpose have no relations at all (no connections), so no need for any preprocess for them. + # Create skin & clusters relations (note skins are connected to geometry, *not* model!). + _key, me, _free = data_meshes[ob_obj] + clusters = {bo: get_blender_bone_cluster_key(arm_obj.bdata, me, bo.bdata) for bo in bones} + data_deformers_skin.setdefault(arm_obj, {})[me] = (get_blender_armature_skin_key(arm_obj.bdata, me), + ob_obj, clusters) + + # We don't want a regular parent relationship for those in FBX... + arm_parents.add((arm_obj, ob_obj)) + # Needed to handle matrices/spaces (since we do not parent them to 'armature' in FBX :/ ). 
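+ # (parented_to_armature is checked later, e.g. in fbx_animations_do(): such objects get no baked object-level animation of their own, their motion comes from the bones through the skin deformer.)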
+ ob_obj.parented_to_armature = True + + objects.update(bones) + + +def fbx_generate_leaf_bones(settings, data_bones): + # find which bons have no children + child_count = {bo: 0 for bo in data_bones.keys()} + for bo in data_bones.keys(): + if bo.parent and bo.parent.is_bone: + child_count[bo.parent] += 1 + + bone_radius_scale = settings.global_scale * 33.0 + + # generate bone data + leaf_parents = [bo for bo, count in child_count.items() if count == 0] + leaf_bones = [] + for parent in leaf_parents: + node_name = parent.name + "_end" + parent_uuid = parent.fbx_uuid + parent_key = parent.key + node_uuid = get_fbx_uuid_from_key(parent_key + "_end_node") + attr_uuid = get_fbx_uuid_from_key(parent_key + "_end_nodeattr") + + hide = parent.hide + size = parent.bdata.head_radius * bone_radius_scale + bone_length = (parent.bdata.tail_local - parent.bdata.head_local).length + matrix = Matrix.Translation((0, bone_length, 0)) + if settings.bone_correction_matrix_inv: + matrix = settings.bone_correction_matrix_inv @ matrix + if settings.bone_correction_matrix: + matrix = matrix @ settings.bone_correction_matrix + leaf_bones.append((node_name, parent_uuid, node_uuid, attr_uuid, matrix, hide, size)) + + return leaf_bones + + +def fbx_animations_do(scene_data, ref_id, f_start, f_end, start_zero, objects=None, force_keep=False): + """ + Generate animation data (a single AnimStack) from objects, for a given frame range. + """ + bake_step = scene_data.settings.bake_anim_step + simplify_fac = scene_data.settings.bake_anim_simplify_factor + scene = scene_data.scene + depsgraph = scene_data.depsgraph + force_keying = scene_data.settings.bake_anim_use_all_bones + force_sek = scene_data.settings.bake_anim_force_startend_keying + gscale = scene_data.settings.global_scale + + if objects is not None: + # Add bones and duplis! + for ob_obj in tuple(objects): + if not ob_obj.is_object: + continue + if ob_obj.type == 'ARMATURE': + objects |= {bo_obj for bo_obj in ob_obj.bones if bo_obj in scene_data.objects} + for dp_obj in ob_obj.dupli_list_gen(depsgraph): + if dp_obj in scene_data.objects: + objects.add(dp_obj) + else: + objects = scene_data.objects + + back_currframe = scene.frame_current + animdata_ob = {} + p_rots = {} + + for ob_obj in objects: + if ob_obj.parented_to_armature: + continue + ACNW = AnimationCurveNodeWrapper + loc, rot, scale, _m, _mr = ob_obj.fbx_object_tx(scene_data) + rot_deg = tuple(convert_rad_to_deg_iter(rot)) + force_key = (simplify_fac == 0.0) or (ob_obj.is_bone and force_keying) + animdata_ob[ob_obj] = (ACNW(ob_obj.key, 'LCL_TRANSLATION', force_key, force_sek, loc), + ACNW(ob_obj.key, 'LCL_ROTATION', force_key, force_sek, rot_deg), + ACNW(ob_obj.key, 'LCL_SCALING', force_key, force_sek, scale)) + p_rots[ob_obj] = rot + + force_key = (simplify_fac == 0.0) + animdata_shapes = {} + + for me, (me_key, _shapes_key, shapes) in scene_data.data_deformers_shape.items(): + # Ignore absolute shape keys for now! + if not me.shape_keys.use_relative: + continue + for shape, (channel_key, geom_key, _shape_verts_co, _shape_verts_idx) in shapes.items(): + acnode = AnimationCurveNodeWrapper(channel_key, 'SHAPE_KEY', force_key, force_sek, (0.0,)) + # Sooooo happy to have to twist again like a mad snake... Yes, we need to write those curves twice. 
:/ + acnode.add_group(me_key, shape.name, shape.name, (shape.name,)) + animdata_shapes[channel_key] = (acnode, me, shape) + + animdata_cameras = {} + for cam_obj, cam_key in scene_data.data_cameras.items(): + cam = cam_obj.bdata.data + acnode_lens = AnimationCurveNodeWrapper(cam_key, 'CAMERA_FOCAL', force_key, force_sek, (cam.lens,)) + acnode_focus_distance = AnimationCurveNodeWrapper(cam_key, 'CAMERA_FOCUS_DISTANCE', force_key, + force_sek, (cam.dof.focus_distance,)) + animdata_cameras[cam_key] = (acnode_lens, acnode_focus_distance, cam) + + # Get all parent bdata of animated dupli instances, so that we can quickly identify which instances in + # `depsgraph.object_instances` are animated and need their ObjectWrappers' matrices updated each frame. + dupli_parent_bdata = {dup.get_parent().bdata for dup in animdata_ob if dup.is_dupli} + has_animated_duplis = bool(dupli_parent_bdata) + + # Initialize keyframe times array. Each AnimationCurveNodeWrapper will share the same instance. + # `np.arange` excludes the `stop` argument like when using `range`, so we use np.nextafter to get the next + # representable value after f_end and use that as the `stop` argument instead. + currframes = np.arange(f_start, np.nextafter(f_end, np.inf), step=bake_step) + + # Convert from Blender time to FBX time. + fps = scene.render.fps / scene.render.fps_base + real_currframes = currframes - f_start if start_zero else currframes + real_currframes = (real_currframes / fps * FBX_KTIME).astype(np.int64) + + # Generator that yields the animated values of each frame in order. + def frame_values_gen(): + # Precalculate integer frames and subframes. + int_currframes = currframes.astype(int) + subframes = currframes - int_currframes + + # Create simpler iterables that return only the values we care about. + animdata_shapes_only = [shape for _anim_shape, _me, shape in animdata_shapes.values()] + animdata_cameras_only = [camera for _anim_camera_lens, _anim_camera_focus_distance, camera + in animdata_cameras.values()] + # Previous frame's rotation for each object in animdata_ob, this will be updated each frame. + animdata_ob_p_rots = p_rots.values() + + # Iterate through each frame and yield the values for that frame. + # Iterating .data, the memoryview of an array, is faster than iterating the array directly. + for int_currframe, subframe in zip(int_currframes.data, subframes.data): + scene.frame_set(int_currframe, subframe=subframe) + + if has_animated_duplis: + # Changing the scene's frame invalidates existing dupli instances. To get the updated matrices of duplis + # for this frame, we must get the duplis from the depsgraph again. + for dup in depsgraph.object_instances: + if (parent := dup.parent) and parent.original in dupli_parent_bdata: + # ObjectWrapper caches its instances. Attempting to create a new instance updates the existing + # ObjectWrapper instance with the current frame's matrix and then returns the existing instance. + ObjectWrapper(dup) + next_p_rots = [] + for ob_obj, p_rot in zip(animdata_ob, animdata_ob_p_rots): + # We compute baked loc/rot/scale for all objects (rot being euler-compat with previous value!). 
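+ # ("Euler-compat" means the new rotation is converted to the Euler triple closest to the previous frame's value, so baked curves do not jump by +/-360 degrees or flip between equivalent representations.)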
+ loc, rot, scale, _m, _mr = ob_obj.fbx_object_tx(scene_data, rot_euler_compat=p_rot) + next_p_rots.append(rot) + yield from loc + yield from rot + yield from scale + animdata_ob_p_rots = next_p_rots + for shape in animdata_shapes_only: + yield shape.value + for camera in animdata_cameras_only: + yield camera.lens + yield camera.dof.focus_distance + + # Providing `count` to np.fromiter pre-allocates the array, avoiding extra memory allocations while iterating. + num_ob_values = len(animdata_ob) * 9 # Location, rotation and scale, each of which have x, y, and z components + num_shape_values = len(animdata_shapes) # Only 1 value per shape key + num_camera_values = len(animdata_cameras) * 2 # Focal length (`.lens`) and focus distance + num_values_per_frame = num_ob_values + num_shape_values + num_camera_values + num_frames = len(real_currframes) + all_values_flat = np.fromiter(frame_values_gen(), dtype=float, count=num_frames * num_values_per_frame) + + # Restore the scene's current frame. + scene.frame_set(back_currframe, subframe=0.0) + + # View such that each column is all values for a single frame and each row is all values for a single curve. + all_values = all_values_flat.reshape(num_frames, num_values_per_frame).T + # Split into views of the arrays for each curve type. + split_at = [num_ob_values, num_shape_values, num_camera_values] + # For unequal sized splits, np.split takes indices to split at, which can be acquired through a cumulative sum + # across the list. + # The last value isn't needed, because the last split is assumed to go to the end of the array. + split_at = np.cumsum(split_at[:-1]) + all_ob_values, all_shape_key_values, all_camera_values = np.split(all_values, split_at) + + all_anims = [] + + # Set location/rotation/scale curves. + # Split into equal sized views of the arrays for each object. + split_into = len(animdata_ob) + per_ob_values = np.split(all_ob_values, split_into) if split_into > 0 else () + for anims, ob_values in zip(animdata_ob.values(), per_ob_values): + # Split again into equal sized views of the location, rotation and scaling arrays. + loc_xyz, rot_xyz, sca_xyz = np.split(ob_values, 3) + # In-place convert from Blender rotation to FBX rotation. + np.rad2deg(rot_xyz, out=rot_xyz) + + anim_loc, anim_rot, anim_scale = anims + anim_loc.set_keyframes(real_currframes, loc_xyz) + anim_rot.set_keyframes(real_currframes, rot_xyz) + anim_scale.set_keyframes(real_currframes, sca_xyz) + all_anims.extend(anims) + + # Set shape key curves. + # There's only one array per shape key, so there's no need to split `all_shape_key_values`. + for (anim_shape, _me, _shape), shape_key_values in zip(animdata_shapes.values(), all_shape_key_values): + # In-place convert from Blender Shape Key Value to FBX Deform Percent. + shape_key_values *= 100.0 + anim_shape.set_keyframes(real_currframes, shape_key_values) + all_anims.append(anim_shape) + + # Set camera curves. + # Split into equal sized views of the arrays for each camera. + split_into = len(animdata_cameras) + per_camera_values = np.split(all_camera_values, split_into) if split_into > 0 else () + zipped = zip(animdata_cameras.values(), per_camera_values) + for (anim_camera_lens, anim_camera_focus_distance, _camera), (lens_values, focus_distance_values) in zipped: + # In-place convert from Blender focus distance to FBX. 
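+ # (The global scale has to be applied by hand here since scalar camera data never goes through the exporter's global matrix, unlike object transforms.)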
+ focus_distance_values *= (1000 * gscale) + anim_camera_lens.set_keyframes(real_currframes, lens_values) + anim_camera_focus_distance.set_keyframes(real_currframes, focus_distance_values) + all_anims.append(anim_camera_lens) + all_anims.append(anim_camera_focus_distance) + + animations = {} + + # And now, produce final data (usable by FBX export code) + for anim in all_anims: + anim.simplify(simplify_fac, bake_step, force_keep) + if not anim: + continue + for obj_key, group_key, group, fbx_group, fbx_gname in anim.get_final_data(scene, ref_id, force_keep): + anim_data = animations.setdefault(obj_key, ("dummy_unused_key", {})) + anim_data[1][fbx_group] = (group_key, group, fbx_gname) + + astack_key = get_blender_anim_stack_key(scene, ref_id) + alayer_key = get_blender_anim_layer_key(scene, ref_id) + name = (get_blenderID_name(ref_id) if ref_id else scene.name).encode() + + if start_zero: + f_end -= f_start + f_start = 0.0 + + return (astack_key, animations, alayer_key, name, f_start, f_end) if animations else None + + +def fbx_animations(scene_data): + """ + Generate global animation data from objects. + """ + scene = scene_data.scene + animations = [] + animated = set() + frame_start = 1e100 + frame_end = -1e100 + + def add_anim(animations, animated, anim): + nonlocal frame_start, frame_end + if anim is not None: + animations.append(anim) + f_start, f_end = anim[4:6] + if f_start < frame_start: + frame_start = f_start + if f_end > frame_end: + frame_end = f_end + + _astack_key, astack, _alayer_key, _name, _fstart, _fend = anim + for elem_key, (alayer_key, acurvenodes) in astack.items(): + for fbx_prop, (acurvenode_key, acurves, acurvenode_name) in acurvenodes.items(): + animated.add((elem_key, fbx_prop)) + + # Per-NLA strip animstacks. + if scene_data.settings.bake_anim_use_nla_strips: + strips = [] + ob_actions = [] + for ob_obj in scene_data.objects: + # NLA tracks only for objects, not bones! + if not ob_obj.is_object: + continue + ob = ob_obj.bdata # Back to real Blender Object. + if not ob.animation_data: + continue + + # Some actions are read-only, one cause is being in NLA tweakmode + restore_use_tweak_mode = ob.animation_data.use_tweak_mode + if ob.animation_data.is_property_readonly('action'): + ob.animation_data.use_tweak_mode = False + + # We have to remove active action from objects, it overwrites strips actions otherwise... + ob_actions.append((ob, ob.animation_data.action, restore_use_tweak_mode)) + ob.animation_data.action = None + for track in ob.animation_data.nla_tracks: + if track.mute: + continue + for strip in track.strips: + if strip.mute: + continue + strips.append(strip) + strip.mute = True + + for strip in strips: + strip.mute = False + add_anim(animations, animated, + fbx_animations_do(scene_data, strip, strip.frame_start, strip.frame_end, True, force_keep=True)) + strip.mute = True + scene.frame_set(scene.frame_current, subframe=0.0) + + for strip in strips: + strip.mute = False + + for ob, ob_act, restore_use_tweak_mode in ob_actions: + ob.animation_data.action = ob_act + ob.animation_data.use_tweak_mode = restore_use_tweak_mode + + # All actions. + if scene_data.settings.bake_anim_use_all_actions: + def validate_actions(act, path_resolve): + for fc in act.fcurves: + data_path = fc.data_path + if fc.array_index: + data_path = data_path + "[%d]" % fc.array_index + try: + path_resolve(data_path) + except ValueError: + return False # Invalid. + return True # Valid. + + def restore_object(ob_to, ob_from): + # Restore org state of object (ugh :/ ). 
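+ # Only the hand-picked, writable Object properties listed below are copied back from the temporary copy; anything reported read-only by is_property_readonly() is left untouched.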
+ props = ( + 'location', 'rotation_quaternion', 'rotation_axis_angle', 'rotation_euler', 'rotation_mode', 'scale', + 'delta_location', 'delta_rotation_euler', 'delta_rotation_quaternion', 'delta_scale', + 'lock_location', 'lock_rotation', 'lock_rotation_w', 'lock_rotations_4d', 'lock_scale', + 'tag', 'track_axis', 'up_axis', 'active_material', 'active_material_index', + 'matrix_parent_inverse', 'empty_display_type', 'empty_display_size', 'empty_image_offset', 'pass_index', + 'color', 'hide_viewport', 'hide_select', 'hide_render', 'instance_type', + 'use_instance_vertices_rotation', 'use_instance_faces_scale', 'instance_faces_scale', + 'display_type', 'show_bounds', 'display_bounds_type', 'show_name', 'show_axis', 'show_texture_space', + 'show_wire', 'show_all_edges', 'show_transparent', 'show_in_front', + 'show_only_shape_key', 'use_shape_key_edit_mode', 'active_shape_key_index', + ) + for p in props: + if not ob_to.is_property_readonly(p): + setattr(ob_to, p, getattr(ob_from, p)) + + for ob_obj in scene_data.objects: + # Actions only for objects, not bones! + if not ob_obj.is_object: + continue + + ob = ob_obj.bdata # Back to real Blender Object. + + if not ob.animation_data: + continue # Do not export animations for objects that are absolutely not animated, see T44386. + + if ob.animation_data.is_property_readonly('action'): + continue # Cannot re-assign 'active action' to this object (usually related to NLA usage, see T48089). + + # We can't play with animdata and actions and get back to org state easily. + # So we have to add a temp copy of the object to the scene, animate it, and remove it... :/ + ob_copy = ob.copy() + # Great, have to handle bones as well if needed... + pbones_matrices = [pbo.matrix_basis.copy() for pbo in ob.pose.bones] if ob.type == 'ARMATURE' else ... + + org_act = ob.animation_data.action + path_resolve = ob.path_resolve + + for act in bpy.data.actions: + # For now, *all* paths in the action must be valid for the object, to validate the action. + # Unless that action was already assigned to the object! + if act != org_act and not validate_actions(act, path_resolve): + continue + ob.animation_data.action = act + frame_start, frame_end = act.frame_range # sic! + add_anim(animations, animated, + fbx_animations_do(scene_data, (ob, act), frame_start, frame_end, True, + objects={ob_obj}, force_keep=True)) + # Ugly! :/ + if pbones_matrices is not ...: + for pbo, mat in zip(ob.pose.bones, pbones_matrices): + pbo.matrix_basis = mat.copy() + ob.animation_data.action = org_act + restore_object(ob, ob_copy) + scene.frame_set(scene.frame_current, subframe=0.0) + + if pbones_matrices is not ...: + for pbo, mat in zip(ob.pose.bones, pbones_matrices): + pbo.matrix_basis = mat.copy() + ob.animation_data.action = org_act + + bpy.data.objects.remove(ob_copy) + scene.frame_set(scene.frame_current, subframe=0.0) + + # Global (containing everything) animstack, only if not exporting NLA strips and/or all actions. + if not scene_data.settings.bake_anim_use_nla_strips and not scene_data.settings.bake_anim_use_all_actions: + add_anim(animations, animated, fbx_animations_do(scene_data, None, scene.frame_start, scene.frame_end, False)) + + # Be sure to update all matrices back to org state! + scene.frame_set(scene.frame_current, subframe=0.0) + + return animations, animated, frame_start, frame_end + + +def fbx_data_from_scene(scene, depsgraph, settings): + """ + Do some pre-processing over scene's data... 
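+ Returns an FBXExportData tuple gathering everything the element writers need: templates, connections, wrapped objects, per-datablock dictionaries and the baked animations.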
+ """ + objtypes = settings.object_types + dp_objtypes = objtypes - {'ARMATURE'} # Armatures are not supported as dupli instances currently... + perfmon = PerfMon() + perfmon.level_up() + + # ##### Gathering data... + + perfmon.step("FBX export prepare: Wrapping Objects...") + + # This is rather simple for now, maybe we could end generating templates with most-used values + # instead of default ones? + objects = {} # Because we do not have any ordered set... + for ob in settings.context_objects: + if ob.type not in objtypes: + continue + ob_obj = ObjectWrapper(ob) + objects[ob_obj] = None + # Duplis... + for dp_obj in ob_obj.dupli_list_gen(depsgraph): + if dp_obj.type not in dp_objtypes: + continue + objects[dp_obj] = None + + perfmon.step("FBX export prepare: Wrapping Data (lamps, cameras, empties)...") + + data_lights = {ob_obj.bdata.data: get_blenderID_key(ob_obj.bdata.data) + for ob_obj in objects if ob_obj.type == 'LIGHT'} + # Unfortunately, FBX camera data contains object-level data (like position, orientation, etc.)... + data_cameras = {ob_obj: get_blenderID_key(ob_obj.bdata.data) + for ob_obj in objects if ob_obj.type == 'CAMERA'} + # Yep! Contains nothing, but needed! + data_empties = {ob_obj: get_blender_empty_key(ob_obj.bdata) + for ob_obj in objects if ob_obj.type == 'EMPTY'} + + perfmon.step("FBX export prepare: Wrapping Meshes...") + + data_meshes = {} + for ob_obj in objects: + if ob_obj.type not in BLENDER_OBJECT_TYPES_MESHLIKE: + continue + ob = ob_obj.bdata + org_ob_obj = None + + # Do not want to systematically recreate a new mesh for dupliobject instances, kind of break purpose of those. + if ob_obj.is_dupli: + org_ob_obj = ObjectWrapper(ob) # We get the "real" object wrapper from that dupli instance. + if org_ob_obj in data_meshes: + data_meshes[ob_obj] = data_meshes[org_ob_obj] + continue + + # There are 4 different cases for what we need to do with the original data of each Object: + # 1) The original data can be used without changes. + # 2) A copy of the original data needs to be made. + # - If an export option modifies the data, e.g. Triangulate Faces is enabled. + # - If the Object has Object-linked materials. This is because our current mapping of materials to FBX requires + # that multiple Objects sharing a single mesh must have the same materials. + # 3) The Object needs to be converted to a mesh. + # - All mesh-like Objects that are not meshes need to be converted to a mesh in order to be exported. + # 4) The Object needs to be evaluated and then converted to a mesh. + # - Whenever use_mesh_modifiers is enabled and either there are modifiers to apply or the Object needs to be + # converted to a mesh. + # If multiple cases apply to an Object, then only the last applicable case is relevant. + do_copy = any(ms.link == 'OBJECT' for ms in ob.material_slots) or settings.use_triangles + do_convert = ob.type in BLENDER_OTHER_OBJECT_TYPES + do_evaluate = do_convert and settings.use_mesh_modifiers + + # If the Object is a mesh, and we're applying modifiers, check if there are actually any modifiers to apply. + # If there are then the mesh will need to be evaluated, and we may need to make some temporary changes to the + # modifiers or scene before the mesh is evaluated. + backup_pose_positions = [] + tmp_mods = [] + if ob.type == 'MESH' and settings.use_mesh_modifiers: + # No need to create a new mesh in this case, if no modifier is active! + last_subsurf = None + for mod in ob.modifiers: + # For meshes, when armature export is enabled, disable Armature modifiers here! 
+ # XXX Temp hacks here since currently we only have access to a viewport depsgraph... + # + # NOTE: We put armature to the rest pose instead of disabling it so we still + # have vertex groups in the evaluated mesh. + if mod.type == 'ARMATURE' and 'ARMATURE' in settings.object_types: + object = mod.object + if object and object.type == 'ARMATURE': + armature = object.data + # If armature is already in REST position, there's nothing to back-up + # This cuts down on export time dramatically, if all armatures are already in REST position + # by not triggering dependency graph update + if armature.pose_position != 'REST': + backup_pose_positions.append((armature, armature.pose_position)) + armature.pose_position = 'REST' + elif mod.show_render or mod.show_viewport: + # If exporting with subsurf collect the last Catmull-Clark subsurf modifier + # and disable it. We can use the original data as long as this is the first + # found applicable subsurf modifier. + if settings.use_subsurf and mod.type == 'SUBSURF' and mod.subdivision_type == 'CATMULL_CLARK': + if last_subsurf: + do_evaluate = True + last_subsurf = mod + else: + do_evaluate = True + if settings.use_subsurf and last_subsurf: + # XXX: When exporting with subsurf information temporarily disable + # the last subsurf modifier. + tmp_mods.append((last_subsurf, last_subsurf.show_render, last_subsurf.show_viewport)) + + if do_evaluate: + # If modifiers has been altered need to update dependency graph. + if backup_pose_positions or tmp_mods: + depsgraph.update() + ob_to_convert = ob.evaluated_get(depsgraph) + # NOTE: The dependency graph might be re-evaluating multiple times, which could + # potentially free the mesh created early on. So we put those meshes to bmain and + # free them afterwards. Not ideal but ensures correct ownership. + tmp_me = bpy.data.meshes.new_from_object( + ob_to_convert, preserve_all_data_layers=True, depsgraph=depsgraph) + + # Usually the materials of the evaluated object will be the same, but modifiers, such as Geometry Nodes, + # can change the materials. + orig_mats = tuple(slot.material for slot in ob.material_slots) + eval_mats = tuple(slot.material.original if slot.material else None + for slot in ob_to_convert.material_slots) + if orig_mats != eval_mats: + # Override the default behaviour of getting materials from ob_obj.bdata.material_slots. + ob_obj.override_materials = eval_mats + elif do_convert: + tmp_me = bpy.data.meshes.new_from_object(ob, preserve_all_data_layers=True, depsgraph=depsgraph) + elif do_copy: + # bpy.data.meshes.new_from_object removes shape keys (see #104714), so create a copy of the mesh instead. + tmp_me = ob.data.copy() + else: + tmp_me = None + + if tmp_me is None: + # Use the original data of this Object. + data_meshes[ob_obj] = (get_blenderID_key(ob.data), ob.data, False) + else: + # Triangulate the mesh if requested + if settings.use_triangles: + import bmesh + bm = bmesh.new() + bm.from_mesh(tmp_me) + bmesh.ops.triangulate(bm, faces=bm.faces) + bm.to_mesh(tmp_me) + bm.free() + # A temporary mesh was created for this Object, which should be deleted once the export is complete. + data_meshes[ob_obj] = (get_blenderID_key(tmp_me), tmp_me, True) + + # Change armatures back. + for armature, pose_position in backup_pose_positions: + print((armature, pose_position)) + armature.pose_position = pose_position + # Update now, so we don't leave modified state after last object was exported. + # Re-enable temporary disabled modifiers. 
+ for mod, show_render, show_viewport in tmp_mods: + mod.show_render = show_render + mod.show_viewport = show_viewport + if backup_pose_positions or tmp_mods: + depsgraph.update() + + # In case "real" source object of that dupli did not yet still existed in data_meshes, create it now! + if org_ob_obj is not None: + data_meshes[org_ob_obj] = data_meshes[ob_obj] + + perfmon.step("FBX export prepare: Wrapping ShapeKeys...") + + # ShapeKeys. + data_deformers_shape = {} + geom_mat_co = settings.global_matrix if settings.bake_space_transform else None + co_bl_dtype = np.single + co_fbx_dtype = np.float64 + idx_fbx_dtype = np.int32 + + def empty_verts_fallbacks(): + """Create fallback arrays for when there are no verts""" + # FBX does not like empty shapes (makes Unity crash e.g.). + # To prevent this, we add a vertex that does nothing, but it keeps the shape key intact + single_vert_co = np.zeros((1, 3), dtype=co_fbx_dtype) + single_vert_idx = np.zeros(1, dtype=idx_fbx_dtype) + return single_vert_co, single_vert_idx + + for me_key, me, _free in data_meshes.values(): + if not (me.shape_keys and len(me.shape_keys.key_blocks) > 1): # We do not want basis-only relative skeys... + continue + if me in data_deformers_shape: + continue + + shapes_key = get_blender_mesh_shape_key(me) + + sk_base = me.shape_keys.key_blocks[0] + + # Get and cache only the cos that we need + @cache + def sk_cos(shape_key): + if shape_key == sk_base: + _cos = MESH_ATTRIBUTE_POSITION.to_ndarray(me.attributes) + else: + _cos = np.empty(len(me.vertices) * 3, dtype=co_bl_dtype) + shape_key.points.foreach_get("co", _cos) + return vcos_transformed(_cos, geom_mat_co, co_fbx_dtype) + + for shape in me.shape_keys.key_blocks[1:]: + # Only write vertices really different from base coordinates! + relative_key = shape.relative_key + if shape == relative_key: + # Shape is its own relative key, so it does nothing + shape_verts_co, shape_verts_idx = empty_verts_fallbacks() + else: + sv_cos = sk_cos(shape) + ref_cos = sk_cos(shape.relative_key) + + # Exclude cos similar to ref_cos and get the indices of the cos that remain + shape_verts_co, shape_verts_idx = shape_difference_exclude_similar(sv_cos, ref_cos) + + if not shape_verts_co.size: + shape_verts_co, shape_verts_idx = empty_verts_fallbacks() + else: + # Ensure the indices are of the correct type + shape_verts_idx = astype_view_signedness(shape_verts_idx, idx_fbx_dtype) + + channel_key, geom_key = get_blender_mesh_shape_channel_key(me, shape) + data = (channel_key, geom_key, shape_verts_co, shape_verts_idx) + data_deformers_shape.setdefault(me, (me_key, shapes_key, {}))[2][shape] = data + + del sk_cos + + perfmon.step("FBX export prepare: Wrapping Armatures...") + + # Armatures! + data_deformers_skin = {} + data_bones = {} + arm_parents = set() + for ob_obj in tuple(objects): + if not (ob_obj.is_object and ob_obj.type in {'ARMATURE'}): + continue + fbx_skeleton_from_armature(scene, settings, ob_obj, objects, data_meshes, + data_bones, data_deformers_skin, data_empties, arm_parents) + + # Generate leaf bones + data_leaf_bones = [] + if settings.add_leaf_bones: + data_leaf_bones = fbx_generate_leaf_bones(settings, data_bones) + + perfmon.step("FBX export prepare: Wrapping World...") + + # Some world settings are embedded in FBX materials... 
+ if scene.world: + data_world = {scene.world: get_blenderID_key(scene.world)} + else: + data_world = {} + + perfmon.step("FBX export prepare: Wrapping Materials...") + + # TODO: Check all the material stuff works even when they are linked to Objects + # (we can then have the same mesh used with different materials...). + # *Should* work, as FBX always links its materials to Models (i.e. objects). + # XXX However, material indices would probably break... + data_materials = {} + for ob_obj in objects: + # If obj is not a valid object for materials, wrapper will just return an empty tuple... + for ma in ob_obj.materials: + if ma is None: + continue # Empty slots! + # Note theoretically, FBX supports any kind of materials, even GLSL shaders etc. + # However, I doubt anything else than Lambert/Phong is really portable! + # Note we want to keep a 'dummy' empty material even when we can't really support it, see T41396. + ma_data = data_materials.setdefault(ma, (get_blenderID_key(ma), [])) + ma_data[1].append(ob_obj) + + perfmon.step("FBX export prepare: Wrapping Textures...") + + # Note FBX textures also hold their mapping info. + # TODO: Support layers? + data_textures = {} + # FbxVideo also used to store static images... + data_videos = {} + # For now, do not use world textures, don't think they can be linked to anything FBX wise... + for ma in data_materials.keys(): + # Note: with nodal shaders, we'll could be generating much more textures, but that's kind of unavoidable, + # given that textures actually do not exist anymore in material context in Blender... + ma_wrap = node_shader_utils.PrincipledBSDFWrapper(ma, is_readonly=True) + for sock_name, fbx_name in PRINCIPLED_TEXTURE_SOCKETS_TO_FBX: + tex = getattr(ma_wrap, sock_name) + if tex is None or tex.image is None: + continue + blender_tex_key = (ma, sock_name) + data_textures[blender_tex_key] = (get_blender_nodetexture_key(*blender_tex_key), fbx_name) + + img = tex.image + vid_data = data_videos.setdefault(img, (get_blenderID_key(img), [])) + vid_data[1].append(blender_tex_key) + + perfmon.step("FBX export prepare: Wrapping Animations...") + + # Animation... + animations = () + animated = set() + frame_start = scene.frame_start + frame_end = scene.frame_end + if settings.bake_anim: + # From objects & bones only for a start. + # Kind of hack, we need a temp scene_data for object's space handling to bake animations... + tmp_scdata = FBXExportData( + None, None, None, + settings, scene, depsgraph, objects, None, None, 0.0, 0.0, + data_empties, data_lights, data_cameras, data_meshes, None, + data_bones, data_leaf_bones, data_deformers_skin, data_deformers_shape, + data_world, data_materials, data_textures, data_videos, + ) + animations, animated, frame_start, frame_end = fbx_animations(tmp_scdata) + + # ##### Creation of templates... 
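+ # (Templates end up in the file's "Definitions" section: one entry per FBX class holding default property values plus a user count, written later by fbx_definitions_elements() via fbx_templates_generate().)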
+ + perfmon.step("FBX export prepare: Generating templates...") + + templates = {} + templates[b"GlobalSettings"] = fbx_template_def_globalsettings(scene, settings, nbr_users=1) + + if data_empties: + templates[b"Null"] = fbx_template_def_null(scene, settings, nbr_users=len(data_empties)) + + if data_lights: + templates[b"Light"] = fbx_template_def_light(scene, settings, nbr_users=len(data_lights)) + + if data_cameras: + templates[b"Camera"] = fbx_template_def_camera(scene, settings, nbr_users=len(data_cameras)) + + if data_bones: + templates[b"Bone"] = fbx_template_def_bone(scene, settings, nbr_users=len(data_bones)) + + if data_meshes: + nbr = len({me_key for me_key, _me, _free in data_meshes.values()}) + if data_deformers_shape: + nbr += sum(len(shapes[2]) for shapes in data_deformers_shape.values()) + templates[b"Geometry"] = fbx_template_def_geometry(scene, settings, nbr_users=nbr) + + if objects: + templates[b"Model"] = fbx_template_def_model(scene, settings, nbr_users=len(objects)) + + if arm_parents: + # Number of Pose|BindPose elements should be the same as number of meshes-parented-to-armatures + templates[b"BindPose"] = fbx_template_def_pose(scene, settings, nbr_users=len(arm_parents)) + + if data_deformers_skin or data_deformers_shape: + nbr = 0 + if data_deformers_skin: + nbr += len(data_deformers_skin) + nbr += sum(len(clusters) for def_me in data_deformers_skin.values() for a, b, clusters in def_me.values()) + if data_deformers_shape: + nbr += len(data_deformers_shape) + nbr += sum(len(shapes[2]) for shapes in data_deformers_shape.values()) + assert(nbr != 0) + templates[b"Deformers"] = fbx_template_def_deformer(scene, settings, nbr_users=nbr) + + # No world support in FBX... + """ + if data_world: + templates[b"World"] = fbx_template_def_world(scene, settings, nbr_users=len(data_world)) + """ + + if data_materials: + templates[b"Material"] = fbx_template_def_material(scene, settings, nbr_users=len(data_materials)) + + if data_textures: + templates[b"TextureFile"] = fbx_template_def_texture_file(scene, settings, nbr_users=len(data_textures)) + + if data_videos: + templates[b"Video"] = fbx_template_def_video(scene, settings, nbr_users=len(data_videos)) + + if animations: + nbr_astacks = len(animations) + nbr_acnodes = 0 + nbr_acurves = 0 + for _astack_key, astack, _al, _n, _fs, _fe in animations: + for _alayer_key, alayer in astack.values(): + for _acnode_key, acnode, _acnode_name in alayer.values(): + nbr_acnodes += 1 + for _acurve_key, _dval, (keys, _values), acurve_valid in acnode.values(): + if len(keys): + nbr_acurves += 1 + + templates[b"AnimationStack"] = fbx_template_def_animstack(scene, settings, nbr_users=nbr_astacks) + # Would be nice to have one layer per animated object, but this seems tricky and not that well supported. + # So for now, only one layer per anim stack. + templates[b"AnimationLayer"] = fbx_template_def_animlayer(scene, settings, nbr_users=nbr_astacks) + templates[b"AnimationCurveNode"] = fbx_template_def_animcurvenode(scene, settings, nbr_users=nbr_acnodes) + templates[b"AnimationCurve"] = fbx_template_def_animcurve(scene, settings, nbr_users=nbr_acurves) + + templates_users = sum(tmpl.nbr_users for tmpl in templates.values()) + + # ##### Creation of connections... + + perfmon.step("FBX export prepare: Generating Connections...") + + connections = [] + + # Objects (with classical parenting). + for ob_obj in objects: + # Bones are handled later. 
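+ # Each connection is a (kind, child_uuid, parent_uuid, prop) tuple: b"OO" links an object to an object, b"OP" links an object to a named property of its parent (used further down for textures and animation curves).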
+ if not ob_obj.is_bone: + par_obj = ob_obj.parent + # Meshes parented to armature are handled separately, yet we want the 'no parent' connection (0). + if par_obj and ob_obj.has_valid_parent(objects) and (par_obj, ob_obj) not in arm_parents: + connections.append((b"OO", ob_obj.fbx_uuid, par_obj.fbx_uuid, None)) + else: + connections.append((b"OO", ob_obj.fbx_uuid, 0, None)) + + # Armature & Bone chains. + for bo_obj in data_bones.keys(): + par_obj = bo_obj.parent + if par_obj not in objects: + continue + connections.append((b"OO", bo_obj.fbx_uuid, par_obj.fbx_uuid, None)) + + # Object data. + for ob_obj in objects: + if ob_obj.is_bone: + bo_data_key = data_bones[ob_obj] + connections.append((b"OO", get_fbx_uuid_from_key(bo_data_key), ob_obj.fbx_uuid, None)) + else: + if ob_obj.type == 'LIGHT': + light_key = data_lights[ob_obj.bdata.data] + connections.append((b"OO", get_fbx_uuid_from_key(light_key), ob_obj.fbx_uuid, None)) + elif ob_obj.type == 'CAMERA': + cam_key = data_cameras[ob_obj] + connections.append((b"OO", get_fbx_uuid_from_key(cam_key), ob_obj.fbx_uuid, None)) + elif ob_obj.type == 'EMPTY' or ob_obj.type == 'ARMATURE': + empty_key = data_empties[ob_obj] + connections.append((b"OO", get_fbx_uuid_from_key(empty_key), ob_obj.fbx_uuid, None)) + elif ob_obj.type in BLENDER_OBJECT_TYPES_MESHLIKE: + mesh_key, _me, _free = data_meshes[ob_obj] + connections.append((b"OO", get_fbx_uuid_from_key(mesh_key), ob_obj.fbx_uuid, None)) + + # Leaf Bones + for (_node_name, par_uuid, node_uuid, attr_uuid, _matrix, _hide, _size) in data_leaf_bones: + connections.append((b"OO", node_uuid, par_uuid, None)) + connections.append((b"OO", attr_uuid, node_uuid, None)) + + # 'Shape' deformers (shape keys, only for meshes currently)... + for me_key, shapes_key, shapes in data_deformers_shape.values(): + # shape -> geometry + connections.append((b"OO", get_fbx_uuid_from_key(shapes_key), get_fbx_uuid_from_key(me_key), None)) + for channel_key, geom_key, _shape_verts_co, _shape_verts_idx in shapes.values(): + # shape channel -> shape + connections.append((b"OO", get_fbx_uuid_from_key(channel_key), get_fbx_uuid_from_key(shapes_key), None)) + # geometry (keys) -> shape channel + connections.append((b"OO", get_fbx_uuid_from_key(geom_key), get_fbx_uuid_from_key(channel_key), None)) + + # 'Skin' deformers (armature-to-geometry, only for meshes currently)... + for arm, deformed_meshes in data_deformers_skin.items(): + for me, (skin_key, ob_obj, clusters) in deformed_meshes.items(): + # skin -> geometry + mesh_key, _me, _free = data_meshes[ob_obj] + assert(me == _me) + connections.append((b"OO", get_fbx_uuid_from_key(skin_key), get_fbx_uuid_from_key(mesh_key), None)) + for bo_obj, clstr_key in clusters.items(): + # cluster -> skin + connections.append((b"OO", get_fbx_uuid_from_key(clstr_key), get_fbx_uuid_from_key(skin_key), None)) + # bone -> cluster + connections.append((b"OO", bo_obj.fbx_uuid, get_fbx_uuid_from_key(clstr_key), None)) + + # Materials + mesh_material_indices = {} + _objs_indices = {} + for ma, (ma_key, ob_objs) in data_materials.items(): + for ob_obj in ob_objs: + connections.append((b"OO", get_fbx_uuid_from_key(ma_key), ob_obj.fbx_uuid, None)) + # Get index of this material for this object (or dupliobject). + # Material indices for mesh faces are determined by their order in 'ma to ob' connections. + # Only materials for meshes currently... + # Note in case of dupliobjects a same me/ma idx will be generated several times... 
+ # Should not be an issue in practice, and it's needed in case we export duplis but not the original! + if ob_obj.type not in BLENDER_OBJECT_TYPES_MESHLIKE: + continue + _mesh_key, me, _free = data_meshes[ob_obj] + idx = _objs_indices[ob_obj] = _objs_indices.get(ob_obj, -1) + 1 + # XXX If a mesh has multiple material slots with the same material, they are combined into one slot. + # Even if duplicate materials were exported without combining them into one slot, keeping duplicate + # materials separated does not appear to be common behaviour of external software when importing FBX. + mesh_material_indices.setdefault(me, {})[ma] = idx + del _objs_indices + + # Textures + for (ma, sock_name), (tex_key, fbx_prop) in data_textures.items(): + ma_key, _ob_objs = data_materials[ma] + # texture -> material properties + connections.append((b"OP", get_fbx_uuid_from_key(tex_key), get_fbx_uuid_from_key(ma_key), fbx_prop)) + + # Images + for vid, (vid_key, blender_tex_keys) in data_videos.items(): + for blender_tex_key in blender_tex_keys: + tex_key, _fbx_prop = data_textures[blender_tex_key] + connections.append((b"OO", get_fbx_uuid_from_key(vid_key), get_fbx_uuid_from_key(tex_key), None)) + + # Animations + for astack_key, astack, alayer_key, _name, _fstart, _fend in animations: + # Animstack itself is linked nowhere! + astack_id = get_fbx_uuid_from_key(astack_key) + # For now, only one layer! + alayer_id = get_fbx_uuid_from_key(alayer_key) + connections.append((b"OO", alayer_id, astack_id, None)) + for elem_key, (alayer_key, acurvenodes) in astack.items(): + elem_id = get_fbx_uuid_from_key(elem_key) + # Animlayer -> animstack. + # alayer_id = get_fbx_uuid_from_key(alayer_key) + # connections.append((b"OO", alayer_id, astack_id, None)) + for fbx_prop, (acurvenode_key, acurves, acurvenode_name) in acurvenodes.items(): + # Animcurvenode -> animalayer. + acurvenode_id = get_fbx_uuid_from_key(acurvenode_key) + connections.append((b"OO", acurvenode_id, alayer_id, None)) + # Animcurvenode -> object property. + connections.append((b"OP", acurvenode_id, elem_id, fbx_prop.encode())) + for fbx_item, (acurve_key, default_value, (keys, values), acurve_valid) in acurves.items(): + if len(keys): + # Animcurve -> Animcurvenode. + connections.append((b"OP", get_fbx_uuid_from_key(acurve_key), acurvenode_id, fbx_item.encode())) + + perfmon.level_down() + + # ##### And pack all this! + + return FBXExportData( + templates, templates_users, connections, + settings, scene, depsgraph, objects, animations, animated, frame_start, frame_end, + data_empties, data_lights, data_cameras, data_meshes, mesh_material_indices, + data_bones, data_leaf_bones, data_deformers_skin, data_deformers_shape, + data_world, data_materials, data_textures, data_videos, + ) + + +def fbx_scene_data_cleanup(scene_data): + """ + Some final cleanup... + """ + # Delete temp meshes. + done_meshes = set() + for me_key, me, free in scene_data.data_meshes.values(): + if free and me_key not in done_meshes: + bpy.data.meshes.remove(me) + done_meshes.add(me_key) + + +# ##### Top-level FBX elements generators. ##### + +def fbx_header_elements(root, scene_data, time=None): + """ + Write boiling code of FBX root. + time is expected to be a datetime.datetime object, or None (using now() in this case). + """ + app_vendor = "Blender Foundation" + app_name = "Blender (stable FBX IO)" + app_ver = bpy.app.version_string + + from . import bl_info + addon_ver = bl_info["version"] + del bl_info + + # ##### Start of FBXHeaderExtension element. 
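+ # (FBXHeaderExtension groups the format/version numbers, creation timestamp, Creator string and the SceneInfo block; the Creator string is written once more at the root level further down.)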
+ header_ext = elem_empty(root, b"FBXHeaderExtension") + + elem_data_single_int32(header_ext, b"FBXHeaderVersion", FBX_HEADER_VERSION) + + elem_data_single_int32(header_ext, b"FBXVersion", FBX_VERSION) + + # No encryption! + elem_data_single_int32(header_ext, b"EncryptionType", 0) + + if time is None: + time = datetime.datetime.now() + elem = elem_empty(header_ext, b"CreationTimeStamp") + elem_data_single_int32(elem, b"Version", 1000) + elem_data_single_int32(elem, b"Year", time.year) + elem_data_single_int32(elem, b"Month", time.month) + elem_data_single_int32(elem, b"Day", time.day) + elem_data_single_int32(elem, b"Hour", time.hour) + elem_data_single_int32(elem, b"Minute", time.minute) + elem_data_single_int32(elem, b"Second", time.second) + elem_data_single_int32(elem, b"Millisecond", time.microsecond // 1000) + + elem_data_single_string_unicode(header_ext, b"Creator", "%s - %s - %d.%d.%d" + % (app_name, app_ver, addon_ver[0], addon_ver[1], addon_ver[2])) + + # 'SceneInfo' seems mandatory to get a valid FBX file... + # TODO use real values! + # XXX Should we use scene.name.encode() here? + scene_info = elem_data_single_string(header_ext, b"SceneInfo", fbx_name_class(b"GlobalInfo", b"SceneInfo")) + scene_info.add_string(b"UserData") + elem_data_single_string(scene_info, b"Type", b"UserData") + elem_data_single_int32(scene_info, b"Version", FBX_SCENEINFO_VERSION) + meta_data = elem_empty(scene_info, b"MetaData") + elem_data_single_int32(meta_data, b"Version", FBX_SCENEINFO_VERSION) + elem_data_single_string(meta_data, b"Title", b"") + elem_data_single_string(meta_data, b"Subject", b"") + elem_data_single_string(meta_data, b"Author", b"") + elem_data_single_string(meta_data, b"Keywords", b"") + elem_data_single_string(meta_data, b"Revision", b"") + elem_data_single_string(meta_data, b"Comment", b"") + + props = elem_properties(scene_info) + elem_props_set(props, "p_string_url", b"DocumentUrl", "/foobar.fbx") + elem_props_set(props, "p_string_url", b"SrcDocumentUrl", "/foobar.fbx") + original = elem_props_compound(props, b"Original") + original("p_string", b"ApplicationVendor", app_vendor) + original("p_string", b"ApplicationName", app_name) + original("p_string", b"ApplicationVersion", app_ver) + original("p_datetime", b"DateTime_GMT", "01/01/1970 00:00:00.000") + original("p_string", b"FileName", "/foobar.fbx") + lastsaved = elem_props_compound(props, b"LastSaved") + lastsaved("p_string", b"ApplicationVendor", app_vendor) + lastsaved("p_string", b"ApplicationName", app_name) + lastsaved("p_string", b"ApplicationVersion", app_ver) + lastsaved("p_datetime", b"DateTime_GMT", "01/01/1970 00:00:00.000") + original("p_string", b"ApplicationNativeFile", bpy.data.filepath) + + # ##### End of FBXHeaderExtension element. + + # FileID is replaced by dummy value currently... + elem_data_single_bytes(root, b"FileId", b"FooBar") + + # CreationTime is replaced by dummy value currently, but anyway... + elem_data_single_string_unicode(root, b"CreationTime", + "{:04}-{:02}-{:02} {:02}:{:02}:{:02}:{:03}" + "".format(time.year, time.month, time.day, time.hour, time.minute, time.second, + time.microsecond * 1000)) + + elem_data_single_string_unicode(root, b"Creator", "%s - %s - %d.%d.%d" + % (app_name, app_ver, addon_ver[0], addon_ver[1], addon_ver[2])) + + # ##### Start of GlobalSettings element. 
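+ # (GlobalSettings is metadata: target axes as (axis index, sign) pairs from RIGHT_HAND_AXES plus the unit scale; the exported data itself has already been converted through the exporter's global matrix.)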
+ global_settings = elem_empty(root, b"GlobalSettings") + scene = scene_data.scene + + elem_data_single_int32(global_settings, b"Version", 1000) + + props = elem_properties(global_settings) + up_axis, front_axis, coord_axis = RIGHT_HAND_AXES[scene_data.settings.to_axes] + # ~ # DO NOT take into account global scale here! That setting is applied to object transformations during export + # ~ # (in other words, this is pure blender-exporter feature, and has nothing to do with FBX data). + # ~ if scene_data.settings.apply_unit_scale: + # ~ # Unit scaling is applied to objects' scale, so our unit is effectively FBX one (centimeter). + # ~ scale_factor_org = 1.0 + # ~ scale_factor = 1.0 / units_blender_to_fbx_factor(scene) + # ~ else: + # ~ scale_factor_org = units_blender_to_fbx_factor(scene) + # ~ scale_factor = scale_factor_org + scale_factor = scale_factor_org = scene_data.settings.unit_scale + elem_props_set(props, "p_integer", b"UpAxis", up_axis[0]) + elem_props_set(props, "p_integer", b"UpAxisSign", up_axis[1]) + elem_props_set(props, "p_integer", b"FrontAxis", front_axis[0]) + elem_props_set(props, "p_integer", b"FrontAxisSign", front_axis[1]) + elem_props_set(props, "p_integer", b"CoordAxis", coord_axis[0]) + elem_props_set(props, "p_integer", b"CoordAxisSign", coord_axis[1]) + elem_props_set(props, "p_integer", b"OriginalUpAxis", -1) + elem_props_set(props, "p_integer", b"OriginalUpAxisSign", 1) + elem_props_set(props, "p_double", b"UnitScaleFactor", scale_factor) + elem_props_set(props, "p_double", b"OriginalUnitScaleFactor", scale_factor_org) + elem_props_set(props, "p_color_rgb", b"AmbientColor", (0.0, 0.0, 0.0)) + elem_props_set(props, "p_string", b"DefaultCamera", "Producer Perspective") + + # Global timing data. + r = scene.render + _, fbx_fps_mode = FBX_FRAMERATES[0] # Custom framerate. + fbx_fps = fps = r.fps / r.fps_base + for ref_fps, fps_mode in FBX_FRAMERATES: + if similar_values(fps, ref_fps): + fbx_fps = ref_fps + fbx_fps_mode = fps_mode + break + elem_props_set(props, "p_enum", b"TimeMode", fbx_fps_mode) + elem_props_set(props, "p_timestamp", b"TimeSpanStart", 0) + elem_props_set(props, "p_timestamp", b"TimeSpanStop", FBX_KTIME) + elem_props_set(props, "p_double", b"CustomFrameRate", fbx_fps) + + # ##### End of GlobalSettings element. + + +def fbx_documents_elements(root, scene_data): + """ + Write 'Document' part of FBX root. + Seems like FBX support multiple documents, but until I find examples of such, we'll stick to single doc! + time is expected to be a datetime.datetime object, or None (using now() in this case). + """ + name = scene_data.scene.name + + # ##### Start of Documents element. + docs = elem_empty(root, b"Documents") + + elem_data_single_int32(docs, b"Count", 1) + + doc_uid = get_fbx_uuid_from_key("__FBX_Document__" + name) + doc = elem_data_single_int64(docs, b"Document", doc_uid) + doc.add_string_unicode(name) + doc.add_string_unicode(name) + + props = elem_properties(doc) + elem_props_set(props, "p_object", b"SourceObject") + elem_props_set(props, "p_string", b"ActiveAnimStackName", "") + + # XXX Some kind of ID? Offset? + # Anyway, as long as we have only one doc, probably not an issue. + elem_data_single_int64(doc, b"RootNode", 0) + + +def fbx_references_elements(root, scene_data): + """ + Have no idea what references are in FBX currently... Just writing empty element. + """ + docs = elem_empty(root, b"References") + + +def fbx_definitions_elements(root, scene_data): + """ + Templates definitions. 
Only used by Objects data afaik (apart from dummy GlobalSettings one). + """ + definitions = elem_empty(root, b"Definitions") + + elem_data_single_int32(definitions, b"Version", FBX_TEMPLATES_VERSION) + elem_data_single_int32(definitions, b"Count", scene_data.templates_users) + + fbx_templates_generate(definitions, scene_data.templates) + + +def fbx_objects_elements(root, scene_data): + """ + Data (objects, geometry, material, textures, armatures, etc.). + """ + perfmon = PerfMon() + perfmon.level_up() + objects = elem_empty(root, b"Objects") + + perfmon.step("FBX export fetch empties (%d)..." % len(scene_data.data_empties)) + + for empty in scene_data.data_empties: + fbx_data_empty_elements(objects, empty, scene_data) + + perfmon.step("FBX export fetch lamps (%d)..." % len(scene_data.data_lights)) + + for lamp in scene_data.data_lights: + fbx_data_light_elements(objects, lamp, scene_data) + + perfmon.step("FBX export fetch cameras (%d)..." % len(scene_data.data_cameras)) + + for cam in scene_data.data_cameras: + fbx_data_camera_elements(objects, cam, scene_data) + + perfmon.step("FBX export fetch meshes (%d)..." + % len({me_key for me_key, _me, _free in scene_data.data_meshes.values()})) + + done_meshes = set() + for me_obj in scene_data.data_meshes: + fbx_data_mesh_elements(objects, me_obj, scene_data, done_meshes) + del done_meshes + + perfmon.step("FBX export fetch objects (%d)..." % len(scene_data.objects)) + + for ob_obj in scene_data.objects: + if ob_obj.is_dupli: + continue + fbx_data_object_elements(objects, ob_obj, scene_data) + for dp_obj in ob_obj.dupli_list_gen(scene_data.depsgraph): + if dp_obj not in scene_data.objects: + continue + fbx_data_object_elements(objects, dp_obj, scene_data) + + perfmon.step("FBX export fetch remaining...") + + for ob_obj in scene_data.objects: + if not (ob_obj.is_object and ob_obj.type == 'ARMATURE'): + continue + fbx_data_armature_elements(objects, ob_obj, scene_data) + + if scene_data.data_leaf_bones: + fbx_data_leaf_bone_elements(objects, scene_data) + + for ma in scene_data.data_materials: + fbx_data_material_elements(objects, ma, scene_data) + + for blender_tex_key in scene_data.data_textures: + fbx_data_texture_file_elements(objects, blender_tex_key, scene_data) + + for vid in scene_data.data_videos: + fbx_data_video_elements(objects, vid, scene_data) + + perfmon.step("FBX export fetch animations...") + start_time = time.process_time() + + fbx_data_animation_elements(objects, scene_data) + + perfmon.level_down() + + +def fbx_connections_elements(root, scene_data): + """ + Relations between Objects (which material uses which texture, and so on). + """ + connections = elem_empty(root, b"Connections") + + for c in scene_data.connections: + elem_connection(connections, *c) + + +def fbx_takes_elements(root, scene_data): + """ + Animations. + """ + # XXX Pretty sure takes are no more needed... 
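+ # Takes only duplicate the stack names and time ranges; the actual animation data lives in the AnimationStack/AnimationLayer/AnimationCurve objects written earlier.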
+ takes = elem_empty(root, b"Takes") + elem_data_single_string(takes, b"Current", b"") + + animations = scene_data.animations + for astack_key, animations, alayer_key, name, f_start, f_end in animations: + scene = scene_data.scene + fps = scene.render.fps / scene.render.fps_base + start_ktime = int(convert_sec_to_ktime(f_start / fps)) + end_ktime = int(convert_sec_to_ktime(f_end / fps)) + + take = elem_data_single_string(takes, b"Take", name) + elem_data_single_string(take, b"FileName", name + b".tak") + take_loc_time = elem_data_single_int64(take, b"LocalTime", start_ktime) + take_loc_time.add_int64(end_ktime) + take_ref_time = elem_data_single_int64(take, b"ReferenceTime", start_ktime) + take_ref_time.add_int64(end_ktime) + + +# ##### "Main" functions. ##### + +# This func can be called with just the filepath +def save_single(operator, scene, depsgraph, filepath="", + global_matrix=Matrix(), + apply_unit_scale=False, + global_scale=1.0, + apply_scale_options='FBX_SCALE_NONE', + axis_up="Z", + axis_forward="Y", + context_objects=None, + object_types=None, + use_mesh_modifiers=True, + use_mesh_modifiers_render=True, + mesh_smooth_type='FACE', + use_subsurf=False, + use_armature_deform_only=False, + bake_anim=True, + bake_anim_use_all_bones=True, + bake_anim_use_nla_strips=True, + bake_anim_use_all_actions=True, + bake_anim_step=1.0, + bake_anim_simplify_factor=1.0, + bake_anim_force_startend_keying=True, + add_leaf_bones=False, + primary_bone_axis='Y', + secondary_bone_axis='X', + use_metadata=True, + path_mode='AUTO', + use_mesh_edges=True, + use_tspace=True, + use_triangles=False, + embed_textures=False, + use_custom_props=False, + bake_space_transform=False, + armature_nodetype='NULL', + colors_type='SRGB', + prioritize_active_color=False, + **kwargs + ): + + # Clear cached ObjectWrappers (just in case...). + ObjectWrapper.cache_clear() + + if object_types is None: + object_types = {'EMPTY', 'CAMERA', 'LIGHT', 'ARMATURE', 'MESH', 'OTHER'} + + if 'OTHER' in object_types: + object_types |= BLENDER_OTHER_OBJECT_TYPES + + # Default Blender unit is equivalent to meter, while FBX one is centimeter... + unit_scale = units_blender_to_fbx_factor(scene) if apply_unit_scale else 100.0 + if apply_scale_options == 'FBX_SCALE_NONE': + global_matrix = Matrix.Scale(unit_scale * global_scale, 4) @ global_matrix + unit_scale = 1.0 + elif apply_scale_options == 'FBX_SCALE_UNITS': + global_matrix = Matrix.Scale(global_scale, 4) @ global_matrix + elif apply_scale_options == 'FBX_SCALE_CUSTOM': + global_matrix = Matrix.Scale(unit_scale, 4) @ global_matrix + unit_scale = global_scale + else: # if apply_scale_options == 'FBX_SCALE_ALL': + unit_scale = global_scale * unit_scale + + global_scale = global_matrix.median_scale + global_matrix_inv = global_matrix.inverted() + # For transforming mesh normals. + global_matrix_inv_transposed = global_matrix_inv.transposed() + + # Only embed textures in COPY mode! 
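+    # Embedding relies on the 'COPY' path machinery: exported image files are collected into
+    # media_settings.copy_set / embedded_set while writing, and any files that end up not embedded are
+    # copied next to the exported .fbx at the end of save_single(). With any other path_mode (e.g. 'AUTO'
+    # or 'RELATIVE') images are only referenced, so embedding is silently disabled here.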
+ if embed_textures and path_mode != 'COPY': + embed_textures = False + + # Calculate bone correction matrix + bone_correction_matrix = None # Default is None = no change + bone_correction_matrix_inv = None + if (primary_bone_axis, secondary_bone_axis) != ('Y', 'X'): + from bpy_extras.io_utils import axis_conversion + bone_correction_matrix = axis_conversion(from_forward=secondary_bone_axis, + from_up=primary_bone_axis, + to_forward='X', + to_up='Y', + ).to_4x4() + bone_correction_matrix_inv = bone_correction_matrix.inverted() + + media_settings = FBXExportSettingsMedia( + path_mode, + os.path.dirname(bpy.data.filepath), # base_src + os.path.dirname(filepath), # base_dst + # Local dir where to put images (media), using FBX conventions. + os.path.splitext(os.path.basename(filepath))[0] + ".fbm", # subdir + embed_textures, + set(), # copy_set + set(), # embedded_set + ) + + settings = FBXExportSettings( + operator.report, (axis_up, axis_forward), global_matrix, global_scale, apply_unit_scale, unit_scale, + bake_space_transform, global_matrix_inv, global_matrix_inv_transposed, + context_objects, object_types, use_mesh_modifiers, use_mesh_modifiers_render, + mesh_smooth_type, use_subsurf, use_mesh_edges, use_tspace, use_triangles, + armature_nodetype, use_armature_deform_only, + add_leaf_bones, bone_correction_matrix, bone_correction_matrix_inv, + bake_anim, bake_anim_use_all_bones, bake_anim_use_nla_strips, bake_anim_use_all_actions, + bake_anim_step, bake_anim_simplify_factor, bake_anim_force_startend_keying, + False, media_settings, use_custom_props, colors_type, prioritize_active_color + ) + + import bpy_extras.io_utils + + print('\nFBX export starting... %r' % filepath) + start_time = time.process_time() + + # Generate some data about exported scene... + scene_data = fbx_data_from_scene(scene, depsgraph, settings) + + # Enable multithreaded array compression in FBXElem and wait until all threads are done before exiting the context + # manager. + with encode_bin.FBXElem.enable_multithreading_cm(): + # Writing elements into an FBX hierarchy can now begin. + root = elem_empty(None, b"") # Root element has no id, as it is not saved per se! + + # Mostly FBXHeaderExtension and GlobalSettings. + fbx_header_elements(root, scene_data) + + # Documents and References are pretty much void currently. + fbx_documents_elements(root, scene_data) + fbx_references_elements(root, scene_data) + + # Templates definitions. + fbx_definitions_elements(root, scene_data) + + # Actual data. + fbx_objects_elements(root, scene_data) + + # How data are inter-connected. + fbx_connections_elements(root, scene_data) + + # Animation. + fbx_takes_elements(root, scene_data) + + # Cleanup! + fbx_scene_data_cleanup(scene_data) + + # And we are done, all multithreaded tasks are complete, and we can write the whole thing to file! + encode_bin.write(filepath, root, FBX_VERSION) + + # Clear cached ObjectWrappers! + ObjectWrapper.cache_clear() + + # copy all collected files, if we did not embed them. + if not media_settings.embed_textures: + bpy_extras.io_utils.path_reference_copy(media_settings.copy_set) + + print('export finished in %.4f sec.' % (time.process_time() - start_time)) + return {'FINISHED'} + + +# defaults for applications, currently only unity but could add others. 
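+# A preset is just a dict of keyword overrides: a caller can merge it into the arguments it passes to
+# save(), which forwards unknown keys through **kwargs to save_single(). Rough usage sketch (hypothetical
+# caller code, not part of this module):
+#
+#     kwargs = defaults_unity3d()
+#     kwargs["filepath"] = "/tmp/model.fbx"  # hypothetical output path
+#     save(operator, context, **kwargs)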
+def defaults_unity3d(): + return { + # These options seem to produce the same result as the old Ascii exporter in Unity3D: + "axis_up": 'Y', + "axis_forward": '-Z', + "global_matrix": Matrix.Rotation(-math.pi / 2.0, 4, 'X'), + # Should really be True, but it can cause problems if a model is already in a scene or prefab + # with the old transforms. + "bake_space_transform": False, + + "use_selection": False, + + "object_types": {'ARMATURE', 'EMPTY', 'MESH', 'OTHER'}, + "use_mesh_modifiers": True, + "use_mesh_modifiers_render": True, + "use_mesh_edges": False, + "mesh_smooth_type": 'FACE', + "colors_type": 'SRGB', + "use_subsurf": False, + "use_tspace": False, # XXX Why? Unity is expected to support tspace import... + "use_triangles": False, + + "use_armature_deform_only": True, + + "use_custom_props": True, + + "bake_anim": True, + "bake_anim_simplify_factor": 1.0, + "bake_anim_step": 1.0, + "bake_anim_use_nla_strips": True, + "bake_anim_use_all_actions": True, + "add_leaf_bones": False, # Avoid memory/performance cost for something only useful for modelling + "primary_bone_axis": 'Y', # Doesn't really matter for Unity, so leave unchanged + "secondary_bone_axis": 'X', + + "path_mode": 'AUTO', + "embed_textures": False, + "batch_mode": 'OFF', + } + + +def save(operator, context, + filepath="", + use_selection=False, + use_visible=False, + use_active_collection=False, + collection="", + batch_mode='OFF', + use_batch_own_dir=False, + **kwargs + ): + """ + This is a wrapper around save_single, which handles multi-scenes (or collections) cases, when batch-exporting + a whole .blend file. + """ + + ret = {'FINISHED'} + + active_object = context.view_layer.objects.active + + org_mode = None + if active_object and active_object.mode != 'OBJECT' and bpy.ops.object.mode_set.poll(): + org_mode = active_object.mode + bpy.ops.object.mode_set(mode='OBJECT') + + if batch_mode == 'OFF': + kwargs_mod = kwargs.copy() + + source_collection = None + if use_active_collection: + source_collection = context.view_layer.active_layer_collection.collection + elif collection: + local_collection = bpy.data.collections.get((collection, None)) + if local_collection: + source_collection = local_collection + else: + operator.report({'ERROR'}, "Collection '%s' was not found" % collection) + return {'CANCELLED'} + + if source_collection: + if use_selection: + ctx_objects = tuple(obj for obj in source_collection.all_objects if obj.select_get()) + else: + ctx_objects = source_collection.all_objects + else: + if use_selection: + ctx_objects = context.selected_objects + else: + ctx_objects = context.view_layer.objects + if use_visible: + ctx_objects = tuple(obj for obj in ctx_objects if obj.visible_get()) + + # Ensure no Objects are in Edit mode. + # Copy to a tuple for safety, to avoid the risk of modifying ctx_objects while iterating. + for obj in tuple(ctx_objects): + if not ensure_object_not_in_edit_mode(context, obj): + operator.report({'ERROR'}, "%s could not be set out of Edit Mode, so cannot be exported" % obj.name) + return {'CANCELLED'} + + kwargs_mod["context_objects"] = ctx_objects + + depsgraph = context.evaluated_depsgraph_get() + ret = save_single(operator, context.scene, depsgraph, filepath, **kwargs_mod) + else: + # XXX We need a way to generate a depsgraph for inactive view_layers first... + # XXX Also, what to do in case of batch-exporting scenes, when there is more than one view layer? + # Scenes have no concept of 'active' view layer, that's on window level... 
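+        # Batch mode writes one .fbx per scene or collection. The incoming filepath is split just below
+        # into a target directory and a name prefix; illustrative example: filepath="/tmp/export" with a
+        # collection named "Props" is written to "/tmp/export_Props.fbx", or to
+        # "/tmp/export_Props/export_Props.fbx" when use_batch_own_dir is enabled.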
+ fbxpath = filepath + + prefix = os.path.basename(fbxpath) + if prefix: + fbxpath = os.path.dirname(fbxpath) + + if batch_mode == 'COLLECTION': + data_seq = tuple((coll, coll.name, 'objects') for coll in bpy.data.collections if coll.objects) + elif batch_mode in {'SCENE_COLLECTION', 'ACTIVE_SCENE_COLLECTION'}: + scenes = [context.scene] if batch_mode == 'ACTIVE_SCENE_COLLECTION' else bpy.data.scenes + data_seq = [] + for scene in scenes: + if not scene.objects: + continue + # Needed to avoid having tens of 'Scene Collection' entries. + todo_collections = [(scene.collection, "_".join((scene.name, scene.collection.name)))] + while todo_collections: + coll, coll_name = todo_collections.pop() + todo_collections.extend(((c, c.name) for c in coll.children if c.all_objects)) + data_seq.append((coll, coll_name, 'all_objects')) + else: + data_seq = tuple((scene, scene.name, 'objects') for scene in bpy.data.scenes if scene.objects) + + # Ensure no Objects are in Edit mode. + for data, data_name, data_obj_propname in data_seq: + # Copy to a tuple for safety, to avoid the risk of modifying the data prop while iterating it. + for obj in tuple(getattr(data, data_obj_propname)): + if not ensure_object_not_in_edit_mode(context, obj): + operator.report({'ERROR'}, + "%s in %s could not be set out of Edit Mode, so cannot be exported" + % (obj.name, data_name)) + return {'CANCELLED'} + + # call this function within a loop with BATCH_ENABLE == False + + new_fbxpath = fbxpath # own dir option modifies, we need to keep an original + for data, data_name, data_obj_propname in data_seq: # scene or collection + newname = "_".join((prefix, bpy.path.clean_name(data_name))) if prefix else bpy.path.clean_name(data_name) + + if use_batch_own_dir: + new_fbxpath = os.path.join(fbxpath, newname) + # path may already exist... and be a file. + while os.path.isfile(new_fbxpath): + new_fbxpath = "_".join((new_fbxpath, "dir")) + if not os.path.exists(new_fbxpath): + os.makedirs(new_fbxpath) + + filepath = os.path.join(new_fbxpath, newname + '.fbx') + + print('\nBatch exporting %s as...\n\t%r' % (data, filepath)) + + if batch_mode in {'COLLECTION', 'SCENE_COLLECTION', 'ACTIVE_SCENE_COLLECTION'}: + # Collection, so that objects update properly, add a dummy scene. + scene = bpy.data.scenes.new(name="FBX_Temp") + src_scenes = {} # Count how much each 'source' scenes are used. + for obj in getattr(data, data_obj_propname): + for src_sce in obj.users_scene: + src_scenes[src_sce] = src_scenes.setdefault(src_sce, 0) + 1 + scene.collection.objects.link(obj) + + # Find the 'most used' source scene, and use its unit settings. This is somewhat weak, but should work + # fine in most cases, and avoids stupid issues like T41931. + best_src_scene = None + best_src_scene_users = -1 + for sce, nbr_users in src_scenes.items(): + if (nbr_users) > best_src_scene_users: + best_src_scene_users = nbr_users + best_src_scene = sce + scene.unit_settings.system = best_src_scene.unit_settings.system + scene.unit_settings.system_rotation = best_src_scene.unit_settings.system_rotation + scene.unit_settings.scale_length = best_src_scene.unit_settings.scale_length + + # new scene [only one viewlayer to update] + scene.view_layers[0].update() + # TODO - BUMMER! 
Armatures not in the group wont animate the mesh + else: + scene = data + + kwargs_batch = kwargs.copy() + kwargs_batch["context_objects"] = getattr(data, data_obj_propname) + + save_single(operator, scene, scene.view_layers[0].depsgraph, filepath, **kwargs_batch) + + if batch_mode in {'COLLECTION', 'SCENE_COLLECTION', 'ACTIVE_SCENE_COLLECTION'}: + # Remove temp collection scene. + bpy.data.scenes.remove(scene) + + if active_object and org_mode: + context.view_layer.objects.active = active_object + if bpy.ops.object.mode_set.poll(): + bpy.ops.object.mode_set(mode=org_mode) + + return ret diff --git a/scripts/addons_core/io_scene_fbx/fbx2json.py b/scripts/addons_core/io_scene_fbx/fbx2json.py new file mode 100755 index 00000000000..b7102287da4 --- /dev/null +++ b/scripts/addons_core/io_scene_fbx/fbx2json.py @@ -0,0 +1,341 @@ +#!/usr/bin/env python3 +# SPDX-FileCopyrightText: 2006-2012 assimp team +# SPDX-FileCopyrightText: 2013 Blender Foundation +# +# SPDX-License-Identifier: GPL-2.0-or-later + +""" +Usage +===== + + fbx2json [FILES]... + +This script will write a JSON file for each FBX argument given. + + +Output +====== + +The JSON data is formatted into a list of nested lists of 4 items: + + ``[id, [data, ...], "data_types", [subtree, ...]]`` + +Where each list may be empty, and the items in +the subtree are formatted the same way. + +data_types is a string, aligned with data that spesifies a type +for each property. + +The types are as follows: + +* 'Z': - INT8 +* 'Y': - INT16 +* 'B': - BOOL +* 'C': - CHAR +* 'I': - INT32 +* 'F': - FLOAT32 +* 'D': - FLOAT64 +* 'L': - INT64 +* 'R': - BYTES +* 'S': - STRING +* 'f': - FLOAT32_ARRAY +* 'i': - INT32_ARRAY +* 'd': - FLOAT64_ARRAY +* 'l': - INT64_ARRAY +* 'b': - BOOL ARRAY +* 'c': - BYTE ARRAY + +Note that key:value pairs aren't used since the id's are not +ensured to be unique. +""" + + +# ---------------------------------------------------------------------------- +# FBX Binary Parser + +from struct import unpack +import array +import zlib + +# at the end of each nested block, there is a NUL record to indicate +# that the sub-scope exists (i.e. to distinguish between P: and P : {}) +_BLOCK_SENTINEL_LENGTH = ... +_BLOCK_SENTINEL_DATA = ... +read_fbx_elem_uint = ... +_IS_BIG_ENDIAN = (__import__("sys").byteorder != 'little') +_HEAD_MAGIC = b'Kaydara FBX Binary\x20\x20\x00\x1a\x00' +from collections import namedtuple +FBXElem = namedtuple("FBXElem", ("id", "props", "props_type", "elems")) +del namedtuple + + +def read_uint(read): + return unpack(b'"TCDefinition" to control the FBX_KTIME opt-in in FBX version 7700. +FBX_HEADER_VERSION = 1003 +FBX_SCENEINFO_VERSION = 100 +FBX_TEMPLATES_VERSION = 100 + +FBX_MODELS_VERSION = 232 + +FBX_GEOMETRY_VERSION = 124 +# Revert back normals to 101 (simple 3D values) for now, 102 (4D + weights) seems not well supported by most apps +# currently, apart from some AD products. 
+FBX_GEOMETRY_NORMAL_VERSION = 101 +FBX_GEOMETRY_BINORMAL_VERSION = 101 +FBX_GEOMETRY_TANGENT_VERSION = 101 +FBX_GEOMETRY_SMOOTHING_VERSION = 102 +FBX_GEOMETRY_CREASE_VERSION = 101 +FBX_GEOMETRY_VCOLOR_VERSION = 101 +FBX_GEOMETRY_UV_VERSION = 101 +FBX_GEOMETRY_MATERIAL_VERSION = 101 +FBX_GEOMETRY_LAYER_VERSION = 100 +FBX_GEOMETRY_SHAPE_VERSION = 100 +FBX_DEFORMER_SHAPE_VERSION = 100 +FBX_DEFORMER_SHAPECHANNEL_VERSION = 100 +FBX_POSE_BIND_VERSION = 100 +FBX_DEFORMER_SKIN_VERSION = 101 +FBX_DEFORMER_CLUSTER_VERSION = 100 +FBX_MATERIAL_VERSION = 102 +FBX_TEXTURE_VERSION = 202 +FBX_ANIM_KEY_VERSION = 4008 + +FBX_NAME_CLASS_SEP = b"\x00\x01" +FBX_ANIM_PROPSGROUP_NAME = "d" + +FBX_KTIME_V7 = 46186158000 # This is the number of "ktimes" in one second (yep, precision over the nanosecond...) +# FBX 2019.5 (FBX version 7700) changed the number of "ktimes" per second, however, the new value is opt-in until FBX +# version 8000 where it will probably become opt-out. +FBX_KTIME_V8 = 141120000 +# To explicitly use the V7 value in FBX versions 7700-7XXX: fbx_root->"FBXHeaderExtension"->"OtherFlags"->"TCDefinition" +# is set to 127. +# To opt in to the V8 value in FBX version 7700-7XXX: "TCDefinition" is set to 0. +FBX_TIMECODE_DEFINITION_TO_KTIME_PER_SECOND = { + 0: FBX_KTIME_V8, + 127: FBX_KTIME_V7, +} +# The "ktimes" per second for Blender exported FBX is constant because the exported `FBX_VERSION` is constant. +FBX_KTIME = FBX_KTIME_V8 if FBX_VERSION >= 8000 else FBX_KTIME_V7 + + +MAT_CONVERT_LIGHT = Matrix.Rotation(math.pi / 2.0, 4, 'X') # Blender is -Z, FBX is -Y. +MAT_CONVERT_CAMERA = Matrix.Rotation(math.pi / 2.0, 4, 'Y') # Blender is -Z, FBX is +X. +# XXX I can't get this working :( +# MAT_CONVERT_BONE = Matrix.Rotation(math.pi / 2.0, 4, 'Z') # Blender is +Y, FBX is -X. +MAT_CONVERT_BONE = Matrix() + + +BLENDER_OTHER_OBJECT_TYPES = {'CURVE', 'SURFACE', 'FONT', 'META'} +BLENDER_OBJECT_TYPES_MESHLIKE = {'MESH'} | BLENDER_OTHER_OBJECT_TYPES + +SHAPE_KEY_SLIDER_HARD_MIN = bpy.types.ShapeKey.bl_rna.properties["slider_min"].hard_min +SHAPE_KEY_SLIDER_HARD_MAX = bpy.types.ShapeKey.bl_rna.properties["slider_max"].hard_max + + +# Lamps. +FBX_LIGHT_TYPES = { + 'POINT': 0, # Point. + 'SUN': 1, # Directional. + 'SPOT': 2, # Spot. + 'HEMI': 1, # Directional. + 'AREA': 3, # Area. +} +FBX_LIGHT_DECAY_TYPES = { + 'CONSTANT': 0, # None. + 'INVERSE_LINEAR': 1, # Linear. + 'INVERSE_SQUARE': 2, # Quadratic. + 'INVERSE_COEFFICIENTS': 2, # Quadratic... + 'CUSTOM_CURVE': 2, # Quadratic. + 'LINEAR_QUADRATIC_WEIGHTED': 2, # Quadratic. +} + + +RIGHT_HAND_AXES = { + # Up, Forward -> FBX values (tuples of (axis, sign), Up, Front, Coord). + ('X', '-Y'): ((0, 1), (1, 1), (2, 1)), + ('X', 'Y'): ((0, 1), (1, -1), (2, -1)), + ('X', '-Z'): ((0, 1), (2, 1), (1, -1)), + ('X', 'Z'): ((0, 1), (2, -1), (1, 1)), + ('-X', '-Y'): ((0, -1), (1, 1), (2, -1)), + ('-X', 'Y'): ((0, -1), (1, -1), (2, 1)), + ('-X', '-Z'): ((0, -1), (2, 1), (1, 1)), + ('-X', 'Z'): ((0, -1), (2, -1), (1, -1)), + ('Y', '-X'): ((1, 1), (0, 1), (2, -1)), + ('Y', 'X'): ((1, 1), (0, -1), (2, 1)), + ('Y', '-Z'): ((1, 1), (2, 1), (0, 1)), + ('Y', 'Z'): ((1, 1), (2, -1), (0, -1)), + ('-Y', '-X'): ((1, -1), (0, 1), (2, 1)), + ('-Y', 'X'): ((1, -1), (0, -1), (2, -1)), + ('-Y', '-Z'): ((1, -1), (2, 1), (0, -1)), + ('-Y', 'Z'): ((1, -1), (2, -1), (0, 1)), + ('Z', '-X'): ((2, 1), (0, 1), (1, 1)), + ('Z', 'X'): ((2, 1), (0, -1), (1, -1)), + ('Z', '-Y'): ((2, 1), (1, 1), (0, -1)), + ('Z', 'Y'): ((2, 1), (1, -1), (0, 1)), # Blender system! 
+ ('-Z', '-X'): ((2, -1), (0, 1), (1, -1)), + ('-Z', 'X'): ((2, -1), (0, -1), (1, 1)), + ('-Z', '-Y'): ((2, -1), (1, 1), (0, 1)), + ('-Z', 'Y'): ((2, -1), (1, -1), (0, -1)), +} + + +# NOTE: Not fully in enum value order, since when exporting the first entry matching the framerate value is used +# (e.g. better have NTSC fullframe than NTSC drop frame for 29.97 framerate). +FBX_FRAMERATES = ( + # (-1.0, 0), # Default framerate. + (-1.0, 14), # Custom framerate. + (120.0, 1), + (100.0, 2), + (60.0, 3), + (50.0, 4), + (48.0, 5), + (30.0, 6), # BW NTSC, full frame. + (30.0, 7), # Drop frame. + (30.0 / 1.001, 9), # Color NTSC, full frame. + (30.0 / 1.001, 8), # Color NTSC, drop frame. + (25.0, 10), + (24.0, 11), + # (1.0, 12), # 1000 milli/s (use for date time?). + (24.0 / 1.001, 13), + (96.0, 15), + (72.0, 16), + (60.0 / 1.001, 17), + (120.0 / 1.001, 18), +) + + +# ##### Misc utilities ##### + +# Enable performance reports (measuring time used to perform various steps of importing or exporting). +DO_PERFMON = False + +if DO_PERFMON: + class PerfMon(): + def __init__(self): + self.level = -1 + self.ref_time = [] + + def level_up(self, message=""): + self.level += 1 + self.ref_time.append(None) + if message: + print("\t" * self.level, message, sep="") + + def level_down(self, message=""): + if not self.ref_time: + if message: + print(message) + return + ref_time = self.ref_time[self.level] + print("\t" * self.level, + "\tDone (%f sec)\n" % ((time.process_time() - ref_time) if ref_time is not None else 0.0), + sep="") + if message: + print("\t" * self.level, message, sep="") + del self.ref_time[self.level] + self.level -= 1 + + def step(self, message=""): + ref_time = self.ref_time[self.level] + curr_time = time.process_time() + if ref_time is not None: + print("\t" * self.level, "\tDone (%f sec)\n" % (curr_time - ref_time), sep="") + self.ref_time[self.level] = curr_time + print("\t" * self.level, message, sep="") +else: + class PerfMon(): + def __init__(self): + pass + + def level_up(self, message=""): + pass + + def level_down(self, message=""): + pass + + def step(self, message=""): + pass + + +# Scale/unit mess. FBX can store the 'reference' unit of a file in its UnitScaleFactor property +# (1.0 meaning centimeter, afaik). We use that to reflect user's default unit as set in Blender with scale_length. +# However, we always get values in BU (i.e. meters), so we have to reverse-apply that scale in global matrix... +# Note that when no default unit is available, we assume 'meters' (and hence scale by 100). +def units_blender_to_fbx_factor(scene): + return 100.0 if (scene.unit_settings.system == 'NONE') else (100.0 * scene.unit_settings.scale_length) + + +# Note: this could be in a utility (math.units e.g.)... + +UNITS = { + "meter": 1.0, # Ref unit! + "kilometer": 0.001, + "millimeter": 1000.0, + "foot": 1.0 / 0.3048, + "inch": 1.0 / 0.0254, + "turn": 1.0, # Ref unit! + "degree": 360.0, + "radian": math.pi * 2.0, + "second": 1.0, # Ref unit! + "ktime": FBX_KTIME, # For export use only because the imported "ktimes" per second may vary. 
+} + + +def units_convertor(u_from, u_to): + """Return a convertor between specified units.""" + conv = UNITS[u_to] / UNITS[u_from] + return lambda v: v * conv + + +def units_convertor_iter(u_from, u_to): + """Return an iterable convertor between specified units.""" + conv = units_convertor(u_from, u_to) + + def convertor(it): + for v in it: + yield(conv(v)) + + return convertor + + +def matrix4_to_array(mat): + """Concatenate matrix's columns into a single, flat tuple""" + # blender matrix is row major, fbx is col major so transpose on write + return tuple(f for v in mat.transposed() for f in v) + + +def array_to_matrix4(arr): + """Convert a single 16-len tuple into a valid 4D Blender matrix""" + # Blender matrix is row major, fbx is col major so transpose on read + return Matrix(tuple(zip(*[iter(arr)] * 4))).transposed() + + +def parray_as_ndarray(arr): + """Convert an array.array into an np.ndarray that shares the same memory""" + return np.frombuffer(arr, dtype=arr.typecode) + + +def similar_values(v1, v2, e=1e-6): + """Return True if v1 and v2 are nearly the same.""" + if v1 == v2: + return True + return ((abs(v1 - v2) / max(abs(v1), abs(v2))) <= e) + + +def similar_values_iter(v1, v2, e=1e-6): + """Return True if iterables v1 and v2 are nearly the same.""" + if v1 == v2: + return True + for v1, v2 in zip(v1, v2): + if (v1 != v2) and ((abs(v1 - v2) / max(abs(v1), abs(v2))) > e): + return False + return True + + +def shape_difference_exclude_similar(sv_cos, ref_cos, e=1e-6): + """Return a tuple of: + the difference between the vertex cos in sv_cos and ref_cos, excluding any that are nearly the same, + and the indices of the vertices that are not nearly the same""" + assert(sv_cos.size == ref_cos.size) + + # Create views of 1 co per row of the arrays, only making copies if needed. + sv_cos = sv_cos.reshape(-1, 3) + ref_cos = ref_cos.reshape(-1, 3) + + # Quick check for equality + if np.array_equal(sv_cos, ref_cos): + # There's no difference between the two arrays. + empty_cos = np.empty((0, 3), dtype=sv_cos.dtype) + empty_indices = np.empty(0, dtype=np.int32) + return empty_cos, empty_indices + + # Note that unlike math.isclose(a,b), np.isclose(a,b) is not symmetrical and the second argument 'b', is + # considered to be the reference value. + # Note that atol=0 will mean that if only one co component being compared is zero, they won't be considered close. + similar_mask = np.isclose(sv_cos, ref_cos, atol=0, rtol=e) + + # A co is only similar if every component in it is similar. + co_similar_mask = np.all(similar_mask, axis=1) + + # Get the indices of cos that are not similar. + not_similar_verts_idx = np.flatnonzero(~co_similar_mask) + + # Subtracting first over the entire arrays and then indexing seems faster than indexing both arrays first and then + # subtracting, until less than about 3% of the cos are being indexed. + difference_cos = (sv_cos - ref_cos)[not_similar_verts_idx] + return difference_cos, not_similar_verts_idx + + +def _mat4_vec3_array_multiply(mat4, vec3_array, dtype=None, return_4d=False): + """Multiply a 4d matrix by each 3d vector in an array and return as an array of either 3d or 4d vectors. 
+ + A view of the input array is returned if return_4d=False, the dtype matches the input array and either the matrix is + None or, ignoring the last row, is a 3x3 identity matrix with no translation: + ┌1, 0, 0, 0┐ + │0, 1, 0, 0│ + └0, 0, 1, 0┘ + + When dtype=None, it defaults to the dtype of the input array.""" + return_dtype = dtype if dtype is not None else vec3_array.dtype + vec3_array = vec3_array.reshape(-1, 3) + + # Multiplying a 4d mathutils.Matrix by a 3d mathutils.Vector implicitly extends the Vector to 4d during the + # calculation by appending 1.0 to the Vector and then the 4d result is truncated back to 3d. + # Numpy does not do an implicit extension to 4d, so it would have to be done explicitly by extending the entire + # vec3_array to 4d. + # However, since the w component of the vectors is always 1.0, the last column can be excluded from the + # multiplication and then added to every multiplied vector afterwards, which avoids having to make a 4d copy of + # vec3_array beforehand. + # For a single column vector: + # ┌a, b, c, d┐ ┌x┐ ┌ax+by+cz+d┐ + # │e, f, g, h│ @ │y│ = │ex+fy+gz+h│ + # │i, j, k, l│ │z│ │ix+jy+kz+l│ + # └m, n, o, p┘ └1┘ └mx+ny+oz+p┘ + # ┌a, b, c┐ ┌x┐ ┌d┐ ┌ax+by+cz┐ ┌d┐ ┌ax+by+cz+d┐ + # │e, f, g│ @ │y│ + │h│ = │ex+fy+gz│ + │h│ = │ex+fy+gz+h│ + # │i, j, k│ └z┘ │l│ │ix+jy+kz│ │l│ │ix+jy+kz+l│ + # └m, n, o┘ └p┘ └mx+ny+oz┘ └p┘ └mx+ny+oz+p┘ + + # column_vector_multiplication in mathutils_Vector.c uses double precision math for Matrix @ Vector by casting the + # matrix's values to double precision and then casts back to single precision when returning the result, so at least + # double precision math is always be used to match standard Blender behaviour. + math_precision = np.result_type(np.double, vec3_array) + + to_multiply = None + to_add = None + w_to_set = 1.0 + if mat4 is not None: + mat_np = np.array(mat4, dtype=math_precision) + # Identity matrix is compared against to check if any matrix multiplication is required. + identity = np.identity(4, dtype=math_precision) + if not return_4d: + # If returning 3d, the entire last row of the matrix can be ignored because it only affects the w component. + mat_np = mat_np[:3] + identity = identity[:3] + + # Split mat_np into the columns to multiply and the column to add afterwards. + # First 3 columns + multiply_columns = mat_np[:, :3] + multiply_identity = identity[:, :3] + # Last column only + add_column = mat_np.T[3] + + # Analyze the split parts of the matrix to figure out if there is anything to multiply and anything to add. + if not np.array_equal(multiply_columns, multiply_identity): + to_multiply = multiply_columns + + if return_4d and to_multiply is None: + # When there's nothing to multiply, the w component of add_column can be set directly into the array because + # mx+ny+oz+p becomes 0x+0y+0z+p where p is add_column[3]. + w_to_set = add_column[3] + # Replace add_column with a view of only the translation. + add_column = add_column[:3] + + if add_column.any(): + to_add = add_column + + if to_multiply is None: + # If there's anything to add, ensure it's added using the precision being used for math. + array_dtype = math_precision if to_add is not None else return_dtype + if return_4d: + multiplied_vectors = np.empty((len(vec3_array), 4), dtype=array_dtype) + multiplied_vectors[:, :3] = vec3_array + multiplied_vectors[:, 3] = w_to_set + else: + # If there's anything to add, ensure a copy is made so that the input vec3_array isn't modified. 
+ multiplied_vectors = vec3_array.astype(array_dtype, copy=to_add is not None) + else: + # Matrix multiplication has the signature (n,k) @ (k,m) -> (n,m). + # Where v is the number of vectors in vec3_array and d is the number of vector dimensions to return: + # to_multiply has shape (d,3), vec3_array has shape (v,3) and the result should have shape (v,d). + # Either vec3_array or to_multiply must be transposed: + # Can transpose vec3_array and then transpose the result: + # (v,3).T -> (3,v); (d,3) @ (3,v) -> (d,v); (d,v).T -> (v,d) + # Or transpose to_multiply and swap the order of multiplication: + # (d,3).T -> (3,d); (v,3) @ (3,d) -> (v,d) + # There's no, or negligible, performance difference between the two options, however, the result of the latter + # will be C contiguous in memory, making it faster to convert to flattened bytes with .tobytes(). + multiplied_vectors = vec3_array @ to_multiply.T + + if to_add is not None: + for axis, to_add_to_axis in zip(multiplied_vectors.T, to_add): + if to_add_to_axis != 0: + axis += to_add_to_axis + + # Cast to the desired return type before returning. + return multiplied_vectors.astype(return_dtype, copy=False) + + +def vcos_transformed(raw_cos, m=None, dtype=None): + return _mat4_vec3_array_multiply(m, raw_cos, dtype) + + +def nors_transformed(raw_nors, m=None, dtype=None): + # Great, now normals are also expected 4D! + # XXX Back to 3D normals for now! + # return _mat4_vec3_array_multiply(m, raw_nors, dtype, return_4d=True) + return _mat4_vec3_array_multiply(m, raw_nors, dtype) + + +def astype_view_signedness(arr, new_dtype): + """Unsafely views arr as new_dtype if the itemsize and byteorder of arr matches but the signedness does not. + + Safely views arr as new_dtype if both arr and new_dtype have the same itemsize, byteorder and signedness, but could + have a different character code, e.g. 'i' and 'l'. np.ndarray.astype with copy=False does not normally create this + view, but Blender can be picky about the character code used, so this function will create the view. + + Otherwise, calls np.ndarray.astype with copy=False. + + The benefit of copy=False is that if the array can be safely viewed as the new type, then a view is made, instead of + a copy with the new type. + + Unsigned types can't be viewed safely as signed or vice-versa, meaning that a copy would always be made by + .astype(..., copy=False). + + This is intended for viewing uintc data (a common Blender C type with variable itemsize, though usually 4 bytes, so + uint32) as int32 (a common FBX type), when the itemsizes match.""" + arr_dtype = arr.dtype + + if not isinstance(new_dtype, np.dtype): + # new_dtype could be a type instance or a string, but it needs to be a dtype to compare its itemsize, byteorder + # and kind. + new_dtype = np.dtype(new_dtype) + + # For simplicity, only dtypes of the same itemsize and byteorder, but opposite signedness, are handled. Everything + # else is left to .astype. + arr_kind = arr_dtype.kind + new_kind = new_dtype.kind + # Signed and unsigned int are opposite in terms of signedness. Other types don't have signedness. + integer_kinds = {'i', 'u'} + if ( + arr_kind in integer_kinds and new_kind in integer_kinds + and arr_dtype.itemsize == new_dtype.itemsize + and arr_dtype.byteorder == new_dtype.byteorder + ): + # arr and new_dtype have signedness and matching itemsize and byteorder, so return a view of the new type. 
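+        # Illustrative: np.zeros(4, dtype=np.uintc).view(np.int32) reuses the same buffer, whereas
+        # .astype(np.int32, copy=False) would still copy because the signedness differs.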
+ return arr.view(new_dtype) + else: + return arr.astype(new_dtype, copy=False) + + +def fast_first_axis_flat(ar): + """Get a flat view (or a copy if a view is not possible) of the input array whereby each element is a single element + of a dtype that is fast to sort, sorts according to individual bytes and contains the data for an entire row (and + any further dimensions) of the input array. + + Since the dtype of the view could sort in a different order to the dtype of the input array, this isn't typically + useful for actual sorting, but it is useful for sorting-based uniqueness, such as np.unique.""" + # If there are no rows, each element will be viewed as the new dtype. + elements_per_row = math.prod(ar.shape[1:]) + row_itemsize = ar.itemsize * elements_per_row + + # Get a dtype with itemsize that equals row_itemsize. + # Integer types sort the fastest, but are only available for specific itemsizes. + uint_dtypes_by_itemsize = {1: np.uint8, 2: np.uint16, 4: np.uint32, 8: np.uint64} + # Signed/unsigned makes no noticeable speed difference, but using unsigned will result in ordering according to + # individual bytes like the other, non-integer types. + if row_itemsize in uint_dtypes_by_itemsize: + entire_row_dtype = uint_dtypes_by_itemsize[row_itemsize] + else: + # When using kind='stable' sorting, numpy only uses radix sort with integer types, but it's still + # significantly faster to sort by a single item per row instead of multiple row elements or multiple structured + # type fields. + # Construct a flexible size dtype with matching itemsize. + # Should always be 4 because each character in a unicode string is UCS4. + str_itemsize = np.dtype((np.str_, 1)).itemsize + if row_itemsize % str_itemsize == 0: + # Unicode strings seem to be slightly faster to sort than bytes. + entire_row_dtype = np.dtype((np.str_, row_itemsize // str_itemsize)) + else: + # Bytes seem to be slightly faster to sort than raw bytes (np.void). + entire_row_dtype = np.dtype((np.bytes_, row_itemsize)) + + # View each element along the first axis as a single element. + # View (or copy if a view is not possible) as flat + ar = ar.reshape(-1) + # To view as a dtype of different size, the last axis (entire array in NumPy 1.22 and earlier) must be C-contiguous. + if row_itemsize != ar.itemsize and not ar.flags.c_contiguous: + ar = np.ascontiguousarray(ar) + return ar.view(entire_row_dtype) + + +def fast_first_axis_unique(ar, return_unique=True, return_index=False, return_inverse=False, return_counts=False): + """np.unique with axis=0 but optimised for when the input array has multiple elements per row, and the returned + unique array doesn't need to be sorted. + + Arrays with more than one element per row are more costly to sort in np.unique due to being compared one + row-element at a time, like comparing tuples. + + By viewing each entire row as a single non-structured element, much faster sorting can be achieved. Since the values + are viewed as a different type to their original, this means that the returned array of unique values may not be + sorted according to their original type. + + The array of unique values can be excluded from the returned tuple by specifying return_unique=False. + + Float type caveats: + All elements of -0.0 in the input array will be replaced with 0.0 to ensure that both values are collapsed into one. + NaN values can have lots of different byte representations (e.g. signalling/quiet and custom payloads). 
Only the + duplicates of each unique byte representation will be collapsed into one.""" + # At least something should always be returned. + assert(return_unique or return_index or return_inverse or return_counts) + # Only signed integer, unsigned integer and floating-point kinds of data are allowed. Other kinds of data have not + # been tested. + assert(ar.dtype.kind in "iuf") + + # Floating-point types have different byte representations for -0.0 and 0.0. Collapse them together by replacing all + # -0.0 in the input array with 0.0. + if ar.dtype.kind == 'f': + ar[ar == -0.0] = 0.0 + + # It's a bit annoying that the unique array is always calculated even when it might not be needed, but it is + # generally insignificant compared to the cost of sorting. + result = np.unique(fast_first_axis_flat(ar), return_index=return_index, + return_inverse=return_inverse, return_counts=return_counts) + + if return_unique: + unique = result[0] if isinstance(result, tuple) else result + # View in the original dtype. + unique = unique.view(ar.dtype) + # Return the same number of elements per row and any extra dimensions per row as the input array. + unique.shape = (-1, *ar.shape[1:]) + if isinstance(result, tuple): + return (unique,) + result[1:] + else: + return unique + else: + # Remove the first element, the unique array. + result = result[1:] + if len(result) == 1: + # Unpack single element tuples. + return result[0] + else: + return result + + +def ensure_object_not_in_edit_mode(context, obj): + """Objects in Edit mode usually cannot be exported because much of the API used when exporting is not available for + Objects in Edit mode. + + Exiting the currently active Object (and any other Objects opened in multi-editing) from Edit mode is simple and + should be done with `bpy.ops.mesh.mode_set(mode='OBJECT')` instead of using this function. + + This function is for the rare case where an Object is in Edit mode, but the current context mode is not Edit mode. + This can occur from a state where the current context mode is Edit mode, but then the active Object of the current + View Layer is changed to a different Object that is not in Edit mode. This changes the current context mode, but + leaves the other Object(s) in Edit mode. + """ + if obj.mode != 'EDIT': + return True + + # Get the active View Layer. + view_layer = context.view_layer + + # A View Layer belongs to a scene. + scene = view_layer.id_data + + # Get the current active Object of this View Layer, so we can restore it once done. + orig_active = view_layer.objects.active + + # Check if obj is in the View Layer. If obj is not in the View Layer, it cannot be set as the active Object. + # We don't use `obj.name in view_layer.objects` because an Object from a Library could have the same name. + is_in_view_layer = any(o == obj for o in view_layer.objects) + + do_unlink_from_scene_collection = False + try: + if not is_in_view_layer: + # There might not be any enabled collections in the View Layer, so link obj into the Scene Collection + # instead, which is always available to all View Layers of that Scene. + scene.collection.objects.link(obj) + do_unlink_from_scene_collection = True + view_layer.objects.active = obj + + # Now we're finally ready to attempt to change obj's mode. + if bpy.ops.object.mode_set.poll(): + bpy.ops.object.mode_set(mode='OBJECT') + if obj.mode == 'EDIT': + # The Object could not be set out of EDIT mode and therefore cannot be exported. 
+ return False + finally: + # Always restore the original active Object and unlink obj from the Scene Collection if it had to be linked. + view_layer.objects.active = orig_active + if do_unlink_from_scene_collection: + scene.collection.objects.unlink(obj) + + return True + + +def expand_shape_key_range(shape_key, value_to_fit): + """Attempt to expand the slider_min/slider_max of a shape key to fit `value_to_fit` within the slider range, + expanding slightly beyond `value_to_fit` if possible, so that the new slider_min/slider_max is not the same as + `value_to_fit`. Blender has a hard minimum and maximum for slider values, so it may not be possible to fit the value + within the slider range. + + If `value_to_fit` is already within the slider range, no changes are made. + + First tries setting slider_min/slider_max to double `value_to_fit`, otherwise, expands the range in the direction of + `value_to_fit` by double the distance to `value_to_fit`. + + The new slider_min/slider_max is rounded down/up to the nearest whole number for a more visually pleasing result. + + Returns whether it was possible to expand the slider range to fit `value_to_fit`.""" + if value_to_fit < (slider_min := shape_key.slider_min): + if value_to_fit < 0.0: + # For the most common case, set slider_min to double value_to_fit. + target_slider_min = value_to_fit * 2.0 + else: + # Doubling value_to_fit would make it larger, so instead decrease slider_min by double the distance between + # slider_min and value_to_fit. + target_slider_min = slider_min - (slider_min - value_to_fit) * 2.0 + # Set slider_min to the first whole number less than or equal to target_slider_min. + shape_key.slider_min = math.floor(target_slider_min) + + return value_to_fit >= SHAPE_KEY_SLIDER_HARD_MIN + elif value_to_fit > (slider_max := shape_key.slider_max): + if value_to_fit > 0.0: + # For the most common case, set slider_max to double value_to_fit. + target_slider_max = value_to_fit * 2.0 + else: + # Doubling value_to_fit would make it smaller, so instead increase slider_max by double the distance between + # slider_max and value_to_fit. + target_slider_max = slider_max + (value_to_fit - slider_max) * 2.0 + # Set slider_max to the first whole number greater than or equal to target_slider_max. + shape_key.slider_max = math.ceil(target_slider_max) + + return value_to_fit <= SHAPE_KEY_SLIDER_HARD_MAX + else: + # Value is already within the range. + return True + + +# ##### Attribute utils. ##### +AttributeDataTypeInfo = namedtuple("AttributeDataTypeInfo", ["dtype", "foreach_attribute", "item_size"]) +_attribute_data_type_info_lookup = { + 'FLOAT': AttributeDataTypeInfo(np.single, "value", 1), + 'INT': AttributeDataTypeInfo(np.intc, "value", 1), + 'FLOAT_VECTOR': AttributeDataTypeInfo(np.single, "vector", 3), + 'FLOAT_COLOR': AttributeDataTypeInfo(np.single, "color", 4), # color_srgb is an alternative + 'BYTE_COLOR': AttributeDataTypeInfo(np.single, "color", 4), # color_srgb is an alternative + 'STRING': AttributeDataTypeInfo(None, "value", 1), # Not usable with foreach_get/set + 'BOOLEAN': AttributeDataTypeInfo(bool, "value", 1), + 'FLOAT2': AttributeDataTypeInfo(np.single, "vector", 2), + 'INT8': AttributeDataTypeInfo(np.intc, "value", 1), + 'INT32_2D': AttributeDataTypeInfo(np.intc, "value", 2), +} + + +def attribute_get(attributes, name, data_type, domain): + """Get an attribute by its name, data_type and domain. 
+ + Returns None if no attribute with this name, data_type and domain exists.""" + attr = attributes.get(name) + if not attr: + return None + if attr.data_type == data_type and attr.domain == domain: + return attr + # It shouldn't normally happen, but it's possible there are multiple attributes with the same name, but different + # data_types or domains. + for attr in attributes: + if attr.name == name and attr.data_type == data_type and attr.domain == domain: + return attr + return None + + +def attribute_foreach_set(attribute, array_or_list, foreach_attribute=None): + """Set every value of an attribute with foreach_set.""" + if foreach_attribute is None: + foreach_attribute = _attribute_data_type_info_lookup[attribute.data_type].foreach_attribute + attribute.data.foreach_set(foreach_attribute, array_or_list) + + +def attribute_to_ndarray(attribute, foreach_attribute=None): + """Create a NumPy ndarray from an attribute.""" + data = attribute.data + data_type_info = _attribute_data_type_info_lookup[attribute.data_type] + ndarray = np.empty(len(data) * data_type_info.item_size, dtype=data_type_info.dtype) + if foreach_attribute is None: + foreach_attribute = data_type_info.foreach_attribute + data.foreach_get(foreach_attribute, ndarray) + return ndarray + + +@dataclass +class AttributeDescription: + """Helper class to reduce duplicate code for handling built-in Blender attributes.""" + name: str + # Valid identifiers can be found in bpy.types.Attribute.bl_rna.properties["data_type"].enum_items + data_type: str + # Valid identifiers can be found in bpy.types.Attribute.bl_rna.properties["domain"].enum_items + domain: str + # Some attributes are required to exist if certain conditions are met. If a required attribute does not exist when + # attempting to get it, an AssertionError is raised. + is_required_check: Callable[[bpy.types.AttributeGroup], bool] = None + # NumPy dtype that matches the internal C data of this attribute. + dtype: np.dtype = field(init=False) + # The default attribute name to use with foreach_get and foreach_set. + foreach_attribute: str = field(init=False) + # The number of elements per value of the attribute when flattened into a 1-dimensional list/array. + item_size: int = field(init=False) + + def __post_init__(self): + data_type_info = _attribute_data_type_info_lookup[self.data_type] + self.dtype = data_type_info.dtype + self.foreach_attribute = data_type_info.foreach_attribute + self.item_size = data_type_info.item_size + + def is_required(self, attributes): + """Check if the attribute is required to exist in the provided attributes.""" + is_required_check = self.is_required_check + return is_required_check and is_required_check(attributes) + + def get(self, attributes): + """Get the attribute. + + If the attribute is required, but does not exist, an AssertionError is raised, otherwise None is returned.""" + attr = attribute_get(attributes, self.name, self.data_type, self.domain) + if not attr and self.is_required(attributes): + raise AssertionError("Required attribute '%s' with type '%s' and domain '%s' not found in %r" + % (self.name, self.data_type, self.domain, attributes)) + return attr + + def ensure(self, attributes): + """Get the attribute, creating it if it does not exist. + + Raises a RuntimeError if the attribute could not be created, which should only happen when attempting to create + an attribute with a reserved name, but with the wrong data_type or domain. See usage of + BuiltinCustomDataLayerProvider in Blender source for most reserved names. 
+ + There is no guarantee that the returned attribute has the desired name because the name could already be in use + by another attribute with a different data_type and/or domain.""" + attr = self.get(attributes) + if attr: + return attr + + attr = attributes.new(self.name, self.data_type, self.domain) + if not attr: + raise RuntimeError("Could not create attribute '%s' with type '%s' and domain '%s' in %r" + % (self.name, self.data_type, self.domain, attributes)) + return attr + + def foreach_set(self, attributes, array_or_list, foreach_attribute=None): + """Get the attribute, creating it if it does not exist, and then set every value in the attribute.""" + attribute_foreach_set(self.ensure(attributes), array_or_list, foreach_attribute) + + def get_ndarray(self, attributes, foreach_attribute=None): + """Get the attribute and if it exists, return a NumPy ndarray containing its data, otherwise return None.""" + attr = self.get(attributes) + return attribute_to_ndarray(attr, foreach_attribute) if attr else None + + def to_ndarray(self, attributes, foreach_attribute=None): + """Get the attribute and if it exists, return a NumPy ndarray containing its data, otherwise return a + zero-length ndarray.""" + ndarray = self.get_ndarray(attributes, foreach_attribute) + return ndarray if ndarray is not None else np.empty(0, dtype=self.dtype) + + +# Built-in Blender attributes +# Only attributes used by the importer/exporter are included here. +# See usage of BuiltinCustomDataLayerProvider in Blender source to find most built-in attributes. +MESH_ATTRIBUTE_MATERIAL_INDEX = AttributeDescription("material_index", 'INT', 'FACE') +MESH_ATTRIBUTE_POSITION = AttributeDescription("position", 'FLOAT_VECTOR', 'POINT', + is_required_check=lambda attributes: bool(attributes.id_data.vertices)) +MESH_ATTRIBUTE_SHARP_EDGE = AttributeDescription("sharp_edge", 'BOOLEAN', 'EDGE') +MESH_ATTRIBUTE_EDGE_VERTS = AttributeDescription(".edge_verts", 'INT32_2D', 'EDGE', + is_required_check=lambda attributes: bool(attributes.id_data.edges)) +MESH_ATTRIBUTE_CORNER_VERT = AttributeDescription(".corner_vert", 'INT', 'CORNER', + is_required_check=lambda attributes: bool(attributes.id_data.loops)) +MESH_ATTRIBUTE_CORNER_EDGE = AttributeDescription(".corner_edge", 'INT', 'CORNER', + is_required_check=lambda attributes: bool(attributes.id_data.loops)) +MESH_ATTRIBUTE_SHARP_FACE = AttributeDescription("sharp_face", 'BOOLEAN', 'FACE') + + +# ##### UIDs code. ##### + +# ID class (mere int). +class UUID(int): + pass + + +# UIDs storage. +_keys_to_uuids = {} +_uuids_to_keys = {} + + +def _key_to_uuid(uuids, key): + # TODO: Check this is robust enough for our needs! + # Note: We assume we have already checked the related key wasn't yet in _keys_to_uids! + # As int64 is signed in FBX, we keep uids below 2**63... + if isinstance(key, int) and 0 <= key < 2**63: + # We can use value directly as id! + uuid = key + else: + uuid = hash(key) + if uuid < 0: + uuid = -uuid + if uuid >= 2**63: + uuid //= 2 + # Try to make our uid shorter! + if uuid > int(1e9): + t_uuid = uuid % int(1e9) + if t_uuid not in uuids: + uuid = t_uuid + # Make sure our uuid *is* unique. + if uuid in uuids: + inc = 1 if uuid < 2**62 else -1 + while uuid in uuids: + uuid += inc + if 0 > uuid >= 2**63: + # Note that this is more that unlikely, but does not harm anyway... + raise ValueError("Unable to generate an UUID for key {}".format(key)) + return UUID(uuid) + + +def get_fbx_uuid_from_key(key): + """ + Return an UUID for given key, which is assumed to be hashable. 
+ """ + uuid = _keys_to_uuids.get(key, None) + if uuid is None: + uuid = _key_to_uuid(_uuids_to_keys, key) + _keys_to_uuids[key] = uuid + _uuids_to_keys[uuid] = key + return uuid + + +# XXX Not sure we'll actually need this one? +def get_key_from_fbx_uuid(uuid): + """ + Return the key which generated this uid. + """ + assert(uuid.__class__ == UUID) + return _uuids_to_keys.get(uuid, None) + + +# Blender-specific key generators +def get_bid_name(bid): + library = getattr(bid, "library", None) + if library is not None: + return "%s_L_%s" % (bid.name, library.name) + else: + return bid.name + + +def get_blenderID_key(bid): + if isinstance(bid, Iterable): + return "|".join("B" + e.rna_type.name + "#" + get_bid_name(e) for e in bid) + else: + return "B" + bid.rna_type.name + "#" + get_bid_name(bid) + + +def get_blenderID_name(bid): + if isinstance(bid, Iterable): + return "|".join(get_bid_name(e) for e in bid) + else: + return get_bid_name(bid) + + +def get_blender_empty_key(obj): + """Return bone's keys (Model and NodeAttribute).""" + return "|".join((get_blenderID_key(obj), "Empty")) + + +def get_blender_mesh_shape_key(me): + """Return main shape deformer's key.""" + return "|".join((get_blenderID_key(me), "Shape")) + + +def get_blender_mesh_shape_channel_key(me, shape): + """Return shape channel and geometry shape keys.""" + return ("|".join((get_blenderID_key(me), "Shape", get_blenderID_key(shape))), + "|".join((get_blenderID_key(me), "Geometry", get_blenderID_key(shape)))) + + +def get_blender_bone_key(armature, bone): + """Return bone's keys (Model and NodeAttribute).""" + return "|".join((get_blenderID_key((armature, bone)), "Data")) + + +def get_blender_bindpose_key(obj, mesh): + """Return object's bindpose key.""" + return "|".join((get_blenderID_key(obj), get_blenderID_key(mesh), "BindPose")) + + +def get_blender_armature_skin_key(armature, mesh): + """Return armature's skin key.""" + return "|".join((get_blenderID_key(armature), get_blenderID_key(mesh), "DeformerSkin")) + + +def get_blender_bone_cluster_key(armature, mesh, bone): + """Return bone's cluster key.""" + return "|".join((get_blenderID_key(armature), get_blenderID_key(mesh), + get_blenderID_key(bone), "SubDeformerCluster")) + + +def get_blender_anim_id_base(scene, ref_id): + if ref_id is not None: + return get_blenderID_key(scene) + "|" + get_blenderID_key(ref_id) + else: + return get_blenderID_key(scene) + + +def get_blender_anim_stack_key(scene, ref_id): + """Return single anim stack key.""" + return get_blender_anim_id_base(scene, ref_id) + "|AnimStack" + + +def get_blender_anim_layer_key(scene, ref_id): + """Return ID's anim layer key.""" + return get_blender_anim_id_base(scene, ref_id) + "|AnimLayer" + + +def get_blender_anim_curve_node_key(scene, ref_id, obj_key, fbx_prop_name): + """Return (stack/layer, ID, fbxprop) curve node key.""" + return "|".join((get_blender_anim_id_base(scene, ref_id), obj_key, fbx_prop_name, "AnimCurveNode")) + + +def get_blender_anim_curve_key(scene, ref_id, obj_key, fbx_prop_name, fbx_prop_item_name): + """Return (stack/layer, ID, fbxprop, item) curve key.""" + return "|".join((get_blender_anim_id_base(scene, ref_id), obj_key, fbx_prop_name, + fbx_prop_item_name, "AnimCurve")) + + +def get_blender_nodetexture_key(ma, socket_names): + return "|".join((get_blenderID_key(ma), *socket_names)) + + +# ##### Element generators. ##### + +# Note: elem may be None, in this case the element is not added to any parent. 
+def elem_empty(elem, name): + sub_elem = encode_bin.FBXElem(name) + if elem is not None: + elem.elems.append(sub_elem) + return sub_elem + + +def _elem_data_single(elem, name, value, func_name): + sub_elem = elem_empty(elem, name) + getattr(sub_elem, func_name)(value) + return sub_elem + + +def _elem_data_vec(elem, name, value, func_name): + sub_elem = elem_empty(elem, name) + func = getattr(sub_elem, func_name) + for v in value: + func(v) + return sub_elem + + +def elem_data_single_bool(elem, name, value): + return _elem_data_single(elem, name, value, "add_bool") + + +def elem_data_single_char(elem, name, value): + return _elem_data_single(elem, name, value, "add_char") + + +def elem_data_single_int8(elem, name, value): + return _elem_data_single(elem, name, value, "add_int8") + + +def elem_data_single_int16(elem, name, value): + return _elem_data_single(elem, name, value, "add_int16") + + +def elem_data_single_int32(elem, name, value): + return _elem_data_single(elem, name, value, "add_int32") + + +def elem_data_single_int64(elem, name, value): + return _elem_data_single(elem, name, value, "add_int64") + + +def elem_data_single_float32(elem, name, value): + return _elem_data_single(elem, name, value, "add_float32") + + +def elem_data_single_float64(elem, name, value): + return _elem_data_single(elem, name, value, "add_float64") + + +def elem_data_single_bytes(elem, name, value): + return _elem_data_single(elem, name, value, "add_bytes") + + +def elem_data_single_string(elem, name, value): + return _elem_data_single(elem, name, value, "add_string") + + +def elem_data_single_string_unicode(elem, name, value): + return _elem_data_single(elem, name, value, "add_string_unicode") + + +def elem_data_single_bool_array(elem, name, value): + return _elem_data_single(elem, name, value, "add_bool_array") + + +def elem_data_single_int32_array(elem, name, value): + return _elem_data_single(elem, name, value, "add_int32_array") + + +def elem_data_single_int64_array(elem, name, value): + return _elem_data_single(elem, name, value, "add_int64_array") + + +def elem_data_single_float32_array(elem, name, value): + return _elem_data_single(elem, name, value, "add_float32_array") + + +def elem_data_single_float64_array(elem, name, value): + return _elem_data_single(elem, name, value, "add_float64_array") + + +def elem_data_single_byte_array(elem, name, value): + return _elem_data_single(elem, name, value, "add_byte_array") + + +def elem_data_vec_float64(elem, name, value): + return _elem_data_vec(elem, name, value, "add_float64") + + +# ##### Generators for standard FBXProperties70 properties. ##### + +def elem_properties(elem): + return elem_empty(elem, b"Properties70") + + +# Properties definitions, format: (b"type_1", b"label(???)", "name_set_value_1", "name_set_value_2", ...) +# XXX Looks like there can be various variations of formats here... Will have to be checked ultimately! +# Also, those "custom" types like 'FieldOfView' or 'Lcl Translation' are pure nonsense, +# these are just Vector3D ultimately... *sigh* (again). +FBX_PROPERTIES_DEFINITIONS = { + # Generic types. + "p_bool": (b"bool", b"", "add_int32"), # Yes, int32 for a bool (and they do have a core bool type)!!! + "p_integer": (b"int", b"Integer", "add_int32"), + "p_ulonglong": (b"ULongLong", b"", "add_int64"), + "p_double": (b"double", b"Number", "add_float64"), # Non-animatable? + "p_number": (b"Number", b"", "add_float64"), # Animatable-only? 
+ "p_enum": (b"enum", b"", "add_int32"), + "p_vector_3d": (b"Vector3D", b"Vector", "add_float64", "add_float64", "add_float64"), # Non-animatable? + "p_vector": (b"Vector", b"", "add_float64", "add_float64", "add_float64"), # Animatable-only? + "p_color_rgb": (b"ColorRGB", b"Color", "add_float64", "add_float64", "add_float64"), # Non-animatable? + "p_color": (b"Color", b"", "add_float64", "add_float64", "add_float64"), # Animatable-only? + "p_string": (b"KString", b"", "add_string_unicode"), + "p_string_url": (b"KString", b"Url", "add_string_unicode"), + "p_timestamp": (b"KTime", b"Time", "add_int64"), + "p_datetime": (b"DateTime", b"", "add_string_unicode"), + # Special types. + "p_object": (b"object", b""), # XXX Check this! No value for this prop??? Would really like to know how it works! + "p_compound": (b"Compound", b""), + # Specific types (sic). + # ## Objects (Models). + "p_lcl_translation": (b"Lcl Translation", b"", "add_float64", "add_float64", "add_float64"), + "p_lcl_rotation": (b"Lcl Rotation", b"", "add_float64", "add_float64", "add_float64"), + "p_lcl_scaling": (b"Lcl Scaling", b"", "add_float64", "add_float64", "add_float64"), + "p_visibility": (b"Visibility", b"", "add_float64"), + "p_visibility_inheritance": (b"Visibility Inheritance", b"", "add_int32"), + # ## Cameras!!! + "p_roll": (b"Roll", b"", "add_float64"), + "p_opticalcenterx": (b"OpticalCenterX", b"", "add_float64"), + "p_opticalcentery": (b"OpticalCenterY", b"", "add_float64"), + "p_fov": (b"FieldOfView", b"", "add_float64"), + "p_fov_x": (b"FieldOfViewX", b"", "add_float64"), + "p_fov_y": (b"FieldOfViewY", b"", "add_float64"), +} + + +def _elem_props_set(elem, ptype, name, value, flags): + p = elem_data_single_string(elem, b"P", name) + for t in ptype[:2]: + p.add_string(t) + p.add_string(flags) + if len(ptype) == 3: + getattr(p, ptype[2])(value) + elif len(ptype) > 3: + # We assume value is iterable, else it's a bug! + for callback, val in zip(ptype[2:], value): + getattr(p, callback)(val) + + +def _elem_props_flags(animatable, animated, custom): + # XXX: There are way more flags, see + # http://help.autodesk.com/view/FBX/2015/ENU/?guid=__cpp_ref_class_fbx_property_flags_html + # Unfortunately, as usual, no doc at all about their 'translation' in actual FBX file format. + # Curse you-know-who. + if animatable: + if animated: + if custom: + return b"A+U" + return b"A+" + if custom: + # Seems that customprops always need those 'flags', see T69554. Go figure... + return b"A+U" + return b"A" + if custom: + # Seems that customprops always need those 'flags', see T69554. Go figure... + return b"A+U" + return b"" + + +def elem_props_set(elem, ptype, name, value=None, animatable=False, animated=False, custom=False): + ptype = FBX_PROPERTIES_DEFINITIONS[ptype] + _elem_props_set(elem, ptype, name, value, _elem_props_flags(animatable, animated, custom)) + + +def elem_props_compound(elem, cmpd_name, custom=False): + def _setter(ptype, name, value, animatable=False, animated=False, custom=False): + name = cmpd_name + b"|" + name + elem_props_set(elem, ptype, name, value, animatable=animatable, animated=animated, custom=custom) + + elem_props_set(elem, "p_compound", cmpd_name, custom=custom) + return _setter + + +def elem_props_template_init(templates, template_type): + """ + Init a writing template of given type, for *one* element's properties. 
+ """ + ret = {} + tmpl = templates.get(template_type) + if tmpl is not None: + written = tmpl.written[0] + props = tmpl.properties + ret = {name: [val, ptype, anim, written] for name, (val, ptype, anim) in props.items()} + return ret + + +def elem_props_template_set(template, elem, ptype_name, name, value, animatable=False, animated=False): + """ + Only add a prop if the same value is not already defined in given template. + Note it is important to not give iterators as value, here! + """ + ptype = FBX_PROPERTIES_DEFINITIONS[ptype_name] + if len(ptype) > 3: + value = tuple(value) + tmpl_val, tmpl_ptype, tmpl_animatable, tmpl_written = template.get(name, (None, None, False, False)) + # Note animatable flag from template takes precedence over given one, if applicable. + # However, animated properties are always written, since they cannot match their template! + if tmpl_ptype is not None and not animated: + if (tmpl_written and + ((len(ptype) == 3 and (tmpl_val, tmpl_ptype) == (value, ptype_name)) or + (len(ptype) > 3 and (tuple(tmpl_val), tmpl_ptype) == (value, ptype_name)))): + return # Already in template and same value. + _elem_props_set(elem, ptype, name, value, _elem_props_flags(tmpl_animatable, animated, False)) + template[name][3] = True + else: + _elem_props_set(elem, ptype, name, value, _elem_props_flags(animatable, animated, False)) + + +def elem_props_template_finalize(template, elem): + """ + Finalize one element's template/props. + Issue is, some templates might be "needed" by different types (e.g. NodeAttribute is for lights, cameras, etc.), + but values for only *one* subtype can be written as template. So we have to be sure we write those for the other + subtypes in each and every elements, if they are not overridden by that element. + Yes, hairy, FBX that is to say. When they could easily support several subtypes per template... :( + """ + for name, (value, ptype_name, animatable, written) in template.items(): + if written: + continue + ptype = FBX_PROPERTIES_DEFINITIONS[ptype_name] + _elem_props_set(elem, ptype, name, value, _elem_props_flags(animatable, False, False)) + + +# ##### Templates ##### +# TODO: check all those "default" values, they should match Blender's default as much as possible, I guess? + +FBXTemplate = namedtuple("FBXTemplate", ("type_name", "prop_type_name", "properties", "nbr_users", "written")) + + +def fbx_templates_generate(root, fbx_templates): + # We may have to gather different templates in the same node (e.g. NodeAttribute template gathers properties + # for Lights, Cameras, LibNodes, etc.). + ref_templates = {(tmpl.type_name, tmpl.prop_type_name): tmpl for tmpl in fbx_templates.values()} + + templates = {} + for type_name, prop_type_name, properties, nbr_users, _written in fbx_templates.values(): + tmpl = templates.setdefault(type_name, [{}, 0]) + tmpl[0][prop_type_name] = (properties, nbr_users) + tmpl[1] += nbr_users + + for type_name, (subprops, nbr_users) in templates.items(): + template = elem_data_single_string(root, b"ObjectType", type_name) + elem_data_single_int32(template, b"Count", nbr_users) + + if len(subprops) == 1: + prop_type_name, (properties, _nbr_sub_type_users) = next(iter(subprops.items())) + subprops = (prop_type_name, properties) + ref_templates[(type_name, prop_type_name)].written[0] = True + else: + # Ack! Even though this could/should work, looks like it is not supported. So we have to chose one. 
:| + max_users = max_props = -1 + written_prop_type_name = None + for prop_type_name, (properties, nbr_sub_type_users) in subprops.items(): + if nbr_sub_type_users > max_users or (nbr_sub_type_users == max_users and len(properties) > max_props): + max_users = nbr_sub_type_users + max_props = len(properties) + written_prop_type_name = prop_type_name + subprops = (written_prop_type_name, properties) + ref_templates[(type_name, written_prop_type_name)].written[0] = True + + prop_type_name, properties = subprops + if prop_type_name and properties: + elem = elem_data_single_string(template, b"PropertyTemplate", prop_type_name) + props = elem_properties(elem) + for name, (value, ptype, animatable) in properties.items(): + try: + elem_props_set(props, ptype, name, value, animatable=animatable) + except Exception as e: + print("Failed to write template prop (%r)" % e) + print(props, ptype, name, value, animatable) + + +# ##### FBX animation helpers. ##### + + +class AnimationCurveNodeWrapper: + """ + This class provides a same common interface for all (FBX-wise) AnimationCurveNode and AnimationCurve elements, + and easy API to handle those. + """ + __slots__ = ( + 'elem_keys', 'default_values', 'fbx_group', 'fbx_gname', 'fbx_props', + 'force_keying', 'force_startend_keying', + '_frame_times_array', '_frame_values_array', '_frame_write_mask_array', + ) + + kinds = { + 'LCL_TRANSLATION': ("Lcl Translation", "T", ("X", "Y", "Z")), + 'LCL_ROTATION': ("Lcl Rotation", "R", ("X", "Y", "Z")), + 'LCL_SCALING': ("Lcl Scaling", "S", ("X", "Y", "Z")), + 'SHAPE_KEY': ("DeformPercent", "DeformPercent", ("DeformPercent",)), + 'CAMERA_FOCAL': ("FocalLength", "FocalLength", ("FocalLength",)), + 'CAMERA_FOCUS_DISTANCE': ("FocusDistance", "FocusDistance", ("FocusDistance",)), + } + + def __init__(self, elem_key, kind, force_keying, force_startend_keying, default_values=...): + self.elem_keys = [elem_key] + assert(kind in self.kinds) + self.fbx_group = [self.kinds[kind][0]] + self.fbx_gname = [self.kinds[kind][1]] + self.fbx_props = [self.kinds[kind][2]] + self.force_keying = force_keying + self.force_startend_keying = force_startend_keying + self._frame_times_array = None + self._frame_values_array = None + self._frame_write_mask_array = None + if default_values is not ...: + assert(len(default_values) == len(self.fbx_props[0])) + self.default_values = default_values + else: + self.default_values = (0.0) * len(self.fbx_props[0]) + + def __bool__(self): + # We are 'True' if we do have some validated keyframes... + return self._frame_write_mask_array is not None and bool(np.any(self._frame_write_mask_array)) + + def add_group(self, elem_key, fbx_group, fbx_gname, fbx_props): + """ + Add another whole group stuff (curvenode, animated item/prop + curvnode/curve identifiers). + E.g. Shapes animations is written twice, houra! + """ + assert(len(fbx_props) == len(self.fbx_props[0])) + self.elem_keys.append(elem_key) + self.fbx_group.append(fbx_group) + self.fbx_gname.append(fbx_gname) + self.fbx_props.append(fbx_props) + + def set_keyframes(self, keyframe_times, keyframe_values): + """ + Set all keyframe times and values of the group. + Values can be a 2D array where each row is the values for a separate curve. + """ + # View 1D keyframe_values as 2D with a single row, so that the same code can be used for both 1D and + # 2D inputs. + if len(keyframe_values.shape) == 1: + keyframe_values = keyframe_values[np.newaxis] + # There must be a time for each column of values. 
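+        # E.g. for an 'LCL_TRANSLATION' group this means a (3, N) values array (one row each for X, Y
+        # and Z) paired with N keyframe times.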
+ assert(len(keyframe_times) == keyframe_values.shape[1]) + # There must be as many rows of values as there are properties. + assert(len(self.fbx_props[0]) == len(keyframe_values)) + write_mask = np.full_like(keyframe_values, True, dtype=bool) # write everything by default + self._frame_times_array = keyframe_times + self._frame_values_array = keyframe_values + self._frame_write_mask_array = write_mask + + def simplify(self, fac, step, force_keep=False): + """ + Simplifies sampled curves by only enabling samples when: + * their values relatively differ from the previous sample ones. + """ + if self._frame_times_array is None: + # Keyframes have not been added yet. + return + + if fac == 0.0: + return + + # So that, with default factor and step values (1), we get: + min_reldiff_fac = fac * 1.0e-3 # min relative value evolution: 0.1% of current 'order of magnitude'. + min_absdiff_fac = 0.1 # A tenth of reldiff... + + # Initialise to no values enabled for writing. + self._frame_write_mask_array[:] = False + + # Values are enabled for writing if they differ enough from either of their adjacent values or if they differ + # enough from the closest previous value that is enabled due to either of these conditions. + for sampled_values, enabled_mask in zip(self._frame_values_array, self._frame_write_mask_array): + # Create overlapping views of the 'previous' (all but the last) and 'current' (all but the first) + # `sampled_values` and `enabled_mask`. + # Calculate absolute values from `sampled_values` so that the 'previous' and 'current' absolute arrays can + # be views into the same array instead of separately calculated arrays. + abs_sampled_values = np.abs(sampled_values) + # 'previous' views. + p_val_view = sampled_values[:-1] + p_abs_val_view = abs_sampled_values[:-1] + p_enabled_mask_view = enabled_mask[:-1] + # 'current' views. + c_val_view = sampled_values[1:] + c_abs_val_view = abs_sampled_values[1:] + c_enabled_mask_view = enabled_mask[1:] + + # If enough difference from previous sampled value, enable the current value *and* the previous one! + # The difference check is symmetrical, so this will compare each value to both of its adjacent values. + # Unless it is forcefully enabled later, this is the only way that the first value can be enabled. + # This is a contracted form of relative + absolute-near-zero difference: + # def is_different(a, b): + # abs_diff = abs(a - b) + # if abs_diff < min_reldiff_fac * min_absdiff_fac: + # return False + # return (abs_diff / ((abs(a) + abs(b)) / 2)) > min_reldiff_fac + # Note that we ignore the '/ 2' part here, since it's not much significant for us. + # Contracted form using only builtin Python functions: + # return abs(a - b) > (min_reldiff_fac * max(abs(a) + abs(b), min_absdiff_fac)) + abs_diff = np.abs(c_val_view - p_val_view) + different_if_greater_than = min_reldiff_fac * np.maximum(c_abs_val_view + p_abs_val_view, min_absdiff_fac) + enough_diff_p_val_mask = abs_diff > different_if_greater_than + # Enable both the current values *and* the previous values where `enough_diff_p_val_mask` is True. Some + # values may get set to True twice because the views overlap, but this is not a problem. + p_enabled_mask_view[enough_diff_p_val_mask] = True + c_enabled_mask_view[enough_diff_p_val_mask] = True + + # Else, if enough difference from previous enabled value, enable the current value only! + # For each 'current' value, get the index of the nearest previous enabled value in `sampled_values` (or + # itself if the value is enabled). 
+ # Start with an array that is the index of the 'current' value in `sampled_values`. The 'current' values are + # all but the first value, so the indices will be from 1 to `len(sampled_values)` exclusive. + # Let len(sampled_values) == 9: + # [1, 2, 3, 4, 5, 6, 7, 8] + p_enabled_idx_in_sampled_values = np.arange(1, len(sampled_values)) + # Replace the indices of all disabled values with 0 in preparation of filling them in with the index of the + # nearest previous enabled value. We choose to replace with 0 so that if there is no nearest previous + # enabled value, we instead default to `sampled_values[0]`. + c_val_disabled_mask = ~c_enabled_mask_view + # Let `c_val_disabled_mask` be: + # [F, F, T, F, F, T, T, T] + # Set indices to 0 where `c_val_disabled_mask` is True: + # [1, 2, 3, 4, 5, 6, 7, 8] + # v v v v + # [1, 2, 0, 4, 5, 0, 0, 0] + p_enabled_idx_in_sampled_values[c_val_disabled_mask] = 0 + # Accumulative maximum travels across the array from left to right, filling in the zeroed indices with the + # maximum value so far, which will be the closest previous enabled index because the non-zero indices are + # strictly increasing. + # [1, 2, 0, 4, 5, 0, 0, 0] + # v v v v + # [1, 2, 2, 4, 5, 5, 5, 5] + p_enabled_idx_in_sampled_values = np.maximum.accumulate(p_enabled_idx_in_sampled_values) + # Only disabled values need to be checked against their nearest previous enabled values. + # We can additionally ignore all values which equal their immediately previous value because those values + # will never be enabled if they were not enabled by the earlier difference check against immediately + # previous values. + p_enabled_diff_to_check_mask = np.logical_and(c_val_disabled_mask, p_val_view != c_val_view) + # Convert from a mask to indices because we need the indices later and because the array of indices will + # usually be smaller than the mask array making it faster to index other arrays with. + p_enabled_diff_to_check_idx = np.flatnonzero(p_enabled_diff_to_check_mask) + # `p_enabled_idx_in_sampled_values` from earlier: + # [1, 2, 2, 4, 5, 5, 5, 5] + # `p_enabled_diff_to_check_mask` assuming no values equal their immediately previous value: + # [F, F, T, F, F, T, T, T] + # `p_enabled_diff_to_check_idx`: + # [ 2, 5, 6, 7] + # `p_enabled_idx_in_sampled_values_to_check`: + # [ 2, 5, 5, 5] + p_enabled_idx_in_sampled_values_to_check = p_enabled_idx_in_sampled_values[p_enabled_diff_to_check_idx] + # Get the 'current' disabled values that need to be checked. + c_val_to_check = c_val_view[p_enabled_diff_to_check_idx] + c_abs_val_to_check = c_abs_val_view[p_enabled_diff_to_check_idx] + # Get the nearest previous enabled value for each value to be checked. + nearest_p_enabled_val = sampled_values[p_enabled_idx_in_sampled_values_to_check] + abs_nearest_p_enabled_val = np.abs(nearest_p_enabled_val) + # Check the relative + absolute-near-zero difference again, but against the nearest previous enabled value + # this time. 
+ abs_diff = np.abs(c_val_to_check - nearest_p_enabled_val) + different_if_greater_than = (min_reldiff_fac + * np.maximum(c_abs_val_to_check + abs_nearest_p_enabled_val, min_absdiff_fac)) + enough_diff_p_enabled_val_mask = abs_diff > different_if_greater_than + # If there are any that are different enough from the previous enabled value, then we have to check them all + # iteratively because enabling a new value can change the nearest previous enabled value of some elements, + # which changes their relative + absolute-near-zero difference: + # `p_enabled_diff_to_check_idx`: + # [2, 5, 6, 7] + # `p_enabled_idx_in_sampled_values_to_check`: + # [2, 5, 5, 5] + # Let `enough_diff_p_enabled_val_mask` be: + # [F, F, T, T] + # The first index that is newly enabled is 6: + # [2, 5,>6<,5] + # But 6 > 5, so the next value's nearest previous enabled index is also affected: + # [2, 5, 6,>6<] + # We had calculated a newly enabled index of 7 too, but that was calculated against the old nearest previous + # enabled index of 5, which has now been updated to 6, so whether 7 is enabled or not needs to be + # recalculated: + # [F, F, T, ?] + if np.any(enough_diff_p_enabled_val_mask): + # Accessing .data, the memoryview of the array, iteratively or by individual index is faster than doing + # the same with the array itself. + zipped = zip(p_enabled_diff_to_check_idx.data, + c_val_to_check.data, + c_abs_val_to_check.data, + p_enabled_idx_in_sampled_values_to_check.data, + enough_diff_p_enabled_val_mask.data) + # While iterating, we could set updated values into `enough_diff_p_enabled_val_mask` as we go and then + # update `enabled_mask` in bulk after the iteration, but if we're going to update an array while + # iterating, we may as well update `enabled_mask` directly instead and skip the bulk update. + # Additionally, the number of `True` writes to `enabled_mask` is usually much less than the number of + # updates that would be required to `enough_diff_p_enabled_val_mask`. + c_enabled_mask_view_mv = c_enabled_mask_view.data + + # While iterating, keep track of the most recent newly enabled index, so we can tell when we need to + # recalculate whether the current value needs to be enabled. + new_p_enabled_idx = -1 + # Keep track of its value too for performance. + new_p_enabled_val = -1 + new_abs_p_enabled_val = -1 + for cur_idx, c_val, c_abs_val, old_p_enabled_idx, enough_diff in zipped: + if new_p_enabled_idx > old_p_enabled_idx: + # The nearest previous enabled value is newly enabled and was not included when + # `enough_diff_p_enabled_val_mask` was calculated, so whether the current value is different + # enough needs to be recalculated using the newly enabled value. + # Check if the relative + absolute-near-zero difference is enough to enable this value. + enough_diff = (abs(c_val - new_p_enabled_val) + > (min_reldiff_fac * max(c_abs_val + new_abs_p_enabled_val, min_absdiff_fac))) + if enough_diff: + # The current value needs to be enabled. + c_enabled_mask_view_mv[cur_idx] = True + # Update the index and values for this newly enabled value. + new_p_enabled_idx = cur_idx + new_p_enabled_val = c_val + new_abs_p_enabled_val = c_abs_val + + # If we write nothing (action doing nothing) and are in 'force_keep' mode, we key everything! :P + # See T41766. + # Also, it seems some importers (e.g. UE4) do not handle correctly armatures where some bones + # are not animated, but are children of animated ones, so added an option to systematically force writing + # one key in this case. 
+ # See T41719, T41605, T41254... + if self.force_keying or (force_keep and not self): + are_keyed = [True] * len(self._frame_write_mask_array) + else: + are_keyed = np.any(self._frame_write_mask_array, axis=1) + + # If we did key something, ensure first and last sampled values are keyed as well. + if self.force_startend_keying: + for is_keyed, frame_write_mask in zip(are_keyed, self._frame_write_mask_array): + if is_keyed: + frame_write_mask[:1] = True + frame_write_mask[-1:] = True + + def get_final_data(self, scene, ref_id, force_keep=False): + """ + Yield final anim data for this 'curvenode' (for all curvenodes defined). + force_keep is to force to keep a curve even if it only has one valid keyframe. + """ + curves = [ + (self._frame_times_array[write_mask], values[write_mask]) + for values, write_mask in zip(self._frame_values_array, self._frame_write_mask_array) + ] + + force_keep = force_keep or self.force_keying + for elem_key, fbx_group, fbx_gname, fbx_props in \ + zip(self.elem_keys, self.fbx_group, self.fbx_gname, self.fbx_props): + group_key = get_blender_anim_curve_node_key(scene, ref_id, elem_key, fbx_group) + group = {} + for c, def_val, fbx_item in zip(curves, self.default_values, fbx_props): + fbx_item = FBX_ANIM_PROPSGROUP_NAME + "|" + fbx_item + curve_key = get_blender_anim_curve_key(scene, ref_id, elem_key, fbx_group, fbx_item) + # (curve key, default value, keyframes, write flag). + times = c[0] + write_flag = len(times) > (0 if force_keep else 1) + group[fbx_item] = (curve_key, def_val, c, write_flag) + yield elem_key, group_key, group, fbx_group, fbx_gname + + +# ##### FBX objects generators. ##### + +# FBX Model-like data (i.e. Blender objects, depsgraph instances and bones) are wrapped in ObjectWrapper. +# This allows us to have a (nearly) same code FBX-wise for all those types. +# The wrapper tries to stay as small as possible, by mostly using callbacks (property(get...)) +# to actual Blender data it contains. +# Note it caches its instances, so that you may call several times ObjectWrapper(your_object) +# with a minimal cost (just re-computing the key). + +class MetaObjectWrapper(type): + def __call__(cls, bdata, armature=None): + if bdata is None: + return None + dup_mat = None + if isinstance(bdata, Object): + key = get_blenderID_key(bdata) + elif isinstance(bdata, DepsgraphObjectInstance): + if bdata.is_instance: + key = "|".join((get_blenderID_key((bdata.parent.original, bdata.instance_object.original)), + cls._get_dup_num_id(bdata))) + dup_mat = bdata.matrix_world.copy() + else: + key = get_blenderID_key(bdata.object.original) + else: # isinstance(bdata, (Bone, PoseBone)): + if isinstance(bdata, PoseBone): + bdata = armature.data.bones[bdata.name] + key = get_blenderID_key((armature, bdata)) + + cache = getattr(cls, "_cache", None) + if cache is None: + cache = cls._cache = {} + instance = cache.get(key) + if instance is not None: + # Duplis hack: since dupli instances are not persistent in Blender (we have to re-create them to get updated + # info like matrix...), we *always* need to reset that matrix when calling ObjectWrapper() (all + # other data is supposed valid during whole cache live span, so we can skip resetting it). 
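+            # (So repeated ObjectWrapper(bdata) calls cheaply return the cached instance; only the dupli
+            # matrix below needs refreshing.)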
+ instance._dupli_matrix = dup_mat + return instance + + instance = cls.__new__(cls, bdata, armature) + instance.__init__(bdata, armature) + instance.key = key + instance._dupli_matrix = dup_mat + cache[key] = instance + return instance + + +class ObjectWrapper(metaclass=MetaObjectWrapper): + """ + This class provides a same common interface for all (FBX-wise) object-like elements: + * Blender Object + * Blender Bone and PoseBone + * Blender DepsgraphObjectInstance (for dulis). + Note since a same Blender object might be 'mapped' to several FBX models (esp. with duplis), + we need to use a key to identify each. + """ + __slots__ = ( + 'name', 'key', 'bdata', 'parented_to_armature', 'override_materials', + '_tag', '_ref', '_dupli_matrix' + ) + + @classmethod + def cache_clear(cls): + if hasattr(cls, "_cache"): + del cls._cache + + @staticmethod + def _get_dup_num_id(bdata): + INVALID_IDS = {2147483647, 0} + pids = tuple(bdata.persistent_id) + idx_valid = 0 + prev_i = ... + for idx, i in enumerate(pids[::-1]): + if i not in INVALID_IDS or (idx == len(pids) and i == 0 and prev_i != 0): + idx_valid = len(pids) - idx + break + prev_i = i + return ".".join(str(i) for i in pids[:idx_valid]) + + def __init__(self, bdata, armature=None): + """ + bdata might be an Object (deprecated), DepsgraphObjectInstance, Bone or PoseBone. + If Bone or PoseBone, armature Object must be provided. + """ + # Note: DepsgraphObjectInstance are purely runtime data, they become invalid as soon as we step to the next item! + # Hence we have to immediately copy *all* needed data... + if isinstance(bdata, Object): # DEPRECATED + self._tag = 'OB' + self.name = get_blenderID_name(bdata) + self.bdata = bdata + self._ref = None + elif isinstance(bdata, DepsgraphObjectInstance): + if bdata.is_instance: + # Note that dupli instance matrix is set by meta-class initialization. + self._tag = 'DP' + self.name = "|".join((get_blenderID_name((bdata.parent.original, bdata.instance_object.original)), + "Dupli", self._get_dup_num_id(bdata))) + self.bdata = bdata.instance_object.original + self._ref = bdata.parent.original + else: + self._tag = 'OB' + self.name = get_blenderID_name(bdata) + self.bdata = bdata.object.original + self._ref = None + else: # isinstance(bdata, (Bone, PoseBone)): + if isinstance(bdata, PoseBone): + bdata = armature.data.bones[bdata.name] + self._tag = 'BO' + self.name = get_blenderID_name(bdata) + self.bdata = bdata + self._ref = armature + self.parented_to_armature = False + self.override_materials = None + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.key == other.key + + def __hash__(self): + return hash(self.key) + + def __repr__(self): + return self.key + + # #### Common to all _tag values. + def get_fbx_uuid(self): + return get_fbx_uuid_from_key(self.key) + fbx_uuid = property(get_fbx_uuid) + + # XXX Not sure how much that’s useful now... :/ + def get_hide(self): + return self.bdata.hide_viewport if self._tag in {'OB', 'DP'} else self.bdata.hide + hide = property(get_hide) + + def get_parent(self): + if self._tag == 'OB': + if (self.bdata.parent and self.bdata.parent.type == 'ARMATURE' and + self.bdata.parent_type == 'BONE' and self.bdata.parent_bone): + # Try to parent to a bone. + bo_par = self.bdata.parent.pose.bones.get(self.bdata.parent_bone, None) + if (bo_par): + return ObjectWrapper(bo_par, self.bdata.parent) + else: # Fallback to mere object parenting. + return ObjectWrapper(self.bdata.parent) + else: + # Mere object parenting. 
+ return ObjectWrapper(self.bdata.parent) + elif self._tag == 'DP': + return ObjectWrapper(self._ref) + else: # self._tag == 'BO' + return ObjectWrapper(self.bdata.parent, self._ref) or ObjectWrapper(self._ref) + parent = property(get_parent) + + def get_bdata_pose_bone(self): + if self._tag == 'BO': + return self._ref.pose.bones[self.bdata.name] + return None + bdata_pose_bone = property(get_bdata_pose_bone) + + def get_matrix_local(self): + if self._tag == 'OB': + return self.bdata.matrix_local.copy() + elif self._tag == 'DP': + return self._ref.matrix_world.inverted_safe() @ self._dupli_matrix + else: # 'BO', current pose + # PoseBone.matrix is in armature space, bring in back in real local one! + par = self.bdata.parent + par_mat_inv = self._ref.pose.bones[par.name].matrix.inverted_safe() if par else Matrix() + return par_mat_inv @ self._ref.pose.bones[self.bdata.name].matrix + matrix_local = property(get_matrix_local) + + def get_matrix_global(self): + if self._tag == 'OB': + return self.bdata.matrix_world.copy() + elif self._tag == 'DP': + return self._dupli_matrix + else: # 'BO', current pose + return self._ref.matrix_world @ self._ref.pose.bones[self.bdata.name].matrix + matrix_global = property(get_matrix_global) + + def get_matrix_rest_local(self): + if self._tag == 'BO': + # Bone.matrix_local is in armature space, bring in back in real local one! + par = self.bdata.parent + par_mat_inv = par.matrix_local.inverted_safe() if par else Matrix() + return par_mat_inv @ self.bdata.matrix_local + else: + return self.matrix_local.copy() + matrix_rest_local = property(get_matrix_rest_local) + + def get_matrix_rest_global(self): + if self._tag == 'BO': + return self._ref.matrix_world @ self.bdata.matrix_local + else: + return self.matrix_global.copy() + matrix_rest_global = property(get_matrix_rest_global) + + # #### Transform and helpers + def has_valid_parent(self, objects): + par = self.parent + if par in objects: + if self._tag == 'OB': + par_type = self.bdata.parent_type + if par_type in {'OBJECT', 'BONE'}: + return True + else: + print("Sorry, “{}” parenting type is not supported".format(par_type)) + return False + return True + return False + + def use_bake_space_transform(self, scene_data): + # NOTE: Only applies to object types supporting this!!! Currently, only meshes and the like... + # TODO: Check whether this can work for bones too... + return (scene_data.settings.bake_space_transform and self._tag in {'OB', 'DP'} and + self.bdata.type in BLENDER_OBJECT_TYPES_MESHLIKE | {'EMPTY'}) + + def fbx_object_matrix(self, scene_data, rest=False, local_space=False, global_space=False): + """ + Generate object transform matrix (*always* in matching *FBX* space!). + If local_space is True, returned matrix is *always* in local space. + Else if global_space is True, returned matrix is always in world space. + If both local_space and global_space are False, returned matrix is in parent space if parent is valid, + else in world space. + Note local_space has precedence over global_space. + If rest is True and object is a Bone, returns matching rest pose transform instead of current pose one. + Applies specific rotation to bones, lamps and cameras (conversion Blender -> FBX). + """ + # Objects which are not bones and do not have any parent are *always* in global space + # (unless local_space is True!). + is_global = (not local_space and + (global_space or not (self._tag in {'DP', 'BO'} or self.has_valid_parent(scene_data.objects)))) + + # Objects (meshes!) 
parented to armature are not parented to anything in FBX, hence we need them + # in global space, which is their 'virtual' local space... + is_global = is_global or self.parented_to_armature + + # Since we have to apply corrections to some types of object, we always need local Blender space here... + matrix = self.matrix_rest_local if rest else self.matrix_local + parent = self.parent + + # Bones, lamps and cameras need to be rotated (in local space!). + if self._tag == 'BO': + # If we have a bone parent we need to undo the parent correction. + if not is_global and scene_data.settings.bone_correction_matrix_inv and parent and parent.is_bone: + matrix = scene_data.settings.bone_correction_matrix_inv @ matrix + # Apply the bone correction. + if scene_data.settings.bone_correction_matrix: + matrix = matrix @ scene_data.settings.bone_correction_matrix + elif self.bdata.type == 'LIGHT': + matrix = matrix @ MAT_CONVERT_LIGHT + elif self.bdata.type == 'CAMERA': + matrix = matrix @ MAT_CONVERT_CAMERA + + if self._tag in {'DP', 'OB'} and parent: + if parent._tag == 'BO': + # In bone parent case, we get transformation in **bone tip** space (sigh). + # Have to bring it back into bone root, which is FBX expected value. + matrix = Matrix.Translation((0, (parent.bdata.tail - parent.bdata.head).length, 0)) @ matrix + + # Our matrix is in local space, time to bring it in its final desired space. + if parent: + if is_global: + # Move matrix to global Blender space. + matrix = (parent.matrix_rest_global if rest else parent.matrix_global) @ matrix + elif parent.use_bake_space_transform(scene_data): + # Blender's and FBX's local space of parent may differ if we use bake_space_transform... + # Apply parent's *Blender* local space... + matrix = (parent.matrix_rest_local if rest else parent.matrix_local) @ matrix + # ...and move it back into parent's *FBX* local space. + par_mat = parent.fbx_object_matrix(scene_data, rest=rest, local_space=True) + matrix = par_mat.inverted_safe() @ matrix + + if self.use_bake_space_transform(scene_data): + # If we bake the transforms we need to post-multiply inverse global transform. + # This means that the global transform will not apply to children of this transform. + matrix = matrix @ scene_data.settings.global_matrix_inv + if is_global: + # In any case, pre-multiply the global matrix to get it in FBX global space! + matrix = scene_data.settings.global_matrix @ matrix + + return matrix + + def fbx_object_tx(self, scene_data, rest=False, rot_euler_compat=None): + """ + Generate object transform data (always in local space when possible). + """ + matrix = self.fbx_object_matrix(scene_data, rest=rest) + loc, rot, scale = matrix.decompose() + matrix_rot = rot.to_matrix() + # quat -> euler, we always use 'XYZ' order, use ref rotation if given. + if rot_euler_compat is not None: + rot = rot.to_euler('XYZ', rot_euler_compat) + else: + rot = rot.to_euler('XYZ') + return loc, rot, scale, matrix, matrix_rot + + # #### _tag dependent... + def get_is_object(self): + return self._tag == 'OB' + is_object = property(get_is_object) + + def get_is_dupli(self): + return self._tag == 'DP' + is_dupli = property(get_is_dupli) + + def get_is_bone(self): + return self._tag == 'BO' + is_bone = property(get_is_bone) + + def get_type(self): + if self._tag in {'OB', 'DP'}: + return self.bdata.type + return ... 
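+        # (Bones have no Blender object type; the Ellipsis acts as a "no type" sentinel that never
+        # matches any real type string.)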
+ type = property(get_type) + + def get_armature(self): + if self._tag == 'BO': + return ObjectWrapper(self._ref) + return None + armature = property(get_armature) + + def get_bones(self): + if self._tag == 'OB' and self.bdata.type == 'ARMATURE': + return (ObjectWrapper(bo, self.bdata) for bo in self.bdata.data.bones) + return () + bones = property(get_bones) + + def get_materials(self): + override_materials = self.override_materials + if override_materials is not None: + return override_materials + if self._tag in {'OB', 'DP'}: + return tuple(slot.material for slot in self.bdata.material_slots) + return () + materials = property(get_materials) + + def is_deformed_by_armature(self, arm_obj): + if not (self.is_object and self.type == 'MESH'): + return False + if self.parent == arm_obj and self.bdata.parent_type == 'ARMATURE': + return True + for mod in self.bdata.modifiers: + if mod.type == 'ARMATURE' and mod.object == arm_obj.bdata: + return True + + # #### Duplis... + def dupli_list_gen(self, depsgraph): + if self._tag == 'OB' and self.bdata.is_instancer: + return (ObjectWrapper(dup) for dup in depsgraph.object_instances + if dup.parent and ObjectWrapper(dup.parent.original) == self) + return () + + +def fbx_name_class(name, cls): + return FBX_NAME_CLASS_SEP.join((name, cls)) + + +# ##### Top-level FBX data container. ##### + +# Helper sub-container gathering all exporter settings related to media (texture files). +FBXExportSettingsMedia = namedtuple("FBXExportSettingsMedia", ( + "path_mode", "base_src", "base_dst", "subdir", + "embed_textures", "copy_set", "embedded_set", +)) + +# Helper container gathering all exporter settings. +FBXExportSettings = namedtuple("FBXExportSettings", ( + "report", "to_axes", "global_matrix", "global_scale", "apply_unit_scale", "unit_scale", + "bake_space_transform", "global_matrix_inv", "global_matrix_inv_transposed", + "context_objects", "object_types", "use_mesh_modifiers", "use_mesh_modifiers_render", + "mesh_smooth_type", "use_subsurf", "use_mesh_edges", "use_tspace", "use_triangles", + "armature_nodetype", "use_armature_deform_only", "add_leaf_bones", + "bone_correction_matrix", "bone_correction_matrix_inv", + "bake_anim", "bake_anim_use_all_bones", "bake_anim_use_nla_strips", "bake_anim_use_all_actions", + "bake_anim_step", "bake_anim_simplify_factor", "bake_anim_force_startend_keying", + "use_metadata", "media_settings", "use_custom_props", "colors_type", "prioritize_active_color" +)) + +# Helper container gathering some data we need multiple times: +# * templates. +# * settings, scene. +# * objects. +# * object data. +# * skinning data (binding armature/mesh). +# * animations. +FBXExportData = namedtuple("FBXExportData", ( + "templates", "templates_users", "connections", + "settings", "scene", "depsgraph", "objects", "animations", "animated", "frame_start", "frame_end", + "data_empties", "data_lights", "data_cameras", "data_meshes", "mesh_material_indices", + "data_bones", "data_leaf_bones", "data_deformers_skin", "data_deformers_shape", + "data_world", "data_materials", "data_textures", "data_videos", +)) + +# Helper container gathering all importer settings. 
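+# (A plain namedtuple, like the exporter settings above: built once when an FBX import starts and then
+# passed, read-only, to the various import helpers.)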
+FBXImportSettings = namedtuple("FBXImportSettings", ( + "report", "to_axes", "global_matrix", "global_scale", + "bake_space_transform", "global_matrix_inv", "global_matrix_inv_transposed", + "use_custom_normals", "use_image_search", + "use_alpha_decals", "decal_offset", + "use_anim", "anim_offset", + "use_subsurf", + "use_custom_props", "use_custom_props_enum_as_string", + "nodal_material_wrap_map", "image_cache", + "ignore_leaf_bones", "force_connect_children", "automatic_bone_orientation", "bone_correction_matrix", + "use_prepost_rot", "colors_type", +)) diff --git a/scripts/addons_core/io_scene_fbx/fbx_utils_threading.py b/scripts/addons_core/io_scene_fbx/fbx_utils_threading.py new file mode 100644 index 00000000000..bf7631b511d --- /dev/null +++ b/scripts/addons_core/io_scene_fbx/fbx_utils_threading.py @@ -0,0 +1,194 @@ +# SPDX-FileCopyrightText: 2023 Blender Foundation +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from contextlib import contextmanager, nullcontext +import os +from queue import SimpleQueue + +# Note: `bpy` cannot be imported here because this module is also used by the fbx2json.py and json2fbx.py scripts. + +# For debugging/profiling purposes, can be modified at runtime to force single-threaded execution. +_MULTITHREADING_ENABLED = True +# The concurrent.futures module may not work or may not be available on WebAssembly platforms wasm32-emscripten and +# wasm32-wasi. +try: + from concurrent.futures import ThreadPoolExecutor +except ModuleNotFoundError: + _MULTITHREADING_ENABLED = False + ThreadPoolExecutor = None +else: + try: + # The module may be available, but not be fully functional. An error may be raised when attempting to start a + # new thread. + with ThreadPoolExecutor() as tpe: + # Attempt to start a thread by submitting a callable. + tpe.submit(lambda: None) + except Exception: + # Assume that multithreading is not supported and fall back to single-threaded execution. + _MULTITHREADING_ENABLED = False + + +def get_cpu_count(): + """Get the number of cpus assigned to the current process if that information is available on this system. + If not available, get the total number of cpus. + If the cpu count is indeterminable, it is assumed that there is only 1 cpu available.""" + sched_getaffinity = getattr(os, "sched_getaffinity", None) + if sched_getaffinity is not None: + # Return the number of cpus assigned to the current process. + return len(sched_getaffinity(0)) + count = os.cpu_count() + return count if count is not None else 1 + + +class MultiThreadedTaskConsumer: + """Helper class that encapsulates everything needed to run a function on separate threads, with a single-threaded + fallback if multithreading is not available. + + Lower overhead than typical use of ThreadPoolExecutor because no Future objects are returned, which makes this class + more suitable to running many smaller tasks. + + As with any threaded parallelization, because of Python's Global Interpreter Lock, only one thread can execute + Python code at a time, so threaded parallelization is only useful when the functions used release the GIL, such as + many IO related functions.""" + # A special task value used to signal task consumer threads to shut down. 
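+    # (`object()` gives a unique sentinel compared by identity, so it can never collide with genuine
+    # task arguments placed on the queue.)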
+ _SHUT_DOWN_THREADS = object() + + __slots__ = ("_consumer_function", "_shared_task_queue", "_task_consumer_futures", "_executor", + "_max_consumer_threads", "_shutting_down", "_max_queue_per_consumer") + + def __init__(self, consumer_function, max_consumer_threads, max_queue_per_consumer=5): + # It's recommended to use MultiThreadedTaskConsumer.new_cpu_bound_cm() instead of creating new instances + # directly. + # __init__ should only be called after checking _MULTITHREADING_ENABLED. + assert(_MULTITHREADING_ENABLED) + # The function that will be called on separate threads to consume tasks. + self._consumer_function = consumer_function + # All the threads share a single queue. This is a simplistic approach, but it is unlikely to be problematic + # unless the main thread is expected to wait a long time for the consumer threads to finish. + self._shared_task_queue = SimpleQueue() + # Reference to each thread is kept through the returned Future objects. This is used as part of determining when + # new threads should be started and is used to be able to receive and handle exceptions from the threads. + self._task_consumer_futures = [] + # Create the executor. + self._executor = ThreadPoolExecutor(max_workers=max_consumer_threads) + # Technically the max workers of the executor is accessible through its `._max_workers`, but since it's private, + # meaning it could be changed without warning, we'll store the max workers/consumers ourselves. + self._max_consumer_threads = max_consumer_threads + # The maximum task queue size (before another consumer thread is started) increases by this amount with every + # additional consumer thread. + self._max_queue_per_consumer = max_queue_per_consumer + # When shutting down the threads, this is set to True as an extra safeguard to prevent new tasks being + # scheduled. + self._shutting_down = False + + @classmethod + def new_cpu_bound_cm(cls, consumer_function, other_cpu_bound_threads_in_use=1, hard_max_threads=32): + """Return a context manager that, when entered, returns a wrapper around `consumer_function` that schedules + `consumer_function` to be run on a separate thread. + + If the system can't use multithreading, then the context manager's returned function will instead be the input + `consumer_function` argument, causing tasks to be run immediately on the calling thread. + + When exiting the context manager, it waits for all scheduled tasks to complete and prevents the creation of new + tasks, similar to calling ThreadPoolExecutor.shutdown(). For these reasons, the wrapped function should only be + called from the thread that entered the context manager, otherwise there is no guarantee that all tasks will get + scheduled before the context manager exits. + + Any task that fails with an exception will cause all task consumer threads to stop. + + The maximum number of threads used matches the number of cpus available up to a maximum of `hard_max_threads`. + `hard_max_threads`'s default of 32 matches ThreadPoolExecutor's default behaviour. + + The maximum number of threads used is decreased by `other_cpu_bound_threads_in_use`. Defaulting to `1`, assuming + that the calling thread will also be doing CPU-bound work. + + Most IO-bound tasks can probably use a ThreadPoolExecutor directly instead because there will typically be fewer + tasks and, on average, each individual task will take longer. 
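+
+        A typical call pattern looks roughly like::
+
+            with MultiThreadedTaskConsumer.new_cpu_bound_cm(consumer_function) as schedule_task:
+                for args in tasks:
+                    schedule_task(*args)
+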
+ If needed, `cls.new_cpu_bound_cm(consumer_function, -4)` could be suitable for lots of small IO-bound tasks, + because it ensures a minimum of 5 threads, like the default ThreadPoolExecutor.""" + if _MULTITHREADING_ENABLED: + max_threads = get_cpu_count() - other_cpu_bound_threads_in_use + max_threads = min(max_threads, hard_max_threads) + if max_threads > 0: + return cls(consumer_function, max_threads)._wrap_executor_cm() + # Fall back to single-threaded. + return nullcontext(consumer_function) + + def _task_consumer_callable(self): + """Callable that is run by each task consumer thread. + Signals the other task consumer threads to stop when stopped intentionally or when an exception occurs.""" + try: + while True: + # Blocks until it can get a task. + task_args = self._shared_task_queue.get() + + if task_args is self._SHUT_DOWN_THREADS: + # This special value signals that it's time for all the threads to stop. + break + else: + # Call the task consumer function. + self._consumer_function(*task_args) + finally: + # Either the thread has been told to shut down because it received _SHUT_DOWN_THREADS or an exception has + # occurred. + # Add _SHUT_DOWN_THREADS to the queue so that the other consumer threads will also shut down. + self._shared_task_queue.put(self._SHUT_DOWN_THREADS) + + def _schedule_task(self, *args): + """Task consumer threads are only started as tasks are added. + + To mitigate starting lots of threads if many tasks are scheduled in quick succession, new threads are only + started if the number of queued tasks grows too large. + + This function is a slight misuse of ThreadPoolExecutor. Normally each task to be scheduled would be submitted + through ThreadPoolExecutor.submit, but doing so is noticeably slower for small tasks. We could start new Thread + instances manually without using ThreadPoolExecutor, but ThreadPoolExecutor gives us a higher level API for + waiting for threads to finish and handling exceptions without having to implement an API using Thread ourselves. + """ + if self._shutting_down: + # Shouldn't occur through normal usage. + raise RuntimeError("Cannot schedule new tasks after shutdown") + # Schedule the task by adding it to the task queue. + self._shared_task_queue.put(args) + # Check if more consumer threads need to be added to account for the rate at which tasks are being scheduled + # compared to the rate at which tasks are being consumed. + current_consumer_count = len(self._task_consumer_futures) + if current_consumer_count < self._max_consumer_threads: + # The max queue size increases as new threads are added, otherwise, by the time the next task is added, it's + # likely that the queue size will still be over the max, causing another new thread to be added immediately. + # Increasing the max queue size whenever a new thread is started gives some time for the new thread to start + # up and begin consuming tasks before it's determined that another thread is needed. + max_queue_size_for_current_consumers = self._max_queue_per_consumer * current_consumer_count + + if self._shared_task_queue.qsize() > max_queue_size_for_current_consumers: + # Add a new consumer thread because the queue has grown too large. 
+ self._task_consumer_futures.append(self._executor.submit(self._task_consumer_callable)) + + @contextmanager + def _wrap_executor_cm(self): + """Wrap the executor's context manager to instead return self._schedule_task and such that the threads + automatically start shutting down before the executor itself starts shutting down.""" + # .__enter__() + # Exiting the context manager of the executor will wait for all threads to finish and prevent new + # threads from being created, as if its shutdown() method had been called. + with self._executor: + try: + yield self._schedule_task + finally: + # .__exit__() + self._shutting_down = True + # Signal all consumer threads to finish up and shut down so that the executor can shut down. + # When this is run on the same thread that schedules new tasks, this guarantees that no more tasks will + # be scheduled after the consumer threads start to shut down. + self._shared_task_queue.put(self._SHUT_DOWN_THREADS) + + # Because `self._executor` was entered with a context manager, it will wait for all the consumer threads + # to finish even if we propagate an exception from one of the threads here. + for future in self._task_consumer_futures: + # .exception() waits for the future to finish and returns its raised exception or None. + ex = future.exception() + if ex is not None: + # If one of the threads raised an exception, propagate it to the main thread. + # Only the first exception will be propagated if there were multiple. + raise ex diff --git a/scripts/addons_core/io_scene_fbx/import_fbx.py b/scripts/addons_core/io_scene_fbx/import_fbx.py new file mode 100644 index 00000000000..13daf585cba --- /dev/null +++ b/scripts/addons_core/io_scene_fbx/import_fbx.py @@ -0,0 +1,4024 @@ +# SPDX-FileCopyrightText: 2013-2023 Blender Foundation +# +# SPDX-License-Identifier: GPL-2.0-or-later + +# FBX 7.1.0 -> 7.4.0 loader for Blender + +# Not totally pep8 compliant. +# pep8 import_fbx.py --ignore=E501,E123,E702,E125 + +if "bpy" in locals(): + import importlib + if "parse_fbx" in locals(): + importlib.reload(parse_fbx) + if "fbx_utils" in locals(): + importlib.reload(fbx_utils) + +import bpy +from bpy.app.translations import pgettext_tip as tip_ +from mathutils import Matrix, Euler, Vector, Quaternion + +# Also imported in .fbx_utils, so importing here is unlikely to further affect Blender startup time. +import numpy as np + +# ----- +# Utils +from . import parse_fbx, fbx_utils + +from .parse_fbx import ( + data_types, + FBXElem, +) +from .fbx_utils import ( + PerfMon, + units_blender_to_fbx_factor, + units_convertor_iter, + array_to_matrix4, + similar_values, + similar_values_iter, + FBXImportSettings, + vcos_transformed, + nors_transformed, + parray_as_ndarray, + astype_view_signedness, + MESH_ATTRIBUTE_MATERIAL_INDEX, + MESH_ATTRIBUTE_POSITION, + MESH_ATTRIBUTE_EDGE_VERTS, + MESH_ATTRIBUTE_CORNER_VERT, + MESH_ATTRIBUTE_SHARP_FACE, + MESH_ATTRIBUTE_SHARP_EDGE, + expand_shape_key_range, + FBX_KTIME_V7, + FBX_KTIME_V8, + FBX_TIMECODE_DEFINITION_TO_KTIME_PER_SECOND, +) + +LINEAR_INTERPOLATION_VALUE = bpy.types.Keyframe.bl_rna.properties['interpolation'].enum_items['LINEAR'].value + +# global singleton, assign on execution +fbx_elem_nil = None + +# Units converters... 
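+# (FBX stores rotation values in degrees, while Blender works in radians internally.)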
+convert_deg_to_rad_iter = units_convertor_iter("degree", "radian") + +MAT_CONVERT_BONE = fbx_utils.MAT_CONVERT_BONE.inverted() +MAT_CONVERT_LIGHT = fbx_utils.MAT_CONVERT_LIGHT.inverted() +MAT_CONVERT_CAMERA = fbx_utils.MAT_CONVERT_CAMERA.inverted() + + +def validate_blend_names(name): + assert(type(name) == bytes) + # Blender typically does not accept names over 63 bytes... + if len(name) > 63: + import hashlib + h = hashlib.sha1(name).hexdigest() + n = 55 + name_utf8 = name[:n].decode('utf-8', 'replace') + "_" + h[:7] + while len(name_utf8.encode()) > 63: + n -= 1 + name_utf8 = name[:n].decode('utf-8', 'replace') + "_" + h[:7] + return name_utf8 + else: + # We use 'replace' even though FBX 'specs' say it should always be utf8, see T53841. + return name.decode('utf-8', 'replace') + + +def elem_find_first(elem, id_search, default=None): + for fbx_item in elem.elems: + if fbx_item.id == id_search: + return fbx_item + return default + + +def elem_find_iter(elem, id_search): + for fbx_item in elem.elems: + if fbx_item.id == id_search: + yield fbx_item + + +def elem_find_first_string(elem, id_search): + fbx_item = elem_find_first(elem, id_search) + if fbx_item is not None and fbx_item.props: # Do not error on complete empty properties (see T45291). + assert(len(fbx_item.props) == 1) + assert(fbx_item.props_type[0] == data_types.STRING) + return fbx_item.props[0].decode('utf-8', 'replace') + return None + + +def elem_find_first_string_as_bytes(elem, id_search): + fbx_item = elem_find_first(elem, id_search) + if fbx_item is not None and fbx_item.props: # Do not error on complete empty properties (see T45291). + assert(len(fbx_item.props) == 1) + assert(fbx_item.props_type[0] == data_types.STRING) + return fbx_item.props[0] # Keep it as bytes as requested... + return None + + +def elem_find_first_bytes(elem, id_search, decode=True): + fbx_item = elem_find_first(elem, id_search) + if fbx_item is not None and fbx_item.props: # Do not error on complete empty properties (see T45291). + assert(len(fbx_item.props) == 1) + assert(fbx_item.props_type[0] == data_types.BYTES) + return fbx_item.props[0] + return None + + +def elem_repr(elem): + return "%s: props[%d=%r], elems=(%r)" % ( + elem.id, + len(elem.props), + ", ".join([repr(p) for p in elem.props]), + # elem.props_type, + b", ".join([e.id for e in elem.elems]), + ) + + +def elem_split_name_class(elem): + assert(elem.props_type[-2] == data_types.STRING) + elem_name, elem_class = elem.props[-2].split(b'\x00\x01') + return elem_name, elem_class + + +def elem_name_ensure_class(elem, clss=...): + elem_name, elem_class = elem_split_name_class(elem) + if clss is not ...: + assert(elem_class == clss) + return validate_blend_names(elem_name) + + +def elem_name_ensure_classes(elem, clss=...): + elem_name, elem_class = elem_split_name_class(elem) + if clss is not ...: + assert(elem_class in clss) + return validate_blend_names(elem_name) + + +def elem_split_name_class_nodeattr(elem): + assert(elem.props_type[-2] == data_types.STRING) + elem_name, elem_class = elem.props[-2].split(b'\x00\x01') + assert(elem_class == b'NodeAttribute') + assert(elem.props_type[-1] == data_types.STRING) + elem_class = elem.props[-1] + return elem_name, elem_class + + +def elem_uuid(elem): + assert(elem.props_type[0] == data_types.INT64) + return elem.props[0] + + +def elem_prop_first(elem, default=None): + return elem.props[0] if (elem is not None) and elem.props else default + + +# ---- +# Support for +# Properties70: { ... 
P: +# Custom properties ("user properties" in FBX) are ignored here and get handled separately (see #104773). +def elem_props_find_first(elem, elem_prop_id): + if elem is None: + # When properties are not found... Should never happen, but happens - as usual. + return None + # support for templates (tuple of elems) + if type(elem) is not FBXElem: + assert(type(elem) is tuple) + for e in elem: + result = elem_props_find_first(e, elem_prop_id) + if result is not None: + return result + assert(len(elem) > 0) + return None + + for subelem in elem.elems: + assert(subelem.id == b'P') + # 'U' flag indicates that the property has been defined by the user. + if subelem.props[0] == elem_prop_id and b'U' not in subelem.props[3]: + return subelem + return None + + +def elem_props_get_color_rgb(elem, elem_prop_id, default=None): + elem_prop = elem_props_find_first(elem, elem_prop_id) + if elem_prop is not None: + assert(elem_prop.props[0] == elem_prop_id) + if elem_prop.props[1] == b'Color': + # FBX version 7300 + assert(elem_prop.props[1] == b'Color') + assert(elem_prop.props[2] == b'') + else: + assert(elem_prop.props[1] == b'ColorRGB') + assert(elem_prop.props[2] == b'Color') + assert(elem_prop.props_type[4:7] == bytes((data_types.FLOAT64,)) * 3) + return elem_prop.props[4:7] + return default + + +def elem_props_get_vector_3d(elem, elem_prop_id, default=None): + elem_prop = elem_props_find_first(elem, elem_prop_id) + if elem_prop is not None: + assert(elem_prop.props_type[4:7] == bytes((data_types.FLOAT64,)) * 3) + return elem_prop.props[4:7] + return default + + +def elem_props_get_number(elem, elem_prop_id, default=None): + elem_prop = elem_props_find_first(elem, elem_prop_id) + if elem_prop is not None: + assert(elem_prop.props[0] == elem_prop_id) + if elem_prop.props[1] == b'double': + assert(elem_prop.props[1] == b'double') + assert(elem_prop.props[2] == b'Number') + else: + assert(elem_prop.props[1] == b'Number') + assert(elem_prop.props[2] == b'') + + # we could allow other number types + assert(elem_prop.props_type[4] == data_types.FLOAT64) + + return elem_prop.props[4] + return default + + +def elem_props_get_integer(elem, elem_prop_id, default=None): + elem_prop = elem_props_find_first(elem, elem_prop_id) + if elem_prop is not None: + assert(elem_prop.props[0] == elem_prop_id) + if elem_prop.props[1] == b'int': + assert(elem_prop.props[1] == b'int') + assert(elem_prop.props[2] == b'Integer') + elif elem_prop.props[1] == b'ULongLong': + assert(elem_prop.props[1] == b'ULongLong') + assert(elem_prop.props[2] == b'') + + # we could allow other number types + assert(elem_prop.props_type[4] in {data_types.INT32, data_types.INT64}) + + return elem_prop.props[4] + return default + + +def elem_props_get_bool(elem, elem_prop_id, default=None): + elem_prop = elem_props_find_first(elem, elem_prop_id) + if elem_prop is not None: + assert(elem_prop.props[0] == elem_prop_id) + # b'Bool' with a capital seems to be used for animated property... go figure... 
+ assert(elem_prop.props[1] in {b'bool', b'Bool'}) + assert(elem_prop.props[2] == b'') + + # we could allow other number types + assert(elem_prop.props_type[4] == data_types.INT32) + assert(elem_prop.props[4] in {0, 1}) + + return bool(elem_prop.props[4]) + return default + + +def elem_props_get_enum(elem, elem_prop_id, default=None): + elem_prop = elem_props_find_first(elem, elem_prop_id) + if elem_prop is not None: + assert(elem_prop.props[0] == elem_prop_id) + assert(elem_prop.props[1] == b'enum') + assert(elem_prop.props[2] == b'') + assert(elem_prop.props[3] == b'') + + # we could allow other number types + assert(elem_prop.props_type[4] == data_types.INT32) + + return elem_prop.props[4] + return default + + +def elem_props_get_visibility(elem, elem_prop_id, default=None): + elem_prop = elem_props_find_first(elem, elem_prop_id) + if elem_prop is not None: + assert(elem_prop.props[0] == elem_prop_id) + assert(elem_prop.props[1] == b'Visibility') + assert(elem_prop.props[2] == b'') + + # we could allow other number types + assert(elem_prop.props_type[4] == data_types.FLOAT64) + + return elem_prop.props[4] + return default + + +# ---------------------------------------------------------------------------- +# Blender + +# ------ +# Object +from collections import namedtuple + + +FBXTransformData = namedtuple("FBXTransformData", ( + "loc", "geom_loc", + "rot", "rot_ofs", "rot_piv", "pre_rot", "pst_rot", "rot_ord", "rot_alt_mat", "geom_rot", + "sca", "sca_ofs", "sca_piv", "geom_sca", +)) + + +def blen_read_custom_properties(fbx_obj, blen_obj, settings): + # There doesn't seem to be a way to put user properties into templates, so this only get the object properties: + fbx_obj_props = elem_find_first(fbx_obj, b'Properties70') + if fbx_obj_props: + for fbx_prop in fbx_obj_props.elems: + assert(fbx_prop.id == b'P') + + if b'U' in fbx_prop.props[3]: + if fbx_prop.props[0] == b'UDP3DSMAX': + # Special case for 3DS Max user properties: + try: + assert(fbx_prop.props[1] == b'KString') + except AssertionError as exc: + print(exc) + assert(fbx_prop.props_type[4] == data_types.STRING) + items = fbx_prop.props[4].decode('utf-8', 'replace') + for item in items.split('\r\n'): + if item: + split_item = item.split('=', 1) + if len(split_item) != 2: + split_item = item.split(':', 1) + if len(split_item) != 2: + print("cannot parse UDP3DSMAX custom property '%s', ignoring..." 
% item) + else: + prop_name, prop_value = split_item + prop_name = validate_blend_names(prop_name.strip().encode('utf-8')) + blen_obj[prop_name] = prop_value.strip() + else: + prop_name = validate_blend_names(fbx_prop.props[0]) + prop_type = fbx_prop.props[1] + if prop_type in {b'Vector', b'Vector3D', b'Color', b'ColorRGB'}: + assert(fbx_prop.props_type[4:7] == bytes((data_types.FLOAT64,)) * 3) + blen_obj[prop_name] = fbx_prop.props[4:7] + elif prop_type in {b'Vector4', b'ColorRGBA'}: + assert(fbx_prop.props_type[4:8] == bytes((data_types.FLOAT64,)) * 4) + blen_obj[prop_name] = fbx_prop.props[4:8] + elif prop_type == b'Vector2D': + assert(fbx_prop.props_type[4:6] == bytes((data_types.FLOAT64,)) * 2) + blen_obj[prop_name] = fbx_prop.props[4:6] + elif prop_type in {b'Integer', b'int'}: + assert(fbx_prop.props_type[4] == data_types.INT32) + blen_obj[prop_name] = fbx_prop.props[4] + elif prop_type == b'KString': + assert(fbx_prop.props_type[4] == data_types.STRING) + blen_obj[prop_name] = fbx_prop.props[4].decode('utf-8', 'replace') + elif prop_type in {b'Number', b'double', b'Double'}: + assert(fbx_prop.props_type[4] == data_types.FLOAT64) + blen_obj[prop_name] = fbx_prop.props[4] + elif prop_type in {b'Float', b'float'}: + assert(fbx_prop.props_type[4] == data_types.FLOAT32) + blen_obj[prop_name] = fbx_prop.props[4] + elif prop_type in {b'Bool', b'bool'}: + assert(fbx_prop.props_type[4] == data_types.INT32) + blen_obj[prop_name] = fbx_prop.props[4] != 0 + elif prop_type in {b'Enum', b'enum'}: + assert(fbx_prop.props_type[4:6] == bytes((data_types.INT32, data_types.STRING))) + val = fbx_prop.props[4] + if settings.use_custom_props_enum_as_string and fbx_prop.props[5]: + enum_items = fbx_prop.props[5].decode('utf-8', 'replace').split('~') + if val >= 0 and val < len(enum_items): + blen_obj[prop_name] = enum_items[val] + else: + print("WARNING: User property '%s' has wrong enum value, skipped" % prop_name) + else: + blen_obj[prop_name] = val + else: + print( + "WARNING: User property type '%s' is not supported" % + prop_type.decode( + 'utf-8', 'replace')) + + +def blen_read_object_transform_do(transform_data): + # This is a nightmare. FBX SDK uses Maya way to compute the transformation matrix of a node - utterly simple: + # + # WorldTransform = ParentWorldTransform @ T @ Roff @ Rp @ Rpre @ R @ Rpost-1 @ Rp-1 @ Soff @ Sp @ S @ Sp-1 + # + # Where all those terms are 4 x 4 matrices that contain: + # WorldTransform: Transformation matrix of the node in global space. + # ParentWorldTransform: Transformation matrix of the parent node in global space. + # T: Translation + # Roff: Rotation offset + # Rp: Rotation pivot + # Rpre: Pre-rotation + # R: Rotation + # Rpost-1: Inverse of the post-rotation (FBX 2011 documentation incorrectly specifies this without inversion) + # Rp-1: Inverse of the rotation pivot + # Soff: Scaling offset + # Sp: Scaling pivot + # S: Scaling + # Sp-1: Inverse of the scaling pivot + # + # But it was still too simple, and FBX notion of compatibility is... quite specific. 
So we also have to + # support 3DSMax way: + # + # WorldTransform = ParentWorldTransform @ T @ R @ S @ OT @ OR @ OS + # + # Where all those terms are 4 x 4 matrices that contain: + # WorldTransform: Transformation matrix of the node in global space + # ParentWorldTransform: Transformation matrix of the parent node in global space + # T: Translation + # R: Rotation + # S: Scaling + # OT: Geometric transform translation + # OR: Geometric transform rotation + # OS: Geometric transform scale + # + # Notes: + # Geometric transformations ***are not inherited***: ParentWorldTransform does not contain the OT, OR, OS + # of WorldTransform's parent node. + # The R matrix takes into account the rotation order. Other rotation matrices are always 'XYZ' order. + # + # Taken from https://help.autodesk.com/view/FBX/2020/ENU/ + # ?guid=FBX_Developer_Help_nodes_and_scene_graph_fbx_nodes_computing_transformation_matrix_html + + # translation + lcl_translation = Matrix.Translation(transform_data.loc) + geom_loc = Matrix.Translation(transform_data.geom_loc) + + # rotation + def to_rot(rot, rot_ord): return Euler(convert_deg_to_rad_iter(rot), rot_ord).to_matrix().to_4x4() + lcl_rot = to_rot(transform_data.rot, transform_data.rot_ord) @ transform_data.rot_alt_mat + pre_rot = to_rot(transform_data.pre_rot, 'XYZ') + pst_rot = to_rot(transform_data.pst_rot, 'XYZ') + geom_rot = to_rot(transform_data.geom_rot, 'XYZ') + + rot_ofs = Matrix.Translation(transform_data.rot_ofs) + rot_piv = Matrix.Translation(transform_data.rot_piv) + sca_ofs = Matrix.Translation(transform_data.sca_ofs) + sca_piv = Matrix.Translation(transform_data.sca_piv) + + # scale + lcl_scale = Matrix() + lcl_scale[0][0], lcl_scale[1][1], lcl_scale[2][2] = transform_data.sca + geom_scale = Matrix() + geom_scale[0][0], geom_scale[1][1], geom_scale[2][2] = transform_data.geom_sca + + base_mat = ( + lcl_translation @ + rot_ofs @ + rot_piv @ + pre_rot @ + lcl_rot @ + pst_rot.inverted_safe() @ + rot_piv.inverted_safe() @ + sca_ofs @ + sca_piv @ + lcl_scale @ + sca_piv.inverted_safe() + ) + geom_mat = geom_loc @ geom_rot @ geom_scale + # We return mat without 'geometric transforms' too, because it is to be used for children, sigh... + return (base_mat @ geom_mat, base_mat, geom_mat) + + +# XXX This might be weak, now that we can add vgroups from both bones and shapes, name collisions become +# more likely, will have to make this more robust!!! +def add_vgroup_to_objects(vg_indices, vg_weights, vg_name, objects): + assert(len(vg_indices) == len(vg_weights)) + if vg_indices: + for obj in objects: + # We replace/override here... 
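+            # Reuse an existing vertex group of the same name if present; weights for the given
+            # vertices are then overwritten below via 'REPLACE'.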
+ vg = obj.vertex_groups.get(vg_name) + if vg is None: + vg = obj.vertex_groups.new(name=vg_name) + vg_add = vg.add + for i, w in zip(vg_indices, vg_weights): + vg_add((i,), w, 'REPLACE') + + +def blen_read_object_transform_preprocess(fbx_props, fbx_obj, rot_alt_mat, use_prepost_rot): + # This is quite involved, 'fbxRNode.cpp' from openscenegraph used as a reference + const_vector_zero_3d = 0.0, 0.0, 0.0 + const_vector_one_3d = 1.0, 1.0, 1.0 + + loc = list(elem_props_get_vector_3d(fbx_props, b'Lcl Translation', const_vector_zero_3d)) + rot = list(elem_props_get_vector_3d(fbx_props, b'Lcl Rotation', const_vector_zero_3d)) + sca = list(elem_props_get_vector_3d(fbx_props, b'Lcl Scaling', const_vector_one_3d)) + + geom_loc = list(elem_props_get_vector_3d(fbx_props, b'GeometricTranslation', const_vector_zero_3d)) + geom_rot = list(elem_props_get_vector_3d(fbx_props, b'GeometricRotation', const_vector_zero_3d)) + geom_sca = list(elem_props_get_vector_3d(fbx_props, b'GeometricScaling', const_vector_one_3d)) + + rot_ofs = elem_props_get_vector_3d(fbx_props, b'RotationOffset', const_vector_zero_3d) + rot_piv = elem_props_get_vector_3d(fbx_props, b'RotationPivot', const_vector_zero_3d) + sca_ofs = elem_props_get_vector_3d(fbx_props, b'ScalingOffset', const_vector_zero_3d) + sca_piv = elem_props_get_vector_3d(fbx_props, b'ScalingPivot', const_vector_zero_3d) + + is_rot_act = elem_props_get_bool(fbx_props, b'RotationActive', False) + + if is_rot_act: + if use_prepost_rot: + pre_rot = elem_props_get_vector_3d(fbx_props, b'PreRotation', const_vector_zero_3d) + pst_rot = elem_props_get_vector_3d(fbx_props, b'PostRotation', const_vector_zero_3d) + else: + pre_rot = const_vector_zero_3d + pst_rot = const_vector_zero_3d + rot_ord = { + 0: 'XYZ', + 1: 'XZY', + 2: 'YZX', + 3: 'YXZ', + 4: 'ZXY', + 5: 'ZYX', + 6: 'XYZ', # XXX eSphericXYZ, not really supported... + }.get(elem_props_get_enum(fbx_props, b'RotationOrder', 0)) + else: + pre_rot = const_vector_zero_3d + pst_rot = const_vector_zero_3d + rot_ord = 'XYZ' + + return FBXTransformData(loc, geom_loc, + rot, rot_ofs, rot_piv, pre_rot, pst_rot, rot_ord, rot_alt_mat, geom_rot, + sca, sca_ofs, sca_piv, geom_sca) + + +# --------- +# Animation +def _blen_read_object_transform_do_anim(transform_data, lcl_translation_mat, lcl_rot_euler, lcl_scale_mat, + extra_pre_matrix, extra_post_matrix): + """Specialized version of blen_read_object_transform_do for animation that pre-calculates the non-animated matrices + and returns a function that calculates (base_mat @ geom_mat). See the comments in blen_read_object_transform_do for + a full description of what this function is doing. + + The lcl_translation_mat, lcl_rot_euler and lcl_scale_mat arguments should have their values updated each frame and + then calling the returned function will calculate the matrix for the current frame. + + extra_pre_matrix and extra_post_matrix are any extra matrices to multiply first/last.""" + # Translation + geom_loc = Matrix.Translation(transform_data.geom_loc) + + # Rotation + def to_rot_xyz(rot): + # All the rotations that can be precalculated have a fixed XYZ order. 
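+        # FBX stores these angles in degrees; convert_deg_to_rad_iter converts them to radians for Blender's Euler.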
+ return Euler(convert_deg_to_rad_iter(rot), 'XYZ').to_matrix().to_4x4() + pre_rot = to_rot_xyz(transform_data.pre_rot) + pst_rot_inv = to_rot_xyz(transform_data.pst_rot).inverted_safe() + geom_rot = to_rot_xyz(transform_data.geom_rot) + + # Offsets and pivots + rot_ofs = Matrix.Translation(transform_data.rot_ofs) + rot_piv = Matrix.Translation(transform_data.rot_piv) + rot_piv_inv = rot_piv.inverted_safe() + sca_ofs = Matrix.Translation(transform_data.sca_ofs) + sca_piv = Matrix.Translation(transform_data.sca_piv) + sca_piv_inv = sca_piv.inverted_safe() + + # Scale + geom_scale = Matrix() + geom_scale[0][0], geom_scale[1][1], geom_scale[2][2] = transform_data.geom_sca + + # Some matrices can be combined in advance, using the associative property of matrix multiplication, so that less + # matrix multiplication is required each frame. + geom_mat = geom_loc @ geom_rot @ geom_scale + post_lcl_translation = rot_ofs @ rot_piv @ pre_rot + post_lcl_rotation = transform_data.rot_alt_mat @ pst_rot_inv @ rot_piv_inv @ sca_ofs @ sca_piv + post_lcl_scaling = sca_piv_inv @ geom_mat @ extra_post_matrix + + # Get the bound to_matrix method to avoid re-binding it on each call. + lcl_rot_euler_to_matrix_3x3 = lcl_rot_euler.to_matrix + # Get the unbound Matrix.to_4x4 method to avoid having to look it up again on each call. + matrix_to_4x4 = Matrix.to_4x4 + + if extra_pre_matrix == Matrix(): + # There aren't any other matrices that must be multiplied before lcl_translation_mat that extra_pre_matrix can + # be combined with, so skip extra_pre_matrix when it's the identity matrix. + return lambda: (lcl_translation_mat @ + post_lcl_translation @ + matrix_to_4x4(lcl_rot_euler_to_matrix_3x3()) @ + post_lcl_rotation @ + lcl_scale_mat @ + post_lcl_scaling) + else: + return lambda: (extra_pre_matrix @ + lcl_translation_mat @ + post_lcl_translation @ + matrix_to_4x4(lcl_rot_euler_to_matrix_3x3()) @ + post_lcl_rotation @ + lcl_scale_mat @ + post_lcl_scaling) + + +def _transformation_curves_gen(item, values_arrays, channel_keys): + """Yields flattened location/rotation/scaling values for imported PoseBone/Object Lcl Translation/Rotation/Scaling + animation curve values. + + The value arrays must have the same lengths, where each index of each array corresponds to a single keyframe. + + Each value array must have a corresponding channel key tuple that identifies the fbx property + (b'Lcl Translation'/b'Lcl Rotation'/b'Lcl Scaling') and the channel (x/y/z as 0/1/2) of that property.""" + from operator import setitem + from functools import partial + + if item.is_bone: + bl_obj = item.bl_obj.pose.bones[item.bl_bone] + else: + bl_obj = item.bl_obj + + rot_mode = bl_obj.rotation_mode + transform_data = item.fbx_transform_data + rot_eul_prev = bl_obj.rotation_euler.copy() + rot_quat_prev = bl_obj.rotation_quaternion.copy() + + # Pre-compute combined pre-matrix + # Remove that rest pose matrix from current matrix (also in parent space) by computing the inverted local rest + # matrix of the bone, if relevant. 
+ combined_pre_matrix = item.get_bind_matrix().inverted_safe() if item.is_bone else Matrix() + # item.pre_matrix will contain any correction for a parent's correction matrix or the global matrix + if item.pre_matrix: + combined_pre_matrix @= item.pre_matrix + + # Pre-compute combined post-matrix + # Compensate for changes in the local matrix during processing + combined_post_matrix = item.anim_compensation_matrix.copy() if item.anim_compensation_matrix else Matrix() + # item.post_matrix will contain any correction for lights, camera and bone orientation + if item.post_matrix: + combined_post_matrix @= item.post_matrix + + # Create matrices/euler from the initial transformation values of this item. + # These variables will be updated in-place as we iterate through each frame. + lcl_translation_mat = Matrix.Translation(transform_data.loc) + lcl_rotation_eul = Euler(convert_deg_to_rad_iter(transform_data.rot), transform_data.rot_ord) + lcl_scaling_mat = Matrix() + lcl_scaling_mat[0][0], lcl_scaling_mat[1][1], lcl_scaling_mat[2][2] = transform_data.sca + + # Create setters into lcl_translation_mat, lcl_rotation_eul and lcl_scaling_mat for each values_array and convert + # any rotation values into radians. + lcl_setters = [] + values_arrays_converted = [] + for values_array, (fbx_prop, channel) in zip(values_arrays, channel_keys): + if fbx_prop == b'Lcl Translation': + # lcl_translation_mat.translation[channel] = value + setter = partial(setitem, lcl_translation_mat.translation, channel) + elif fbx_prop == b'Lcl Rotation': + # FBX rotations are in degrees, but Blender uses radians, so convert all rotation values in advance. + values_array = np.deg2rad(values_array) + # lcl_rotation_eul[channel] = value + setter = partial(setitem, lcl_rotation_eul, channel) + else: + assert(fbx_prop == b'Lcl Scaling') + # lcl_scaling_mat[channel][channel] = value + setter = partial(setitem, lcl_scaling_mat[channel], channel) + lcl_setters.append(setter) + values_arrays_converted.append(values_array) + + # Create an iterator that gets one value from each array. Each iterated tuple will be all the imported + # Lcl Translation/Lcl Rotation/Lcl Scaling values for a single frame, in that order. + # Note that an FBX animation does not have to animate all the channels, so only the animated channels of each + # property will be present. + # .data, the memoryview of an np.ndarray, is faster to iterate than the ndarray itself. + frame_values_it = zip(*(arr.data for arr in values_arrays_converted)) + + # Getting the unbound methods in advance avoids having to look them up again on each call within the loop. + mat_decompose = Matrix.decompose + quat_to_axis_angle = Quaternion.to_axis_angle + quat_to_euler = Quaternion.to_euler + quat_dot = Quaternion.dot + + calc_mat = _blen_read_object_transform_do_anim(transform_data, + lcl_translation_mat, lcl_rotation_eul, lcl_scaling_mat, + combined_pre_matrix, combined_post_matrix) + + # Iterate through the values for each frame. + for frame_values in frame_values_it: + # Set each value into its corresponding lcl matrix/euler. + for lcl_setter, value in zip(lcl_setters, frame_values): + lcl_setter(value) + + # Calculate the updated matrix for this frame. + mat = calc_mat() + + # Now we have a virtual matrix of transform from AnimCurves, we can yield keyframe values! 
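+        # Decompose into loc/quaternion/scale, then make the rotation 'compatible' with the previous frame's
+        # value (quaternion sign flip, or Euler computed relative to the previous Euler) to avoid jumps when
+        # interpolating between keyframes.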
+ loc, rot, sca = mat_decompose(mat) + if rot_mode == 'QUATERNION': + if quat_dot(rot_quat_prev, rot) < 0.0: + rot = -rot + rot_quat_prev = rot + elif rot_mode == 'AXIS_ANGLE': + vec, ang = quat_to_axis_angle(rot) + rot = ang, vec.x, vec.y, vec.z + else: # Euler + rot = quat_to_euler(rot, rot_mode, rot_eul_prev) + rot_eul_prev = rot + + # Yield order matches the order that the location/rotation/scale FCurves are created in. + yield from loc + yield from rot + yield from sca + + +def _combine_curve_keyframe_times(times_and_values_tuples, initial_values): + """Combine multiple parsed animation curves, that affect different channels, such that every animation curve + contains the keyframes from every other curve, interpolating the values for the newly inserted keyframes in each + curve. + + Currently, linear interpolation is assumed, but FBX does store how keyframes should be interpolated, so correctly + interpolating the keyframe values is a TODO.""" + if len(times_and_values_tuples) == 1: + # Nothing to do when there is only a single curve. + times, values = times_and_values_tuples[0] + return times, [values] + + all_times = [t[0] for t in times_and_values_tuples] + + # Get the combined sorted unique times of all the curves. + sorted_all_times = np.unique(np.concatenate(all_times)) + + values_arrays = [] + for (times, values), initial_value in zip(times_and_values_tuples, initial_values): + if sorted_all_times.size == times.size: + # `sorted_all_times` will always contain all values in `times` and both `times` and `sorted_all_times` must + # be strictly increasing, so if both arrays have the same size, they must be identical. + extended_values = values + else: + # For now, linear interpolation is assumed. NumPy conveniently has a fast C-compiled function for this. + # Efficiently implementing other FBX supported interpolation will most likely be much more complicated. + extended_values = np.interp(sorted_all_times, times, values, left=initial_value) + values_arrays.append(extended_values) + return sorted_all_times, values_arrays + + +def blen_read_invalid_animation_curve(key_times, key_values): + """FBX will parse animation curves even when their keyframe times are invalid (not strictly increasing). It's + unclear exactly how FBX handles invalid curves, but this matches in some cases and is how the FBX IO addon has been + handling invalid keyframe times for a long time. + + Notably, this function will also correctly parse valid animation curves, though is much slower than the trivial, + regular way. + + The returned keyframe times are guaranteed to be strictly increasing.""" + sorted_unique_times = np.unique(key_times) + + # Unsure if this can be vectorized with numpy, so using iteration for now. + def index_gen(): + idx = 0 + key_times_data = key_times.data + key_times_len = len(key_times) + # Iterating .data, the memoryview of the array, is faster than iterating the array directly. + for curr_fbxktime in sorted_unique_times.data: + if key_times_data[idx] < curr_fbxktime: + if idx >= 0: + idx += 1 + if idx >= key_times_len: + # We have reached our last element for this curve, stay on it from now on... + idx = -1 + yield idx + + indices = np.fromiter(index_gen(), dtype=np.int64, count=len(sorted_unique_times)) + indexed_times = key_times[indices] + indexed_values = key_values[indices] + + # Linear interpolate the value for each time in sorted_unique_times according to the times and values at each index + # and the previous index. 
+ interpolated_values = np.empty_like(indexed_values) + + # Where the index is 0, there's no previous value to interpolate from, so we set the value without interpolating. + # Because the indices are in increasing order, all zeroes must be at the start, so we can find the index of the last + # zero and use that to index with a slice instead of a boolean array for performance. + # Equivalent to, but as a slice: + # idx_zero_mask = indices == 0 + # idx_nonzero_mask = ~idx_zero_mask + first_nonzero_idx = np.searchsorted(indices, 0, side='right') + idx_zero_slice = slice(0, first_nonzero_idx) # [:first_nonzero_idx] + idx_nonzero_slice = slice(first_nonzero_idx, None) # [first_nonzero_idx:] + + interpolated_values[idx_zero_slice] = indexed_values[idx_zero_slice] + + indexed_times_nonzero_idx = indexed_times[idx_nonzero_slice] + indexed_values_nonzero_idx = indexed_values[idx_nonzero_slice] + indices_nonzero = indices[idx_nonzero_slice] + + prev_indices_nonzero = indices_nonzero - 1 + prev_indexed_times_nonzero_idx = key_times[prev_indices_nonzero] + prev_indexed_values_nonzero_idx = key_values[prev_indices_nonzero] + + ifac_a = sorted_unique_times[idx_nonzero_slice] - prev_indexed_times_nonzero_idx + ifac_b = indexed_times_nonzero_idx - prev_indexed_times_nonzero_idx + # If key_times contains two (or more) duplicate times in a row, then values in `ifac_b` can be zero which would + # result in division by zero. + # Use the `np.errstate` context manager to suppress printing the RuntimeWarning to the system console. + with np.errstate(divide='ignore'): + ifac = ifac_a / ifac_b + interpolated_values[idx_nonzero_slice] = ((indexed_values_nonzero_idx - prev_indexed_values_nonzero_idx) * ifac + + prev_indexed_values_nonzero_idx) + + # If the time to interpolate at is larger than the time in indexed_times, then the value has been extrapolated. + # Extrapolated values are excluded. + valid_mask = indexed_times >= sorted_unique_times + + key_times = sorted_unique_times[valid_mask] + key_values = interpolated_values[valid_mask] + + return key_times, key_values + + +def _convert_fbx_time_to_blender_time(key_times, blen_start_offset, fbx_start_offset, fps, fbx_ktime): + timefac = fps / fbx_ktime + + # Convert from FBX timing to Blender timing. + # Cannot subtract in-place because key_times could be read directly from FBX and could be used by multiple Actions. + key_times = key_times - fbx_start_offset + # FBX times are integers and timefac is a Python float, so the new array will be a np.float64 array. + key_times = key_times * timefac + + key_times += blen_start_offset + + return key_times + + +def blen_read_animation_curve(fbx_curve): + """Read an animation curve from FBX data. + + The parsed keyframe times are guaranteed to be strictly increasing.""" + key_times = parray_as_ndarray(elem_prop_first(elem_find_first(fbx_curve, b'KeyTime'))) + key_values = parray_as_ndarray(elem_prop_first(elem_find_first(fbx_curve, b'KeyValueFloat'))) + + assert(len(key_values) == len(key_times)) + + # The FBX SDK specifies that only one key per time is allowed and that the keys are sorted in time order. + # https://help.autodesk.com/view/FBX/2020/ENU/?guid=FBX_Developer_Help_cpp_ref_class_fbx_anim_curve_html + all_times_strictly_increasing = (key_times[1:] > key_times[:-1]).all() + + if all_times_strictly_increasing: + return key_times, key_values + else: + # FBX will still read animation curves even if they are invalid. 
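+        # e.g. key times such as [0, 10, 10, 5] (duplicate and decreasing entries) take this slower path,
+        # which sorts and de-duplicates the times and linearly interpolates the values onto a strictly
+        # increasing timeline.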
+ return blen_read_invalid_animation_curve(key_times, key_values) + + +def blen_store_keyframes(fbx_key_times, blen_fcurve, key_values, blen_start_offset, fps, fbx_ktime, fbx_start_offset=0): + """Set all keyframe times and values for a newly created FCurve. + Linear interpolation is currently assumed. + + This is a convenience function for calling blen_store_keyframes_multi with only a single fcurve and values array.""" + blen_store_keyframes_multi(fbx_key_times, [(blen_fcurve, key_values)], blen_start_offset, fps, fbx_ktime, + fbx_start_offset) + + +def blen_store_keyframes_multi(fbx_key_times, fcurve_and_key_values_pairs, blen_start_offset, fps, fbx_ktime, + fbx_start_offset=0): + """Set all keyframe times and values for multiple pairs of newly created FCurves and keyframe values arrays, where + each pair has the same keyframe times. + Linear interpolation is currently assumed.""" + bl_key_times = _convert_fbx_time_to_blender_time(fbx_key_times, blen_start_offset, fbx_start_offset, fps, fbx_ktime) + num_keys = len(bl_key_times) + + # Compatible with C float type + bl_keyframe_dtype = np.single + # Compatible with C char type + bl_enum_dtype = np.ubyte + + # The keyframe_points 'co' are accessed as flattened pairs of (time, value). + # The key times are the same for each (blen_fcurve, key_values) pair, so only the values need to be updated for each + # array of values. + keyframe_points_co = np.empty(len(bl_key_times) * 2, dtype=bl_keyframe_dtype) + # Even indices are times. + keyframe_points_co[0::2] = bl_key_times + + interpolation_array = np.full(num_keys, LINEAR_INTERPOLATION_VALUE, dtype=bl_enum_dtype) + + for blen_fcurve, key_values in fcurve_and_key_values_pairs: + # The fcurve must be newly created and thus have no keyframe_points. + assert(len(blen_fcurve.keyframe_points) == 0) + + # Odd indices are values. + keyframe_points_co[1::2] = key_values + + # Add the keyframe points to the FCurve and then set the 'co' and 'interpolation' of each point. + blen_fcurve.keyframe_points.add(num_keys) + blen_fcurve.keyframe_points.foreach_set('co', keyframe_points_co) + blen_fcurve.keyframe_points.foreach_set('interpolation', interpolation_array) + + # Since we inserted our keyframes in 'ultra-fast' mode, we have to update the fcurves now. + blen_fcurve.update() + + +def blen_read_animations_action_item(action, item, cnodes, fps, anim_offset, global_scale, shape_key_deforms, + fbx_ktime): + """ + 'Bake' loc/rot/scale into the action, + taking any pre_ and post_ matrix into account to transform from fbx into blender space. + """ + from bpy.types import Object, PoseBone, ShapeKey, Material, Camera + + fbx_curves: dict[bytes, dict[int, FBXElem]] = {} + for curves, fbxprop in cnodes.values(): + channels_dict = fbx_curves.setdefault(fbxprop, {}) + for (fbx_acdata, _blen_data), channel in curves.values(): + if channel in channels_dict: + # Ignore extra curves when one has already been found for this channel because FBX's default animation + # system implementation only uses the first curve assigned to a channel. + # Additional curves per channel are allowed by the FBX specification, but the handling of these curves + # is considered the responsibility of the application that created them. Note that each curve node is + # expected to have a unique set of channels, so these additional curves with the same channel would have + # to belong to separate curve nodes. See the FBX SDK documentation for FbxAnimCurveNode. 
+ continue + channels_dict[channel] = fbx_acdata + + # Leave if no curves are attached (if a blender curve is attached to scale but without keys it defaults to 0). + if len(fbx_curves) == 0: + return + + if isinstance(item, Material): + grpname = item.name + props = [("diffuse_color", 3, grpname or "Diffuse Color")] + elif isinstance(item, ShapeKey): + props = [(item.path_from_id("value"), 1, "Key")] + elif isinstance(item, Camera): + props = [(item.path_from_id("lens"), 1, "Camera"), (item.dof.path_from_id("focus_distance"), 1, "Camera")] + else: # Object or PoseBone: + if item.is_bone: + bl_obj = item.bl_obj.pose.bones[item.bl_bone] + else: + bl_obj = item.bl_obj + + # We want to create actions for objects, but for bones we 'reuse' armatures' actions! + grpname = bl_obj.name + + # Since we might get other channels animated in the end, due to all FBX transform magic, + # we need to add curves for whole loc/rot/scale in any case. + props = [(bl_obj.path_from_id("location"), 3, grpname or "Location"), + None, + (bl_obj.path_from_id("scale"), 3, grpname or "Scale")] + rot_mode = bl_obj.rotation_mode + if rot_mode == 'QUATERNION': + props[1] = (bl_obj.path_from_id("rotation_quaternion"), 4, grpname or "Quaternion Rotation") + elif rot_mode == 'AXIS_ANGLE': + props[1] = (bl_obj.path_from_id("rotation_axis_angle"), 4, grpname or "Axis Angle Rotation") + else: # Euler + props[1] = (bl_obj.path_from_id("rotation_euler"), 3, grpname or "Euler Rotation") + + blen_curves = [action.fcurves.new(prop, index=channel, action_group=grpname) + for prop, nbr_channels, grpname in props for channel in range(nbr_channels)] + + if isinstance(item, Material): + for fbxprop, channel_to_curve in fbx_curves.items(): + assert(fbxprop == b'DiffuseColor') + for channel, curve in channel_to_curve.items(): + assert(channel in {0, 1, 2}) + blen_curve = blen_curves[channel] + fbx_key_times, values = blen_read_animation_curve(curve) + blen_store_keyframes(fbx_key_times, blen_curve, values, anim_offset, fps, fbx_ktime) + + elif isinstance(item, ShapeKey): + for fbxprop, channel_to_curve in fbx_curves.items(): + assert(fbxprop == b'DeformPercent') + for channel, curve in channel_to_curve.items(): + assert(channel == 0) + blen_curve = blen_curves[channel] + + fbx_key_times, values = blen_read_animation_curve(curve) + # A fully activated shape key in FBX DeformPercent is 100.0 whereas it is 1.0 in Blender. + values = values / 100.0 + blen_store_keyframes(fbx_key_times, blen_curve, values, anim_offset, fps, fbx_ktime) + + # Store the minimum and maximum shape key values, so that the shape key's slider range can be expanded + # if necessary after reading all animations. + if values.size: + deform_values = shape_key_deforms.setdefault(item, []) + deform_values.append(values.min()) + deform_values.append(values.max()) + + elif isinstance(item, Camera): + for fbxprop, channel_to_curve in fbx_curves.items(): + is_focus_distance = fbxprop == b'FocusDistance' + assert(fbxprop == b'FocalLength' or is_focus_distance) + for channel, curve in channel_to_curve.items(): + assert(channel == 0) + # The indices are determined by the creation of the `props` list above. + blen_curve = blen_curves[1 if is_focus_distance else 0] + + fbx_key_times, values = blen_read_animation_curve(curve) + if is_focus_distance: + # Remap the imported values from FBX to Blender. 
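+                    # (the 1/1000 factor plus global_scale below remap the FBX focus distance value into
+                    # Blender's focus_distance units)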
+ values = values / 1000.0 + values *= global_scale + blen_store_keyframes(fbx_key_times, blen_curve, values, anim_offset, fps, fbx_ktime) + + else: # Object or PoseBone: + transform_data = item.fbx_transform_data + + # Each transformation curve needs to have keyframes at the times of every other transformation curve + # (interpolating missing values), so that we can construct a matrix at every keyframe. + transform_prop_to_attr = { + b'Lcl Translation': transform_data.loc, + b'Lcl Rotation': transform_data.rot, + b'Lcl Scaling': transform_data.sca, + } + + times_and_values_tuples = [] + initial_values = [] + channel_keys = [] + for fbxprop, channel_to_curve in fbx_curves.items(): + if fbxprop not in transform_prop_to_attr: + # Currently, we only care about transformation curves. + continue + for channel, curve in channel_to_curve.items(): + assert(channel in {0, 1, 2}) + fbx_key_times, values = blen_read_animation_curve(curve) + + channel_keys.append((fbxprop, channel)) + + initial_values.append(transform_prop_to_attr[fbxprop][channel]) + + times_and_values_tuples.append((fbx_key_times, values)) + if not times_and_values_tuples: + # If `times_and_values_tuples` is empty, all the imported animation curves are for properties other than + # transformation (e.g. animated custom properties), so there is nothing to do until support for those other + # properties is added. + return + + # Combine the keyframe times of all the transformation curves so that each curve has a value at every time. + combined_fbx_times, values_arrays = _combine_curve_keyframe_times(times_and_values_tuples, initial_values) + + # Convert from FBX Lcl Translation/Lcl Rotation/Lcl Scaling to the Blender location/rotation/scaling properties + # of this Object/PoseBone. + # The number of fcurves for the Blender properties varies depending on the rotation mode. + num_loc_channels = 3 + num_rot_channels = 4 if rot_mode in {'QUATERNION', 'AXIS_ANGLE'} else 3 # Variations of EULER are all 3 + num_sca_channels = 3 + num_channels = num_loc_channels + num_rot_channels + num_sca_channels + num_frames = len(combined_fbx_times) + full_length = num_channels * num_frames + + # Do the conversion. + flattened_channel_values_gen = _transformation_curves_gen(item, values_arrays, channel_keys) + flattened_channel_values = np.fromiter(flattened_channel_values_gen, dtype=np.single, count=full_length) + + # Reshape to one row per frame and then view the transpose so that each row corresponds to a single channel. + # e.g. + # loc_channels = channel_values[:num_loc_channels] + # rot_channels = channel_values[num_loc_channels:num_loc_channels + num_rot_channels] + # sca_channels = channel_values[num_loc_channels + num_rot_channels:] + channel_values = flattened_channel_values.reshape(num_frames, num_channels).T + + # Each channel has the same keyframe times, so the combined times can be passed once along with all the curves + # and values arrays. + blen_store_keyframes_multi(combined_fbx_times, zip(blen_curves, channel_values), anim_offset, fps, fbx_ktime) + + +def blen_read_animations(fbx_tmpl_astack, fbx_tmpl_alayer, stacks, scene, anim_offset, global_scale, fbx_ktime): + """ + Recreate an action per stack/layer/object combinations. + Only the first found action is linked to objects, more complex setups are not handled, + it's up to user to reproduce them! 
+ """ + from bpy.types import ShapeKey, Material, Camera + + shape_key_values = {} + actions = {} + for as_uuid, ((fbx_asdata, _blen_data), alayers) in stacks.items(): + stack_name = elem_name_ensure_class(fbx_asdata, b'AnimStack') + for al_uuid, ((fbx_aldata, _blen_data), items) in alayers.items(): + layer_name = elem_name_ensure_class(fbx_aldata, b'AnimLayer') + for item, cnodes in items.items(): + if isinstance(item, Material): + id_data = item + elif isinstance(item, ShapeKey): + id_data = item.id_data + elif isinstance(item, Camera): + id_data = item + else: + id_data = item.bl_obj + # XXX Ignore rigged mesh animations - those are a nightmare to handle, see note about it in + # FbxImportHelperNode class definition. + if id_data and id_data.type == 'MESH' and id_data.parent and id_data.parent.type == 'ARMATURE': + continue + if id_data is None: + continue + + # Create new action if needed (should always be needed, except for keyblocks from shapekeys cases). + key = (as_uuid, al_uuid, id_data) + action = actions.get(key) + if action is None: + if stack_name == layer_name: + action_name = "|".join((id_data.name, stack_name)) + else: + action_name = "|".join((id_data.name, stack_name, layer_name)) + actions[key] = action = bpy.data.actions.new(action_name) + action.use_fake_user = True + # If none yet assigned, assign this action to id_data. + if not id_data.animation_data: + id_data.animation_data_create() + if not id_data.animation_data.action: + id_data.animation_data.action = action + # And actually populate the action! + blen_read_animations_action_item(action, item, cnodes, scene.render.fps, anim_offset, global_scale, + shape_key_values, fbx_ktime) + + # If the minimum/maximum animated value is outside the slider range of the shape key, attempt to expand the slider + # range until the animated range fits and has extra room to be decreased or increased further. + # Shape key slider_min and slider_max have hard min/max values, if an imported animation uses a value outside that + # range, a warning message will be printed to the console and the slider_min/slider_max values will end up clamped. + shape_key_values_in_range = True + for shape_key, deform_values in shape_key_values.items(): + min_animated_deform = min(deform_values) + max_animated_deform = max(deform_values) + shape_key_values_in_range &= expand_shape_key_range(shape_key, min_animated_deform) + shape_key_values_in_range &= expand_shape_key_range(shape_key, max_animated_deform) + if not shape_key_values_in_range: + print("WARNING: The imported animated Value of a Shape Key is beyond the minimum/maximum allowed and will be" + " clamped during playback.") + + +# ---- +# Mesh + +def blen_read_geom_layerinfo(fbx_layer): + return ( + validate_blend_names(elem_find_first_string_as_bytes(fbx_layer, b'Name')), + elem_find_first_string_as_bytes(fbx_layer, b'MappingInformationType'), + elem_find_first_string_as_bytes(fbx_layer, b'ReferenceInformationType'), + ) + + +def blen_read_geom_validate_blen_data(blen_data, blen_dtype, item_size): + """Validate blen_data when it's not a bpy_prop_collection. 
+ Returns whether blen_data is a bpy_prop_collection""" + blen_data_is_collection = isinstance(blen_data, bpy.types.bpy_prop_collection) + if not blen_data_is_collection: + if item_size > 1: + assert(len(blen_data.shape) == 2) + assert(blen_data.shape[1] == item_size) + assert(blen_data.dtype == blen_dtype) + return blen_data_is_collection + + +def blen_read_geom_parse_fbx_data(fbx_data, stride, item_size): + """Parse fbx_data as an array.array into a 2d np.ndarray that shares the same memory, where each row is a single + item""" + # Technically stride < item_size could be supported, but there's probably not a use case for it since it would + # result in a view of the data with self-overlapping memory. + assert(stride >= item_size) + # View the array.array as an np.ndarray. + fbx_data_np = parray_as_ndarray(fbx_data) + + if stride == item_size: + if item_size > 1: + # Need to make sure fbx_data_np has a whole number of items to be able to view item_size elements per row. + items_remainder = len(fbx_data_np) % item_size + if items_remainder: + print("ERROR: not a whole number of items in this FBX layer, skipping the partial item!") + fbx_data_np = fbx_data_np[:-items_remainder] + fbx_data_np = fbx_data_np.reshape(-1, item_size) + else: + # Create a view of fbx_data_np that is only the first item_size elements of each stride. Note that the view will + # not be C-contiguous. + stride_remainder = len(fbx_data_np) % stride + if stride_remainder: + if stride_remainder < item_size: + print("ERROR: not a whole number of items in this FBX layer, skipping the partial item!") + # Not enough in the remainder for a full item, so cut off the partial stride + fbx_data_np = fbx_data_np[:-stride_remainder] + # Reshape to one stride per row and then create a view that includes only the first item_size elements + # of each stride. + fbx_data_np = fbx_data_np.reshape(-1, stride)[:, :item_size] + else: + print("ERROR: not a whole number of strides in this FBX layer! There are a whole number of items, but" + " this could indicate an error!") + # There is not a whole number of strides, but there is a whole number of items. + # This is a pain to deal with because fbx_data_np.reshape(-1, stride) is not possible. + # A view of just the items can be created using stride_tricks.as_strided by specifying the shape and + # strides of the view manually. + # Extreme care must be taken when using stride_tricks.as_strided because improper usage can result in + # a view that gives access to memory outside the array. + from numpy.lib import stride_tricks + + # fbx_data_np should always start off as flat and C-contiguous. + assert(fbx_data_np.strides == (fbx_data_np.itemsize,)) + + num_whole_strides = len(fbx_data_np) // stride + # Plus the one partial stride that is enough elements for a complete item. + num_items = num_whole_strides + 1 + shape = (num_items, item_size) + + # strides are the number of bytes to step to get to the next element, for each axis. + step_per_item = fbx_data_np.itemsize * stride + step_per_item_element = fbx_data_np.itemsize + strides = (step_per_item, step_per_item_element) + + fbx_data_np = stride_tricks.as_strided(fbx_data_np, shape, strides) + else: + # There's a whole number of strides, so first reshape to one stride per row and then create a view that + # includes only the first item_size elements of each stride. 
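+            # e.g. with stride=4 and item_size=3, reshape(-1, 4)[:, :3] keeps the first 3 of every
+            # 4 values as a (non-contiguous) view.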
+ fbx_data_np = fbx_data_np.reshape(-1, stride)[:, :item_size] + + return fbx_data_np + + +def blen_read_geom_check_fbx_data_length(blen_data, fbx_data_np, is_indices=False): + """Check that there are the same number of items in blen_data and fbx_data_np. + + Returns a tuple of two elements: + 0: fbx_data_np or, if fbx_data_np contains more items than blen_data, a view of fbx_data_np with the excess + items removed + 1: Whether the returned fbx_data_np contains enough items to completely fill blen_data""" + bl_num_items = len(blen_data) + fbx_num_items = len(fbx_data_np) + enough_data = fbx_num_items >= bl_num_items + if not enough_data: + if is_indices: + print("ERROR: not enough indices in this FBX layer, missing data will be left as default!") + else: + print("ERROR: not enough data in this FBX layer, missing data will be left as default!") + elif fbx_num_items > bl_num_items: + if is_indices: + print("ERROR: too many indices in this FBX layer, skipping excess!") + else: + print("ERROR: too much data in this FBX layer, skipping excess!") + fbx_data_np = fbx_data_np[:bl_num_items] + + return fbx_data_np, enough_data + + +def blen_read_geom_xform(fbx_data_np, xform): + """xform is either None, or a function that takes fbx_data_np as its only positional argument and returns an + np.ndarray with the same total number of elements as fbx_data_np. + It is acceptable for xform to return an array with a different dtype to fbx_data_np. + + Returns xform(fbx_data_np) when xform is not None and ensures the result of xform(fbx_data_np) has the same shape as + fbx_data_np before returning it. + When xform is None, fbx_data_np is returned as is.""" + if xform is not None: + item_size = fbx_data_np.shape[1] + fbx_total_data = fbx_data_np.size + fbx_data_np = xform(fbx_data_np) + # The amount of data should not be changed by xform + assert(fbx_data_np.size == fbx_total_data) + # Ensure fbx_data_np is still item_size elements per row + if len(fbx_data_np.shape) != 2 or fbx_data_np.shape[1] != item_size: + fbx_data_np = fbx_data_np.reshape(-1, item_size) + return fbx_data_np + + +def blen_read_geom_array_foreach_set_direct(blen_data, blen_attr, blen_dtype, fbx_data, stride, item_size, descr, + xform): + """Generic fbx_layer to blen_data foreach setter for Direct layers. + blen_data must be a bpy_prop_collection or 2d np.ndarray whose second axis length is item_size. + fbx_data must be an array.array.""" + fbx_data_np = blen_read_geom_parse_fbx_data(fbx_data, stride, item_size) + fbx_data_np, enough_data = blen_read_geom_check_fbx_data_length(blen_data, fbx_data_np) + fbx_data_np = blen_read_geom_xform(fbx_data_np, xform) + + blen_data_is_collection = blen_read_geom_validate_blen_data(blen_data, blen_dtype, item_size) + + if blen_data_is_collection: + if not enough_data: + blen_total_data = len(blen_data) * item_size + buffer = np.empty(blen_total_data, dtype=blen_dtype) + # It's not clear what values should be used for the missing data, so read the current values into a buffer. + blen_data.foreach_get(blen_attr, buffer) + + # Change the buffer shape to one item per row + buffer.shape = (-1, item_size) + + # Copy the fbx data into the start of the buffer + buffer[:len(fbx_data_np)] = fbx_data_np + else: + # Convert the buffer to the Blender C type of blen_attr + buffer = astype_view_signedness(fbx_data_np, blen_dtype) + + # Set blen_attr of blen_data. 
The buffer must be flat and C-contiguous, which ravel() ensures + blen_data.foreach_set(blen_attr, buffer.ravel()) + else: + assert(blen_data.size % item_size == 0) + blen_data = blen_data.view() + blen_data.shape = (-1, item_size) + blen_data[:len(fbx_data_np)] = fbx_data_np + + +def blen_read_geom_array_foreach_set_indexed(blen_data, blen_attr, blen_dtype, fbx_data, fbx_layer_index, stride, + item_size, descr, xform): + """Generic fbx_layer to blen_data foreach setter for IndexToDirect layers. + blen_data must be a bpy_prop_collection or 2d np.ndarray whose second axis length is item_size. + fbx_data must be an array.array or a 1d np.ndarray.""" + fbx_data_np = blen_read_geom_parse_fbx_data(fbx_data, stride, item_size) + fbx_data_np = blen_read_geom_xform(fbx_data_np, xform) + + # fbx_layer_index is allowed to be a 1d np.ndarray for use with blen_read_geom_array_foreach_set_looptovert. + if not isinstance(fbx_layer_index, np.ndarray): + fbx_layer_index = parray_as_ndarray(fbx_layer_index) + + fbx_layer_index, enough_indices = blen_read_geom_check_fbx_data_length(blen_data, fbx_layer_index, is_indices=True) + + blen_data_is_collection = blen_read_geom_validate_blen_data(blen_data, blen_dtype, item_size) + + blen_data_items_len = len(blen_data) + blen_data_len = blen_data_items_len * item_size + fbx_num_items = len(fbx_data_np) + + # Find all indices that are out of bounds of fbx_data_np. + min_index_inclusive = -fbx_num_items + max_index_inclusive = fbx_num_items - 1 + valid_index_mask = np.equal(fbx_layer_index, fbx_layer_index.clip(min_index_inclusive, max_index_inclusive)) + indices_invalid = not valid_index_mask.all() + + fbx_data_items = fbx_data_np.reshape(-1, item_size) + + if indices_invalid or not enough_indices: + if blen_data_is_collection: + buffer = np.empty(blen_data_len, dtype=blen_dtype) + buffer_item_view = buffer.view() + buffer_item_view.shape = (-1, item_size) + # Since we don't know what the default values should be for the missing data, read the current values into a + # buffer. + blen_data.foreach_get(blen_attr, buffer) + else: + buffer_item_view = blen_data + + if not enough_indices: + # Reduce the length of the view to the same length as the number of indices. + buffer_item_view = buffer_item_view[:len(fbx_layer_index)] + + # Copy the result of indexing fbx_data_items by each element in fbx_layer_index into the buffer. + if indices_invalid: + print("ERROR: indices in this FBX layer out of bounds of the FBX data, skipping invalid indices!") + buffer_item_view[valid_index_mask] = fbx_data_items[fbx_layer_index[valid_index_mask]] + else: + buffer_item_view[:] = fbx_data_items[fbx_layer_index] + + if blen_data_is_collection: + blen_data.foreach_set(blen_attr, buffer.ravel()) + else: + if blen_data_is_collection: + # Cast the buffer to the Blender C type of blen_attr + fbx_data_items = astype_view_signedness(fbx_data_items, blen_dtype) + buffer_items = fbx_data_items[fbx_layer_index] + blen_data.foreach_set(blen_attr, buffer_items.ravel()) + else: + blen_data[:] = fbx_data_items[fbx_layer_index] + + +def blen_read_geom_array_foreach_set_allsame(blen_data, blen_attr, blen_dtype, fbx_data, stride, item_size, descr, + xform): + """Generic fbx_layer to blen_data foreach setter for AllSame layers. + blen_data must be a bpy_prop_collection or 2d np.ndarray whose second axis length is item_size. 
+ fbx_data must be an array.array.""" + fbx_data_np = blen_read_geom_parse_fbx_data(fbx_data, stride, item_size) + fbx_data_np = blen_read_geom_xform(fbx_data_np, xform) + blen_data_is_collection = blen_read_geom_validate_blen_data(blen_data, blen_dtype, item_size) + fbx_items_len = len(fbx_data_np) + blen_items_len = len(blen_data) + + if fbx_items_len < 1: + print("ERROR: not enough data in this FBX layer, skipping!") + return + + if blen_data_is_collection: + # Create an array filled with the value from fbx_data_np + buffer = np.full((blen_items_len, item_size), fbx_data_np[0], dtype=blen_dtype) + + blen_data.foreach_set(blen_attr, buffer.ravel()) + else: + blen_data[:] = fbx_data_np[0] + + +def blen_read_geom_array_foreach_set_looptovert(mesh, blen_data, blen_attr, blen_dtype, fbx_data, stride, item_size, + descr, xform): + """Generic fbx_layer to blen_data foreach setter for face corner ByVertice layers. + blen_data must be a bpy_prop_collection or 2d np.ndarray whose second axis length is item_size. + fbx_data must be an array.array""" + # The fbx_data is mapped to vertices. To expand fbx_data to face corners, get an array of the vertex index of each + # face corner that will then be used to index fbx_data. + corner_vertex_indices = MESH_ATTRIBUTE_CORNER_VERT.to_ndarray(mesh.attributes) + blen_read_geom_array_foreach_set_indexed(blen_data, blen_attr, blen_dtype, fbx_data, corner_vertex_indices, stride, + item_size, descr, xform) + + +# generic error printers. +def blen_read_geom_array_error_mapping(descr, fbx_layer_mapping, quiet=False): + if not quiet: + print("warning layer %r mapping type unsupported: %r" % (descr, fbx_layer_mapping)) + + +def blen_read_geom_array_error_ref(descr, fbx_layer_ref, quiet=False): + if not quiet: + print("warning layer %r ref type unsupported: %r" % (descr, fbx_layer_ref)) + + +def blen_read_geom_array_mapped_vert( + mesh, blen_data, blen_attr, blen_dtype, + fbx_layer_data, fbx_layer_index, + fbx_layer_mapping, fbx_layer_ref, + stride, item_size, descr, + xform=None, quiet=False, +): + if fbx_layer_mapping == b'ByVertice': + if fbx_layer_ref == b'IndexToDirect': + # XXX Looks like we often get no fbx_layer_index in this case, shall not happen but happens... + # We fallback to 'Direct' mapping in this case. 
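+            # (In FBX layer elements, the mapping type says what each value corresponds to, here one value per
+            # vertex, and the reference type says whether values are read in order ('Direct') or looked up
+            # through an index array ('IndexToDirect').)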
+ # ~ assert(fbx_layer_index is not None) + if fbx_layer_index is None: + blen_read_geom_array_foreach_set_direct(blen_data, blen_attr, blen_dtype, fbx_layer_data, stride, + item_size, descr, xform) + else: + blen_read_geom_array_foreach_set_indexed(blen_data, blen_attr, blen_dtype, fbx_layer_data, + fbx_layer_index, stride, item_size, descr, xform) + return True + elif fbx_layer_ref == b'Direct': + assert(fbx_layer_index is None) + blen_read_geom_array_foreach_set_direct(blen_data, blen_attr, blen_dtype, fbx_layer_data, stride, item_size, + descr, xform) + return True + blen_read_geom_array_error_ref(descr, fbx_layer_ref, quiet) + elif fbx_layer_mapping == b'AllSame': + if fbx_layer_ref == b'IndexToDirect': + assert(fbx_layer_index is None) + blen_read_geom_array_foreach_set_allsame(blen_data, blen_attr, blen_dtype, fbx_layer_data, stride, + item_size, descr, xform) + return True + blen_read_geom_array_error_ref(descr, fbx_layer_ref, quiet) + else: + blen_read_geom_array_error_mapping(descr, fbx_layer_mapping, quiet) + + return False + + +def blen_read_geom_array_mapped_edge( + mesh, blen_data, blen_attr, blen_dtype, + fbx_layer_data, fbx_layer_index, + fbx_layer_mapping, fbx_layer_ref, + stride, item_size, descr, + xform=None, quiet=False, +): + if fbx_layer_mapping == b'ByEdge': + if fbx_layer_ref == b'Direct': + blen_read_geom_array_foreach_set_direct(blen_data, blen_attr, blen_dtype, fbx_layer_data, stride, item_size, + descr, xform) + return True + blen_read_geom_array_error_ref(descr, fbx_layer_ref, quiet) + elif fbx_layer_mapping == b'AllSame': + if fbx_layer_ref == b'IndexToDirect': + assert(fbx_layer_index is None) + blen_read_geom_array_foreach_set_allsame(blen_data, blen_attr, blen_dtype, fbx_layer_data, stride, + item_size, descr, xform) + return True + blen_read_geom_array_error_ref(descr, fbx_layer_ref, quiet) + else: + blen_read_geom_array_error_mapping(descr, fbx_layer_mapping, quiet) + + return False + + +def blen_read_geom_array_mapped_polygon( + mesh, blen_data, blen_attr, blen_dtype, + fbx_layer_data, fbx_layer_index, + fbx_layer_mapping, fbx_layer_ref, + stride, item_size, descr, + xform=None, quiet=False, +): + if fbx_layer_mapping == b'ByPolygon': + if fbx_layer_ref == b'IndexToDirect': + # XXX Looks like we often get no fbx_layer_index in this case, shall not happen but happens... + # We fallback to 'Direct' mapping in this case. 
+ # ~ assert(fbx_layer_index is not None) + if fbx_layer_index is None: + blen_read_geom_array_foreach_set_direct(blen_data, blen_attr, blen_dtype, fbx_layer_data, stride, + item_size, descr, xform) + else: + blen_read_geom_array_foreach_set_indexed(blen_data, blen_attr, blen_dtype, fbx_layer_data, + fbx_layer_index, stride, item_size, descr, xform) + return True + elif fbx_layer_ref == b'Direct': + blen_read_geom_array_foreach_set_direct(blen_data, blen_attr, blen_dtype, fbx_layer_data, stride, item_size, + descr, xform) + return True + blen_read_geom_array_error_ref(descr, fbx_layer_ref, quiet) + elif fbx_layer_mapping == b'AllSame': + if fbx_layer_ref == b'IndexToDirect': + assert(fbx_layer_index is None) + blen_read_geom_array_foreach_set_allsame(blen_data, blen_attr, blen_dtype, fbx_layer_data, stride, + item_size, descr, xform) + return True + blen_read_geom_array_error_ref(descr, fbx_layer_ref, quiet) + else: + blen_read_geom_array_error_mapping(descr, fbx_layer_mapping, quiet) + + return False + + +def blen_read_geom_array_mapped_polyloop( + mesh, blen_data, blen_attr, blen_dtype, + fbx_layer_data, fbx_layer_index, + fbx_layer_mapping, fbx_layer_ref, + stride, item_size, descr, + xform=None, quiet=False, +): + if fbx_layer_mapping == b'ByPolygonVertex': + if fbx_layer_ref == b'IndexToDirect': + # XXX Looks like we often get no fbx_layer_index in this case, shall not happen but happens... + # We fallback to 'Direct' mapping in this case. + # ~ assert(fbx_layer_index is not None) + if fbx_layer_index is None: + blen_read_geom_array_foreach_set_direct(blen_data, blen_attr, blen_dtype, fbx_layer_data, stride, + item_size, descr, xform) + else: + blen_read_geom_array_foreach_set_indexed(blen_data, blen_attr, blen_dtype, fbx_layer_data, + fbx_layer_index, stride, item_size, descr, xform) + return True + elif fbx_layer_ref == b'Direct': + blen_read_geom_array_foreach_set_direct(blen_data, blen_attr, blen_dtype, fbx_layer_data, stride, item_size, + descr, xform) + return True + blen_read_geom_array_error_ref(descr, fbx_layer_ref, quiet) + elif fbx_layer_mapping == b'ByVertice': + if fbx_layer_ref == b'Direct': + assert(fbx_layer_index is None) + blen_read_geom_array_foreach_set_looptovert(mesh, blen_data, blen_attr, blen_dtype, fbx_layer_data, stride, + item_size, descr, xform) + return True + blen_read_geom_array_error_ref(descr, fbx_layer_ref, quiet) + elif fbx_layer_mapping == b'AllSame': + if fbx_layer_ref == b'IndexToDirect': + assert(fbx_layer_index is None) + blen_read_geom_array_foreach_set_allsame(blen_data, blen_attr, blen_dtype, fbx_layer_data, stride, + item_size, descr, xform) + return True + blen_read_geom_array_error_ref(descr, fbx_layer_ref, quiet) + else: + blen_read_geom_array_error_mapping(descr, fbx_layer_mapping, quiet) + + return False + + +def blen_read_geom_layer_material(fbx_obj, mesh): + fbx_layer = elem_find_first(fbx_obj, b'LayerElementMaterial') + + if fbx_layer is None: + return + + (fbx_layer_name, + fbx_layer_mapping, + fbx_layer_ref, + ) = blen_read_geom_layerinfo(fbx_layer) + + layer_id = b'Materials' + fbx_layer_data = elem_prop_first(elem_find_first(fbx_layer, layer_id)) + + blen_data = MESH_ATTRIBUTE_MATERIAL_INDEX.ensure(mesh.attributes).data + fbx_item_size = 1 + assert(fbx_item_size == MESH_ATTRIBUTE_MATERIAL_INDEX.item_size) + blen_read_geom_array_mapped_polygon( + mesh, blen_data, MESH_ATTRIBUTE_MATERIAL_INDEX.foreach_attribute, MESH_ATTRIBUTE_MATERIAL_INDEX.dtype, + fbx_layer_data, None, + fbx_layer_mapping, fbx_layer_ref, + 1, fbx_item_size, 
layer_id, + ) + + +def blen_read_geom_layer_uv(fbx_obj, mesh): + for layer_id in (b'LayerElementUV',): + for fbx_layer in elem_find_iter(fbx_obj, layer_id): + # all should be valid + (fbx_layer_name, + fbx_layer_mapping, + fbx_layer_ref, + ) = blen_read_geom_layerinfo(fbx_layer) + + fbx_layer_data = elem_prop_first(elem_find_first(fbx_layer, b'UV')) + fbx_layer_index = elem_prop_first(elem_find_first(fbx_layer, b'UVIndex')) + + # Always init our new layers with (0, 0) UVs. + uv_lay = mesh.uv_layers.new(name=fbx_layer_name, do_init=False) + if uv_lay is None: + print("Failed to add {%r %r} UVLayer to %r (probably too many of them?)" + "" % (layer_id, fbx_layer_name, mesh.name)) + continue + + blen_data = uv_lay.uv + + # some valid files omit this data + if fbx_layer_data is None: + print("%r %r missing data" % (layer_id, fbx_layer_name)) + continue + + blen_read_geom_array_mapped_polyloop( + mesh, blen_data, "vector", np.single, + fbx_layer_data, fbx_layer_index, + fbx_layer_mapping, fbx_layer_ref, + 2, 2, layer_id, + ) + + +def blen_read_geom_layer_color(fbx_obj, mesh, colors_type): + if colors_type == 'NONE': + return + use_srgb = colors_type == 'SRGB' + layer_type = 'BYTE_COLOR' if use_srgb else 'FLOAT_COLOR' + color_prop_name = "color_srgb" if use_srgb else "color" + # almost same as UVs + for layer_id in (b'LayerElementColor',): + for fbx_layer in elem_find_iter(fbx_obj, layer_id): + # all should be valid + (fbx_layer_name, + fbx_layer_mapping, + fbx_layer_ref, + ) = blen_read_geom_layerinfo(fbx_layer) + + fbx_layer_data = elem_prop_first(elem_find_first(fbx_layer, b'Colors')) + fbx_layer_index = elem_prop_first(elem_find_first(fbx_layer, b'ColorIndex')) + + color_lay = mesh.color_attributes.new(name=fbx_layer_name, type=layer_type, domain='CORNER') + + if color_lay is None: + print("Failed to add {%r %r} vertex color layer to %r (probably too many of them?)" + "" % (layer_id, fbx_layer_name, mesh.name)) + continue + + blen_data = color_lay.data + + # some valid files omit this data + if fbx_layer_data is None: + print("%r %r missing data" % (layer_id, fbx_layer_name)) + continue + + blen_read_geom_array_mapped_polyloop( + mesh, blen_data, color_prop_name, np.single, + fbx_layer_data, fbx_layer_index, + fbx_layer_mapping, fbx_layer_ref, + 4, 4, layer_id, + ) + + +def blen_read_geom_layer_smooth(fbx_obj, mesh): + fbx_layer = elem_find_first(fbx_obj, b'LayerElementSmoothing') + + if fbx_layer is None: + return + + # all should be valid + (fbx_layer_name, + fbx_layer_mapping, + fbx_layer_ref, + ) = blen_read_geom_layerinfo(fbx_layer) + + layer_id = b'Smoothing' + fbx_layer_data = elem_prop_first(elem_find_first(fbx_layer, layer_id)) + + # udk has 'Direct' mapped, with no Smoothing, not sure why, but ignore these + if fbx_layer_data is None: + return + + if fbx_layer_mapping == b'ByEdge': + # some models have bad edge data, we can't use this info... + if not mesh.edges: + print("warning skipping sharp edges data, no valid edges...") + return + + blen_data = MESH_ATTRIBUTE_SHARP_EDGE.ensure(mesh.attributes).data + fbx_item_size = 1 + assert(fbx_item_size == MESH_ATTRIBUTE_SHARP_EDGE.item_size) + blen_read_geom_array_mapped_edge( + mesh, blen_data, MESH_ATTRIBUTE_SHARP_EDGE.foreach_attribute, MESH_ATTRIBUTE_SHARP_EDGE.dtype, + fbx_layer_data, None, + fbx_layer_mapping, fbx_layer_ref, + 1, fbx_item_size, layer_id, + xform=np.logical_not, # in FBX, 0 (False) is sharp, but in Blender True is sharp. 
+        )
+    elif fbx_layer_mapping == b'ByPolygon':
+        sharp_face = MESH_ATTRIBUTE_SHARP_FACE.ensure(mesh.attributes)
+        blen_data = sharp_face.data
+        fbx_item_size = 1
+        assert(fbx_item_size == MESH_ATTRIBUTE_SHARP_FACE.item_size)
+        sharp_face_set_successfully = blen_read_geom_array_mapped_polygon(
+            mesh, blen_data, MESH_ATTRIBUTE_SHARP_FACE.foreach_attribute, MESH_ATTRIBUTE_SHARP_FACE.dtype,
+            fbx_layer_data, None,
+            fbx_layer_mapping, fbx_layer_ref,
+            1, fbx_item_size, layer_id,
+            xform=lambda s: (s == 0),  # smoothgroup bitflags, treat as booleans for now
+        )
+        if not sharp_face_set_successfully:
+            mesh.attributes.remove(sharp_face)
+    else:
+        print("warning layer %r mapping type unsupported: %r" % (fbx_layer.id, fbx_layer_mapping))
+
+
+def blen_read_geom_layer_edge_crease(fbx_obj, mesh):
+    fbx_layer = elem_find_first(fbx_obj, b'LayerElementEdgeCrease')
+
+    if fbx_layer is None:
+        return False
+
+    # all should be valid
+    (fbx_layer_name,
+     fbx_layer_mapping,
+     fbx_layer_ref,
+     ) = blen_read_geom_layerinfo(fbx_layer)
+
+    if fbx_layer_mapping != b'ByEdge':
+        return False
+
+    layer_id = b'EdgeCrease'
+    fbx_layer_data = elem_prop_first(elem_find_first(fbx_layer, layer_id))
+
+    # some models have bad edge data, we can't use this info...
+    if not mesh.edges:
+        print("warning skipping edge crease data, no valid edges...")
+        return False
+
+    blen_data = mesh.edge_creases_ensure().data
+    return blen_read_geom_array_mapped_edge(
+        mesh, blen_data, "value", np.single,
+        fbx_layer_data, None,
+        fbx_layer_mapping, fbx_layer_ref,
+        1, 1, layer_id,
+        # Blender squares those values before sending them to OpenSubdiv, when other software don't,
+        # so we need to compensate that to get similar results through FBX...
+        xform=np.sqrt,
+    )
+
+
+def blen_read_geom_layer_normal(fbx_obj, mesh, xform=None):
+    fbx_layer = elem_find_first(fbx_obj, b'LayerElementNormal')
+
+    if fbx_layer is None:
+        return False
+
+    (fbx_layer_name,
+     fbx_layer_mapping,
+     fbx_layer_ref,
+     ) = blen_read_geom_layerinfo(fbx_layer)
+
+    layer_id = b'Normals'
+    fbx_layer_data = elem_prop_first(elem_find_first(fbx_layer, layer_id))
+    fbx_layer_index = elem_prop_first(elem_find_first(fbx_layer, b'NormalsIndex'))
+
+    if fbx_layer_data is None:
+        print("warning %r %r missing data" % (layer_id, fbx_layer_name))
+        return False
+
+    # Normals are temporarily set here so that they can be retrieved again after a call to Mesh.validate().
+    bl_norm_dtype = np.single
+    item_size = 3
+    # try loops, then polygons, then vertices.
+    tries = ((mesh.attributes["temp_custom_normals"].data, "Loops", False, blen_read_geom_array_mapped_polyloop),
+             (mesh.polygons, "Polygons", True, blen_read_geom_array_mapped_polygon),
+             (mesh.vertices, "Vertices", True, blen_read_geom_array_mapped_vert))
+    for blen_data, blen_data_type, is_fake, func in tries:
+        bdata = np.zeros((len(blen_data), item_size), dtype=bl_norm_dtype) if is_fake else blen_data
+        if func(mesh, bdata, "vector", bl_norm_dtype,
+                fbx_layer_data, fbx_layer_index, fbx_layer_mapping, fbx_layer_ref, 3, item_size, layer_id, xform, True):
+            if blen_data_type == "Polygons":
+                # To expand to per-loop normals, repeat each per-polygon normal by the number of loops of each polygon.
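+                # NOTE (editorial sketch, not part of the original importer): `np.repeat` with the per-polygon
+                # loop counts is what performs this expansion. With hypothetical data:
+                #   >>> import numpy as np
+                #   >>> poly_normals = np.array([[0., 0., 1.], [0., 1., 0.]])  # one triangle, one quad
+                #   >>> np.repeat(poly_normals, [3, 4], axis=0).shape
+                #   (7, 3)
+                # i.e. the triangle's normal is emitted 3 times and the quad's 4 times, once per corner/loop.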
+ poly_loop_totals = np.empty(len(mesh.polygons), dtype=np.uintc) + mesh.polygons.foreach_get("loop_total", poly_loop_totals) + loop_normals = np.repeat(bdata, poly_loop_totals, axis=0) + mesh.attributes["temp_custom_normals"].data.foreach_set("vector", loop_normals.ravel()) + elif blen_data_type == "Vertices": + # We have to copy vnors to lnors! Far from elegant, but simple. + loop_vertex_indices = MESH_ATTRIBUTE_CORNER_VERT.to_ndarray(mesh.attributes) + mesh.attributes["temp_custom_normals"].data.foreach_set("vector", bdata[loop_vertex_indices].ravel()) + return True + + blen_read_geom_array_error_mapping("normal", fbx_layer_mapping) + blen_read_geom_array_error_ref("normal", fbx_layer_ref) + return False + + +def blen_read_geom(fbx_tmpl, fbx_obj, settings): + # Vertices are in object space, but we are post-multiplying all transforms with the inverse of the + # global matrix, so we need to apply the global matrix to the vertices to get the correct result. + geom_mat_co = settings.global_matrix if settings.bake_space_transform else None + # We need to apply the inverse transpose of the global matrix when transforming normals. + geom_mat_no = Matrix(settings.global_matrix_inv_transposed) if settings.bake_space_transform else None + if geom_mat_no is not None: + # Remove translation & scaling! + geom_mat_no.translation = Vector() + geom_mat_no.normalize() + + # TODO, use 'fbx_tmpl' + elem_name_utf8 = elem_name_ensure_class(fbx_obj, b'Geometry') + + fbx_verts = elem_prop_first(elem_find_first(fbx_obj, b'Vertices')) + fbx_polys = elem_prop_first(elem_find_first(fbx_obj, b'PolygonVertexIndex')) + fbx_edges = elem_prop_first(elem_find_first(fbx_obj, b'Edges')) + + # The dtypes when empty don't matter, but are set to what the fbx arrays are expected to be. + fbx_verts = parray_as_ndarray(fbx_verts) if fbx_verts else np.empty(0, dtype=data_types.ARRAY_FLOAT64) + fbx_polys = parray_as_ndarray(fbx_polys) if fbx_polys else np.empty(0, dtype=data_types.ARRAY_INT32) + fbx_edges = parray_as_ndarray(fbx_edges) if fbx_edges else np.empty(0, dtype=data_types.ARRAY_INT32) + + # Each vert is a 3d vector so is made of 3 components. + tot_verts = len(fbx_verts) // 3 + if tot_verts * 3 != len(fbx_verts): + print("ERROR: Not a whole number of vertices. Ignoring the partial vertex!") + # Remove any remainder. + fbx_verts = fbx_verts[:tot_verts * 3] + + tot_loops = len(fbx_polys) + tot_edges = len(fbx_edges) + + mesh = bpy.data.meshes.new(name=elem_name_utf8) + attributes = mesh.attributes + + if tot_verts: + if geom_mat_co is not None: + fbx_verts = vcos_transformed(fbx_verts, geom_mat_co, MESH_ATTRIBUTE_POSITION.dtype) + else: + fbx_verts = fbx_verts.astype(MESH_ATTRIBUTE_POSITION.dtype, copy=False) + + mesh.vertices.add(tot_verts) + MESH_ATTRIBUTE_POSITION.foreach_set(attributes, fbx_verts.ravel()) + + if tot_loops: + bl_loop_start_dtype = np.uintc + + mesh.loops.add(tot_loops) + # The end of each polygon is specified by an inverted index. + fbx_loop_end_idx = np.flatnonzero(fbx_polys < 0) + + tot_polys = len(fbx_loop_end_idx) + + # Un-invert the loop ends. + fbx_polys[fbx_loop_end_idx] ^= -1 + # Set loop vertex indices, casting to the Blender C type first for performance. + MESH_ATTRIBUTE_CORNER_VERT.foreach_set( + attributes, astype_view_signedness(fbx_polys, MESH_ATTRIBUTE_CORNER_VERT.dtype)) + + poly_loop_starts = np.empty(tot_polys, dtype=bl_loop_start_dtype) + # The first loop is always a loop start. 
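+        # NOTE (editorial sketch, not in the original source): FBX stores the last vertex index of each polygon
+        # bitwise-inverted to mark where the polygon ends, which is why `fbx_polys < 0` locates the loop ends
+        # above and `^= -1` recovers the real indices. With hypothetical data:
+        #   >>> import numpy as np
+        #   >>> fbx_polys = np.array([0, 1, ~2, 3, 4, 5, ~6])  # a triangle, then a quad
+        #   >>> ends = np.flatnonzero(fbx_polys < 0)
+        #   >>> fbx_polys[ends] ^= -1
+        #   >>> ends.tolist(), fbx_polys.tolist()
+        #   ([2, 6], [0, 1, 2, 3, 4, 5, 6])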
+ poly_loop_starts[0] = 0 + # Ignoring the last loop end, the indices after every loop end are the remaining loop starts. + poly_loop_starts[1:] = fbx_loop_end_idx[:-1] + 1 + + mesh.polygons.add(tot_polys) + mesh.polygons.foreach_set("loop_start", poly_loop_starts) + + blen_read_geom_layer_material(fbx_obj, mesh) + blen_read_geom_layer_uv(fbx_obj, mesh) + blen_read_geom_layer_color(fbx_obj, mesh, settings.colors_type) + + if tot_edges: + # edges in fact index the polygons (NOT the vertices) + + # The first vertex index of each edge is the vertex index of the corresponding loop in fbx_polys. + edges_a = fbx_polys[fbx_edges] + + # The second vertex index of each edge is the vertex index of the next loop in the same polygon. The + # complexity here is that if the first vertex index was the last loop of that polygon in fbx_polys, the next + # loop in the polygon is the first loop of that polygon, which is not the next loop in fbx_polys. + + # Copy fbx_polys, but rolled backwards by 1 so that indexing the result by [fbx_edges] will get the next + # loop of the same polygon unless the first vertex index was the last loop of the polygon. + fbx_polys_next = np.roll(fbx_polys, -1) + # Get the first loop of each polygon and set them into fbx_polys_next at the same indices as the last loop + # of each polygon in fbx_polys. + fbx_polys_next[fbx_loop_end_idx] = fbx_polys[poly_loop_starts] + + # Indexing fbx_polys_next by fbx_edges now gets the vertex index of the next loop in fbx_polys. + edges_b = fbx_polys_next[fbx_edges] + + # edges_a and edges_b need to be combined so that the first vertex index of each edge is immediately + # followed by the second vertex index of that same edge. + # Stack edges_a and edges_b as individual columns like np.column_stack((edges_a, edges_b)). + # np.concatenate is used because np.column_stack doesn't allow specifying the dtype of the returned array. + edges_conv = np.concatenate((edges_a.reshape(-1, 1), edges_b.reshape(-1, 1)), + axis=1, dtype=MESH_ATTRIBUTE_EDGE_VERTS.dtype, casting='unsafe') + + # Add the edges and set their vertex indices. + mesh.edges.add(len(edges_conv)) + # ravel() because edges_conv must be flat and C-contiguous when passed to foreach_set. + MESH_ATTRIBUTE_EDGE_VERTS.foreach_set(attributes, edges_conv.ravel()) + elif tot_edges: + print("ERROR: No polygons, but edges exist. Ignoring the edges!") + + # must be after edge, face loading. + blen_read_geom_layer_smooth(fbx_obj, mesh) + + blen_read_geom_layer_edge_crease(fbx_obj, mesh) + + ok_normals = False + if settings.use_custom_normals: + # Note: we store 'temp' normals in loops, since validate() may alter final mesh, + # we can only set custom lnors *after* calling it. + mesh.attributes.new("temp_custom_normals", 'FLOAT_VECTOR', 'CORNER') + if geom_mat_no is None: + ok_normals = blen_read_geom_layer_normal(fbx_obj, mesh) + else: + ok_normals = blen_read_geom_layer_normal(fbx_obj, mesh, + lambda v_array: nors_transformed(v_array, geom_mat_no)) + + mesh.validate(clean_customdata=False) # *Very* important to not remove lnors here! + + if ok_normals: + bl_nors_dtype = np.single + clnors = np.empty(len(mesh.loops) * 3, dtype=bl_nors_dtype) + mesh.attributes["temp_custom_normals"].data.foreach_get("vector", clnors) + + # Iterating clnors into a nested tuple first is faster than passing clnors.reshape(-1, 3) directly into + # normals_split_custom_set. We use clnors.data since it is a memoryview, which is faster to iterate than clnors. 
+ mesh.normals_split_custom_set(tuple(zip(*(iter(clnors.data),) * 3))) + if settings.use_custom_normals: + mesh.attributes.remove(mesh.attributes["temp_custom_normals"]) + + if settings.use_custom_props: + blen_read_custom_properties(fbx_obj, mesh, settings) + + return mesh + + +def blen_read_shapes(fbx_tmpl, fbx_data, objects, me, scene): + if not fbx_data: + # No shape key data. Nothing to do. + return + + me_vcos = MESH_ATTRIBUTE_POSITION.to_ndarray(me.attributes) + me_vcos_vector_view = me_vcos.reshape(-1, 3) + + objects = list({node.bl_obj for node in objects}) + assert(objects) + + # Blender has a hard minimum and maximum shape key Value. If an imported shape key has a value outside this range it + # will be clamped, and we'll print a warning message to the console. + shape_key_values_in_range = True + bc_uuid_to_keyblocks = {} + for bc_uuid, fbx_sdata, fbx_bcdata, shapes_assigned_to_channel in fbx_data: + num_shapes_assigned_to_channel = len(shapes_assigned_to_channel) + if num_shapes_assigned_to_channel > 1: + # Relevant design task: #104698 + raise RuntimeError("FBX in-between Shapes are not currently supported") # See bug report #84111 + elem_name_utf8 = elem_name_ensure_class(fbx_sdata, b'Geometry') + indices = elem_prop_first(elem_find_first(fbx_sdata, b'Indexes')) + dvcos = elem_prop_first(elem_find_first(fbx_sdata, b'Vertices')) + + indices = parray_as_ndarray(indices) if indices else np.empty(0, dtype=data_types.ARRAY_INT32) + dvcos = parray_as_ndarray(dvcos) if dvcos else np.empty(0, dtype=data_types.ARRAY_FLOAT64) + + # If there's not a whole number of vectors, trim off the remainder. + # 3 components per vector. + remainder = len(dvcos) % 3 + if remainder: + dvcos = dvcos[:-remainder] + dvcos = dvcos.reshape(-1, 3) + + # There must be the same number of indices as vertex coordinate differences. + assert(len(indices) == len(dvcos)) + + # We completely ignore normals here! + weight = elem_prop_first(elem_find_first(fbx_bcdata, b'DeformPercent'), default=100.0) / 100.0 + + # The FullWeights array stores the deformation percentages of the BlendShapeChannel that fully activate each + # Shape assigned to the BlendShapeChannel. Blender also uses this array to store Vertex Group weights, but this + # is not part of the FBX standard. + full_weights = elem_prop_first(elem_find_first(fbx_bcdata, b'FullWeights')) + full_weights = parray_as_ndarray(full_weights) if full_weights else np.empty(0, dtype=data_types.ARRAY_FLOAT64) + + # Special case for Blender exported Shape Keys with a Vertex Group assigned. The Vertex Group weights are stored + # in the FullWeights array. + # XXX - It's possible, though very rare, to get a false positive here and create a Vertex Group when we + # shouldn't. This should only be possible when there are extraneous FullWeights or when there is a single + # FullWeight and its value is not 100.0. + if ( + # Blender exported Shape Keys only ever export as 1 Shape per BlendShapeChannel. + num_shapes_assigned_to_channel == 1 + # There should be one vertex weight for each vertex moved by the Shape. + and len(full_weights) == len(indices) + # Skip creating a Vertex Group when all the weights are 100.0 because such a Vertex Group has no effect. + # This also avoids creating a Vertex Group for imported Shapes that only move a single vertex because + # their BlendShapeChannel's singular FullWeight is expected to always be 100.0. 
+ and not np.all(full_weights == 100.0) + # Blender vertex weights are always within the [0.0, 1.0] range (scaled to [0.0, 100.0] when saving to + # FBX). This can eliminate imported BlendShapeChannels from Unreal that have extraneous FullWeights + # because the extraneous values are usually negative. + and np.all((full_weights >= 0.0) & (full_weights <= 100.0)) + ): + # Not doing the division in-place because it's technically possible for FBX BlendShapeChannels to be used by + # more than one FBX BlendShape, though this shouldn't be the case for Blender exported Shape Keys. + vgweights = full_weights / 100.0 + else: + vgweights = None + # There must be a FullWeight for each Shape. Any extra FullWeights are ignored. + assert(len(full_weights) >= num_shapes_assigned_to_channel) + + # To add shape keys to the mesh, an Object using the mesh is needed. + if me.shape_keys is None: + objects[0].shape_key_add(name="Basis", from_mix=False) + kb = objects[0].shape_key_add(name=elem_name_utf8, from_mix=False) + me.shape_keys.use_relative = True # Should already be set as such. + + # Only need to set the shape key co if there are any non-zero dvcos. + if dvcos.any(): + shape_cos = me_vcos_vector_view.copy() + shape_cos[indices] += dvcos + kb.points.foreach_set("co", shape_cos.ravel()) + + shape_key_values_in_range &= expand_shape_key_range(kb, weight) + + kb.value = weight + + # Add vgroup if necessary. + if vgweights is not None: + # VertexGroup.add only allows sequences of int indices, but iterating the indices array directly would + # produce numpy scalars of types such as np.int32. The underlying memoryview of the indices array, however, + # does produce standard Python ints when iterated, so pass indices.data to add_vgroup_to_objects instead of + # indices. + # memoryviews tend to be faster to iterate than numpy arrays anyway, so vgweights.data is passed too. + add_vgroup_to_objects(indices.data, vgweights.data, kb.name, objects) + kb.vertex_group = kb.name + + bc_uuid_to_keyblocks.setdefault(bc_uuid, []).append(kb) + + if not shape_key_values_in_range: + print("WARNING: The imported Value of a Shape Key on the Mesh '%s' is beyond the minimum/maximum allowed and" + " has been clamped." % me.name) + + return bc_uuid_to_keyblocks + + +# -------- +# Material + +def blen_read_material(fbx_tmpl, fbx_obj, settings): + from bpy_extras import node_shader_utils + from math import sqrt + + elem_name_utf8 = elem_name_ensure_class(fbx_obj, b'Material') + + nodal_material_wrap_map = settings.nodal_material_wrap_map + ma = bpy.data.materials.new(name=elem_name_utf8) + + const_color_white = 1.0, 1.0, 1.0 + const_color_black = 0.0, 0.0, 0.0 + + fbx_props = (elem_find_first(fbx_obj, b'Properties70'), + elem_find_first(fbx_tmpl, b'Properties70', fbx_elem_nil)) + fbx_props_no_template = (fbx_props[0], fbx_elem_nil) + + ma_wrap = node_shader_utils.PrincipledBSDFWrapper(ma, is_readonly=False, use_nodes=True) + ma_wrap.base_color = elem_props_get_color_rgb(fbx_props, b'DiffuseColor', const_color_white) + # No specular color in Principled BSDF shader, assumed to be either white or take some tint from diffuse one... + # TODO: add way to handle tint option (guesstimate from spec color + intensity...)? + ma_wrap.specular = elem_props_get_number(fbx_props, b'SpecularFactor', 0.25) * 2.0 + # XXX Totally empirical conversion, trying to adapt it (and protect against invalid negative values, see T96076): + # From [1.0 - 0.0] Principled BSDF range to [0.0 - 100.0] FBX shininess range)... 
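+    # NOTE (editorial, worked example only): with the conversion below a Shininess of 100.0 gives
+    # roughness = 1.0 - sqrt(100.0) / 10.0 = 0.0, Shininess 25.0 gives 0.5 and Shininess 0.0 gives 1.0,
+    # so the full FBX shininess range maps onto the full Principled BSDF roughness range.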
+ fbx_shininess = max(elem_props_get_number(fbx_props, b'Shininess', 20.0), 0.0) + ma_wrap.roughness = 1.0 - (sqrt(fbx_shininess) / 10.0) + # Sweetness... Looks like we are not the only ones to not know exactly how FBX is supposed to work (see T59850). + # According to one of its developers, Unity uses that formula to extract alpha value: + # + # alpha = 1 - TransparencyFactor + # if (alpha == 1 or alpha == 0): + # alpha = 1 - TransparentColor.r + # + # Until further info, let's assume this is correct way to do, hence the following code for TransparentColor. + # However, there are some cases (from 3DSMax, see T65065), where we do have TransparencyFactor only defined + # in the template to 0.0, and then materials defining TransparentColor to pure white (1.0, 1.0, 1.0), + # and setting alpha value in Opacity... try to cope with that too. :(((( + alpha = 1.0 - elem_props_get_number(fbx_props, b'TransparencyFactor', 0.0) + if (alpha == 1.0 or alpha == 0.0): + alpha = elem_props_get_number(fbx_props_no_template, b'Opacity', None) + if alpha is None: + alpha = 1.0 - elem_props_get_color_rgb(fbx_props, b'TransparentColor', const_color_black)[0] + ma_wrap.alpha = alpha + ma_wrap.metallic = elem_props_get_number(fbx_props, b'ReflectionFactor', 0.0) + # We have no metallic (a.k.a. reflection) color... + # elem_props_get_color_rgb(fbx_props, b'ReflectionColor', const_color_white) + ma_wrap.normalmap_strength = elem_props_get_number(fbx_props, b'BumpFactor', 1.0) + # Emission strength and color + ma_wrap.emission_strength = elem_props_get_number(fbx_props, b'EmissiveFactor', 1.0) + ma_wrap.emission_color = elem_props_get_color_rgb(fbx_props, b'EmissiveColor', const_color_black) + + nodal_material_wrap_map[ma] = ma_wrap + + if settings.use_custom_props: + blen_read_custom_properties(fbx_obj, ma, settings) + + return ma + + +# ------- +# Image & Texture + +def blen_read_texture_image(fbx_tmpl, fbx_obj, basedir, settings): + import os + from bpy_extras import image_utils + + def pack_data_from_content(image, fbx_obj): + data = elem_find_first_bytes(fbx_obj, b'Content') + if (data): + data_len = len(data) + if (data_len): + image.pack(data=data, data_len=data_len) + + elem_name_utf8 = elem_name_ensure_classes(fbx_obj, {b'Texture', b'Video'}) + + image_cache = settings.image_cache + + # Yet another beautiful logic demonstration by Master FBX: + # * RelativeFilename in both Video and Texture nodes. + # * FileName in texture nodes. + # * Filename in video nodes. + # Aaaaaaaarrrrrrrrgggggggggggg!!!!!!!!!!!!!! + filepath = elem_find_first_string(fbx_obj, b'RelativeFilename') + if filepath: + # Make sure we do handle a relative path, and not an absolute one (see D5143). + filepath = filepath.lstrip(os.path.sep).lstrip(os.path.altsep) + filepath = os.path.join(basedir, filepath) + else: + filepath = elem_find_first_string(fbx_obj, b'FileName') + if not filepath: + filepath = elem_find_first_string(fbx_obj, b'Filename') + if not filepath: + print("Error, could not find any file path in ", fbx_obj) + print(" Falling back to: ", elem_name_utf8) + filepath = elem_name_utf8 + else: + filepath = filepath.replace('\\', '/') if (os.sep == '/') else filepath.replace('/', '\\') + + image = image_cache.get(filepath) + if image is not None: + # Data is only embedded once, we may have already created the image but still be missing its data! 
+ if not image.has_data: + pack_data_from_content(image, fbx_obj) + return image + + image = image_utils.load_image( + filepath, + dirname=basedir, + place_holder=True, + recursive=settings.use_image_search, + ) + + # Try to use embedded data, if available! + pack_data_from_content(image, fbx_obj) + + image_cache[filepath] = image + # name can be ../a/b/c + image.name = os.path.basename(elem_name_utf8) + + if settings.use_custom_props: + blen_read_custom_properties(fbx_obj, image, settings) + + return image + + +def blen_read_camera(fbx_tmpl, fbx_obj, settings): + # meters to inches + M2I = 0.0393700787 + + global_scale = settings.global_scale + + elem_name_utf8 = elem_name_ensure_class(fbx_obj, b'NodeAttribute') + + fbx_props = (elem_find_first(fbx_obj, b'Properties70'), + elem_find_first(fbx_tmpl, b'Properties70', fbx_elem_nil)) + + camera = bpy.data.cameras.new(name=elem_name_utf8) + + camera.type = 'ORTHO' if elem_props_get_enum(fbx_props, b'CameraProjectionType', 0) == 1 else 'PERSP' + + camera.dof.focus_distance = elem_props_get_number(fbx_props, b'FocusDistance', 10) * global_scale + if (elem_props_get_bool(fbx_props, b'UseDepthOfField', False)): + camera.dof.use_dof = True + + camera.lens = elem_props_get_number(fbx_props, b'FocalLength', 35.0) + camera.sensor_width = elem_props_get_number(fbx_props, b'FilmWidth', 32.0 * M2I) / M2I + camera.sensor_height = elem_props_get_number(fbx_props, b'FilmHeight', 32.0 * M2I) / M2I + + camera.ortho_scale = elem_props_get_number(fbx_props, b'OrthoZoom', 1.0) + + filmaspect = camera.sensor_width / camera.sensor_height + # film offset + camera.shift_x = elem_props_get_number(fbx_props, b'FilmOffsetX', 0.0) / (M2I * camera.sensor_width) + camera.shift_y = elem_props_get_number(fbx_props, b'FilmOffsetY', 0.0) / (M2I * camera.sensor_height * filmaspect) + + camera.clip_start = elem_props_get_number(fbx_props, b'NearPlane', 0.01) * global_scale + camera.clip_end = elem_props_get_number(fbx_props, b'FarPlane', 100.0) * global_scale + + if settings.use_custom_props: + blen_read_custom_properties(fbx_obj, camera, settings) + + return camera + + +def blen_read_light(fbx_tmpl, fbx_obj, settings): + import math + elem_name_utf8 = elem_name_ensure_class(fbx_obj, b'NodeAttribute') + + fbx_props = (elem_find_first(fbx_obj, b'Properties70'), + elem_find_first(fbx_tmpl, b'Properties70', fbx_elem_nil)) + + light_type = { + 0: 'POINT', + 1: 'SUN', + 2: 'SPOT'}.get(elem_props_get_enum(fbx_props, b'LightType', 0), 'POINT') + + lamp = bpy.data.lights.new(name=elem_name_utf8, type=light_type) + + if light_type == 'SPOT': + spot_size = elem_props_get_number(fbx_props, b'OuterAngle', None) + if spot_size is None: + # Deprecated. + spot_size = elem_props_get_number(fbx_props, b'Cone angle', 45.0) + lamp.spot_size = math.radians(spot_size) + + spot_blend = elem_props_get_number(fbx_props, b'InnerAngle', None) + if spot_blend is None: + # Deprecated. + spot_blend = elem_props_get_number(fbx_props, b'HotSpot', 45.0) + lamp.spot_blend = 1.0 - (spot_blend / spot_size) + + # TODO, cycles nodes??? + lamp.color = elem_props_get_color_rgb(fbx_props, b'Color', (1.0, 1.0, 1.0)) + lamp.energy = elem_props_get_number(fbx_props, b'Intensity', 100.0) / 100.0 + lamp.use_shadow = elem_props_get_bool(fbx_props, b'CastShadow', True) + if hasattr(lamp, "cycles"): + lamp.cycles.cast_shadow = lamp.use_shadow + # Keeping this for now, but this is not used nor exposed anymore afaik... 
+ lamp.shadow_color = elem_props_get_color_rgb(fbx_props, b'ShadowColor', (0.0, 0.0, 0.0)) + + if settings.use_custom_props: + blen_read_custom_properties(fbx_obj, lamp, settings) + + return lamp + + +# ### Import Utility class +class FbxImportHelperNode: + """ + Temporary helper node to store a hierarchy of fbxNode objects before building Objects, Armatures and Bones. + It tries to keep the correction data in one place so it can be applied consistently to the imported data. + """ + + __slots__ = ( + '_parent', 'anim_compensation_matrix', 'is_global_animation', 'armature_setup', 'armature', 'bind_matrix', + 'bl_bone', 'bl_data', 'bl_obj', 'bone_child_matrix', 'children', 'clusters', + 'fbx_elem', 'fbx_data_elem', 'fbx_name', 'fbx_transform_data', 'fbx_type', + 'is_armature', 'has_bone_children', 'is_bone', 'is_root', 'is_leaf', + 'matrix', 'matrix_as_parent', 'matrix_geom', 'meshes', 'post_matrix', 'pre_matrix') + + def __init__(self, fbx_elem, bl_data, fbx_transform_data, is_bone): + self.fbx_name = elem_name_ensure_class(fbx_elem, b'Model') if fbx_elem else 'Unknown' + self.fbx_type = fbx_elem.props[2] if fbx_elem else None + self.fbx_elem = fbx_elem + # FBX elem of a connected NodeAttribute/Geometry for helpers whose bl_data + # does not exist or is yet to be created. + self.fbx_data_elem = None + self.bl_obj = None + self.bl_data = bl_data + # Name of bone if this is a bone (this may be different to fbx_name if there was a name conflict in Blender!) + self.bl_bone = None + self.fbx_transform_data = fbx_transform_data + self.is_root = False + self.is_bone = is_bone + self.is_armature = False + self.armature = None # For bones only, relevant armature node. + # True if the hierarchy below this node contains bones, important to support mixed hierarchies. + self.has_bone_children = False + # True for leaf-bones added to the end of some bone chains to set the lengths. + self.is_leaf = False + self.pre_matrix = None # correction matrix that needs to be applied before the FBX transform + self.bind_matrix = None # for bones this is the matrix used to bind to the skin + if fbx_transform_data: + self.matrix, self.matrix_as_parent, self.matrix_geom = blen_read_object_transform_do(fbx_transform_data) + else: + self.matrix, self.matrix_as_parent, self.matrix_geom = (None, None, None) + self.post_matrix = None # correction matrix that needs to be applied after the FBX transform + self.bone_child_matrix = None # Objects attached to a bone end not the beginning, this matrix corrects for that + + # XXX Those two are to handle the fact that rigged meshes are not linked to their armature in FBX, which implies + # that their animation is in global space (afaik...). + # This is actually not really solvable currently, since anim_compensation_matrix is not valid if armature + # itself is animated (we'd have to recompute global-to-local anim_compensation_matrix for each frame, + # and for each armature action... beyond being an insane work). + # Solution for now: do not read rigged meshes animations at all! sic... + # a mesh moved in the hierarchy may have a different local matrix. This compensates animations for this. + self.anim_compensation_matrix = None + self.is_global_animation = False + + self.meshes = None # List of meshes influenced by this bone. 
+ self.clusters = [] # Deformer Cluster nodes + self.armature_setup = {} # mesh and armature matrix when the mesh was bound + + self._parent = None + self.children = [] + + @property + def parent(self): + return self._parent + + @parent.setter + def parent(self, value): + if self._parent is not None: + self._parent.children.remove(self) + self._parent = value + if self._parent is not None: + self._parent.children.append(self) + + @property + def ignore(self): + # Separating leaf status from ignore status itself. + # Currently they are equivalent, but this may change in future. + return self.is_leaf + + def __repr__(self): + if self.fbx_elem: + return self.fbx_elem.props[1].decode() + else: + return "None" + + def print_info(self, indent=0): + print(" " * indent + (self.fbx_name if self.fbx_name else "(Null)") + + ("[root]" if self.is_root else "") + + ("[leaf]" if self.is_leaf else "") + + ("[ignore]" if self.ignore else "") + + ("[armature]" if self.is_armature else "") + + ("[bone]" if self.is_bone else "") + + ("[HBC]" if self.has_bone_children else "") + ) + for c in self.children: + c.print_info(indent + 1) + + def mark_leaf_bones(self): + if self.is_bone and len(self.children) == 1: + child = self.children[0] + if child.is_bone and len(child.children) == 0: + child.is_leaf = True + for child in self.children: + child.mark_leaf_bones() + + def do_bake_transform(self, settings): + return (settings.bake_space_transform and self.fbx_type in (b'Mesh', b'Null') and + not self.is_armature and not self.is_bone) + + def find_correction_matrix(self, settings, parent_correction_inv=None): + from bpy_extras.io_utils import axis_conversion + + if self.parent and (self.parent.is_root or self.parent.do_bake_transform(settings)): + self.pre_matrix = settings.global_matrix + + if parent_correction_inv: + self.pre_matrix = parent_correction_inv @ (self.pre_matrix if self.pre_matrix else Matrix()) + + correction_matrix = None + + if self.is_bone: + if settings.automatic_bone_orientation: + # find best orientation to align bone with + bone_children = tuple(child for child in self.children if child.is_bone) + if len(bone_children) == 0: + # no children, inherit the correction from parent (if possible) + if self.parent and self.parent.is_bone: + correction_matrix = parent_correction_inv.inverted() if parent_correction_inv else None + else: + # else find how best to rotate the bone to align the Y axis with the children + best_axis = (1, 0, 0) + if len(bone_children) == 1: + vec = bone_children[0].get_bind_matrix().to_translation() + best_axis = Vector((0, 0, 1 if vec[2] >= 0 else -1)) + if abs(vec[0]) > abs(vec[1]): + if abs(vec[0]) > abs(vec[2]): + best_axis = Vector((1 if vec[0] >= 0 else -1, 0, 0)) + elif abs(vec[1]) > abs(vec[2]): + best_axis = Vector((0, 1 if vec[1] >= 0 else -1, 0)) + else: + # get the child directions once because they may be checked several times + child_locs = (child.get_bind_matrix().to_translation() for child in bone_children) + child_locs = tuple(loc.normalized() for loc in child_locs if loc.magnitude > 0.0) + + # I'm not sure which one I like better... + if False: + best_angle = -1.0 + for i in range(6): + a = i // 2 + s = -1 if i % 2 == 1 else 1 + test_axis = Vector((s if a == 0 else 0, s if a == 1 else 0, s if a == 2 else 0)) + + # find max angle to children + max_angle = 1.0 + for loc in child_locs: + max_angle = min(max_angle, test_axis.dot(loc)) + + # is it better than the last one? 
+ if best_angle < max_angle: + best_angle = max_angle + best_axis = test_axis + else: + best_angle = -1.0 + for vec in child_locs: + test_axis = Vector((0, 0, 1 if vec[2] >= 0 else -1)) + if abs(vec[0]) > abs(vec[1]): + if abs(vec[0]) > abs(vec[2]): + test_axis = Vector((1 if vec[0] >= 0 else -1, 0, 0)) + elif abs(vec[1]) > abs(vec[2]): + test_axis = Vector((0, 1 if vec[1] >= 0 else -1, 0)) + + # find max angle to children + max_angle = 1.0 + for loc in child_locs: + max_angle = min(max_angle, test_axis.dot(loc)) + + # is it better than the last one? + if best_angle < max_angle: + best_angle = max_angle + best_axis = test_axis + + # convert best_axis to axis string + to_up = 'Z' if best_axis[2] >= 0 else '-Z' + if abs(best_axis[0]) > abs(best_axis[1]): + if abs(best_axis[0]) > abs(best_axis[2]): + to_up = 'X' if best_axis[0] >= 0 else '-X' + elif abs(best_axis[1]) > abs(best_axis[2]): + to_up = 'Y' if best_axis[1] >= 0 else '-Y' + to_forward = 'X' if to_up not in {'X', '-X'} else 'Y' + + # Build correction matrix + if (to_up, to_forward) != ('Y', 'X'): + correction_matrix = axis_conversion(from_forward='X', + from_up='Y', + to_forward=to_forward, + to_up=to_up, + ).to_4x4() + else: + correction_matrix = settings.bone_correction_matrix + else: + # camera and light can be hard wired + if self.fbx_type == b'Camera': + correction_matrix = MAT_CONVERT_CAMERA + elif self.fbx_type == b'Light': + correction_matrix = MAT_CONVERT_LIGHT + + self.post_matrix = correction_matrix + + if self.do_bake_transform(settings): + self.post_matrix = settings.global_matrix_inv @ (self.post_matrix if self.post_matrix else Matrix()) + + # process children + correction_matrix_inv = correction_matrix.inverted_safe() if correction_matrix else None + for child in self.children: + child.find_correction_matrix(settings, correction_matrix_inv) + + def find_armature_bones(self, armature): + for child in self.children: + if child.is_bone: + child.armature = armature + child.find_armature_bones(armature) + + def find_armatures(self): + needs_armature = False + for child in self.children: + if child.is_bone: + needs_armature = True + break + if needs_armature: + if self.fbx_type in {b'Null', b'Root'}: + # if empty then convert into armature + self.is_armature = True + armature = self + else: + # otherwise insert a new node + # XXX Maybe in case self is virtual FBX root node, we should instead add one armature per bone child? 
+ armature = FbxImportHelperNode(None, None, None, False) + armature.fbx_name = "Armature" + armature.is_armature = True + + for child in tuple(self.children): + if child.is_bone: + child.parent = armature + + armature.parent = self + + armature.find_armature_bones(armature) + + for child in self.children: + if child.is_armature or child.is_bone: + continue + child.find_armatures() + + def find_bone_children(self): + has_bone_children = False + for child in self.children: + has_bone_children |= child.find_bone_children() + self.has_bone_children = has_bone_children + return self.is_bone or has_bone_children + + def find_fake_bones(self, in_armature=False): + if in_armature and not self.is_bone and self.has_bone_children: + self.is_bone = True + # if we are not a null node we need an intermediate node for the data + if self.fbx_type not in {b'Null', b'Root'}: + node = FbxImportHelperNode(self.fbx_elem, self.bl_data, None, False) + self.fbx_elem = None + self.bl_data = None + + # transfer children + for child in self.children: + if child.is_bone or child.has_bone_children: + continue + child.parent = node + + # attach to parent + node.parent = self + + if self.is_armature: + in_armature = True + for child in self.children: + child.find_fake_bones(in_armature) + + def get_world_matrix_as_parent(self): + matrix = self.parent.get_world_matrix_as_parent() if self.parent else Matrix() + if self.matrix_as_parent: + matrix = matrix @ self.matrix_as_parent + return matrix + + def get_world_matrix(self): + matrix = self.parent.get_world_matrix_as_parent() if self.parent else Matrix() + if self.matrix: + matrix = matrix @ self.matrix + return matrix + + def get_matrix(self): + matrix = self.matrix if self.matrix else Matrix() + if self.pre_matrix: + matrix = self.pre_matrix @ matrix + if self.post_matrix: + matrix = matrix @ self.post_matrix + return matrix + + def get_bind_matrix(self): + matrix = self.bind_matrix if self.bind_matrix else Matrix() + if self.pre_matrix: + matrix = self.pre_matrix @ matrix + if self.post_matrix: + matrix = matrix @ self.post_matrix + return matrix + + def make_bind_pose_local(self, parent_matrix=None): + if parent_matrix is None: + parent_matrix = Matrix() + + if self.bind_matrix: + bind_matrix = parent_matrix.inverted_safe() @ self.bind_matrix + else: + bind_matrix = self.matrix.copy() if self.matrix else None + + self.bind_matrix = bind_matrix + if bind_matrix: + parent_matrix = parent_matrix @ bind_matrix + + for child in self.children: + child.make_bind_pose_local(parent_matrix) + + def collect_skeleton_meshes(self, meshes): + for _, m in self.clusters: + meshes.update(m) + for child in self.children: + if not child.meshes: + child.collect_skeleton_meshes(meshes) + + def collect_armature_meshes(self): + if self.is_armature: + armature_matrix_inv = self.get_world_matrix().inverted_safe() + + meshes = set() + for child in self.children: + # Children meshes may be linked to children armatures, in which case we do not want to link them + # to a parent one. See T70244. 
+ child.collect_armature_meshes() + if not child.meshes: + child.collect_skeleton_meshes(meshes) + for m in meshes: + old_matrix = m.matrix + m.matrix = armature_matrix_inv @ m.get_world_matrix() + m.anim_compensation_matrix = old_matrix.inverted_safe() @ m.matrix + m.is_global_animation = True + m.parent = self + self.meshes = meshes + else: + for child in self.children: + child.collect_armature_meshes() + + def build_skeleton(self, arm, parent_matrix, settings, parent_bone_size=1): + def child_connect(par_bone, child_bone, child_head, connect_ctx): + # child_bone or child_head may be None. + force_connect_children, connected = connect_ctx + if child_bone is not None: + child_bone.parent = par_bone + child_head = child_bone.head + + if similar_values_iter(par_bone.tail, child_head): + if child_bone is not None: + child_bone.use_connect = True + # Disallow any force-connection at this level from now on, since that child was 'really' + # connected, we do not want to move current bone's tail anymore! + connected = None + elif force_connect_children and connected is not None: + # We only store position where tail of par_bone should be in the end. + # Actual tail moving and force connection of compatible child bones will happen + # once all have been checked. + if connected is ...: + connected = ([child_head.copy(), 1], [child_bone] if child_bone is not None else []) + else: + connected[0][0] += child_head + connected[0][1] += 1 + if child_bone is not None: + connected[1].append(child_bone) + connect_ctx[1] = connected + + def child_connect_finalize(par_bone, connect_ctx): + force_connect_children, connected = connect_ctx + # Do nothing if force connection is not enabled! + if force_connect_children and connected is not None and connected is not ...: + # Here again we have to be wary about zero-length bones!!! + par_tail = connected[0][0] / connected[0][1] + if (par_tail - par_bone.head).magnitude < 1e-2: + par_bone_vec = (par_bone.tail - par_bone.head).normalized() + par_tail = par_bone.head + par_bone_vec * 0.01 + par_bone.tail = par_tail + for child_bone in connected[1]: + if similar_values_iter(par_tail, child_bone.head): + child_bone.use_connect = True + + # Create the (edit)bone. + bone = arm.bl_data.edit_bones.new(name=self.fbx_name) + bone.select = True + self.bl_obj = arm.bl_obj + self.bl_data = arm.bl_data + self.bl_bone = bone.name # Could be different from the FBX name! + # Read EditBone custom props the NodeAttribute + if settings.use_custom_props and self.fbx_data_elem: + blen_read_custom_properties(self.fbx_data_elem, bone, settings) + + # get average distance to children + bone_size = 0.0 + bone_count = 0 + for child in self.children: + if child.is_bone: + bone_size += child.get_bind_matrix().to_translation().magnitude + bone_count += 1 + if bone_count > 0: + bone_size /= bone_count + else: + bone_size = parent_bone_size + + # So that our bone gets its final length, but still Y-aligned in armature space. + # 0-length bones are automatically collapsed into their parent when you leave edit mode, + # so this enforces a minimum length. + bone_tail = Vector((0.0, 1.0, 0.0)) * max(0.01, bone_size) + bone.tail = bone_tail + + # And rotate/move it to its final "rest pose". + bone_matrix = parent_matrix @ self.get_bind_matrix().normalized() + + bone.matrix = bone_matrix + + force_connect_children = settings.force_connect_children + + connect_ctx = [force_connect_children, ...] + for child in self.children: + if child.is_leaf and force_connect_children: + # Arggggggggggggggggg! 
We do not want to create this bone, but we need its 'virtual head' location + # to orient current one!!! + child_head = (bone_matrix @ child.get_bind_matrix().normalized()).translation + child_connect(bone, None, child_head, connect_ctx) + elif child.is_bone and not child.ignore: + child_bone = child.build_skeleton(arm, bone_matrix, settings, bone_size) + # Connection to parent. + child_connect(bone, child_bone, None, connect_ctx) + + child_connect_finalize(bone, connect_ctx) + + # Correction for children attached to a bone. FBX expects to attach to the head of a bone, while Blender + # attaches to the tail. + if force_connect_children: + # When forcefully connecting, the bone's tail position may be changed, which can change both the bone's + # rotation and its length. + # Set the correction matrix such that it transforms the current tail transformation back to the original + # head transformation. + head_to_origin = bone.matrix.inverted_safe() + tail_to_head = Matrix.Translation(bone.head - bone.tail) + origin_to_original_head = bone_matrix + tail_to_original_head = head_to_origin @ tail_to_head @ origin_to_original_head + self.bone_child_matrix = tail_to_original_head + else: + self.bone_child_matrix = Matrix.Translation(-bone_tail) + + return bone + + def build_node_obj(self, fbx_tmpl, settings): + if self.bl_obj: + return self.bl_obj + + if self.is_bone or not self.fbx_elem: + return None + + # create when linking since we need object data + elem_name_utf8 = self.fbx_name + + # Object data must be created already + self.bl_obj = obj = bpy.data.objects.new(name=elem_name_utf8, object_data=self.bl_data) + + fbx_props = (elem_find_first(self.fbx_elem, b'Properties70'), + elem_find_first(fbx_tmpl, b'Properties70', fbx_elem_nil)) + + # ---- + # Misc Attributes + + obj.color[0:3] = elem_props_get_color_rgb(fbx_props, b'Color', (0.8, 0.8, 0.8)) + obj.hide_viewport = not bool(elem_props_get_visibility(fbx_props, b'Visibility', 1.0)) + + obj.matrix_basis = self.get_matrix() + + if settings.use_custom_props: + blen_read_custom_properties(self.fbx_elem, obj, settings) + + return obj + + def build_skeleton_children(self, fbx_tmpl, settings, scene, view_layer): + if self.is_bone: + for child in self.children: + if child.ignore: + continue + child.build_skeleton_children(fbx_tmpl, settings, scene, view_layer) + return None + else: + # child is not a bone + obj = self.build_node_obj(fbx_tmpl, settings) + + if obj is None: + return None + + for child in self.children: + if child.ignore: + continue + child.build_skeleton_children(fbx_tmpl, settings, scene, view_layer) + + # instance in scene + view_layer.active_layer_collection.collection.objects.link(obj) + obj.select_set(True) + + return obj + + def link_skeleton_children(self, fbx_tmpl, settings, scene): + if self.is_bone: + for child in self.children: + if child.ignore: + continue + child_obj = child.bl_obj + if child_obj and child_obj != self.bl_obj: + child_obj.parent = self.bl_obj # get the armature the bone belongs to + child_obj.parent_bone = self.bl_bone + child_obj.parent_type = 'BONE' + child_obj.matrix_parent_inverse = Matrix() + + # Blender attaches to the end of a bone, while FBX attaches to the start. + # bone_child_matrix corrects for that. 
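+                    # NOTE (editorial, illustrative only): `bone_child_matrix` is built in `build_skeleton()`;
+                    # in the simple case it is just `Matrix.Translation(-bone_tail)`, so pre-multiplying it onto
+                    # the child's `pre_matrix` below moves the child from tail-relative back to head-relative space:
+                    #   >>> from mathutils import Matrix, Vector
+                    #   >>> (Matrix.Translation(Vector((0.0, -1.0, 0.0))) @ Vector((0.0, 1.0, 0.0)))[:]
+                    #   (0.0, 0.0, 0.0)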
+ if child.pre_matrix: + child.pre_matrix = self.bone_child_matrix @ child.pre_matrix + else: + child.pre_matrix = self.bone_child_matrix + + child_obj.matrix_basis = child.get_matrix() + child.link_skeleton_children(fbx_tmpl, settings, scene) + return None + else: + obj = self.bl_obj + + for child in self.children: + if child.ignore: + continue + child_obj = child.link_skeleton_children(fbx_tmpl, settings, scene) + if child_obj: + child_obj.parent = obj + + return obj + + def set_pose_matrix_and_custom_props(self, arm, settings): + pose_bone = arm.bl_obj.pose.bones[self.bl_bone] + pose_bone.matrix_basis = self.get_bind_matrix().inverted_safe() @ self.get_matrix() + + # `self.fbx_elem` can be `None` in cases where the imported hierarchy contains a mix of bone and non-bone FBX + # Nodes parented to one another, e.g. "bone1"->"mesh1"->"bone2". In Blender, an Armature can only consist of + # bones, so to maintain the imported hierarchy, a placeholder bone with the same name as "mesh1" is inserted + # into the Armature and then the imported "mesh1" Object is parented to the placeholder bone. The placeholder + # bone won't have a `self.fbx_elem` because it belongs to the "mesh1" Object instead. + # See FbxImportHelperNode.find_fake_bones(). + if settings.use_custom_props and self.fbx_elem: + blen_read_custom_properties(self.fbx_elem, pose_bone, settings) + + for child in self.children: + if child.ignore: + continue + if child.is_bone: + child.set_pose_matrix_and_custom_props(arm, settings) + + def merge_weights(self, combined_weights, fbx_cluster): + indices = elem_prop_first(elem_find_first(fbx_cluster, b'Indexes', default=None), default=()) + weights = elem_prop_first(elem_find_first(fbx_cluster, b'Weights', default=None), default=()) + + for index, weight in zip(indices, weights): + w = combined_weights.get(index) + if w is None: + combined_weights[index] = [weight] + else: + w.append(weight) + + def set_bone_weights(self): + ignored_children = tuple(child for child in self.children + if child.is_bone and child.ignore and len(child.clusters) > 0) + + if len(ignored_children) > 0: + # If we have an ignored child bone we need to merge their weights into the current bone weights. + # This can happen both intentionally and accidentally when skinning a model. Either way, they + # need to be moved into a parent bone or they cause animation glitches. + for fbx_cluster, meshes in self.clusters: + combined_weights = {} + self.merge_weights(combined_weights, fbx_cluster) + + for child in ignored_children: + for child_cluster, child_meshes in child.clusters: + if not meshes.isdisjoint(child_meshes): + self.merge_weights(combined_weights, child_cluster) + + # combine child weights + indices = [] + weights = [] + for i, w in combined_weights.items(): + indices.append(i) + if len(w) > 1: + # Add ignored child weights to the current bone's weight. + # XXX - Weights that sum to more than 1.0 get clamped to 1.0 when set in the vertex group. 
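+                        # NOTE (editorial, hypothetical numbers): a vertex weighted 0.6 by this bone and 0.7 by an
+                        # ignored child cluster gets sum(w) == 1.3 here and is stored as 1.0 once assigned to the
+                        # vertex group.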
+ weights.append(sum(w)) + else: + weights.append(w[0]) + + add_vgroup_to_objects(indices, weights, self.bl_bone, [node.bl_obj for node in meshes]) + + # clusters that drive meshes not included in a parent don't need to be merged + all_meshes = set().union(*[meshes for _, meshes in self.clusters]) + for child in ignored_children: + for child_cluster, child_meshes in child.clusters: + if all_meshes.isdisjoint(child_meshes): + indices = elem_prop_first(elem_find_first(child_cluster, b'Indexes', default=None), default=()) + weights = elem_prop_first(elem_find_first(child_cluster, b'Weights', default=None), default=()) + add_vgroup_to_objects(indices, weights, self.bl_bone, [node.bl_obj for node in child_meshes]) + else: + # set the vertex weights on meshes + for fbx_cluster, meshes in self.clusters: + indices = elem_prop_first(elem_find_first(fbx_cluster, b'Indexes', default=None), default=()) + weights = elem_prop_first(elem_find_first(fbx_cluster, b'Weights', default=None), default=()) + add_vgroup_to_objects(indices, weights, self.bl_bone, [node.bl_obj for node in meshes]) + + for child in self.children: + if child.is_bone and not child.ignore: + child.set_bone_weights() + + def build_hierarchy(self, fbx_tmpl, settings, scene, view_layer): + if self.is_armature: + # create when linking since we need object data + elem_name_utf8 = self.fbx_name + + self.bl_data = arm_data = bpy.data.armatures.new(name=elem_name_utf8) + + # Object data must be created already + self.bl_obj = arm = bpy.data.objects.new(name=elem_name_utf8, object_data=arm_data) + + arm.matrix_basis = self.get_matrix() + + if self.fbx_elem: + fbx_props = (elem_find_first(self.fbx_elem, b'Properties70'), + elem_find_first(fbx_tmpl, b'Properties70', fbx_elem_nil)) + + if settings.use_custom_props: + # Read Armature Object custom props from the Node + blen_read_custom_properties(self.fbx_elem, arm, settings) + + if self.fbx_data_elem: + # Read Armature Data custom props from the NodeAttribute + blen_read_custom_properties(self.fbx_data_elem, arm_data, settings) + + # instance in scene + view_layer.active_layer_collection.collection.objects.link(arm) + arm.select_set(True) + + # Add bones: + + # Switch to Edit mode. + view_layer.objects.active = arm + is_hidden = arm.hide_viewport + arm.hide_viewport = False # Can't switch to Edit mode hidden objects... 
+ bpy.ops.object.mode_set(mode='EDIT') + + for child in self.children: + if child.ignore: + continue + if child.is_bone: + child.build_skeleton(self, Matrix(), settings) + + bpy.ops.object.mode_set(mode='OBJECT') + + arm.hide_viewport = is_hidden + + # Set pose matrix and PoseBone custom properties + for child in self.children: + if child.ignore: + continue + if child.is_bone: + child.set_pose_matrix_and_custom_props(self, settings) + + # Add bone children: + for child in self.children: + if child.ignore: + continue + child_obj = child.build_skeleton_children(fbx_tmpl, settings, scene, view_layer) + + return arm + elif self.fbx_elem and not self.is_bone: + obj = self.build_node_obj(fbx_tmpl, settings) + + # walk through children + for child in self.children: + child.build_hierarchy(fbx_tmpl, settings, scene, view_layer) + + # instance in scene + view_layer.active_layer_collection.collection.objects.link(obj) + obj.select_set(True) + + return obj + else: + for child in self.children: + child.build_hierarchy(fbx_tmpl, settings, scene, view_layer) + + return None + + def link_hierarchy(self, fbx_tmpl, settings, scene): + if self.is_armature: + arm = self.bl_obj + + # Link bone children: + for child in self.children: + if child.ignore: + continue + child_obj = child.link_skeleton_children(fbx_tmpl, settings, scene) + if child_obj: + child_obj.parent = arm + + # Add armature modifiers to the meshes + if self.meshes: + for mesh in self.meshes: + (mmat, amat) = mesh.armature_setup[self] + me_obj = mesh.bl_obj + + # bring global armature & mesh matrices into *Blender* global space. + # Note: Usage of matrix_geom (local 'diff' transform) here is quite brittle. + # Among other things, why in hell isn't it taken into account by bindpose & co??? + # Probably because org app (max) handles it completely aside from any parenting stuff, + # which we obviously cannot do in Blender. :/ + if amat is None: + amat = self.bind_matrix + amat = settings.global_matrix @ (Matrix() if amat is None else amat) + if self.matrix_geom: + amat = amat @ self.matrix_geom + mmat = settings.global_matrix @ mmat + if mesh.matrix_geom: + mmat = mmat @ mesh.matrix_geom + + # Now that we have armature and mesh in there (global) bind 'state' (matrix), + # we can compute inverse parenting matrix of the mesh. 
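+                # NOTE (editorial derivation, not in the original source): Blender evaluates a parented object as
+                #   matrix_world = parent.matrix_world @ matrix_parent_inverse @ matrix_basis
+                # so setting matrix_parent_inverse = amat^-1 @ mmat @ matrix_basis^-1 below means that while the
+                # armature sits at its bind matrix `amat`, the mesh evaluates back to its own bind matrix `mmat`.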
+ me_obj.matrix_parent_inverse = amat.inverted_safe() @ mmat @ me_obj.matrix_basis.inverted_safe() + + mod = mesh.bl_obj.modifiers.new(arm.name, 'ARMATURE') + mod.object = arm + + # Add bone weights to the deformers + for child in self.children: + if child.ignore: + continue + if child.is_bone: + child.set_bone_weights() + + return arm + elif self.bl_obj: + obj = self.bl_obj + + # walk through children + for child in self.children: + child_obj = child.link_hierarchy(fbx_tmpl, settings, scene) + if child_obj: + child_obj.parent = obj + + return obj + else: + for child in self.children: + child.link_hierarchy(fbx_tmpl, settings, scene) + + return None + + +def load(operator, context, filepath="", + use_manual_orientation=False, + axis_forward='-Z', + axis_up='Y', + global_scale=1.0, + bake_space_transform=False, + use_custom_normals=True, + use_image_search=False, + use_alpha_decals=False, + decal_offset=0.0, + use_anim=True, + anim_offset=1.0, + use_subsurf=False, + use_custom_props=True, + use_custom_props_enum_as_string=True, + ignore_leaf_bones=False, + force_connect_children=False, + automatic_bone_orientation=False, + primary_bone_axis='Y', + secondary_bone_axis='X', + use_prepost_rot=True, + colors_type='SRGB'): + + global fbx_elem_nil + fbx_elem_nil = FBXElem('', (), (), ()) + + import os + import time + from bpy_extras.io_utils import axis_conversion + + from . import parse_fbx + from .fbx_utils import RIGHT_HAND_AXES, FBX_FRAMERATES + + start_time_proc = time.process_time() + start_time_sys = time.time() + + perfmon = PerfMon() + perfmon.level_up() + perfmon.step("FBX Import: start importing %s" % filepath) + perfmon.level_up() + + # Detect ASCII files. + + # Typically it's bad practice to fail silently on any error, + # however the file may fail to read for many reasons, + # and this situation is handled later in the code, + # right now we only want to know if the file successfully reads as ascii. + try: + with open(filepath, 'r', encoding="utf-8") as fh: + fh.read(24) + is_ascii = True + except Exception: + is_ascii = False + + if is_ascii: + operator.report({'ERROR'}, tip_("ASCII FBX files are not supported %r") % filepath) + return {'CANCELLED'} + del is_ascii + # End ascii detection. + + try: + elem_root, version = parse_fbx.parse(filepath) + except Exception as e: + import traceback + traceback.print_exc() + + operator.report({'ERROR'}, tip_("Couldn't open file %r (%s)") % (filepath, e)) + return {'CANCELLED'} + + if version < 7100: + operator.report({'ERROR'}, tip_("Version %r unsupported, must be %r or later") % (version, 7100)) + return {'CANCELLED'} + + print("FBX version: %r" % version) + + if bpy.ops.object.mode_set.poll(): + bpy.ops.object.mode_set(mode='OBJECT', toggle=False) + + # deselect all + if bpy.ops.object.select_all.poll(): + bpy.ops.object.select_all(action='DESELECT') + + basedir = os.path.dirname(filepath) + + nodal_material_wrap_map = {} + image_cache = {} + + # Tables: (FBX_byte_id -> [FBX_data, None or Blender_datablock]) + fbx_table_nodes = {} + + if use_alpha_decals: + material_decals = set() + else: + material_decals = None + + scene = context.scene + view_layer = context.view_layer + + # #### Get some info from GlobalSettings. 
+ + perfmon.step("FBX import: Prepare...") + + fbx_settings = elem_find_first(elem_root, b'GlobalSettings') + fbx_settings_props = elem_find_first(fbx_settings, b'Properties70') + if fbx_settings is None or fbx_settings_props is None: + operator.report({'ERROR'}, tip_("No 'GlobalSettings' found in file %r") % filepath) + return {'CANCELLED'} + + # FBX default base unit seems to be the centimeter, while raw Blender Unit is equivalent to the meter... + unit_scale = elem_props_get_number(fbx_settings_props, b'UnitScaleFactor', 1.0) + unit_scale_org = elem_props_get_number(fbx_settings_props, b'OriginalUnitScaleFactor', 1.0) + global_scale *= (unit_scale / units_blender_to_fbx_factor(context.scene)) + # Compute global matrix and scale. + if not use_manual_orientation: + axis_forward = (elem_props_get_integer(fbx_settings_props, b'FrontAxis', 1), + elem_props_get_integer(fbx_settings_props, b'FrontAxisSign', 1)) + axis_up = (elem_props_get_integer(fbx_settings_props, b'UpAxis', 2), + elem_props_get_integer(fbx_settings_props, b'UpAxisSign', 1)) + axis_coord = (elem_props_get_integer(fbx_settings_props, b'CoordAxis', 0), + elem_props_get_integer(fbx_settings_props, b'CoordAxisSign', 1)) + axis_key = (axis_up, axis_forward, axis_coord) + axis_up, axis_forward = {v: k for k, v in RIGHT_HAND_AXES.items()}.get(axis_key, ('Z', 'Y')) + global_matrix = (Matrix.Scale(global_scale, 4) @ + axis_conversion(from_forward=axis_forward, from_up=axis_up).to_4x4()) + + # To cancel out unwanted rotation/scale on nodes. + global_matrix_inv = global_matrix.inverted() + # For transforming mesh normals. + global_matrix_inv_transposed = global_matrix_inv.transposed() + + # Compute bone correction matrix + bone_correction_matrix = None # None means no correction/identity + if not automatic_bone_orientation: + if (primary_bone_axis, secondary_bone_axis) != ('Y', 'X'): + bone_correction_matrix = axis_conversion(from_forward='X', + from_up='Y', + to_forward=secondary_bone_axis, + to_up=primary_bone_axis, + ).to_4x4() + + # Compute framerate settings. + custom_fps = elem_props_get_number(fbx_settings_props, b'CustomFrameRate', 25.0) + time_mode = elem_props_get_enum(fbx_settings_props, b'TimeMode') + real_fps = {eid: val for val, eid in FBX_FRAMERATES[1:]}.get(time_mode, custom_fps) + if real_fps <= 0.0: + real_fps = 25.0 + scene.render.fps = round(real_fps) + scene.render.fps_base = scene.render.fps / real_fps + + # store global settings that need to be accessed during conversion + settings = FBXImportSettings( + operator.report, (axis_up, axis_forward), global_matrix, global_scale, + bake_space_transform, global_matrix_inv, global_matrix_inv_transposed, + use_custom_normals, use_image_search, + use_alpha_decals, decal_offset, + use_anim, anim_offset, + use_subsurf, + use_custom_props, use_custom_props_enum_as_string, + nodal_material_wrap_map, image_cache, + ignore_leaf_bones, force_connect_children, automatic_bone_orientation, bone_correction_matrix, + use_prepost_rot, colors_type, + ) + + # #### And now, the "real" data. 
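+    # NOTE (editorial, worked example for the frame-rate handling above): a file whose TimeMode resolves to
+    # 29.97 fps results in `scene.render.fps = 30` and `scene.render.fps_base = 30 / 29.97` (about 1.001),
+    # so Blender's effective frame rate (fps / fps_base) still matches the imported 29.97 fps.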
+ + perfmon.step("FBX import: Templates...") + + fbx_defs = elem_find_first(elem_root, b'Definitions') # can be None + fbx_nodes = elem_find_first(elem_root, b'Objects') + fbx_connections = elem_find_first(elem_root, b'Connections') + + if fbx_nodes is None: + operator.report({'ERROR'}, tip_("No 'Objects' found in file %r") % filepath) + return {'CANCELLED'} + if fbx_connections is None: + operator.report({'ERROR'}, tip_("No 'Connections' found in file %r") % filepath) + return {'CANCELLED'} + + # ---- + # First load property templates + # Load 'PropertyTemplate' values. + # Key is a tuple, (ObjectType, FBXNodeType) + # eg, (b'Texture', b'KFbxFileTexture') + # (b'Geometry', b'KFbxMesh') + fbx_templates = {} + + def _(): + if fbx_defs is not None: + for fbx_def in fbx_defs.elems: + if fbx_def.id == b'ObjectType': + for fbx_subdef in fbx_def.elems: + if fbx_subdef.id == b'PropertyTemplate': + assert(fbx_def.props_type == b'S') + assert(fbx_subdef.props_type == b'S') + # (b'Texture', b'KFbxFileTexture') - eg. + key = fbx_def.props[0], fbx_subdef.props[0] + fbx_templates[key] = fbx_subdef + _() + del _ + + def fbx_template_get(key): + ret = fbx_templates.get(key, fbx_elem_nil) + if ret is fbx_elem_nil: + # Newest FBX (7.4 and above) use no more 'K' in their type names... + key = (key[0], key[1][1:]) + return fbx_templates.get(key, fbx_elem_nil) + return ret + + perfmon.step("FBX import: Nodes...") + + # ---- + # Build FBX node-table + def _(): + for fbx_obj in fbx_nodes.elems: + # TODO, investigate what other items after first 3 may be + assert(fbx_obj.props_type[:3] == b'LSS') + fbx_uuid = elem_uuid(fbx_obj) + fbx_table_nodes[fbx_uuid] = [fbx_obj, None] + _() + del _ + + # ---- + # Load in the data + # http://download.autodesk.com/us/fbx/20112/FBX_SDK_HELP/index.html?url= + # WS73099cc142f487551fea285e1221e4f9ff8-7fda.htm,topicNumber=d0e6388 + + perfmon.step("FBX import: Connections...") + + fbx_connection_map = {} + fbx_connection_map_reverse = {} + + def _(): + for fbx_link in fbx_connections.elems: + c_type = fbx_link.props[0] + if fbx_link.props_type[1:3] == b'LL': + c_src, c_dst = fbx_link.props[1:3] + fbx_connection_map.setdefault(c_src, []).append((c_dst, fbx_link)) + fbx_connection_map_reverse.setdefault(c_dst, []).append((c_src, fbx_link)) + _() + del _ + + perfmon.step("FBX import: Meshes...") + + # ---- + # Load mesh data + def _(): + fbx_tmpl = fbx_template_get((b'Geometry', b'KFbxMesh')) + + for fbx_uuid, fbx_item in fbx_table_nodes.items(): + fbx_obj, blen_data = fbx_item + if fbx_obj.id != b'Geometry': + continue + if fbx_obj.props[-1] == b'Mesh': + assert(blen_data is None) + fbx_item[1] = blen_read_geom(fbx_tmpl, fbx_obj, settings) + _() + del _ + + perfmon.step("FBX import: Materials & Textures...") + + # ---- + # Load material data + def _(): + fbx_tmpl = fbx_template_get((b'Material', b'KFbxSurfacePhong')) + # b'KFbxSurfaceLambert' + + for fbx_uuid, fbx_item in fbx_table_nodes.items(): + fbx_obj, blen_data = fbx_item + if fbx_obj.id != b'Material': + continue + assert(blen_data is None) + fbx_item[1] = blen_read_material(fbx_tmpl, fbx_obj, settings) + _() + del _ + + # ---- + # Load image & textures data + def _(): + fbx_tmpl_tex = fbx_template_get((b'Texture', b'KFbxFileTexture')) + fbx_tmpl_img = fbx_template_get((b'Video', b'KFbxVideo')) + + # Important to run all 'Video' ones first, embedded images are stored in those nodes. 
+ # XXX Note we simplify things here, assuming both matching Video and Texture will use same file path, + # this may be a bit weak, if issue arise we'll fallback to plain connection stuff... + for fbx_uuid, fbx_item in fbx_table_nodes.items(): + fbx_obj, blen_data = fbx_item + if fbx_obj.id != b'Video': + continue + fbx_item[1] = blen_read_texture_image(fbx_tmpl_img, fbx_obj, basedir, settings) + for fbx_uuid, fbx_item in fbx_table_nodes.items(): + fbx_obj, blen_data = fbx_item + if fbx_obj.id != b'Texture': + continue + fbx_item[1] = blen_read_texture_image(fbx_tmpl_tex, fbx_obj, basedir, settings) + _() + del _ + + perfmon.step("FBX import: Cameras & Lamps...") + + # ---- + # Load camera data + def _(): + fbx_tmpl = fbx_template_get((b'NodeAttribute', b'KFbxCamera')) + + for fbx_uuid, fbx_item in fbx_table_nodes.items(): + fbx_obj, blen_data = fbx_item + if fbx_obj.id != b'NodeAttribute': + continue + if fbx_obj.props[-1] == b'Camera': + assert(blen_data is None) + fbx_item[1] = blen_read_camera(fbx_tmpl, fbx_obj, settings) + _() + del _ + + # ---- + # Load lamp data + def _(): + fbx_tmpl = fbx_template_get((b'NodeAttribute', b'KFbxLight')) + + for fbx_uuid, fbx_item in fbx_table_nodes.items(): + fbx_obj, blen_data = fbx_item + if fbx_obj.id != b'NodeAttribute': + continue + if fbx_obj.props[-1] == b'Light': + assert(blen_data is None) + fbx_item[1] = blen_read_light(fbx_tmpl, fbx_obj, settings) + _() + del _ + + # ---- + # Connections + def connection_filter_ex(fbx_uuid, fbx_id, dct): + return [(c_found[0], c_found[1], c_type) + for (c_uuid, c_type) in dct.get(fbx_uuid, ()) + # 0 is used for the root node, which isn't in fbx_table_nodes + for c_found in (() if c_uuid == 0 else (fbx_table_nodes.get(c_uuid, (None, None)),)) + if (fbx_id is None) or (c_found[0] and c_found[0].id == fbx_id)] + + def connection_filter_forward(fbx_uuid, fbx_id): + return connection_filter_ex(fbx_uuid, fbx_id, fbx_connection_map) + + def connection_filter_reverse(fbx_uuid, fbx_id): + return connection_filter_ex(fbx_uuid, fbx_id, fbx_connection_map_reverse) + + perfmon.step("FBX import: Objects & Armatures...") + + # -- temporary helper hierarchy to build armatures and objects from + # lookup from uuid to helper node. Used to build parent-child relations and later to look up animated nodes. + fbx_helper_nodes = {} + + def _(): + # We build an intermediate hierarchy used to: + # - Calculate and store bone orientation correction matrices. The same matrices will be reused for animation. + # - Find/insert armature nodes. + # - Filter leaf bones. + + # create scene root + fbx_helper_nodes[0] = root_helper = FbxImportHelperNode(None, None, None, False) + root_helper.is_root = True + + # add fbx nodes + fbx_tmpl = fbx_template_get((b'Model', b'KFbxNode')) + for a_uuid, a_item in fbx_table_nodes.items(): + fbx_obj, bl_data = a_item + if fbx_obj is None or fbx_obj.id != b'Model': + continue + + fbx_props = (elem_find_first(fbx_obj, b'Properties70'), + elem_find_first(fbx_tmpl, b'Properties70', fbx_elem_nil)) + + transform_data = blen_read_object_transform_preprocess(fbx_props, fbx_obj, Matrix(), use_prepost_rot) + # Note: 'Root' "bones" are handled as (armature) objects. + # Note: See T46912 for first FBX file I ever saw with 'Limb' bones - thought those were totally deprecated. 
+ is_bone = fbx_obj.props[2] in {b'LimbNode', b'Limb'} + fbx_helper_nodes[a_uuid] = FbxImportHelperNode(fbx_obj, bl_data, transform_data, is_bone) + + # add parent-child relations and add blender data to the node + for fbx_link in fbx_connections.elems: + if fbx_link.props[0] != b'OO': + continue + if fbx_link.props_type[1:3] == b'LL': + c_src, c_dst = fbx_link.props[1:3] + parent = fbx_helper_nodes.get(c_dst) + if parent is None: + continue + + child = fbx_helper_nodes.get(c_src) + if child is None: + # add blender data (meshes, lights, cameras, etc.) to a helper node + fbx_sdata, bl_data = p_item = fbx_table_nodes.get(c_src, (None, None)) + if fbx_sdata is None: + continue + if fbx_sdata.id not in {b'Geometry', b'NodeAttribute'}: + continue + parent.bl_data = bl_data + if bl_data is None: + # If there's no bl_data, add the fbx_sdata so that it can be read when creating the bl_data/bone + parent.fbx_data_elem = fbx_sdata + else: + # set parent + child.parent = parent + + # find armatures (either an empty below a bone or a new node inserted at the bone + root_helper.find_armatures() + + # mark nodes that have bone children + root_helper.find_bone_children() + + # mark nodes that need a bone to attach child-bones to + root_helper.find_fake_bones() + + # mark leaf nodes that are only required to mark the end of their parent bone + if settings.ignore_leaf_bones: + root_helper.mark_leaf_bones() + + # What a mess! Some bones have several BindPoses, some have none, clusters contain a bind pose as well, + # and you can have several clusters per bone! + # Maybe some conversion can be applied to put them all into the same frame of reference? + + # get the bind pose from pose elements + for a_uuid, a_item in fbx_table_nodes.items(): + fbx_obj, bl_data = a_item + if fbx_obj is None: + continue + if fbx_obj.id != b'Pose': + continue + if fbx_obj.props[2] != b'BindPose': + continue + for fbx_pose_node in fbx_obj.elems: + if fbx_pose_node.id != b'PoseNode': + continue + node_elem = elem_find_first(fbx_pose_node, b'Node') + node = elem_uuid(node_elem) + matrix_elem = elem_find_first(fbx_pose_node, b'Matrix') + matrix = array_to_matrix4(matrix_elem.props[0]) if matrix_elem else None + bone = fbx_helper_nodes.get(node) + if bone and matrix: + # Store the matrix in the helper node. + # There may be several bind pose matrices for the same node, but in tests they seem to be identical. 
+ bone.bind_matrix = matrix # global space + + # get clusters and bind pose + for helper_uuid, helper_node in fbx_helper_nodes.items(): + if not helper_node.is_bone: + continue + for cluster_uuid, cluster_link in fbx_connection_map.get(helper_uuid, ()): + if cluster_link.props[0] != b'OO': + continue + fbx_cluster, _ = fbx_table_nodes.get(cluster_uuid, (None, None)) + if fbx_cluster is None or fbx_cluster.id != b'Deformer' or fbx_cluster.props[2] != b'Cluster': + continue + + # Get the bind pose from the cluster: + tx_mesh_elem = elem_find_first(fbx_cluster, b'Transform', default=None) + tx_mesh = array_to_matrix4(tx_mesh_elem.props[0]) if tx_mesh_elem else Matrix() + + tx_bone_elem = elem_find_first(fbx_cluster, b'TransformLink', default=None) + tx_bone = array_to_matrix4(tx_bone_elem.props[0]) if tx_bone_elem else None + + tx_arm_elem = elem_find_first(fbx_cluster, b'TransformAssociateModel', default=None) + tx_arm = array_to_matrix4(tx_arm_elem.props[0]) if tx_arm_elem else None + + mesh_matrix = tx_mesh + armature_matrix = tx_arm + + if tx_bone: + mesh_matrix = tx_bone @ mesh_matrix + helper_node.bind_matrix = tx_bone # overwrite the bind matrix + + # Get the meshes driven by this cluster: (Shouldn't that be only one?) + meshes = set() + for skin_uuid, skin_link in fbx_connection_map.get(cluster_uuid): + if skin_link.props[0] != b'OO': + continue + fbx_skin, _ = fbx_table_nodes.get(skin_uuid, (None, None)) + if fbx_skin is None or fbx_skin.id != b'Deformer' or fbx_skin.props[2] != b'Skin': + continue + skin_connection = fbx_connection_map.get(skin_uuid) + if skin_connection is None: + continue + for mesh_uuid, mesh_link in skin_connection: + if mesh_link.props[0] != b'OO': + continue + fbx_mesh, _ = fbx_table_nodes.get(mesh_uuid, (None, None)) + if fbx_mesh is None or fbx_mesh.id != b'Geometry' or fbx_mesh.props[2] != b'Mesh': + continue + for object_uuid, object_link in fbx_connection_map.get(mesh_uuid): + if object_link.props[0] != b'OO': + continue + mesh_node = fbx_helper_nodes[object_uuid] + if mesh_node: + # ---- + # If we get a valid mesh matrix (in bone space), store armature and + # mesh global matrices, we need them to compute mesh's matrix_parent_inverse + # when actually binding them via the modifier. + # Note we assume all bones were bound with the same mesh/armature (global) matrix, + # we do not support otherwise in Blender anyway! + mesh_node.armature_setup[helper_node.armature] = (mesh_matrix, armature_matrix) + meshes.add(mesh_node) + + helper_node.clusters.append((fbx_cluster, meshes)) + + # convert bind poses from global space into local space + root_helper.make_bind_pose_local() + + # collect armature meshes + root_helper.collect_armature_meshes() + + # find the correction matrices to align FBX objects with their Blender equivalent + root_helper.find_correction_matrix(settings) + + # build the Object/Armature/Bone hierarchy + root_helper.build_hierarchy(fbx_tmpl, settings, scene, view_layer) + + # Link the Object/Armature/Bone hierarchy + root_helper.link_hierarchy(fbx_tmpl, settings, scene) + + # root_helper.print_info(0) + _() + del _ + + perfmon.step("FBX import: ShapeKeys...") + + # We can handle shapes. + blend_shape_channels = {} # We do not need Shapes themselves, but keyblocks, for anim. + + def _(): + fbx_tmpl = fbx_template_get((b'Geometry', b'KFbxShape')) + + # - FBX | - Blender equivalent + # Mesh | `Mesh` + # BlendShape | `Key` + # BlendShapeChannel | `ShapeKey`, but without its `.data`. 
+ # Shape | `ShapeKey.data`, but also includes normals and the values are relative to the base Mesh + # | instead of being absolute. The data is sparse, so each Shape has an "Indexes" array too. + # | FBX 2020 introduced 'Modern Style' Shapes that also support tangents, binormals, vertex + # | colors and UVs, and can be absolute values instead of relative, but 'Modern Style' Shapes + # | are not currently supported. + # + # The FBX connections between Shapes and Meshes form multiple many-many relationships: + # Mesh >-< BlendShape >-< BlendShapeChannel >-< Shape + # In practice, the relationships are almost never many-many and are more typically 1-many or 1-1: + # Mesh --- BlendShape: + # usually 1-1 and the FBX SDK might enforce that each BlendShape is connected to at most one Mesh. + # BlendShape --< BlendShapeChannel: + # usually 1-many. + # BlendShapeChannel --- or uncommonly --< Shape: + # usually 1-1, but 1-many is a documented feature. + + def connections_gen(c_src_uuid, fbx_id, fbx_type): + """Helper to reduce duplicate code""" + # Rarely, an imported FBX file will have duplicate connections. For Shape Key related connections, FBX + # appears to ignore the duplicates, or overwrite the existing duplicates such that the end result is the + # same as ignoring them, so keep a set of the seen connections and ignore any duplicates. + seen_connections = set() + for c_dst_uuid, ctype in fbx_connection_map.get(c_src_uuid, ()): + if ctype.props[0] != b'OO': + # 'Object-Object' connections only. + continue + fbx_data, bl_data = fbx_table_nodes.get(c_dst_uuid, (None, None)) + if fbx_data is None or fbx_data.id != fbx_id or fbx_data.props[2] != fbx_type: + # Either `c_dst_uuid` doesn't exist, or it has a different id or type. + continue + connection_key = (c_src_uuid, c_dst_uuid) + if connection_key in seen_connections: + # The connection is a duplicate, skip it. + continue + seen_connections.add(connection_key) + yield c_dst_uuid, fbx_data, bl_data + + # XXX - Multiple Shapes can be assigned to a single BlendShapeChannel to create a progressive blend between the + # base mesh and the assigned Shapes, with the percentage at which each Shape is fully blended being stored + # in the BlendShapeChannel's FullWeights array. This is also known as 'in-between shapes'. + # We don't have any support for in-between shapes currently. + blend_shape_channel_to_shapes = {} + mesh_to_shapes = {} + for s_uuid, (fbx_sdata, _bl_sdata) in fbx_table_nodes.items(): + if fbx_sdata is None or fbx_sdata.id != b'Geometry' or fbx_sdata.props[2] != b'Shape': + continue + + # shape -> blendshapechannel -> blendshape -> mesh. + for bc_uuid, fbx_bcdata, _bl_bcdata in connections_gen(s_uuid, b'Deformer', b'BlendShapeChannel'): + # Track the Shapes connected to each BlendShapeChannel. + shapes_assigned_to_channel = blend_shape_channel_to_shapes.setdefault(bc_uuid, []) + shapes_assigned_to_channel.append(s_uuid) + for bs_uuid, _fbx_bsdata, _bl_bsdata in connections_gen(bc_uuid, b'Deformer', b'BlendShape'): + for m_uuid, _fbx_mdata, bl_mdata in connections_gen(bs_uuid, b'Geometry', b'Mesh'): + # Blenmeshes are assumed already created at that time! + assert(isinstance(bl_mdata, bpy.types.Mesh)) + # Group shapes by mesh so that each mesh only needs to be processed once for all of its shape + # keys. + if bl_mdata not in mesh_to_shapes: + # And we have to find all objects using this mesh! 
+ objects = [] + for o_uuid, o_ctype in fbx_connection_map.get(m_uuid, ()): + if o_ctype.props[0] != b'OO': + continue + node = fbx_helper_nodes[o_uuid] + if node: + objects.append(node) + shapes_list = [] + mesh_to_shapes[bl_mdata] = (objects, shapes_list) + else: + shapes_list = mesh_to_shapes[bl_mdata][1] + # Only the number of shapes assigned to each BlendShapeChannel needs to be passed through to + # `blen_read_shapes`, but that number isn't known until all the connections have been + # iterated, so pass the `shapes_assigned_to_channel` list instead. + shapes_list.append((bc_uuid, fbx_sdata, fbx_bcdata, shapes_assigned_to_channel)) + # BlendShape deformers are only here to connect BlendShapeChannels to meshes, nothing else to do. + + # Iterate through each mesh and create its shape keys + for bl_mdata, (objects, shapes) in mesh_to_shapes.items(): + for bc_uuid, keyblocks in blen_read_shapes(fbx_tmpl, shapes, objects, bl_mdata, scene).items(): + # keyblocks is a list of tuples (mesh, keyblock) matching that shape/blendshapechannel, for animation. + blend_shape_channels.setdefault(bc_uuid, []).extend(keyblocks) + _() + del _ + + if settings.use_subsurf: + perfmon.step("FBX import: Subdivision surfaces") + + # Look through connections for subsurf in meshes and add it to the parent object + def _(): + for fbx_link in fbx_connections.elems: + if fbx_link.props[0] != b'OO': + continue + if fbx_link.props_type[1:3] == b'LL': + c_src, c_dst = fbx_link.props[1:3] + parent = fbx_helper_nodes.get(c_dst) + if parent is None: + continue + + child = fbx_helper_nodes.get(c_src) + if child is None: + fbx_sdata, bl_data = fbx_table_nodes.get(c_src, (None, None)) + if fbx_sdata.id != b'Geometry': + continue + + preview_levels = elem_prop_first(elem_find_first(fbx_sdata, b'PreviewDivisionLevels')) + render_levels = elem_prop_first(elem_find_first(fbx_sdata, b'RenderDivisionLevels')) + if isinstance(preview_levels, int) and isinstance(render_levels, int): + mod = parent.bl_obj.modifiers.new('subsurf', 'SUBSURF') + mod.levels = preview_levels + mod.render_levels = render_levels + boundary_rule = elem_prop_first(elem_find_first(fbx_sdata, b'BoundaryRule'), default=1) + if boundary_rule == 1: + mod.boundary_smooth = "PRESERVE_CORNERS" + else: + mod.boundary_smooth = "ALL" + + _() + del _ + + if use_anim: + perfmon.step("FBX import: Animations...") + + # Animation! + def _(): + # Find the number of "ktimes" per second for this file. + # Start with the default for this FBX version. + fbx_ktime = FBX_KTIME_V8 if version >= 8000 else FBX_KTIME_V7 + # Try to find the value of the nested elem_root->'FBXHeaderExtension'->'OtherFlags'->'TCDefinition' element + # and look up the "ktimes" per second for its value. + if header := elem_find_first(elem_root, b'FBXHeaderExtension'): + # The header version that added TCDefinition support is 1004. + if elem_prop_first(elem_find_first(header, b'FBXHeaderVersion'), default=0) >= 1004: + if other_flags := elem_find_first(header, b'OtherFlags'): + if timecode_definition := elem_find_first(other_flags, b'TCDefinition'): + timecode_definition_value = elem_prop_first(timecode_definition) + # If its value is unknown or missing, default to FBX_KTIME_V8. + fbx_ktime = FBX_TIMECODE_DEFINITION_TO_KTIME_PER_SECOND.get(timecode_definition_value, + FBX_KTIME_V8) + + fbx_tmpl_astack = fbx_template_get((b'AnimationStack', b'FbxAnimStack')) + fbx_tmpl_alayer = fbx_template_get((b'AnimationLayer', b'FbxAnimLayer')) + stacks = {} + + # AnimationStacks. 
+ for as_uuid, fbx_asitem in fbx_table_nodes.items(): + fbx_asdata, _blen_data = fbx_asitem + if fbx_asdata.id != b'AnimationStack' or fbx_asdata.props[2] != b'': + continue + stacks[as_uuid] = (fbx_asitem, {}) + + # AnimationLayers + # (mixing is completely ignored for now, each layer results in an independent set of actions). + def get_astacks_from_alayer(al_uuid): + for as_uuid, as_ctype in fbx_connection_map.get(al_uuid, ()): + if as_ctype.props[0] != b'OO': + continue + fbx_asdata, _bl_asdata = fbx_table_nodes.get(as_uuid, (None, None)) + if (fbx_asdata is None or fbx_asdata.id != b'AnimationStack' or + fbx_asdata.props[2] != b'' or as_uuid not in stacks): + continue + yield as_uuid + for al_uuid, fbx_alitem in fbx_table_nodes.items(): + fbx_aldata, _blen_data = fbx_alitem + if fbx_aldata.id != b'AnimationLayer' or fbx_aldata.props[2] != b'': + continue + for as_uuid in get_astacks_from_alayer(al_uuid): + _fbx_asitem, alayers = stacks[as_uuid] + alayers[al_uuid] = (fbx_alitem, {}) + + # AnimationCurveNodes (also the ones linked to actual animated data!). + curvenodes = {} + for acn_uuid, fbx_acnitem in fbx_table_nodes.items(): + fbx_acndata, _blen_data = fbx_acnitem + if fbx_acndata.id != b'AnimationCurveNode' or fbx_acndata.props[2] != b'': + continue + cnode = curvenodes[acn_uuid] = {} + items = [] + for n_uuid, n_ctype in fbx_connection_map.get(acn_uuid, ()): + if n_ctype.props[0] != b'OP': + continue + lnk_prop = n_ctype.props[3] + if lnk_prop in {b'Lcl Translation', b'Lcl Rotation', b'Lcl Scaling'}: + # n_uuid can (????) be linked to root '0' node, instead of a mere object node... See T41712. + ob = fbx_helper_nodes.get(n_uuid, None) + if ob is None or ob.is_root: + continue + items.append((ob, lnk_prop)) + elif lnk_prop == b'DeformPercent': # Shape keys. + keyblocks = blend_shape_channels.get(n_uuid, None) + if keyblocks is None: + continue + items += [(kb, lnk_prop) for kb in keyblocks] + elif lnk_prop == b'FocalLength': # Camera lens. + from bpy.types import Camera + fbx_item = fbx_table_nodes.get(n_uuid, None) + if fbx_item is None or not isinstance(fbx_item[1], Camera): + continue + cam = fbx_item[1] + items.append((cam, lnk_prop)) + elif lnk_prop == b'FocusDistance': # Camera focus. + from bpy.types import Camera + fbx_item = fbx_table_nodes.get(n_uuid, None) + if fbx_item is None or not isinstance(fbx_item[1], Camera): + continue + cam = fbx_item[1] + items.append((cam, lnk_prop)) + elif lnk_prop == b'DiffuseColor': + from bpy.types import Material + fbx_item = fbx_table_nodes.get(n_uuid, None) + if fbx_item is None or not isinstance(fbx_item[1], Material): + continue + mat = fbx_item[1] + items.append((mat, lnk_prop)) + print("WARNING! Importing material's animation is not supported for Nodal materials...") + for al_uuid, al_ctype in fbx_connection_map.get(acn_uuid, ()): + if al_ctype.props[0] != b'OO': + continue + fbx_aldata, _blen_aldata = fbx_alitem = fbx_table_nodes.get(al_uuid, (None, None)) + if fbx_aldata is None or fbx_aldata.id != b'AnimationLayer' or fbx_aldata.props[2] != b'': + continue + for as_uuid in get_astacks_from_alayer(al_uuid): + _fbx_alitem, anim_items = stacks[as_uuid][1][al_uuid] + assert(_fbx_alitem == fbx_alitem) + for item, item_prop in items: + # No need to keep curvenode FBX data here, contains nothing useful for us. + anim_items.setdefault(item, {})[acn_uuid] = (cnode, item_prop) + + # AnimationCurves (real animation data). 
+ for ac_uuid, fbx_acitem in fbx_table_nodes.items(): + fbx_acdata, _blen_data = fbx_acitem + if fbx_acdata.id != b'AnimationCurve' or fbx_acdata.props[2] != b'': + continue + for acn_uuid, acn_ctype in fbx_connection_map.get(ac_uuid, ()): + if acn_ctype.props[0] != b'OP': + continue + fbx_acndata, _bl_acndata = fbx_table_nodes.get(acn_uuid, (None, None)) + if (fbx_acndata is None or fbx_acndata.id != b'AnimationCurveNode' or + fbx_acndata.props[2] != b'' or acn_uuid not in curvenodes): + continue + # Note this is an infamous simplification of the compound props stuff, + # seems to be standard naming but we'll probably have to be smarter to handle more exotic files? + channel = { + b'd|X': 0, b'd|Y': 1, b'd|Z': 2, + b'd|DeformPercent': 0, + b'd|FocalLength': 0, + b'd|FocusDistance': 0 + }.get(acn_ctype.props[3], None) + if channel is None: + continue + curvenodes[acn_uuid][ac_uuid] = (fbx_acitem, channel) + + # And now that we have sorted all this, apply animations! + blen_read_animations(fbx_tmpl_astack, fbx_tmpl_alayer, stacks, scene, settings.anim_offset, global_scale, + fbx_ktime) + + _() + del _ + + perfmon.step("FBX import: Assign materials...") + + def _(): + # link Material's to Geometry (via Model's) + processed_meshes = set() + for helper_uuid, helper_node in fbx_helper_nodes.items(): + obj = helper_node.bl_obj + if not obj or obj.type != 'MESH': + continue + + # Get the Mesh corresponding to the Geometry used by this Model. + mesh = obj.data + processed_meshes.add(mesh) + + # Get the Materials from the Model's connections. + material_connections = connection_filter_reverse(helper_uuid, b'Material') + if not material_connections: + continue + + mesh_mats = mesh.materials + num_mesh_mats = len(mesh_mats) + + if num_mesh_mats == 0: + # This is the first (or only) model to use this Geometry. This is the most common case when importing. + # All the Materials can trivially be appended to the Mesh's Materials. + mats_to_append = material_connections + mats_to_compare = () + elif num_mesh_mats == len(material_connections): + # Another Model uses the same Geometry and has already appended its Materials to the Mesh. This is the + # second most common case when importing. + # It's also possible that a Model could share the same Geometry and have the same number of Materials, + # but have different Materials, though this is less common. + # The Model Materials will need to be compared with the Mesh Materials at the same indices to check if + # they are different. + mats_to_append = () + mats_to_compare = material_connections + else: + # Under the assumption that only used Materials are connected to the Model, the number of Materials of + # each Model using a specific Geometry should be the same, otherwise the Material Indices of the + # Geometry will be out-of-bounds of the Materials of at least one of the Models using that Geometry. + # We wouldn't expect this case to happen, but there's nothing to say it can't. + # We'll handle a differing number of Materials by appending any extra Materials and comparing the rest. 
+ mats_to_append = material_connections[num_mesh_mats:] + mats_to_compare = material_connections[:num_mesh_mats] + + for _fbx_lnk_material, material, _fbx_lnk_material_type in mats_to_append: + mesh_mats.append(material) + + mats_to_compare_and_slots = zip(mats_to_compare, obj.material_slots) + for (_fbx_lnk_material, material, _fbx_lnk_material_type), mat_slot in mats_to_compare_and_slots: + if material != mat_slot.material: + # Material Slots default to being linked to the Mesh, so a previously processed Object is also using + # this Mesh, but the Mesh uses a different Material for this Material Slot. + # To have a different Material for this Material Slot on this Object only, the Material Slot must be + # linked to the Object rather than the Mesh. + # TODO: add an option to link all materials to objects in Blender instead? + mat_slot.link = 'OBJECT' + mat_slot.material = material + + # We have to validate mesh polygons' ma_idx, see #41015! + # Some FBX seem to have an extra 'default' material which is not defined in FBX file. + for mesh in processed_meshes: + if mesh.validate_material_indices(): + print("WARNING: mesh '%s' had invalid material indices, those were reset to first material" % mesh.name) + _() + del _ + + perfmon.step("FBX import: Assign textures...") + + def _(): + material_images = {} + + fbx_tmpl = fbx_template_get((b'Material', b'KFbxSurfacePhong')) + # b'KFbxSurfaceLambert' + + def texture_mapping_set(fbx_obj, node_texture): + assert(fbx_obj.id == b'Texture') + + fbx_props = (elem_find_first(fbx_obj, b'Properties70'), + elem_find_first(fbx_tmpl, b'Properties70', fbx_elem_nil)) + loc = elem_props_get_vector_3d(fbx_props, b'Translation', (0.0, 0.0, 0.0)) + rot = tuple(-r for r in elem_props_get_vector_3d(fbx_props, b'Rotation', (0.0, 0.0, 0.0))) + scale = tuple(((1.0 / s) if s != 0.0 else 1.0) + for s in elem_props_get_vector_3d(fbx_props, b'Scaling', (1.0, 1.0, 1.0))) + clamp = (bool(elem_props_get_enum(fbx_props, b'WrapModeU', 0)) or + bool(elem_props_get_enum(fbx_props, b'WrapModeV', 0))) + + if (loc == (0.0, 0.0, 0.0) and + rot == (0.0, 0.0, 0.0) and + scale == (1.0, 1.0, 1.0) and + clamp == False): + return + + node_texture.translation = loc + node_texture.rotation = rot + node_texture.scale = scale + if clamp: + node_texture.extension = 'EXTEND' + + for fbx_uuid, fbx_item in fbx_table_nodes.items(): + fbx_obj, blen_data = fbx_item + if fbx_obj.id != b'Material': + continue + + material = fbx_table_nodes.get(fbx_uuid, (None, None))[1] + for (fbx_lnk, + image, + fbx_lnk_type) in connection_filter_reverse(fbx_uuid, b'Texture'): + + if fbx_lnk_type.props[0] == b'OP': + lnk_type = fbx_lnk_type.props[3] + + ma_wrap = nodal_material_wrap_map[material] + + if lnk_type in {b'DiffuseColor', b'3dsMax|maps|texmap_diffuse'}: + ma_wrap.base_color_texture.image = image + texture_mapping_set(fbx_lnk, ma_wrap.base_color_texture) + elif lnk_type in {b'SpecularColor', b'SpecularFactor'}: + # Intensity actually, not color... + ma_wrap.specular_texture.image = image + texture_mapping_set(fbx_lnk, ma_wrap.specular_texture) + elif lnk_type in {b'ReflectionColor', b'ReflectionFactor', b'3dsMax|maps|texmap_reflection'}: + # Intensity actually, not color... 
+ ma_wrap.metallic_texture.image = image + texture_mapping_set(fbx_lnk, ma_wrap.metallic_texture) + elif lnk_type in {b'TransparentColor', b'TransparencyFactor'}: + ma_wrap.alpha_texture.image = image + texture_mapping_set(fbx_lnk, ma_wrap.alpha_texture) + if use_alpha_decals: + material_decals.add(material) + elif lnk_type == b'ShininessExponent': + # That is probably reversed compared to expected results? TODO... + ma_wrap.roughness_texture.image = image + texture_mapping_set(fbx_lnk, ma_wrap.roughness_texture) + # XXX, applications abuse bump! + elif lnk_type in {b'NormalMap', b'Bump', b'3dsMax|maps|texmap_bump'}: + ma_wrap.normalmap_texture.image = image + texture_mapping_set(fbx_lnk, ma_wrap.normalmap_texture) + """ + elif lnk_type == b'Bump': + # TODO displacement... + """ + elif lnk_type in {b'EmissiveColor'}: + ma_wrap.emission_color_texture.image = image + texture_mapping_set(fbx_lnk, ma_wrap.emission_color_texture) + elif lnk_type in {b'EmissiveFactor'}: + ma_wrap.emission_strength_texture.image = image + texture_mapping_set(fbx_lnk, ma_wrap.emission_strength_texture) + else: + print("WARNING: material link %r ignored" % lnk_type) + + material_images.setdefault(material, {})[lnk_type] = image + + # Check if the diffuse image has an alpha channel, + # if so, use the alpha channel. + + # Note: this could be made optional since images may have alpha but be entirely opaque + for fbx_uuid, fbx_item in fbx_table_nodes.items(): + fbx_obj, blen_data = fbx_item + if fbx_obj.id != b'Material': + continue + material = fbx_table_nodes.get(fbx_uuid, (None, None))[1] + image = material_images.get(material, {}).get(b'DiffuseColor', None) + # do we have alpha? + if image and image.depth == 32: + if use_alpha_decals: + material_decals.add(material) + + ma_wrap = nodal_material_wrap_map[material] + ma_wrap.alpha_texture.use_alpha = True + ma_wrap.alpha_texture.copy_from(ma_wrap.base_color_texture) + + # Propagate mapping from diffuse to all other channels which have none defined. + # XXX Commenting for now, I do not really understand the logic here, why should diffuse mapping + # be applied to all others if not defined for them??? 
+ # ~ ma_wrap = nodal_material_wrap_map[material] + # ~ ma_wrap.mapping_set_from_diffuse() + + _() + del _ + + perfmon.step("FBX import: Cycles z-offset workaround...") + + def _(): + # Annoying workaround for cycles having no z-offset + if material_decals and use_alpha_decals: + for fbx_uuid, fbx_item in fbx_table_nodes.items(): + fbx_obj, blen_data = fbx_item + if fbx_obj.id != b'Geometry': + continue + if fbx_obj.props[-1] == b'Mesh': + mesh = fbx_item[1] + + num_verts = len(mesh.vertices) + if decal_offset != 0.0 and num_verts > 0: + for material in mesh.materials: + if material in material_decals: + blen_norm_dtype = np.single + vcos = MESH_ATTRIBUTE_POSITION.to_ndarray(mesh.attributes) + vnorm = np.empty(num_verts * 3, dtype=blen_norm_dtype) + mesh.vertex_normals.foreach_get("vector", vnorm) + + vcos += vnorm * decal_offset + + MESH_ATTRIBUTE_POSITION.foreach_set(mesh.attributes, vcos) + break + + for obj in (obj for obj in bpy.data.objects if obj.data == mesh): + obj.visible_shadow = False + _() + del _ + + perfmon.level_down() + + perfmon.level_down("Import finished.") + return {'FINISHED'} diff --git a/scripts/addons_core/io_scene_fbx/json2fbx.py b/scripts/addons_core/io_scene_fbx/json2fbx.py new file mode 100755 index 00000000000..8cbea51aee1 --- /dev/null +++ b/scripts/addons_core/io_scene_fbx/json2fbx.py @@ -0,0 +1,165 @@ +#!/usr/bin/env python3 +# SPDX-FileCopyrightText: 2014-2023 Blender Foundation +# +# SPDX-License-Identifier: GPL-2.0-or-later + +""" +Usage +===== + + json2fbx [FILES]... + +This script will write a binary FBX file for each JSON argument given. + + +Input +====== + +The JSON data is formatted into a list of nested lists of 4 items: + + ``[id, [data, ...], "data_types", [subtree, ...]]`` + +Where each list may be empty, and the items in +the subtree are formatted the same way. + +data_types is a string, aligned with data that spesifies a type +for each property. + +The types are as follows: + +* 'Z': - INT8 +* 'Y': - INT16 +* 'B': - BOOL +* 'C': - CHAR +* 'I': - INT32 +* 'F': - FLOAT32 +* 'D': - FLOAT64 +* 'L': - INT64 +* 'R': - BYTES +* 'S': - STRING +* 'f': - FLOAT32_ARRAY +* 'i': - INT32_ARRAY +* 'd': - FLOAT64_ARRAY +* 'l': - INT64_ARRAY +* 'b': - BOOL ARRAY +* 'c': - BYTE ARRAY + +Note that key:value pairs aren't used since the id's are not +ensured to be unique. 
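As an illustration only (the values below are made up, not taken from a real file), a minimal
document in this format could look like:

    [
        ["FBXHeaderExtension", [], "", [
            ["FBXVersion", [7400], "I", []]
        ]],
        ["Objects", [], "", []]
    ]

which json2fbx encodes back into a binary FBX file, picking up 7400 as the version to write.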
+""" + + +def elem_empty(elem, name): + import encode_bin + sub_elem = encode_bin.FBXElem(name) + if elem is not None: + elem.elems.append(sub_elem) + return sub_elem + + +def parse_json_rec(fbx_root, json_node): + name, data, data_types, children = json_node + ver = 0 + + assert(len(data_types) == len(data)) + + e = elem_empty(fbx_root, name.encode()) + for d, dt in zip(data, data_types): + if dt == "B": + e.add_bool(d) + elif dt == "C": + d = eval('b"""' + d + '"""') + e.add_char(d) + elif dt == "Z": + e.add_int8(d) + elif dt == "Y": + e.add_int16(d) + elif dt == "I": + e.add_int32(d) + elif dt == "L": + e.add_int64(d) + elif dt == "F": + e.add_float32(d) + elif dt == "D": + e.add_float64(d) + elif dt == "R": + d = eval('b"""' + d + '"""') + e.add_bytes(d) + elif dt == "S": + d = d.encode().replace(b"::", b"\x00\x01") + e.add_string(d) + elif dt == "i": + e.add_int32_array(d) + elif dt == "l": + e.add_int64_array(d) + elif dt == "f": + e.add_float32_array(d) + elif dt == "d": + e.add_float64_array(d) + elif dt == "b": + e.add_bool_array(d) + elif dt == "c": + e.add_byte_array(d) + + if name == "FBXVersion": + assert(data_types == "I") + ver = int(data[0]) + + for child in children: + _ver = parse_json_rec(e, child) + if _ver: + ver = _ver + + return ver + + +def parse_json(json_root): + root = elem_empty(None, b"") + ver = 0 + + for n in json_root: + _ver = parse_json_rec(root, n) + if _ver: + ver = _ver + + return root, ver + + +def json2fbx(fn): + import os + import json + + import encode_bin + + fn_fbx = "%s.fbx" % os.path.splitext(fn)[0] + print("Writing: %r " % fn_fbx, end="") + with open(fn) as f_json: + json_root = json.load(f_json) + with encode_bin.FBXElem.enable_multithreading_cm(): + fbx_root, fbx_version = parse_json(json_root) + print("(Version %d) ..." % fbx_version) + encode_bin.write(fn_fbx, fbx_root, fbx_version) + + +# ---------------------------------------------------------------------------- +# Command Line + +def main(): + import sys + + if "--help" in sys.argv: + print(__doc__) + return + + for arg in sys.argv[1:]: + try: + json2fbx(arg) + except: + print("Failed to convert %r, error:" % arg) + + import traceback + traceback.print_exc() + + +if __name__ == "__main__": + main() diff --git a/scripts/addons_core/io_scene_fbx/parse_fbx.py b/scripts/addons_core/io_scene_fbx/parse_fbx.py new file mode 100644 index 00000000000..948f5385d3c --- /dev/null +++ b/scripts/addons_core/io_scene_fbx/parse_fbx.py @@ -0,0 +1,274 @@ +# SPDX-FileCopyrightText: 2006-2012 assimp team +# SPDX-FileCopyrightText: 2013 Blender Foundation +# +# SPDX-License-Identifier: GPL-2.0-or-later + +__all__ = ( + "parse", + "data_types", + "parse_version", + "FBXElem", +) + +from struct import unpack +import array +import zlib +from io import BytesIO + +from . import data_types +from .fbx_utils_threading import MultiThreadedTaskConsumer + +# at the end of each nested block, there is a NUL record to indicate +# that the sub-scope exists (i.e. to distinguish between P: and P : {}) +_BLOCK_SENTINEL_LENGTH = ... +_BLOCK_SENTINEL_DATA = ... +read_fbx_elem_start = ... 
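The primitive readers in this module wrap struct.unpack around the stream's read callable to decode fixed-size little-endian fields. A sketch of that pattern, under that assumption (not necessarily the exact helper bodies used here):

from struct import unpack

def read_uint_example(read):
    # Decode a little-endian unsigned 32-bit integer from the stream.
    return unpack(b'<I', read(4))[0]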
+_IS_BIG_ENDIAN = (__import__("sys").byteorder != 'little') +_HEAD_MAGIC = b'Kaydara FBX Binary\x20\x20\x00\x1a\x00' +from collections import namedtuple +FBXElem = namedtuple("FBXElem", ("id", "props", "props_type", "elems")) +del namedtuple + + +def read_uint(read): + return unpack(b' Import-Export', + 'description': 'Import-Export as glTF 2.0', + 'warning': '', + 'doc_url': "{BLENDER_MANUAL_URL}/addons/import_export/scene_gltf2.html", + 'tracker_url': "https://github.com/KhronosGroup/glTF-Blender-IO/issues/", + 'support': 'OFFICIAL', + 'category': 'Import-Export', +} + + +def get_version_string(): + return str(bl_info['version'][0]) + '.' + str(bl_info['version'][1]) + '.' + str(bl_info['version'][2]) + +# +# Script reloading (if the user calls 'Reload Scripts' from Blender) +# + + +def reload_package(module_dict_main): + import importlib + from pathlib import Path + + def reload_package_recursive(current_dir, module_dict): + for path in current_dir.iterdir(): + if "__init__" in str(path) or path.stem not in module_dict: + continue + + if path.is_file() and path.suffix == ".py": + importlib.reload(module_dict[path.stem]) + elif path.is_dir(): + reload_package_recursive(path, module_dict[path.stem].__dict__) + + reload_package_recursive(Path(__file__).parent, module_dict_main) + + +if "bpy" in locals(): + reload_package(locals()) + +import bpy +from bpy.props import (StringProperty, + BoolProperty, + EnumProperty, + IntProperty, + FloatProperty, + CollectionProperty) +from bpy.types import Operator +from bpy_extras.io_utils import ImportHelper, ExportHelper, poll_file_object_drop + + +# +# Functions / Classes. +# + +exporter_extension_panel_unregister_functors = [] +importer_extension_panel_unregister_functors = [] + + +def ensure_filepath_matches_export_format(filepath, export_format): + import os + filename = os.path.basename(filepath) + if not filename: + return filepath + + stem, ext = os.path.splitext(filename) + if stem.startswith('.') and not ext: + stem, ext = '', stem + + desired_ext = '.glb' if export_format == 'GLB' else '.gltf' + ext_lower = ext.lower() + if ext_lower not in ['.glb', '.gltf']: + return filepath + desired_ext + elif ext_lower != desired_ext: + filepath = filepath[:-len(ext)] # strip off ext + return filepath + desired_ext + else: + return filepath + + +def on_export_format_changed(self, context): + # Update the filename in the file browser when the format (.glb/.gltf) + # changes + sfile = context.space_data + if not isinstance(sfile, bpy.types.SpaceFileBrowser): + return + if not sfile.active_operator: + return + if sfile.active_operator.bl_idname != "EXPORT_SCENE_OT_gltf": + return + + sfile.params.filename = ensure_filepath_matches_export_format( + sfile.params.filename, + self.export_format, + ) + + # Also change the filter + sfile.params.filter_glob = '*.glb' if self.export_format == 'GLB' else '*.gltf' + # Force update of file list, because update the filter does not update the real file list + bpy.ops.file.refresh() + + +def on_export_action_filter_changed(self, context): + if self.export_action_filter is True: + bpy.types.Scene.gltf_action_filter = bpy.props.CollectionProperty(type=GLTF2_filter_action) + bpy.types.Scene.gltf_action_filter_active = bpy.props.IntProperty() + + for action in bpy.data.actions: + if id(action) not in [id(item.action) for item in bpy.data.scenes[0].gltf_action_filter]: + item = bpy.data.scenes[0].gltf_action_filter.add() + item.keep = True + item.action = action + + else: + bpy.data.scenes[0].gltf_action_filter.clear() + del 
bpy.types.Scene.gltf_action_filter + del bpy.types.Scene.gltf_action_filter_active + + +def get_format_items(scene, context): + + items = (('GLB', 'glTF Binary (.glb)', + 'Exports a single file, with all data packed in binary form. ' + 'Most efficient and portable, but more difficult to edit later'), + ('GLTF_SEPARATE', 'glTF Separate (.gltf + .bin + textures)', + 'Exports multiple files, with separate JSON, binary and texture data. ' + 'Easiest to edit later')) + + if bpy.context.preferences.addons['io_scene_gltf2'].preferences \ + and "allow_embedded_format" in bpy.context.preferences.addons['io_scene_gltf2'].preferences \ + and bpy.context.preferences.addons['io_scene_gltf2'].preferences['allow_embedded_format']: + # At initialization, the preferences are not yet loaded + # The second line check is needed until the PR is merge in Blender, for github CI tests + items += (('GLTF_EMBEDDED', 'glTF Embedded (.gltf)', + 'Exports a single file, with all data packed in JSON. ' + 'Less efficient than binary, but easier to edit later' + ),) + + return items + + +def is_draco_available(): + # Initialize on first use + if not hasattr(is_draco_available, "draco_exists"): + from .io.com import gltf2_io_draco_compression_extension + is_draco_available.draco_exists = gltf2_io_draco_compression_extension.dll_exists() + + return is_draco_available.draco_exists + + +class ConvertGLTF2_Base: + """Base class containing options that should be exposed during both import and export.""" + + export_import_convert_lighting_mode: EnumProperty( + name='Lighting Mode', + items=( + ('SPEC', 'Standard', 'Physically-based glTF lighting units (cd, lx, nt)'), + ('COMPAT', 'Unitless', 'Non-physical, unitless lighting. Useful when exposure controls are not available'), + ('RAW', 'Raw (Deprecated)', 'Blender lighting strengths with no conversion'), + ), + description='Optional backwards compatibility for non-standard render engines. Applies to lights', # TODO: and emissive materials', + default='SPEC' + ) + + +class ExportGLTF2_Base(ConvertGLTF2_Base): + # TODO: refactor to avoid boilerplate + + bl_options = {'PRESET'} + + # Don't use export_ prefix here, I don't want it to be saved with other export settings + gltf_export_id: StringProperty( + name='Identifier', + description=( + 'Identifier of caller (in case of add-on calling this exporter). 
' + 'Can be useful in case of Extension added by other add-ons' + ), + default='' + ) + + # gltfpack properties + export_use_gltfpack: BoolProperty( + name='Use Gltfpack', + description='Use gltfpack to simplify the mesh and/or compress its textures', + default=False, + ) + + export_gltfpack_tc: BoolProperty( + name='KTX2 Compression', + description='Convert all textures to KTX2 with BasisU supercompression', + default=True, + ) + + export_gltfpack_tq: IntProperty( + name='Texture Encoding Quality', + description='Texture encoding quality', + default=8, + min=1, + max=10, + ) + + export_gltfpack_si: FloatProperty( + name='Mesh Simplification Ratio', + description='Simplify meshes targeting triangle count ratio', + default=1.0, + min=0.0, + max=1.0, + ) + + export_gltfpack_sa: BoolProperty( + name='Aggressive Mesh Simplification', + description='Aggressively simplify to the target ratio disregarding quality', + default=False, + ) + + export_gltfpack_slb: BoolProperty( + name='Lock Mesh Border Vertices', + description='Lock border vertices during simplification to avoid gaps on connected meshes', + default=False, + ) + + export_gltfpack_vp: IntProperty( + name='Position Quantization', + description='Use N-bit quantization for positions', + default=14, + min=1, + max=16, + ) + + export_gltfpack_vt: IntProperty( + name='Texture Coordinate Quantization', + description='Use N-bit quantization for texture coordinates', + default=12, + min=1, + max=16, + ) + + export_gltfpack_vn: IntProperty( + name='Normal/Tangent Quantization', + description='Use N-bit quantization for normals and tangents', + default=8, + min=1, + max=16, + ) + + export_gltfpack_vc: IntProperty( + name='Vertex Color Quantization', + description='Use N-bit quantization for colors', + default=8, + min=1, + max=16, + ) + + export_gltfpack_vpi: EnumProperty( + name='Vertex Position Attributes', + description='Type to use for vertex position attributes', + items=(('Integer', 'Integer', 'Use integer attributes for positions'), + ('Normalized', 'Normalized', 'Use normalized attributes for positions'), + ('Floating-point', 'Floating-point', 'Use floating-point attributes for positions')), + default='Integer', + ) + + export_gltfpack_noq: BoolProperty( + name='Disable Quantization', + description='Disable quantization; produces much larger glTF files with no extensions', + default=True, + ) + + # TODO: some stuff in Textures + + # TODO: Animations + + # TODO: Scene + + # TODO: some stuff in Miscellaneous + + export_format: EnumProperty( + name='Format', + items=get_format_items, + description=( + 'Output format. Binary is most efficient, ' + 'but JSON may be easier to edit later' + ), + default=0, # Warning => If you change the default, need to change the default filter too + update=on_export_format_changed, + ) + + ui_tab: EnumProperty( + items=(('GENERAL', "General", "General settings"), + ('MESHES', "Meshes", "Mesh settings"), + ('OBJECTS', "Objects", "Object settings"), + ('ANIMATION', "Animation", "Animation settings")), + name="ui_tab", + description="Export setting categories", + ) + + export_copyright: StringProperty( + name='Copyright', + description='Legal rights and conditions for the model', + default='' + ) + + export_image_format: EnumProperty( + name='Images', + items=(('AUTO', 'Automatic', + 'Save PNGs as PNGs, JPEGs as JPEGs, WebPs as WebPs. ' + 'For other formats, use PNG'), + ('JPEG', 'JPEG Format (.jpg)', + 'Save images as JPEGs. (Images that need alpha are saved as PNGs though.) 
' + 'Be aware of a possible loss in quality'), + ('WEBP', 'WebP Format', + 'Save images as WebPs as main image (no fallback)'), + ('NONE', 'None', + 'Don\'t export images'), + ), + description=( + 'Output format for images. PNG is lossless and generally preferred, but JPEG might be preferable for web ' + 'applications due to the smaller file size. Alternatively they can be omitted if they are not needed' + ), + default='AUTO' + ) + + export_image_add_webp: BoolProperty( + name='Create WebP', + description=( + "Creates WebP textures for every texture. " + "For already WebP textures, nothing happens" + ), + default=False + ) + + export_image_webp_fallback: BoolProperty( + name='WebP fallback', + description=( + "For all WebP textures, create a PNG fallback texture" + ), + default=False + ) + + export_texture_dir: StringProperty( + name='Textures', + description='Folder to place texture files in. Relative to the .gltf file', + default='', + ) + + # Keep for back compatibility + export_jpeg_quality: IntProperty( + name='JPEG quality', + description='Quality of JPEG export', + default=75, + min=0, + max=100 + ) + + # Keep for back compatibility + export_image_quality: IntProperty( + name='Image quality', + description='Quality of image export', + default=75, + min=0, + max=100 + ) + + export_keep_originals: BoolProperty( + name='Keep original', + description=('Keep original textures files if possible. ' + 'WARNING: if you use more than one texture, ' + 'where pbr standard requires only one, only one texture will be used. ' + 'This can lead to unexpected results' + ), + default=False, + ) + + export_texcoords: BoolProperty( + name='UVs', + description='Export UVs (texture coordinates) with meshes', + default=True + ) + + export_normals: BoolProperty( + name='Normals', + description='Export vertex normals with meshes', + default=True + ) + + export_gn_mesh: BoolProperty( + name='Geometry Nodes Instances (Experimental)', + description='Export Geometry nodes instance meshes', + default=False + ) + + export_draco_mesh_compression_enable: BoolProperty( + name='Draco mesh compression', + description='Compress mesh using Draco', + default=False + ) + + export_draco_mesh_compression_level: IntProperty( + name='Compression level', + description='Compression level (0 = most speed, 6 = most compression, higher values currently not supported)', + default=6, + min=0, + max=10 + ) + + export_draco_position_quantization: IntProperty( + name='Position quantization bits', + description='Quantization bits for position values (0 = no quantization)', + default=14, + min=0, + max=30 + ) + + export_draco_normal_quantization: IntProperty( + name='Normal quantization bits', + description='Quantization bits for normal values (0 = no quantization)', + default=10, + min=0, + max=30 + ) + + export_draco_texcoord_quantization: IntProperty( + name='Texcoord quantization bits', + description='Quantization bits for texture coordinate values (0 = no quantization)', + default=12, + min=0, + max=30 + ) + + export_draco_color_quantization: IntProperty( + name='Color quantization bits', + description='Quantization bits for color values (0 = no quantization)', + default=10, + min=0, + max=30 + ) + + export_draco_generic_quantization: IntProperty( + name='Generic quantization bits', + description='Quantization bits for generic values like weights or joints (0 = no quantization)', + default=12, + min=0, + max=30 + ) + + export_tangents: BoolProperty( + name='Tangents', + description='Export vertex tangents with meshes', + 
default=False + ) + + export_materials: EnumProperty( + name='Materials', + items=( + ('EXPORT', + 'Export', + 'Export all materials used by included objects'), + ('PLACEHOLDER', + 'Placeholder', + 'Do not export materials, but write multiple primitive groups per mesh, keeping material slot information'), + ('NONE', + 'No export', + 'Do not export materials, and combine mesh primitive groups, losing material slot information')), + description='Export materials', + default='EXPORT') + + export_unused_images: BoolProperty( + name='Unused images', + description='Export images not assigned to any material', + default=False) + + export_unused_textures: BoolProperty( + name='Prepare Unused textures', + description=( + 'Export image texture nodes not assigned to any material. ' + 'This feature is not standard and needs an external extension to be included in the glTF file' + ), + default=False) + + export_colors: BoolProperty( + name='Dummy', + description='Keep for compatibility only', + default=True + ) + + export_attributes: BoolProperty( + name='Attributes', + description='Export Attributes (when starting with underscore)', + default=False + ) + + use_mesh_edges: BoolProperty( + name='Loose Edges', + description=( + 'Export loose edges as lines, using the material from the first material slot' + ), + default=False, + ) + + use_mesh_vertices: BoolProperty( + name='Loose Points', + description=( + 'Export loose points as glTF points, using the material from the first material slot' + ), + default=False, + ) + + export_cameras: BoolProperty( + name='Cameras', + description='Export cameras', + default=False + ) + + use_selection: BoolProperty( + name='Selected Objects', + description='Export selected objects only', + default=False + ) + + use_visible: BoolProperty( + name='Visible Objects', + description='Export visible objects only', + default=False + ) + + use_renderable: BoolProperty( + name='Renderable Objects', + description='Export renderable objects only', + default=False + ) + + use_active_collection_with_nested: BoolProperty( + name='Include Nested Collections', + description='Include active collection and nested collections', + default=True + ) + + use_active_collection: BoolProperty( + name='Active Collection', + description='Export objects in the active collection only', + default=False + ) + + use_active_scene: BoolProperty( + name='Active Scene', + description='Export active scene only', + default=False + ) + + collection: StringProperty( + name="Source Collection", + description="Export only objects from this collection (and its children)", + default="", + ) + + export_extras: BoolProperty( + name='Custom Properties', + description='Export custom properties as glTF extras', + default=False + ) + + export_yup: BoolProperty( + name='+Y Up', + description='Export using glTF convention, +Y up', + default=True + ) + + export_apply: BoolProperty( + name='Apply Modifiers', + description='Apply modifiers (excluding Armatures) to mesh objects -' + 'WARNING: prevents exporting shape keys', + default=False + ) + + export_shared_accessors: BoolProperty( + name='Shared Accessors', + description='Export Primitives using shared accessors for attributes', + default=False + ) + + export_animations: BoolProperty( + name='Animations', + description='Exports active actions and NLA tracks as glTF animations', + default=True + ) + + export_frame_range: BoolProperty( + name='Limit to Playback Range', + description='Clips animations to selected playback range', + default=False + ) + + export_frame_step: 
IntProperty( + name='Sampling Rate', + description='How often to evaluate animated values (in frames)', + default=1, + min=1, + max=120 + ) + + export_force_sampling: BoolProperty( + name='Always Sample Animations', + description='Apply sampling to all animations', + default=True + ) + + export_pointer_animation: BoolProperty( + name='Export Animation Pointer (Experimental)', + description='Export material, Light & Camera animation as Animation Pointer', + default=False + ) + + export_animation_mode: EnumProperty( + name='Animation mode', + items=(('ACTIONS', 'Actions', + 'Export actions (actives and on NLA tracks) as separate animations'), + ('ACTIVE_ACTIONS', 'Active actions merged', + 'All the currently assigned actions become one glTF animation'), + ('BROADCAST', 'Broadcast actions', + 'Broadcast all compatible actions to all objects. ' + 'Animated objects will get all actions compatible with them, ' + 'others will get no animation at all'), + ('NLA_TRACKS', 'NLA Tracks', + 'Export individual NLA Tracks as separate animation'), + ('SCENE', 'Scene', + 'Export baked scene as a single animation') + ), + description='Export Animation mode', + default='ACTIONS' + ) + + export_nla_strips_merged_animation_name: StringProperty( + name='Merged Animation Name', + description=( + "Name of single glTF animation to be exported" + ), + default='Animation' + ) + + export_def_bones: BoolProperty( + name='Export Deformation Bones Only', + description='Export Deformation bones only', + default=False + ) + + export_hierarchy_flatten_bones: BoolProperty( + name='Flatten Bone Hierarchy', + description='Flatten Bone Hierarchy. Useful in case of non decomposable transformation matrix', + default=False + ) + + export_hierarchy_flatten_objs: BoolProperty( + name='Flatten Object Hierarchy', + description='Flatten Object Hierarchy. Useful in case of non decomposable transformation matrix', + default=False + ) + + export_armature_object_remove: BoolProperty( + name='Remove Armature Object', + description=( + 'Remove Armature object if possible. ' + 'If Armature has multiple root bones, object will not be removed' + ), + default=False + ) + + export_leaf_bone: BoolProperty( + name='Add Leaf Bones', + description=( + 'Append a final bone to the end of each chain to specify last bone length ' + '(use this when you intend to edit the armature from exported data)' + ), + default=False + ) + + export_optimize_animation_size: BoolProperty( + name='Optimize Animation Size', + description=( + "Reduce exported file size by removing duplicate keyframes" + ), + default=True + ) + + export_optimize_animation_keep_anim_armature: BoolProperty( + name='Force keeping channels for bones', + description=( + "If all keyframes are identical in a rig, " + "force keeping the minimal animation. " + "When off, all possible channels for " + "the bones will be exported, even if empty " + "(minimal animation, 2 keyframes)" + ), + default=True + ) + + export_optimize_animation_keep_anim_object: BoolProperty( + name='Force keeping channel for objects', + description=( + "If all keyframes are identical for object transformations, " + "force keeping the minimal animation" + ), + default=False + ) + + export_optimize_armature_disable_viewport: BoolProperty( + name='Disable viewport if possible', + description=( + "When exporting armature, disable viewport for other objects, " + "for performance. 
Drivers on shape keys for skined meshes prevent this optimization for now" + ), + default=False + ) + + export_negative_frame: EnumProperty( + name='Negative Frames', + items=(('SLIDE', 'Slide', + 'Slide animation to start at frame 0'), + ('CROP', 'Crop', + 'Keep only frames above frame 0'), + ), + description='Negative Frames are slid or cropped', + default='SLIDE' + ) + + export_anim_slide_to_zero: BoolProperty( + name='Set all glTF Animation starting at 0', + description=( + "Set all glTF animation starting at 0.0s. " + "Can be useful for looping animations" + ), + default=False + ) + + export_bake_animation: BoolProperty( + name='Bake All Objects Animations', + description=( + "Force exporting animation on every object. " + "Can be useful when using constraints or driver. " + "Also useful when exporting only selection" + ), + default=False + ) + + export_anim_single_armature: BoolProperty( + name='Export all Armature Actions', + description=( + "Export all actions, bound to a single armature. " + "WARNING: Option does not support exports including multiple armatures" + ), + default=True + ) + + export_reset_pose_bones: BoolProperty( + name='Reset pose bones between actions', + description=( + "Reset pose bones between each action exported. " + "This is needed when some bones are not keyed on some animations" + ), + default=True + ) + + export_current_frame: BoolProperty( + name='Use Current Frame as Object Rest Transformations', + description=( + 'Export the scene in the current animation frame. ' + 'When off, frame 0 is used as rest transformations for objects' + ), + default=False + ) + + export_rest_position_armature: BoolProperty( + name='Use Rest Position Armature', + description=( + "Export armatures using rest position as joints' rest pose. " + "When off, current frame pose is used as rest pose" + ), + default=True + ) + + export_anim_scene_split_object: BoolProperty( + name='Split Animation by Object', + description=( + "Export Scene as seen in Viewport, " + "But split animation by Object" + ), + default=True + ) + + export_skins: BoolProperty( + name='Skinning', + description='Export skinning (armature) data', + default=True + ) + + export_influence_nb: IntProperty( + name='Bone Influences', + description='Choose how many Bone influences to export', + default=4, + min=1 + ) + + export_all_influences: BoolProperty( + name='Include All Bone Influences', + description='Allow export of all joint vertex influences. Models may appear incorrectly in many viewers', + default=False + ) + + export_morph: BoolProperty( + name='Shape Keys', + description='Export shape keys (morph targets)', + default=True + ) + + export_morph_normal: BoolProperty( + name='Shape Key Normals', + description='Export vertex normals with shape keys (morph targets)', + default=True + ) + + export_morph_tangent: BoolProperty( + name='Shape Key Tangents', + description='Export vertex tangents with shape keys (morph targets)', + default=False + ) + + export_morph_animation: BoolProperty( + name='Shape Key Animations', + description='Export shape keys animations (morph targets)', + default=True + ) + + export_morph_reset_sk_data: BoolProperty( + name='Reset shape keys between actions', + description=( + "Reset shape keys between each action exported. " + "This is needed when some SK channels are not keyed on some animations" + ), + default=True + ) + + export_lights: BoolProperty( + name='Punctual Lights', + description='Export directional, point, and spot lights. 
' + 'Uses "KHR_lights_punctual" glTF extension', + default=False + ) + + export_try_sparse_sk: BoolProperty( + name='Use Sparse Accessor if better', + description='Try using Sparse Accessor if it saves space', + default=True + ) + + export_try_omit_sparse_sk: BoolProperty( + name='Omit Sparse Accessor if data is empty', + description='Omit Sparse Accessor if data is empty', + default=False + ) + + export_gpu_instances: BoolProperty( + name='GPU Instances', + description='Export using EXT_mesh_gpu_instancing. ' + 'Limited to children of a given Empty. ' + 'Multiple materials might be omitted', + default=False + ) + + export_action_filter: BoolProperty( + name='Filter Actions', + description='Filter Actions to be exported', + default=False, + update=on_export_action_filter_changed, + ) + + export_convert_animation_pointer: BoolProperty( + name='Convert TRS/weights to Animation Pointer', + description='Export TRS and weights as Animation Pointer. ' + 'Uses the KHR_animation_pointer extension', + default=False + ) + + # This parameter is only here for backward compatibility, as this option was removed in 3.6 + # This option does nothing, and is not displayed in UI + # What you are looking for is probably "export_animation_mode" + export_nla_strips: BoolProperty( + name='Group by NLA Track', + description=( + "When on, multiple actions become part of the same glTF animation if " + "they're pushed onto NLA tracks with the same name. " + "When off, all the currently assigned actions become one glTF animation" + ), + default=True + ) + + # Kept for backward compatibility, but no longer used + export_original_specular: BoolProperty( + name='Export original PBR Specular', + description=( + 'Export original glTF PBR Specular, instead of Blender Principled Shader Specular' + ), + default=False, + ) + + will_save_settings: BoolProperty( + name='Remember Export Settings', + description='Store glTF export settings in the Blender project', + default=False) + + export_hierarchy_full_collections: BoolProperty( + name='Full Collection Hierarchy', + description='Export full hierarchy, including intermediate collections', + default=False + ) + + export_extra_animations: BoolProperty( + name='Prepare extra animations', + description=( + 'Export additional animations. ' + 'This feature is not standard and needs an external extension to be included in the glTF file' + ), + default=False + ) + + # Custom scene property for saving settings + scene_key = "glTF2ExportSettings" + + # + + def check(self, _context): + # Ensure file extension matches format + old_filepath = self.filepath + self.filepath = ensure_filepath_matches_export_format( + self.filepath, + self.export_format, + ) + return self.filepath != old_filepath + + def invoke(self, context, event): + settings = context.scene.get(self.scene_key) + self.will_save_settings = False + if settings: + try: + for (k, v) in settings.items(): + setattr(self, k, v) + self.will_save_settings = True + + # Update filter if user saved settings + if hasattr(self, 'export_format'): + self.filter_glob = '*.glb' if self.export_format == 'GLB' else '*.gltf' + + except (AttributeError, TypeError): + self.report({"ERROR"}, "Loading export settings failed. 
Removed corrupted settings") + del context.scene[self.scene_key] + + import sys + preferences = bpy.context.preferences + for addon_name in preferences.addons.keys(): + try: + if hasattr( + sys.modules[addon_name], + 'glTF2ExportUserExtension') or hasattr( + sys.modules[addon_name], + 'glTF2ExportUserExtensions'): + exporter_extension_panel_unregister_functors.append(sys.modules[addon_name].register_panel()) + except Exception: + pass + + self.has_active_exporter_extensions = len(exporter_extension_panel_unregister_functors) > 0 + return ExportHelper.invoke(self, context, event) + + def save_settings(self, context): + # find all props to save + exceptional = [ + # options that don't start with 'export_' + 'use_selection', + 'use_visible', + 'use_renderable', + 'use_active_collection_with_nested', + 'use_active_collection', + 'use_mesh_edges', + 'use_mesh_vertices', + 'use_active_scene', + 'collection', + ] + all_props = self.properties + export_props = { + x: getattr(self, x) for x in dir(all_props) + if (x.startswith("export_") or x in exceptional) and all_props.get(x) is not None + } + context.scene[self.scene_key] = export_props + + def execute(self, context): + import os + import datetime + import logging + from .io.com.gltf2_io_debug import Log + from .blender.exp import gltf2_blender_export + from .io.com.gltf2_io_path import path_to_uri + + if self.will_save_settings: + self.save_settings(context) + + self.check(context) # ensure filepath has the right extension + + # All custom export settings are stored in this container. + export_settings = {} + + export_settings['loglevel'] = logging.INFO + + export_settings['exported_images'] = {} + export_settings['exported_texture_nodes'] = [] + export_settings['additional_texture_export'] = [] + export_settings['additional_texture_export_current_idx'] = 0 + + export_settings['timestamp'] = datetime.datetime.now() + export_settings['gltf_export_id'] = self.gltf_export_id + export_settings['gltf_filepath'] = self.filepath + export_settings['gltf_filedirectory'] = os.path.dirname(export_settings['gltf_filepath']) + '/' + export_settings['gltf_texturedirectory'] = os.path.join( + export_settings['gltf_filedirectory'], + self.export_texture_dir, + ) + export_settings['gltf_keep_original_textures'] = self.export_keep_originals + + export_settings['gltf_format'] = self.export_format + export_settings['gltf_image_format'] = self.export_image_format + export_settings['gltf_add_webp'] = self.export_image_add_webp + export_settings['gltf_webp_fallback'] = self.export_image_webp_fallback + export_settings['gltf_image_quality'] = self.export_image_quality + export_settings['gltf_copyright'] = self.export_copyright + export_settings['gltf_texcoords'] = self.export_texcoords + export_settings['gltf_normals'] = self.export_normals + export_settings['gltf_tangents'] = self.export_tangents and self.export_normals + export_settings['gltf_loose_edges'] = self.use_mesh_edges + export_settings['gltf_loose_points'] = self.use_mesh_vertices + + if is_draco_available(): + export_settings['gltf_draco_mesh_compression'] = self.export_draco_mesh_compression_enable + export_settings['gltf_draco_mesh_compression_level'] = self.export_draco_mesh_compression_level + export_settings['gltf_draco_position_quantization'] = self.export_draco_position_quantization + export_settings['gltf_draco_normal_quantization'] = self.export_draco_normal_quantization + export_settings['gltf_draco_texcoord_quantization'] = self.export_draco_texcoord_quantization + 
export_settings['gltf_draco_color_quantization'] = self.export_draco_color_quantization + export_settings['gltf_draco_generic_quantization'] = self.export_draco_generic_quantization + else: + export_settings['gltf_draco_mesh_compression'] = False + + export_settings['gltf_gn_mesh'] = self.export_gn_mesh + + export_settings['gltf_materials'] = self.export_materials + export_settings['gltf_attributes'] = self.export_attributes + export_settings['gltf_cameras'] = self.export_cameras + + export_settings['gltf_unused_textures'] = self.export_unused_textures + export_settings['gltf_unused_images'] = self.export_unused_images + + export_settings['gltf_visible'] = self.use_visible + export_settings['gltf_renderable'] = self.use_renderable + + export_settings['gltf_active_collection'] = self.use_active_collection + if self.use_active_collection: + export_settings['gltf_active_collection_with_nested'] = self.use_active_collection_with_nested + else: + export_settings['gltf_active_collection_with_nested'] = False + export_settings['gltf_active_scene'] = self.use_active_scene + export_settings['gltf_collection'] = self.collection + + export_settings['gltf_selected'] = self.use_selection + export_settings['gltf_layers'] = True # self.export_layers + export_settings['gltf_extras'] = self.export_extras + export_settings['gltf_yup'] = self.export_yup + export_settings['gltf_apply'] = self.export_apply + export_settings['gltf_shared_accessors'] = self.export_shared_accessors + export_settings['gltf_current_frame'] = self.export_current_frame + export_settings['gltf_animations'] = self.export_animations + export_settings['gltf_def_bones'] = self.export_def_bones + export_settings['gltf_flatten_bones_hierarchy'] = self.export_hierarchy_flatten_bones + export_settings['gltf_flatten_obj_hierarchy'] = self.export_hierarchy_flatten_objs + export_settings['gltf_armature_object_remove'] = self.export_armature_object_remove + export_settings['gltf_leaf_bone'] = self.export_leaf_bone + if self.export_animations: + export_settings['gltf_frame_range'] = self.export_frame_range + export_settings['gltf_force_sampling'] = self.export_force_sampling + if not self.export_force_sampling: + export_settings['gltf_def_bones'] = False + export_settings['gltf_bake_animation'] = False + export_settings['gltf_animation_mode'] = self.export_animation_mode + if export_settings['gltf_animation_mode'] == "NLA_TRACKS": + export_settings['gltf_force_sampling'] = True + if export_settings['gltf_animation_mode'] == "SCENE": + export_settings['gltf_anim_scene_split_object'] = self.export_anim_scene_split_object + else: + export_settings['gltf_anim_scene_split_object'] = False + + if export_settings['gltf_animation_mode'] in ['NLA_TRACKS', 'SCENE']: + export_settings['gltf_export_anim_pointer'] = self.export_pointer_animation + if self.export_pointer_animation: + export_settings['gltf_trs_w_animation_pointer'] = self.export_convert_animation_pointer + else: + export_settings['gltf_trs_w_animation_pointer'] = False + else: + export_settings['gltf_trs_w_animation_pointer'] = False + export_settings['gltf_export_anim_pointer'] = False + + export_settings['gltf_nla_strips_merged_animation_name'] = self.export_nla_strips_merged_animation_name + export_settings['gltf_optimize_animation'] = self.export_optimize_animation_size + export_settings['gltf_optimize_animation_keep_armature'] = self.export_optimize_animation_keep_anim_armature + export_settings['gltf_optimize_animation_keep_object'] = self.export_optimize_animation_keep_anim_object + 
export_settings['gltf_optimize_armature_disable_viewport'] = self.export_optimize_armature_disable_viewport + export_settings['gltf_export_anim_single_armature'] = self.export_anim_single_armature + export_settings['gltf_export_reset_pose_bones'] = self.export_reset_pose_bones + export_settings['gltf_export_reset_sk_data'] = self.export_morph_reset_sk_data + export_settings['gltf_bake_animation'] = self.export_bake_animation + export_settings['gltf_negative_frames'] = self.export_negative_frame + export_settings['gltf_anim_slide_to_zero'] = self.export_anim_slide_to_zero + export_settings['gltf_export_extra_animations'] = self.export_extra_animations + else: + export_settings['gltf_frame_range'] = False + export_settings['gltf_force_sampling'] = False + export_settings['gltf_bake_animation'] = False + export_settings['gltf_optimize_animation'] = False + export_settings['gltf_optimize_animation_keep_armature'] = False + export_settings['gltf_optimize_animation_keep_object'] = False + export_settings['gltf_optimize_armature_disable_viewport'] = False + export_settings['gltf_export_anim_single_armature'] = False + export_settings['gltf_export_reset_pose_bones'] = False + export_settings['gltf_export_reset_sk_data'] = False + export_settings['gltf_export_extra_animations'] = False + export_settings['gltf_skins'] = self.export_skins + if self.export_skins: + export_settings['gltf_all_vertex_influences'] = self.export_all_influences + export_settings['gltf_vertex_influences_nb'] = self.export_influence_nb + else: + export_settings['gltf_all_vertex_influences'] = False + export_settings['gltf_def_bones'] = False + export_settings['gltf_rest_position_armature'] = self.export_rest_position_armature + export_settings['gltf_frame_step'] = self.export_frame_step + + export_settings['gltf_morph'] = self.export_morph + if self.export_morph: + export_settings['gltf_morph_normal'] = self.export_morph_normal + export_settings['gltf_morph_tangent'] = self.export_morph_tangent + export_settings['gltf_morph_anim'] = self.export_morph_animation + else: + export_settings['gltf_morph_normal'] = False + export_settings['gltf_morph_tangent'] = False + export_settings['gltf_morph_anim'] = False + + export_settings['gltf_lights'] = self.export_lights + export_settings['gltf_lighting_mode'] = self.export_import_convert_lighting_mode + + export_settings['gltf_gpu_instances'] = self.export_gpu_instances + + export_settings['gltf_try_sparse_sk'] = self.export_try_sparse_sk + export_settings['gltf_try_omit_sparse_sk'] = self.export_try_omit_sparse_sk + if not self.export_try_sparse_sk: + export_settings['gltf_try_omit_sparse_sk'] = False + + export_settings['gltf_hierarchy_full_collections'] = self.export_hierarchy_full_collections + + # gltfpack stuff + export_settings['gltf_use_gltfpack'] = self.export_use_gltfpack + if self.export_use_gltfpack: + export_settings['gltf_gltfpack_tc'] = self.export_gltfpack_tc + export_settings['gltf_gltfpack_tq'] = self.export_gltfpack_tq + + export_settings['gltf_gltfpack_si'] = self.export_gltfpack_si + export_settings['gltf_gltfpack_sa'] = self.export_gltfpack_sa + export_settings['gltf_gltfpack_slb'] = self.export_gltfpack_slb + + export_settings['gltf_gltfpack_vp'] = self.export_gltfpack_vp + export_settings['gltf_gltfpack_vt'] = self.export_gltfpack_vt + export_settings['gltf_gltfpack_vn'] = self.export_gltfpack_vn + export_settings['gltf_gltfpack_vc'] = self.export_gltfpack_vc + + export_settings['gltf_gltfpack_vpi'] = self.export_gltfpack_vpi + + 
export_settings['gltf_gltfpack_noq'] = self.export_gltfpack_noq + + export_settings['gltf_binary'] = bytearray() + export_settings['gltf_binaryfilename'] = ( + path_to_uri(os.path.splitext(os.path.basename(self.filepath))[0] + '.bin') + ) + + user_extensions = [] + pre_export_callbacks = [] + post_export_callbacks = [] + + import sys + preferences = bpy.context.preferences + for addon_name in preferences.addons.keys(): + try: + module = sys.modules[addon_name] + except Exception: + continue + if hasattr(module, 'glTF2ExportUserExtension'): + extension_ctor = module.glTF2ExportUserExtension + user_extensions.append(extension_ctor()) + if hasattr(module, 'glTF2ExportUserExtensions'): + extension_ctors = module.glTF2ExportUserExtensions + for extension_ctor in extension_ctors: + user_extensions.append(extension_ctor()) + if hasattr(module, 'glTF2_pre_export_callback'): + pre_export_callbacks.append(module.glTF2_pre_export_callback) + if hasattr(module, 'glTF2_post_export_callback'): + post_export_callbacks.append(module.glTF2_post_export_callback) + export_settings['gltf_user_extensions'] = user_extensions + export_settings['pre_export_callbacks'] = pre_export_callbacks + export_settings['post_export_callbacks'] = post_export_callbacks + + # Initialize logging for export + export_settings['log'] = Log(export_settings['loglevel']) + + profile = bpy.app.debug_value == 102 + if profile: + import cProfile + import pstats + import io + from pstats import SortKey + pr = cProfile.Profile() + pr.enable() + res = gltf2_blender_export.save(context, export_settings) + pr.disable() + s = io.StringIO() + sortby = SortKey.TIME + ps = pstats.Stats(pr, stream=s).sort_stats(sortby) + ps.print_stats() + print(s.getvalue()) + else: + res = gltf2_blender_export.save(context, export_settings) + + # Display popup log, if any + for message_type, message in export_settings['log'].messages(): + self.report({message_type}, message) + + export_settings['log'].flush() + + return res + + def draw(self, context): + operator = self + layout = self.layout + layout.use_property_split = True + layout.use_property_decorate = False # No animation. 
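+ # The export dialog below is assembled from the export_main() / export_panel_*() helpers defined further down; gltfpack options are only shown when a gltfpack path is configured in the add-on preferences.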
+ + # Are we inside the File browser + is_file_browser = context.space_data.type == 'FILE_BROWSER' + + export_main(layout, operator, is_file_browser) + export_panel_include(layout, operator, is_file_browser) + export_panel_transform(layout, operator) + export_panel_data(layout, operator) + export_panel_animation(layout, operator) + + # If gltfpack is not setup in plugin preferences -> don't show any gltfpack relevant options in export dialog + gltfpack_path = context.preferences.addons['io_scene_gltf2'].preferences.gltfpack_path_ui.strip() + if gltfpack_path != '': + export_panel_gltfpack(layout, operator) + + +def export_main(layout, operator, is_file_browser): + layout.prop(operator, 'export_format') + if operator.export_format == 'GLTF_SEPARATE': + layout.prop(operator, 'export_keep_originals') + if operator.export_keep_originals is False: + layout.prop(operator, 'export_texture_dir', icon='FILE_FOLDER') + if operator.export_format == 'GLTF_EMBEDDED': + layout.label( + text="This is the least efficient of the available forms, and should only be used when required.", + icon='ERROR') + + layout.prop(operator, 'export_copyright') + if is_file_browser: + layout.prop(operator, 'will_save_settings') + + +def export_panel_include(layout, operator, is_file_browser): + header, body = layout.panel("GLTF_export_include", default_closed=True) + header.label(text="Include") + if body: + if is_file_browser: + col = body.column(heading="Limit to", align=True) + col.prop(operator, 'use_selection') + col.prop(operator, 'use_visible') + col.prop(operator, 'use_renderable') + col.prop(operator, 'use_active_collection') + if operator.use_active_collection: + col.prop(operator, 'use_active_collection_with_nested') + col.prop(operator, 'use_active_scene') + + col = body.column(heading="Data", align=True) + col.prop(operator, 'export_extras') + col.prop(operator, 'export_cameras') + col.prop(operator, 'export_lights') + + +def export_panel_transform(layout, operator): + header, body = layout.panel("GLTF_export_transform", default_closed=True) + header.label(text="Transform") + if body: + body.prop(operator, 'export_yup') + + +def export_panel_data(layout, operator): + header, body = layout.panel("GLTF_export_data", default_closed=True) + header.label(text="Data") + if body: + export_panel_data_scene_graph(body, operator) + export_panel_data_mesh(body, operator) + export_panel_data_material(body, operator) + export_panel_data_shapekeys(body, operator) + export_panel_data_armature(body, operator) + export_panel_data_skinning(body, operator) + export_panel_data_lighting(body, operator) + + if is_draco_available(): + export_panel_data_compression(body, operator) + + +def export_panel_data_scene_graph(layout, operator): + header, body = layout.panel("GLTF_export_data_scene_graph", default_closed=True) + header.label(text="Scene Graph") + if body: + body.prop(operator, 'export_gn_mesh') + body.prop(operator, 'export_gpu_instances') + body.prop(operator, 'export_hierarchy_flatten_objs') + body.prop(operator, 'export_hierarchy_full_collections') + + +def export_panel_data_mesh(layout, operator): + header, body = layout.panel("GLTF_export_data_mesh", default_closed=True) + header.label(text="Mesh") + if body: + body.prop(operator, 'export_apply') + body.prop(operator, 'export_texcoords') + body.prop(operator, 'export_normals') + col = body.column() + col.active = operator.export_normals + col.prop(operator, 'export_tangents') + body.prop(operator, 'export_attributes') + + col = body.column() + col.prop(operator, 
'use_mesh_edges') + col.prop(operator, 'use_mesh_vertices') + + col = body.column() + col.prop(operator, 'export_shared_accessors') + + +def export_panel_data_material(layout, operator): + header, body = layout.panel("GLTF_export_data_material", default_closed=True) + header.label(text="Material") + if body: + body.prop(operator, 'export_materials') + col = body.column() + col.active = operator.export_materials == "EXPORT" + col.prop(operator, 'export_image_format') + if operator.export_image_format in ["AUTO", "JPEG", "WEBP"]: + col.prop(operator, 'export_image_quality') + col = body.column() + col.active = operator.export_image_format != "WEBP" + col.prop(operator, "export_image_add_webp") + col = body.column() + col.active = operator.export_image_format != "WEBP" + col.prop(operator, "export_image_webp_fallback") + + header, sub_body = body.panel("GLTF_export_data_material_unused", default_closed=True) + header.label(text="Unused Textures & Images") + if sub_body: + row = sub_body.row() + row.prop(operator, 'export_unused_images') + row = sub_body.row() + row.prop(operator, 'export_unused_textures') + + +def export_panel_data_shapekeys(layout, operator): + header, body = layout.panel("GLTF_export_data_shapekeys", default_closed=True) + header.use_property_split = False + header.prop(operator, "export_morph", text="") + header.label(text="Shape Keys") + if body: + body.active = operator.export_morph + + body.prop(operator, 'export_morph_normal') + col = body.column() + col.active = operator.export_morph_normal + col.prop(operator, 'export_morph_tangent') + + # Data-Shape Keys-Optimize + header, sub_body = body.panel("GLTF_export_data_shapekeys_optimize", default_closed=True) + header.label(text="Optimize Shape Keys") + if sub_body: + row = sub_body.row() + row.prop(operator, 'export_try_sparse_sk') + + row = sub_body.row() + row.active = operator.export_try_sparse_sk + row.prop(operator, 'export_try_omit_sparse_sk') + + +def export_panel_data_armature(layout, operator): + header, body = layout.panel("GLTF_export_data_armature", default_closed=True) + header.label(text="Armature") + if body: + body.active = operator.export_skins + + body.prop(operator, 'export_rest_position_armature') + + row = body.row() + row.active = operator.export_force_sampling + row.prop(operator, 'export_def_bones') + if operator.export_force_sampling is False and operator.export_def_bones is True: + body.label(text="Export only deformation bones is not possible when not sampling animation") + row = body.row() + row.prop(operator, 'export_armature_object_remove') + row = body.row() + row.prop(operator, 'export_hierarchy_flatten_bones') + + +def export_panel_data_skinning(layout, operator): + header, body = layout.panel("GLTF_export_data_skinning", default_closed=True) + header.use_property_split = False + header.prop(operator, "export_skins", text="") + header.label(text="Skinning") + if body: + body.active = operator.export_skins + + row = body.row() + row.prop(operator, 'export_influence_nb') + row.active = not operator.export_all_influences + body.prop(operator, 'export_all_influences') + + +def export_panel_data_lighting(layout, operator): + header, body = layout.panel("GLTF_export_data_lighting", default_closed=True) + header.label(text="Lighting") + if body: + body.prop(operator, 'export_import_convert_lighting_mode') + + +def export_panel_data_compression(layout, operator): + header, body = layout.panel("GLTF_export_data_compression", default_closed=True) + header.use_property_split = False + 
header.prop(operator, "export_draco_mesh_compression_enable", text="") + header.label(text="Compression") + if body: + body.active = operator.export_draco_mesh_compression_enable + + body.prop(operator, 'export_draco_mesh_compression_level') + + col = body.column(align=True) + col.prop(operator, 'export_draco_position_quantization', text="Quantize Position") + col.prop(operator, 'export_draco_normal_quantization', text="Normal") + col.prop(operator, 'export_draco_texcoord_quantization', text="Tex Coord") + col.prop(operator, 'export_draco_color_quantization', text="Color") + col.prop(operator, 'export_draco_generic_quantization', text="Generic") + + +def export_panel_animation(layout, operator): + header, body = layout.panel("GLTF_export_animation", default_closed=True) + header.use_property_split = False + header.prop(operator, "export_animations", text="") + header.label(text="Animation") + if body: + body.active = operator.export_animations + + body.prop(operator, 'export_animation_mode') + if operator.export_animation_mode == "ACTIVE_ACTIONS": + layout.prop(operator, 'export_nla_strips_merged_animation_name') + + row = body.row() + row.active = operator.export_force_sampling and operator.export_animation_mode in [ + 'ACTIONS', 'ACTIVE_ACTIONS', 'BROADCAST'] + row.prop(operator, 'export_bake_animation') + if operator.export_animation_mode == "SCENE": + body.prop(operator, 'export_anim_scene_split_object') + row = body.row() + + if operator.export_animation_mode in ["NLA_TRACKS", "SCENE"]: + export_panel_animation_notes(body, operator) + export_panel_animation_ranges(body, operator) + export_panel_animation_armature(body, operator) + export_panel_animation_shapekeys(body, operator) + export_panel_animation_sampling(body, operator) + export_panel_animation_pointer(body, operator) + export_panel_animation_optimize(body, operator) + if operator.export_animation_mode in ['ACTIONS', 'ACTIVE_ACTIONS']: + export_panel_animation_extra(body, operator) + + from .blender.com.gltf2_blender_ui import export_panel_animation_action_filter + export_panel_animation_action_filter(body, operator) + + +def export_panel_animation_notes(layout, operator): + header, body = layout.panel("GLTF_export_animation_notes", default_closed=True) + header.label(text="Notes") + if body: + if operator.export_animation_mode == "SCENE": + body.label(text="Scene mode uses full bake mode:") + body.label(text="- sampling is active") + body.label(text="- baking all objects is active") + body.label(text="- Using scene frame range") + elif operator.export_animation_mode == "NLA_TRACKS": + body.label(text="Track mode uses full bake mode:") + body.label(text="- sampling is active") + body.label(text="- baking all objects is active") + + +def export_panel_animation_ranges(layout, operator): + header, body = layout.panel("GLTF_export_animation_ranges", default_closed=True) + header.label(text="Rest & Ranges") + if body: + body.active = operator.export_animations + + body.prop(operator, 'export_current_frame') + row = body.row() + row.active = operator.export_animation_mode in ['ACTIONS', 'ACTIVE_ACTIONS', 'BROADCAST', 'NLA_TRACKS'] + row.prop(operator, 'export_frame_range') + body.prop(operator, 'export_anim_slide_to_zero') + row = body.row() + row.active = operator.export_animation_mode in ['ACTIONS', 'ACTIVE_ACTIONS', 'BROADCAST', 'NLA_TRACKS'] + body.prop(operator, 'export_negative_frame') + + +def export_panel_animation_armature(layout, operator): + header, body = layout.panel("GLTF_export_animation_armature", default_closed=True) 
+ header.label(text="Armature") + if body: + body.active = operator.export_animations + + body.prop(operator, 'export_anim_single_armature') + body.prop(operator, 'export_reset_pose_bones') + + +def export_panel_animation_shapekeys(layout, operator): + header, body = layout.panel("GLTF_export_animation_shapekeys", default_closed=True) + header.use_property_split = False + header.prop(operator, "export_morph_animation", text="") + header.label(text="Shape Keys Animation") + if body: + body.active = operator.export_animations + + body.prop(operator, 'export_morph_reset_sk_data') + + +def export_panel_animation_sampling(layout, operator): + header, body = layout.panel("GLTF_export_animation_sampling", default_closed=True) + header.use_property_split = False + header.prop(operator, "export_force_sampling", text="") + header.label(text="Sampling Animations") + if body: + body.active = operator.export_animations + + body.prop(operator, 'export_frame_step') + + +def export_panel_animation_pointer(layout, operator): + header, body = layout.panel("GLTF_export_animation_pointer", default_closed=True) + header.use_property_split = False + header.active = operator.export_animations and operator.export_animation_mode in ['NLA_TRACKS', 'SCENE'] + header.prop(operator, "export_pointer_animation", text="") + header.label(text="Animation Pointer (Experimental)") + if body: + + row = body.row() + row.active = operator.export_pointer_animation + row.prop(operator, 'export_convert_animation_pointer') + + +def export_panel_animation_optimize(layout, operator): + header, body = layout.panel("GLTF_export_animation_optimize", default_closed=True) + header.label(text="Optimize Animations") + if body: + body.active = operator.export_animations + + body.prop(operator, 'export_optimize_animation_size') + + row = body.row() + row.prop(operator, 'export_optimize_animation_keep_anim_armature') + + row = body.row() + row.prop(operator, 'export_optimize_animation_keep_anim_object') + + row = body.row() + row.prop(operator, 'export_optimize_armature_disable_viewport') + + +def export_panel_animation_extra(layout, operator): + header, body = layout.panel("GLTF_export_animation_extra", default_closed=True) + header.label(text="Extra Animations") + if body: + body.active = operator.export_animations + + body.prop(operator, 'export_extra_animations') + + +def export_panel_gltfpack(layout, operator): + header, body = layout.panel("GLTF_export_gltfpack", default_closed=True) + header.label(text="gltfpack") + if body: + col = body.column(heading="gltfpack", align=True) + col.prop(operator, 'export_use_gltfpack') + + col = body.column(heading="Textures", align=True) + col.prop(operator, 'export_gltfpack_tc') + col.prop(operator, 'export_gltfpack_tq') + col = body.column(heading="Simplification", align=True) + col.prop(operator, 'export_gltfpack_si') + col.prop(operator, 'export_gltfpack_sa') + col.prop(operator, 'export_gltfpack_slb') + col = body.column(heading="Vertices", align=True) + col.prop(operator, 'export_gltfpack_vp') + col.prop(operator, 'export_gltfpack_vt') + col.prop(operator, 'export_gltfpack_vn') + col.prop(operator, 'export_gltfpack_vc') + col = body.column(heading="Vertex positions", align=True) + col.prop(operator, 'export_gltfpack_vpi') + #col = body.column(heading = "Animations", align = True) + #col = body.column(heading = "Scene", align = True) + col = body.column(heading="Miscellaneous", align=True) + col.prop(operator, 'export_gltfpack_noq') + + +class ExportGLTF2(bpy.types.Operator, ExportGLTF2_Base, 
ExportHelper): + """Export scene as glTF 2.0 file""" + bl_idname = 'export_scene.gltf' + bl_label = 'Export glTF 2.0' + + filename_ext = '' + + filter_glob: StringProperty(default='*.glb', options={'HIDDEN'}) + + +def menu_func_export(self, context): + self.layout.operator(ExportGLTF2.bl_idname, text='glTF 2.0 (.glb/.gltf)') + + +class ImportGLTF2(Operator, ConvertGLTF2_Base, ImportHelper): + """Load a glTF 2.0 file""" + bl_idname = 'import_scene.gltf' + bl_label = 'Import glTF 2.0' + bl_options = {'REGISTER', 'UNDO'} + + filter_glob: StringProperty(default="*.glb;*.gltf", options={'HIDDEN'}) + + files: CollectionProperty( + name="File Path", + type=bpy.types.OperatorFileListElement, + ) + + loglevel: IntProperty( + name='Log Level', + description="Log Level") + + import_pack_images: BoolProperty( + name='Pack Images', + description='Pack all images into .blend file', + default=True + ) + + merge_vertices: BoolProperty( + name='Merge Vertices', + description=( + 'The glTF format requires discontinuous normals, UVs, and ' + 'other vertex attributes to be stored as separate vertices, ' + 'as required for rendering on typical graphics hardware. ' + 'This option attempts to combine co-located vertices where possible. ' + 'Currently cannot combine verts with different normals' + ), + default=False, + ) + + import_shading: EnumProperty( + name="Shading", + items=(("NORMALS", "Use Normal Data", ""), + ("FLAT", "Flat Shading", ""), + ("SMOOTH", "Smooth Shading", "")), + description="How normals are computed during import", + default="NORMALS") + + bone_heuristic: EnumProperty( + name="Bone Dir", + items=( + ("BLENDER", "Blender (best for import/export round trip)", + "Good for re-importing glTFs exported from Blender, " + "and re-exporting glTFs to glTFs after Blender editing. " + "Bone tips are placed on their local +Y axis (in glTF space)"), + ("TEMPERANCE", "Temperance (average)", + "Decent all-around strategy. " + "A bone with one child has its tip placed on the local axis " + "closest to its child"), + ("FORTUNE", "Fortune (may look better, less accurate)", + "Might look better than Temperance, but also might have errors. " + "A bone with one child has its tip placed at its child's root. " + "Non-uniform scalings may get messed up though, so beware"), + ), + description="Heuristic for placing bones. Tries to make bones pretty", + default="BLENDER", + ) + + guess_original_bind_pose: BoolProperty( + name='Guess Original Bind Pose', + description=( + 'Try to guess the original bind pose for skinned meshes from ' + 'the inverse bind matrices. ' + 'When off, use default/rest pose as bind pose' + ), + default=True, + ) + + import_webp_texture: BoolProperty( + name='Import WebP textures', + description=( + "If a texture exists in WebP format, " + "loads the WebP texture instead of the fallback PNG/JPEG one" + ), + default=False, + ) + + def draw(self, context): + layout = self.layout + + layout.use_property_split = True + layout.use_property_decorate = False # No animation. 
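+ # Note: 'export_import_convert_lighting_mode' drawn below is the same property the exporter uses for its lighting mode (see export_panel_data_lighting above).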
+ + layout.prop(self, 'import_pack_images') + layout.prop(self, 'merge_vertices') + layout.prop(self, 'import_shading') + layout.prop(self, 'guess_original_bind_pose') + layout.prop(self, 'bone_heuristic') + layout.prop(self, 'export_import_convert_lighting_mode') + layout.prop(self, 'import_webp_texture') + + def invoke(self, context, event): + import sys + preferences = bpy.context.preferences + for addon_name in preferences.addons.keys(): + try: + if hasattr( + sys.modules[addon_name], + 'glTF2ImportUserExtension') or hasattr( + sys.modules[addon_name], + 'glTF2ImportUserExtensions'): + importer_extension_panel_unregister_functors.append(sys.modules[addon_name].register_panel()) + except Exception: + pass + + self.has_active_importer_extensions = len(importer_extension_panel_unregister_functors) > 0 + return ImportHelper.invoke_popup(self, context) + + def execute(self, context): + return self.import_gltf2(context) + + def import_gltf2(self, context): + import os + + self.set_debug_log() + import_settings = self.as_keywords() + + user_extensions = [] + + import sys + preferences = bpy.context.preferences + for addon_name in preferences.addons.keys(): + try: + module = sys.modules[addon_name] + except Exception: + continue + if hasattr(module, 'glTF2ImportUserExtension'): + extension_ctor = module.glTF2ImportUserExtension + user_extensions.append(extension_ctor()) + import_settings['import_user_extensions'] = user_extensions + + if self.files: + # Multiple file import + ret = {'CANCELLED'} + dirname = os.path.dirname(self.filepath) + for file in self.files: + path = os.path.join(dirname, file.name) + if self.unit_import(path, import_settings) == {'FINISHED'}: + ret = {'FINISHED'} + return ret + else: + # Single file import + return self.unit_import(self.filepath, import_settings) + + def unit_import(self, filename, import_settings): + import time + from .io.imp.gltf2_io_gltf import glTFImporter, ImportError + from .blender.imp.gltf2_blender_gltf import BlenderGlTF + + try: + gltf_importer = glTFImporter(filename, import_settings) + gltf_importer.read() + gltf_importer.checks() + + gltf_importer.log.info("Data are loaded, start creating Blender stuff") + + start_time = time.time() + BlenderGlTF.create(gltf_importer) + elapsed_s = "{:.2f}s".format(time.time() - start_time) + gltf_importer.log.info("glTF import finished in " + elapsed_s) + + # Display popup log, if any + for message_type, message in gltf_importer.log.messages(): + self.report({message_type}, message) + + gltf_importer.log.flush() + + return {'FINISHED'} + + except ImportError as e: + self.report({'ERROR'}, e.args[0]) + return {'CANCELLED'} + + def set_debug_log(self): + import logging + if bpy.app.debug_value == 0: # Default values => Display all messages except debug ones + self.loglevel = logging.INFO + elif bpy.app.debug_value == 1: + self.loglevel = logging.WARNING + elif bpy.app.debug_value == 2: + self.loglevel = logging.ERROR + elif bpy.app.debug_value == 3: + self.loglevel = logging.CRITICAL + elif bpy.app.debug_value == 4: + self.loglevel = logging.DEBUG + + +class GLTF2_filter_action(bpy.types.PropertyGroup): + keep: bpy.props.BoolProperty(name="Keep Animation") + action: bpy.props.PointerProperty(type=bpy.types.Action) + + +def gltf_variant_ui_update(self, context): + from .blender.com.gltf2_blender_ui import variant_register, variant_unregister + if self.KHR_materials_variants_ui is True: + # register all needed types + variant_register() + else: + variant_unregister() + + +def gltf_animation_ui_update(self, 
context): + from .blender.com.gltf2_blender_ui import anim_ui_register, anim_ui_unregister + if self.animation_ui is True: + # register all needed types + anim_ui_register() + else: + anim_ui_unregister() + + +class GLTF_AddonPreferences(bpy.types.AddonPreferences): + bl_idname = __package__ + + settings_node_ui: bpy.props.BoolProperty( + default=False, + description="Displays glTF Material Output node in Shader Editor (Menu Add > Output)" + ) + + KHR_materials_variants_ui: bpy.props.BoolProperty( + default=False, + description="Displays glTF UI to manage material variants", + update=gltf_variant_ui_update + ) + + animation_ui: bpy.props.BoolProperty( + default=False, + description="Display glTF UI to manage animations", + update=gltf_animation_ui_update + ) + + gltfpack_path_ui: bpy.props.StringProperty( + default="", + name="glTFpack file path", + description="Path to gltfpack binary", + subtype='FILE_PATH' + ) + + allow_embedded_format: bpy.props.BoolProperty( + default=False, + name='Allow glTF Embedded format', + description="Allow glTF Embedded format" + ) + + def draw(self, context): + layout = self.layout + row = layout.row() + row.prop(self, "settings_node_ui", text="Shader Editor Add-ons") + row.prop(self, "KHR_materials_variants_ui", text="Material Variants") + row.prop(self, "animation_ui", text="Animation UI") + row = layout.row() + row.prop(self, "gltfpack_path_ui", text="Path to gltfpack") + row = layout.row() + row.prop(self, "allow_embedded_format", text="Allow glTF Embedded format") + if self.allow_embedded_format: + layout.label( + text="This is the least efficient of the available forms, and should only be used when required.", + icon='ERROR') + + +class IO_FH_gltf2(bpy.types.FileHandler): + bl_idname = "IO_FH_gltf2" + bl_label = "glTF 2.0" + bl_import_operator = "import_scene.gltf" + bl_export_operator = "export_scene.gltf" + bl_file_extensions = ".glb;.gltf" + + @classmethod + def poll_drop(cls, context): + return poll_file_object_drop(context) + + +def menu_func_import(self, context): + self.layout.operator(ImportGLTF2.bl_idname, text='glTF 2.0 (.glb/.gltf)') + + +classes = ( + ExportGLTF2, + ImportGLTF2, + IO_FH_gltf2, + GLTF2_filter_action, + GLTF_AddonPreferences +) + + +def register(): + from .blender.com import gltf2_blender_ui as blender_ui + + for c in classes: + bpy.utils.register_class(c) + # bpy.utils.register_module(__name__) + + blender_ui.register() + if bpy.context.preferences.addons['io_scene_gltf2'].preferences.KHR_materials_variants_ui is True: + blender_ui.variant_register() + if bpy.context.preferences.addons['io_scene_gltf2'].preferences.animation_ui is True: + blender_ui.anim_ui_register() + + # add to the export / import menu + bpy.types.TOPBAR_MT_file_export.append(menu_func_export) + bpy.types.TOPBAR_MT_file_import.append(menu_func_import) + + +def unregister(): + from .blender.com import gltf2_blender_ui as blender_ui + blender_ui.unregister() + if bpy.context.preferences.addons['io_scene_gltf2'].preferences.KHR_materials_variants_ui is True: + blender_ui.variant_unregister() + + for c in classes: + bpy.utils.unregister_class(c) + for f in exporter_extension_panel_unregister_functors: + f() + exporter_extension_panel_unregister_functors.clear() + + for f in importer_extension_panel_unregister_functors: + f() + importer_extension_panel_unregister_functors.clear() + + # bpy.utils.unregister_module(__name__) + + # remove from the export / import menu + bpy.types.TOPBAR_MT_file_export.remove(menu_func_export) + 
bpy.types.TOPBAR_MT_file_import.remove(menu_func_import) diff --git a/scripts/addons_core/io_scene_gltf2/blender/__init__.py b/scripts/addons_core/io_scene_gltf2/blender/__init__.py new file mode 100644 index 00000000000..815e044972b --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/__init__.py @@ -0,0 +1,3 @@ +# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 diff --git a/scripts/addons_core/io_scene_gltf2/blender/com/gltf2_blender_conversion.py b/scripts/addons_core/io_scene_gltf2/blender/com/gltf2_blender_conversion.py new file mode 100755 index 00000000000..a6dd137c4d6 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/com/gltf2_blender_conversion.py @@ -0,0 +1,215 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +from math import sin, cos, tan, atan +from mathutils import Matrix, Vector +import numpy as np +from ...io.com import gltf2_io_constants + +PBR_WATTS_TO_LUMENS = 683 +# Industry convention, biological peak at 555nm, scientific standard as part of SI candela definition. + + +# This means use the inverse of the TRS transform. +def inverted_trs_mapping_node(mapping_transform): + offset = mapping_transform["offset"] + rotation = mapping_transform["rotation"] + scale = mapping_transform["scale"] + + # Inverse of a TRS is not always a TRS. This function will be right + # at least when the following don't occur. + if abs(rotation) > 1e-5 and abs(scale[0] - scale[1]) > 1e-5: + return None + if abs(scale[0]) < 1e-5 or abs(scale[1]) < 1e-5: + return None + + new_offset = Matrix.Rotation(-rotation, 3, 'Z') @ Vector((-offset[0], -offset[1], 1)) + new_offset[0] /= scale[0] + new_offset[1] /= scale[1] + return { + "offset": new_offset[0:2], + "rotation": -rotation, + "scale": [1 / scale[0], 1 / scale[1]], + } + + +def texture_transform_blender_to_gltf(mapping_transform): + """ + Converts the offset/rotation/scale from a Mapping node applied in Blender's + UV space to the equivalent KHR_texture_transform. + """ + offset = mapping_transform.get('offset', [0, 0]) + rotation = mapping_transform.get('rotation', 0) + scale = mapping_transform.get('scale', [1, 1]) + return { + 'offset': [ + offset[0] - scale[1] * sin(rotation), + 1 - offset[1] - scale[1] * cos(rotation), + ], + 'rotation': rotation, + 'scale': [scale[0], scale[1]], + } + + +def texture_transform_gltf_to_blender(texture_transform): + """ + Converts a KHR_texture_transform into the equivalent offset/rotation/scale + for a Mapping node applied in Blender's UV space. 
+ """ + offset = texture_transform.get('offset', [0, 0]) + rotation = texture_transform.get('rotation', 0) + scale = texture_transform.get('scale', [1, 1]) + return { + 'offset': [ + offset[0] + scale[1] * sin(rotation), + 1 - offset[1] - scale[1] * cos(rotation), + ], + 'rotation': rotation, + 'scale': [scale[0], scale[1]], + } + + +def get_target(property): + return { + "delta_location": "translation", + "delta_rotation_euler": "rotation", + "delta_rotation_quaternion": "rotation", + "delta_scale": "scale", + "location": "translation", + "rotation_axis_angle": "rotation", + "rotation_euler": "rotation", + "rotation_quaternion": "rotation", + "scale": "scale", + "value": "weights" + }.get(property, None) + + +def get_component_type(attribute_component_type): + return { + "INT8": gltf2_io_constants.ComponentType.Float, + "BYTE_COLOR": gltf2_io_constants.ComponentType.UnsignedShort, + "FLOAT2": gltf2_io_constants.ComponentType.Float, + "FLOAT_COLOR": gltf2_io_constants.ComponentType.Float, + "FLOAT_VECTOR": gltf2_io_constants.ComponentType.Float, + "FLOAT_VECTOR_4": gltf2_io_constants.ComponentType.Float, + "INT": gltf2_io_constants.ComponentType.Float, # No signed Int in glTF accessor + "FLOAT": gltf2_io_constants.ComponentType.Float, + "BOOLEAN": gltf2_io_constants.ComponentType.Float + }.get(attribute_component_type) + + +def get_channel_from_target(target): + return { + "rotation": "rotation_quaternion", + "translation": "location", + "scale": "scale" + }.get(target) + + +def get_data_type(attribute_component_type): + return { + "INT8": gltf2_io_constants.DataType.Scalar, + "BYTE_COLOR": gltf2_io_constants.DataType.Vec4, + "FLOAT2": gltf2_io_constants.DataType.Vec2, + "FLOAT_COLOR": gltf2_io_constants.DataType.Vec4, + "FLOAT_VECTOR": gltf2_io_constants.DataType.Vec3, + "FLOAT_VECTOR_4": gltf2_io_constants.DataType.Vec4, + "INT": gltf2_io_constants.DataType.Scalar, + "FLOAT": gltf2_io_constants.DataType.Scalar, + "BOOLEAN": gltf2_io_constants.DataType.Scalar, + }.get(attribute_component_type) + + +def get_data_length(attribute_component_type): + return { + "INT8": 1, + "BYTE_COLOR": 4, + "FLOAT2": 2, + "FLOAT_COLOR": 4, + "FLOAT_VECTOR": 3, + "FLOAT_VECTOR_4": 4, + "INT": 1, + "FLOAT": 1, + "BOOLEAN": 1 + }.get(attribute_component_type) + + +def get_numpy_type(attribute_component_type): + return { + "INT8": np.float32, + "BYTE_COLOR": np.float32, + "FLOAT2": np.float32, + "FLOAT_COLOR": np.float32, + "FLOAT_VECTOR": np.float32, + "FLOAT_VECTOR_4": np.float32, + "INT": np.float32, # signed integer are not supported by glTF + "FLOAT": np.float32, + "BOOLEAN": np.float32 + }.get(attribute_component_type) + + +def get_attribute_type(component_type, data_type): + if gltf2_io_constants.DataType.num_elements(data_type) == 1: + return { + gltf2_io_constants.ComponentType.Float: "FLOAT", + gltf2_io_constants.ComponentType.UnsignedByte: "INT" # What is the best for compatibility? 
}.get(component_type, None) + elif gltf2_io_constants.DataType.num_elements(data_type) == 2: + return { + gltf2_io_constants.ComponentType.Float: "FLOAT2" + }.get(component_type, None) + elif gltf2_io_constants.DataType.num_elements(data_type) == 3: + return { + gltf2_io_constants.ComponentType.Float: "FLOAT_VECTOR" + }.get(component_type, None) + elif gltf2_io_constants.DataType.num_elements(data_type) == 4: + return { + gltf2_io_constants.ComponentType.Float: "FLOAT_COLOR", + gltf2_io_constants.ComponentType.UnsignedShort: "BYTE_COLOR", + gltf2_io_constants.ComponentType.UnsignedByte: "BYTE_COLOR" # What is the best for compatibility? + }.get(component_type, None) + else: + pass + + +def get_attribute(attributes, name, data_type, domain): + attribute = attributes.get(name) + if attribute is not None and attribute.data_type == data_type and attribute.domain == domain: + return attribute + else: + return None + + +def get_gltf_interpolation(interpolation): + return { + "BEZIER": "CUBICSPLINE", + "LINEAR": "LINEAR", + "CONSTANT": "STEP" + }.get(interpolation, "LINEAR") + + +def get_anisotropy_rotation_gltf_to_blender(rotation): + # glTF rotation is in radians, Blender in 0 to 1 + return rotation / (2 * np.pi) + + +def get_anisotropy_rotation_blender_to_gltf(rotation): + # glTF rotation is in radians, Blender in 0 to 1 + return rotation * (2 * np.pi) + + +# Convert a Blender camera FOV angle to glTF's vertical field of view ("yfov"), depending on sensor fit and aspect ratio. +def yvof_blender_to_gltf(angle, width, height, sensor_fit): + + aspect_ratio = width / height + + if width >= height: + if sensor_fit != 'VERTICAL': + return 2.0 * atan(tan(angle * 0.5) / aspect_ratio) + else: + return angle + else: + if sensor_fit != 'HORIZONTAL': + return angle + else: + return 2.0 * atan(tan(angle * 0.5) / aspect_ratio) diff --git a/scripts/addons_core/io_scene_gltf2/blender/com/gltf2_blender_data_path.py b/scripts/addons_core/io_scene_gltf2/blender/com/gltf2_blender_data_path.py new file mode 100755 index 00000000000..00809ba643e --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/com/gltf2_blender_data_path.py @@ -0,0 +1,80 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + + +def get_target_property_name(data_path: str) -> str: + """Retrieve target property.""" + + if data_path.endswith("]"): + return None + else: + return data_path.rsplit('.', 1)[-1] + + +def get_target_object_path(data_path: str) -> str: + """Retrieve target object data path without property""" + if data_path.endswith("]"): + return data_path.rsplit('[', 1)[0] + elif data_path.startswith("pose.bones["): + return data_path[:data_path.find('"]')] + '"]' + path_split = data_path.rsplit('.', 1) + self_targeting = len(path_split) < 2 + if self_targeting: + return "" + return path_split[0] + + +def get_rotation_modes(target_property: str): + """Retrieve rotation modes based on target_property""" + if target_property in ["rotation_euler", "delta_rotation_euler"]: + return True, ["XYZ", "XZY", "YXZ", "YZX", "ZXY", "ZYX"] + elif target_property in ["rotation_quaternion", "delta_rotation_quaternion"]: + return True, ["QUATERNION"] + elif target_property in ["rotation_axis_angle"]: + return True, ["AXIS_ANGLE"] + else: + return False, [] + + +def is_location(target_property): + return "location" in target_property + + +def is_rotation(target_property): + return "rotation" in target_property + + +def is_scale(target_property): + return "scale" in target_property + + +def get_delta_modes(target_property: str) -> bool: + """Return whether target_property is a delta transform""" + return 
target_property.startswith("delta_") + + +def is_bone_anim_channel(data_path: str) -> bool: + return data_path[:10] == "pose.bones" + + +def get_sk_exported(key_blocks): + return [ + k + for k in key_blocks + if not skip_sk(key_blocks, k) + ] + + +def skip_sk(key_blocks, k): + # Do not export: + # - if muted + # - if relative key is SK itself (this avoid exporting Basis too if user didn't change order) + # - the Basis (the first SK of the list) + return k == k.relative_key \ + or k.mute \ + or is_first_index(key_blocks, k) is True + + +def is_first_index(key_blocks, k): + return key_blocks[0].name == k.name diff --git a/scripts/addons_core/io_scene_gltf2/blender/com/gltf2_blender_default.py b/scripts/addons_core/io_scene_gltf2/blender/com/gltf2_blender_default.py new file mode 100644 index 00000000000..c8b32bb439b --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/com/gltf2_blender_default.py @@ -0,0 +1,15 @@ +# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +BLENDER_IOR = 1.45 +BLENDER_SPECULAR = 0.5 +BLENDER_SPECULAR_TINT = 0.0 + +BLENDER_GLTF_SPECIAL_COLLECTION = "glTF_not_exported" + +LIGHTS = { + "POINT": "point", + "SUN": "directional", + "SPOT": "spot" +} diff --git a/scripts/addons_core/io_scene_gltf2/blender/com/gltf2_blender_extras.py b/scripts/addons_core/io_scene_gltf2/blender/com/gltf2_blender_extras.py new file mode 100644 index 00000000000..4a9680c3bcd --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/com/gltf2_blender_extras.py @@ -0,0 +1,87 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + + +import bpy +from .gltf2_blender_json import is_json_convertible + + +# Custom properties, which are in most cases present and should not be imported/exported. 
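+# The 'glTF2ExportSettings' entry below matches the exporter's scene_key, where saved export settings are stored on the scene, so those settings are never written out as glTF extras.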
+BLACK_LIST = ['cycles', 'cycles_visibility', 'cycles_curves', 'glTF2ExportSettings'] + + +def generate_extras(blender_element): + """Filter and create a custom property, which is stored in the glTF extra field.""" + if not blender_element: + return None + + extras = {} + + for custom_property in blender_element.keys(): + if custom_property in BLACK_LIST: + continue + + value = __to_json_compatible(blender_element[custom_property]) + + if value is not None: + extras[custom_property] = value + + if not extras: + return None + + return extras + + +def __to_json_compatible(value): + """Make a value (usually a custom property) compatible with json""" + + if isinstance(value, bpy.types.ID): + return value + + elif isinstance(value, str): + return value + + elif isinstance(value, (int, float)): + return value + + # for list classes + elif isinstance(value, list): + value = list(value) + # make sure contents are json-compatible too + for index in range(len(value)): + value[index] = __to_json_compatible(value[index]) + return value + + # for IDPropertyArray classes + elif hasattr(value, "to_list"): + value = value.to_list() + return value + + elif hasattr(value, "to_dict"): + value = value.to_dict() + if is_json_convertible(value): + return value + + return None + + +def set_extras(blender_element, extras, exclude=[]): + """Copy extras onto a Blender object.""" + if not extras or not isinstance(extras, dict): + return + + for custom_property, value in extras.items(): + if custom_property in BLACK_LIST: + continue + if custom_property in exclude: + continue + + try: + blender_element[custom_property] = value + except Exception: + # Try to convert to string + try: + blender_element[custom_property] = str(value) + except Exception: + print('Error setting property %s to value of type %s' % (custom_property, type(value))) diff --git a/scripts/addons_core/io_scene_gltf2/blender/com/gltf2_blender_json.py b/scripts/addons_core/io_scene_gltf2/blender/com/gltf2_blender_json.py new file mode 100755 index 00000000000..2109fe880e3 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/com/gltf2_blender_json.py @@ -0,0 +1,27 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import json +import bpy + + +class BlenderJSONEncoder(json.JSONEncoder): + """Blender JSON Encoder.""" + + def default(self, obj): + if isinstance(obj, bpy.types.ID): + return dict( + name=obj.name, + type=obj.__class__.__name__ + ) + return super(BlenderJSONEncoder, self).default(obj) + + +def is_json_convertible(data): + """Test, if a data set can be expressed as JSON.""" + try: + json.dumps(data, cls=BlenderJSONEncoder) + return True + except: + return False diff --git a/scripts/addons_core/io_scene_gltf2/blender/com/gltf2_blender_material_helpers.py b/scripts/addons_core/io_scene_gltf2/blender/com/gltf2_blender_material_helpers.py new file mode 100755 index 00000000000..91e90ce4c61 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/com/gltf2_blender_material_helpers.py @@ -0,0 +1,32 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy + +# Get compatibility at export with old files + + +def get_gltf_node_old_name(): + return "glTF Settings" + +# Old group name + + +def get_gltf_old_group_node_name(): + return "glTF Metallic Roughness" + + +def get_gltf_node_name(): + return "glTF Material Output" + + +def create_settings_group(name): + gltf_node_group = bpy.data.node_groups.new(name, 
'ShaderNodeTree') + gltf_node_group.interface.new_socket("Occlusion", socket_type="NodeSocketFloat") + thicknessFactor = gltf_node_group.interface.new_socket("Thickness", socket_type="NodeSocketFloat", ) + thicknessFactor.default_value = 0.0 + gltf_node_group.nodes.new('NodeGroupOutput') + gltf_node_group_input = gltf_node_group.nodes.new('NodeGroupInput') + gltf_node_group_input.location = -200, 0 + return gltf_node_group diff --git a/scripts/addons_core/io_scene_gltf2/blender/com/gltf2_blender_math.py b/scripts/addons_core/io_scene_gltf2/blender/com/gltf2_blender_math.py new file mode 100755 index 00000000000..823899ebd4d --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/com/gltf2_blender_math.py @@ -0,0 +1,209 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import typing +import math +from mathutils import Matrix, Vector, Quaternion, Euler + +from .gltf2_blender_data_path import get_target_property_name + + +def list_to_mathutils(values: typing.List[float], data_path: str) -> typing.Union[Vector, Quaternion, Euler]: + """Transform a list to blender py object.""" + target = get_target_property_name(data_path) + + if target == 'delta_location': + return Vector(values) # TODO Should be Vector(values) - Vector(something)? + elif target == 'delta_rotation_euler': + return Euler(values).to_quaternion() # TODO Should be Euler(values).to_quaternion() @ something? + elif target == 'location': + return Vector(values) + elif target == 'rotation_axis_angle': + angle = values[0] + axis = values[1:] + return Quaternion(axis, math.radians(angle)) + elif target == 'rotation_euler': + return Euler(values).to_quaternion() + elif target == 'rotation_quaternion': + return Quaternion(values) + elif target == 'scale': + return Vector(values) + elif target == 'value': + return Vector(values) + + return values + + +def mathutils_to_gltf(x: typing.Union[Vector, Quaternion]) -> typing.List[float]: + """Transform a py object to glTF list.""" + if isinstance(x, Vector): + return list(x) + if isinstance(x, Quaternion): + # Blender has w-first quaternion notation + return [x[1], x[2], x[3], x[0]] + else: + return list(x) + + +def to_yup() -> Matrix: + """Transform to Yup.""" + return Matrix( + ((1.0, 0.0, 0.0, 0.0), + (0.0, 0.0, 1.0, 0.0), + (0.0, -1.0, 0.0, 0.0), + (0.0, 0.0, 0.0, 1.0)) + ) + + +to_zup = to_yup + + +def swizzle_yup(v: typing.Union[Vector, Quaternion], data_path: str) -> typing.Union[Vector, Quaternion]: + """Manage Yup.""" + target = get_target_property_name(data_path) + swizzle_func = { + "delta_location": swizzle_yup_location, + "delta_rotation_euler": swizzle_yup_rotation, + "location": swizzle_yup_location, + "rotation_axis_angle": swizzle_yup_rotation, + "rotation_euler": swizzle_yup_rotation, + "rotation_quaternion": swizzle_yup_rotation, + "scale": swizzle_yup_scale, + "value": swizzle_yup_value + }.get(target) + + if swizzle_func is None: + raise RuntimeError("Cannot transform values at {}".format(data_path)) + + return swizzle_func(v) + + +def swizzle_yup_location(loc: Vector) -> Vector: + """Manage Yup location.""" + return Vector((loc[0], loc[2], -loc[1])) + + +def swizzle_yup_rotation(rot: Quaternion) -> Quaternion: + """Manage Yup rotation.""" + return Quaternion((rot[0], rot[1], rot[3], -rot[2])) + + +def swizzle_yup_scale(scale: Vector) -> Vector: + """Manage Yup scale.""" + return Vector((scale[0], scale[2], scale[1])) + + +def swizzle_yup_value(value: typing.Any) -> typing.Any: + """Manage Yup value.""" + 
return value + + +def transform(v: typing.Union[Vector, Quaternion], data_path: str, transform: Matrix = Matrix.Identity( + 4), need_rotation_correction: bool = False) -> typing .Union[Vector, Quaternion]: + """Manage transformations.""" + target = get_target_property_name(data_path) + transform_func = { + "delta_location": transform_location, + "delta_rotation_euler": transform_rotation, + "location": transform_location, + "rotation_axis_angle": transform_rotation, + "rotation_euler": transform_rotation, + "rotation_quaternion": transform_rotation, + "scale": transform_scale, + "value": transform_value + }.get(target) + + if transform_func is None: + raise RuntimeError("Cannot transform values at {}".format(data_path)) + + return transform_func(v, transform, need_rotation_correction) + + +def transform_location(location: Vector, transform: Matrix = Matrix.Identity(4), + need_rotation_correction: bool = False) -> Vector: + """Transform location.""" + correction = Quaternion((2**0.5 / 2, -2**0.5 / 2, 0.0, 0.0)) + m = Matrix.Translation(location) + if need_rotation_correction: + m @= correction.to_matrix().to_4x4() + m = transform @ m + return m.to_translation() + + +def transform_rotation(rotation: Quaternion, transform: Matrix = Matrix.Identity(4), + need_rotation_correction: bool = False) -> Quaternion: + """Transform rotation.""" + rotation.normalize() + correction = Quaternion((2**0.5 / 2, -2**0.5 / 2, 0.0, 0.0)) + m = rotation.to_matrix().to_4x4() + if need_rotation_correction: + m @= correction.to_matrix().to_4x4() + m = transform @ m + return m.to_quaternion() + + +def transform_scale(scale: Vector, transform: Matrix = Matrix.Identity(4), + need_rotation_correction: bool = False) -> Vector: + """Transform scale.""" + m = Matrix.Identity(4) + m[0][0] = scale.x + m[1][1] = scale.y + m[2][2] = scale.z + m = transform @ m + + return m.to_scale() + + +def transform_value(value: Vector, _: Matrix = Matrix.Identity(4), need_rotation_correction: bool = False) -> Vector: + """Transform value.""" + return value + + +def round_if_near(value: float, target: float) -> float: + """If value is very close to target, round to target.""" + return value if abs(value - target) > 2.0e-6 else target + + +def scale_rot_swap_matrix(rot): + """Returns a matrix m st. Scale[s] Rot[rot] = Rot[rot] Scale[m s]. + If rot.to_matrix() is a signed permutation matrix, works for any s. + Otherwise works only if s is a uniform scaling. + """ + m = nearby_signed_perm_matrix(rot) # snap to signed perm matrix + m.transpose() # invert permutation + for i in range(3): + for j in range(3): + m[i][j] = abs(m[i][j]) # discard sign + return m + + +def nearby_signed_perm_matrix(rot): + """Returns a signed permutation matrix close to rot.to_matrix(). + (A signed permutation matrix is like a permutation matrix, except + the non-zero entries can be ±1.) + """ + m = rot.to_matrix() + x, y, z = m[0], m[1], m[2] + + # Set the largest entry in the first row to ±1 + a, b, c = abs(x[0]), abs(x[1]), abs(x[2]) + i = 0 if a >= b and a >= c else 1 if b >= c else 2 + x[i] = 1 if x[i] > 0 else -1 + x[(i + 1) % 3] = 0 + x[(i + 2) % 3] = 0 + + # Same for second row: only two columns to consider now. 
+ a, b = abs(y[(i + 1) % 3]), abs(y[(i + 2) % 3]) + j = (i + 1) % 3 if a >= b else (i + 2) % 3 + y[j] = 1 if y[j] > 0 else -1 + y[(j + 1) % 3] = 0 + y[(j + 2) % 3] = 0 + + # Same for third row: only one column left + k = (0 + 1 + 2) - i - j + z[k] = 1 if z[k] > 0 else -1 + z[(k + 1) % 3] = 0 + z[(k + 2) % 3] = 0 + + return m diff --git a/scripts/addons_core/io_scene_gltf2/blender/com/gltf2_blender_ui.py b/scripts/addons_core/io_scene_gltf2/blender/com/gltf2_blender_ui.py new file mode 100644 index 00000000000..ce577e52fef --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/com/gltf2_blender_ui.py @@ -0,0 +1,734 @@ +# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy +from ..com.gltf2_blender_material_helpers import get_gltf_node_name, create_settings_group + +################ glTF Material Output node ########################################### + + +def create_gltf_ao_group(operator, group_name): + + # create a new group + gltf_ao_group = bpy.data.node_groups.new(group_name, "ShaderNodeTree") + + return gltf_ao_group + + +class NODE_OT_GLTF_SETTINGS(bpy.types.Operator): + bl_idname = "node.gltf_settings_node_operator" + bl_label = "glTF Material Output" + bl_description = "Add a node to the active tree for glTF export" + + @classmethod + def poll(cls, context): + space = context.space_data + return ( + space is not None + and space.type == "NODE_EDITOR" + and context.object and context.object.active_material + and context.object.active_material.use_nodes is True + and bpy.context.preferences.addons['io_scene_gltf2'].preferences.settings_node_ui is True + ) + + def execute(self, context): + gltf_settings_node_name = get_gltf_node_name() + if gltf_settings_node_name in bpy.data.node_groups: + my_group = bpy.data.node_groups[get_gltf_node_name()] + else: + my_group = create_settings_group(gltf_settings_node_name) + node_tree = context.object.active_material.node_tree + new_node = node_tree.nodes.new("ShaderNodeGroup") + new_node.node_tree = bpy.data.node_groups[my_group.name] + return {"FINISHED"} + + +def add_gltf_settings_to_menu(self, context): + if bpy.context.preferences.addons['io_scene_gltf2'].preferences.settings_node_ui is True: + self.layout.operator("node.gltf_settings_node_operator") + +################################### KHR_materials_variants #################### + +# Global UI panel + + +class gltf2_KHR_materials_variants_variant(bpy.types.PropertyGroup): + variant_idx: bpy.props.IntProperty() + name: bpy.props.StringProperty(name="Variant Name") + + +class SCENE_UL_gltf2_variants(bpy.types.UIList): + def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index): + + if self.layout_type in {'DEFAULT', 'COMPACT'}: + layout.prop(item, "name", text="", emboss=False) + + elif self.layout_type in {'GRID'}: + layout.alignment = 'CENTER' + + +class SCENE_PT_gltf2_variants(bpy.types.Panel): + bl_label = "glTF Material Variants" + bl_space_type = 'VIEW_3D' + bl_region_type = 'UI' + bl_category = "glTF Variants" + + @classmethod + def poll(self, context): + return bpy.context.preferences.addons['io_scene_gltf2'].preferences.KHR_materials_variants_ui is True + + def draw(self, context): + layout = self.layout + row = layout.row() + + if bpy.data.scenes[0].get('gltf2_KHR_materials_variants_variants') and len( + bpy.data.scenes[0].gltf2_KHR_materials_variants_variants) > 0: + + row.template_list( + "SCENE_UL_gltf2_variants", + "", + bpy.data.scenes[0], + 
"gltf2_KHR_materials_variants_variants", + bpy.data.scenes[0], + "gltf2_active_variant") + col = row.column() + row = col.column(align=True) + row.operator("scene.gltf2_variant_add", icon="ADD", text="") + row.operator("scene.gltf2_variant_remove", icon="REMOVE", text="") + + row = layout.row() + row.operator("scene.gltf2_display_variant", text="Display Variant") + row = layout.row() + row.operator("scene.gltf2_assign_to_variant", text="Assign To Variant") + row = layout.row() + row.operator("scene.gltf2_reset_to_original", text="Reset To Original") + row.operator("scene.gltf2_assign_as_original", text="Assign as Original") + else: + row.operator("scene.gltf2_variant_add", text="Add Material Variant") + + +class SCENE_OT_gltf2_variant_add(bpy.types.Operator): + """Add a new Material Variant""" + bl_idname = "scene.gltf2_variant_add" + bl_label = "Add Material Variant" + bl_options = {'REGISTER'} + + @classmethod + def poll(self, context): + return True + + def execute(self, context): + var = bpy.data.scenes[0].gltf2_KHR_materials_variants_variants.add() + var.variant_idx = len(bpy.data.scenes[0].gltf2_KHR_materials_variants_variants) - 1 + var.name = "VariantName" + bpy.data.scenes[0].gltf2_active_variant = len(bpy.data.scenes[0].gltf2_KHR_materials_variants_variants) - 1 + return {'FINISHED'} + + +class SCENE_OT_gltf2_variant_remove(bpy.types.Operator): + """Add a new Material Variant""" + bl_idname = "scene.gltf2_variant_remove" + bl_label = "Remove Variant" + bl_options = {'REGISTER'} + + @classmethod + def poll(self, context): + return len(bpy.data.scenes[0].gltf2_KHR_materials_variants_variants) > 0 + + def execute(self, context): + bpy.data.scenes[0].gltf2_KHR_materials_variants_variants.remove(bpy.data.scenes[0].gltf2_active_variant) + + # loop on all mesh + for obj in [o for o in bpy.data.objects if o.type == "MESH"]: + mesh = obj.data + remove_idx_data = [] + for idx, i in enumerate(mesh.gltf2_variant_mesh_data): + remove_idx_variants = [] + for idx_var, v in enumerate(i.variants): + if v.variant.variant_idx == bpy.data.scenes[0].gltf2_active_variant: + remove_idx_variants.append(idx_var) + elif v.variant.variant_idx > bpy.data.scenes[0].gltf2_active_variant: + v.variant.variant_idx -= 1 + + if len(remove_idx_variants) > 0: + for idx_var in remove_idx_variants: + i.variants.remove(idx_var) + + if len(i.variants) == 0: + remove_idx_data.append(idx) + + if len(remove_idx_data) > 0: + for idx_data in remove_idx_data: + mesh.gltf2_variant_mesh_data.remove(idx_data) + + return {'FINISHED'} + + +# Operator to display a variant +class SCENE_OT_gltf2_display_variant(bpy.types.Operator): + bl_idname = "scene.gltf2_display_variant" + bl_label = "Display Variant" + bl_options = {'REGISTER'} + + @classmethod + def poll(self, context): + return len(bpy.data.scenes[0].gltf2_KHR_materials_variants_variants) > 0 + + def execute(self, context): + + gltf2_active_variant = bpy.data.scenes[0].gltf2_active_variant + + # loop on all mesh + for obj in [o for o in bpy.data.objects if o.type == "MESH"]: + mesh = obj.data + for i in mesh.gltf2_variant_mesh_data: + if i.variants and gltf2_active_variant in [v.variant.variant_idx for v in i.variants]: + mat = i.material + slot = i.material_slot_index + if slot < len(obj.material_slots): # Seems user remove some slots... 
+ obj.material_slots[slot].material = mat + + return {'FINISHED'} + +# Operator to assign current mesh materials to a variant + + +class SCENE_OT_gltf2_assign_to_variant(bpy.types.Operator): + bl_idname = "scene.gltf2_assign_to_variant" + bl_label = "Assign To Variant" + bl_options = {'REGISTER'} + + @classmethod + def poll(self, context): + return len(bpy.data.scenes[0].gltf2_KHR_materials_variants_variants) > 0 \ + and bpy.context.object and bpy.context.object.type == "MESH" + + def execute(self, context): + gltf2_active_variant = bpy.data.scenes[0].gltf2_active_variant + obj = bpy.context.object + + # loop on material slots ( primitives ) + for mat_slot_idx, s in enumerate(obj.material_slots): + # Check if there is already data for this slot + found = False + for i in obj.data.gltf2_variant_mesh_data: + if i.material_slot_index == mat_slot_idx and i.material == s.material: + found = True + variant_primitive = i + + if found is False: + variant_primitive = obj.data.gltf2_variant_mesh_data.add() + variant_primitive.material_slot_index = mat_slot_idx + variant_primitive.material = s.material + + vari = variant_primitive.variants.add() + vari.variant.variant_idx = bpy.data.scenes[0].gltf2_active_variant + + return {'FINISHED'} + +# Operator to reset mesh to original (using default material when exists) + + +class SCENE_OT_gltf2_reset_to_original(bpy.types.Operator): + bl_idname = "scene.gltf2_reset_to_original" + bl_label = "Reset to Original" + bl_options = {'REGISTER'} + + @classmethod + def poll(self, context): + return bpy.context.object and bpy.context.object.type == "MESH" and len( + context.object.data.gltf2_variant_default_materials) > 0 + + def execute(self, context): + obj = bpy.context.object + + # loop on material slots ( primitives ) + for mat_slot_idx, s in enumerate(obj.material_slots): + # Check if there is a default material for this slot + found = False + for i in obj.data.gltf2_variant_default_materials: + if i.material_slot_index == mat_slot_idx: + s.material = i.default_material + break + + return {'FINISHED'} + +# Operator to assign current materials as default materials + + +class SCENE_OT_gltf2_assign_as_original(bpy.types.Operator): + bl_idname = "scene.gltf2_assign_as_original" + bl_label = "Assign as Original" + bl_options = {'REGISTER'} + + @classmethod + def poll(self, context): + return bpy.context.object and bpy.context.object.type == "MESH" + + def execute(self, context): + obj = bpy.context.object + + # loop on material slots ( primitives ) + for mat_slot_idx, s in enumerate(obj.material_slots): + # Check if there is a default material for this slot + found = False + for i in obj.data.gltf2_variant_default_materials: + if i.material_slot_index == mat_slot_idx: + found = True + # Update if needed + i.default_material = s.material + break + + if found is False: + default_mat = obj.data.gltf2_variant_default_materials.add() + default_mat.material_slot_index = mat_slot_idx + default_mat.default_material = s.material + + return {'FINISHED'} + +# Mesh Panel + + +class gltf2_KHR_materials_variant_pointer(bpy.types.PropertyGroup): + variant: bpy.props.PointerProperty(type=gltf2_KHR_materials_variants_variant) + + +class gltf2_KHR_materials_variants_default_material(bpy.types.PropertyGroup): + material_slot_index: bpy.props.IntProperty(name="Material Slot Index") + default_material: bpy.props.PointerProperty(type=bpy.types.Material) + + +class gltf2_KHR_materials_variants_primitive(bpy.types.PropertyGroup): + material_slot_index: bpy.props.IntProperty(name="Material 
Slot Index") + material: bpy.props.PointerProperty(type=bpy.types.Material) + variants: bpy.props.CollectionProperty(type=gltf2_KHR_materials_variant_pointer) + active_variant_idx: bpy.props.IntProperty() + + +class MESH_UL_gltf2_mesh_variants(bpy.types.UIList): + def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index): + + vari = item.variant + layout.context_pointer_set("id", vari) + + if self.layout_type in {'DEFAULT', 'COMPACT'}: + layout.prop(bpy.data.scenes[0].gltf2_KHR_materials_variants_variants[vari.variant_idx], + "name", text="", emboss=False) + elif self.layout_type in {'GRID'}: + layout.alignment = 'CENTER' + + +class MESH_PT_gltf2_mesh_variants(bpy.types.Panel): + bl_label = "glTF Material Variants" + bl_space_type = 'PROPERTIES' + bl_region_type = 'WINDOW' + bl_context = "material" + + @classmethod + def poll(self, context): + return bpy.context.preferences.addons['io_scene_gltf2'].preferences.KHR_materials_variants_ui is True \ + and len(bpy.context.object.material_slots) > 0 + + def draw(self, context): + layout = self.layout + + active_material_slots = bpy.context.object.active_material_index + + found = False + if 'gltf2_variant_mesh_data' in bpy.context.object.data.keys(): + for idx, prim in enumerate(bpy.context.object.data.gltf2_variant_mesh_data): + if prim.material_slot_index == active_material_slots and id(prim.material) == id( + bpy.context.object.material_slots[active_material_slots].material): + found = True + break + + row = layout.row() + if found is True: + row.template_list("MESH_UL_gltf2_mesh_variants", "", prim, "variants", prim, "active_variant_idx") + col = row.column() + row = col.column(align=True) + row.operator("scene.gltf2_variants_slot_add", icon="ADD", text="") + row.operator("scene.gltf2_remove_material_variant", icon="REMOVE", text="") + + row = layout.row() + if 'gltf2_KHR_materials_variants_variants' in bpy.data.scenes[0].keys() and len( + bpy.data.scenes[0].gltf2_KHR_materials_variants_variants) > 0: + row.prop_search( + context.object.data, + "gltf2_variant_pointer", + bpy.data.scenes[0], + "gltf2_KHR_materials_variants_variants", + text="Variant") + row = layout.row() + row.operator("scene.gltf2_material_to_variant", text="Assign To Variant") + else: + row.label(text="Please Create a Variant First") + else: + if 'gltf2_KHR_materials_variants_variants' in bpy.data.scenes[0].keys() and len( + bpy.data.scenes[0].gltf2_KHR_materials_variants_variants) > 0: + row.operator("scene.gltf2_variants_slot_add", text="Add a new Variant Slot") + else: + row.label(text="Please Create a Variant First") + + +class SCENE_OT_gltf2_variant_slot_add(bpy.types.Operator): + """Add a new Slot""" + bl_idname = "scene.gltf2_variants_slot_add" + bl_label = "Add new Slot" + bl_options = {'REGISTER'} + + @classmethod + def poll(self, context): + return len(bpy.context.object.material_slots) > 0 + + def execute(self, context): + mesh = context.object.data + # Check if there is already a data for this slot_idx + material + + found = False + for i in mesh.gltf2_variant_mesh_data: + if i.material_slot_index == context.object.active_material_index and i.material == context.object.material_slots[ + context.object.active_material_index].material: + found = True + variant_primitive = i + + if found is False: + variant_primitive = mesh.gltf2_variant_mesh_data.add() + variant_primitive.material_slot_index = context.object.active_material_index + variant_primitive.material = 
context.object.material_slots[context.object.active_material_index].material + + vari = variant_primitive.variants.add() + vari.variant.variant_idx = bpy.data.scenes[0].gltf2_active_variant + + return {'FINISHED'} + + +class SCENE_OT_gltf2_material_to_variant(bpy.types.Operator): + """Assign Variant to Slot""" + bl_idname = "scene.gltf2_material_to_variant" + bl_label = "Assign Material To Variant" + bl_options = {'REGISTER'} + + @classmethod + def poll(self, context): + return len(bpy.context.object.material_slots) > 0 and context.object.data.gltf2_variant_pointer != "" + + def execute(self, context): + mesh = context.object.data + + found = False + for i in mesh.gltf2_variant_mesh_data: + if i.material_slot_index == context.object.active_material_index and i.material == context.object.material_slots[ + context.object.active_material_index].material: + found = True + variant_primitive = i + + if found is False: + return {'CANCELLED'} + + vari = variant_primitive.variants[variant_primitive.active_variant_idx] + + # Retrieve variant idx + found = False + for v in bpy.data.scenes[0].gltf2_KHR_materials_variants_variants: + if v.name == context.object.data.gltf2_variant_pointer: + found = True + break + + if found is False: + return {'CANCELLED'} + + vari.variant.variant_idx = v.variant_idx + + return {'FINISHED'} + + +class SCENE_OT_gltf2_remove_material_variant(bpy.types.Operator): + """Remove a variant Slot""" + bl_idname = "scene.gltf2_remove_material_variant" + bl_label = "Remove a variant Slot" + bl_options = {'REGISTER'} + + @classmethod + def poll(self, context): + return len(bpy.context.object.material_slots) > 0 and len(bpy.context.object.data.gltf2_variant_mesh_data) > 0 + + def execute(self, context): + mesh = context.object.data + + found = False + found_idx = -1 + for idx, i in enumerate(mesh.gltf2_variant_mesh_data): + if i.material_slot_index == context.object.active_material_index and i.material == context.object.material_slots[ + context.object.active_material_index].material: + found = True + variant_primitive = i + found_idx = idx + + if found is False: + return {'CANCELLED'} + + variant_primitive.variants.remove(variant_primitive.active_variant_idx) + + if len(variant_primitive.variants) == 0: + mesh.gltf2_variant_mesh_data.remove(found_idx) + + return {'FINISHED'} + + +################ glTF Animation ########################################### + +class gltf2_animation_NLATrackNames(bpy.types.PropertyGroup): + name: bpy.props.StringProperty(name="NLA Track Name") + + +class SCENE_UL_gltf2_animation_track(bpy.types.UIList): + def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index): + + if self.layout_type in {'DEFAULT', 'COMPACT'}: + row = layout.row() + icon = 'SOLO_ON' if index == bpy.data.scenes[0].gltf2_animation_applied else 'SOLO_OFF' + row.prop(item, "name", text="", emboss=False) + op = row.operator("scene.gltf2_animation_apply", text='', icon=icon) + op.index = index + + elif self.layout_type in {'GRID'}: + layout.alignment = 'CENTER' + + +class SCENE_OT_gltf2_animation_apply(bpy.types.Operator): + """Apply glTF animations""" + bl_idname = "scene.gltf2_animation_apply" + bl_label = "Apply glTF animation" + bl_options = {'REGISTER'} + + index: bpy.props.IntProperty() + + @classmethod + def poll(self, context): + return True + + def execute(self, context): + + track_name = bpy.data.scenes[0].gltf2_animation_tracks[self.index].name + + # remove all actions from objects + for obj in bpy.context.scene.objects: + if 
obj.animation_data: + obj.animation_data.action = None + obj.matrix_world = obj.gltf2_animation_rest + + for track in [track for track in obj.animation_data.nla_tracks if track.name == + track_name and len(track.strips) > 0 and track.strips[0].action is not None]: + obj.animation_data.action = track.strips[0].action + + if obj.type == "MESH" and obj.data and obj.data.shape_keys and obj.data.shape_keys.animation_data: + obj.data.shape_keys.animation_data.action = None + for idx, data in enumerate(obj.gltf2_animation_weight_rest): + obj.data.shape_keys.key_blocks[idx + 1].value = data.val + + for track in [track for track in obj.data.shape_keys.animation_data.nla_tracks if track.name == + track_name and len(track.strips) > 0 and track.strips[0].action is not None]: + obj.data.shape_keys.animation_data.action = track.strips[0].action + + if obj.type in ["LIGHT", "CAMERA"] and obj.data and obj.data.animation_data: + obj.data.animation_data.action = None + for track in [track for track in obj.data.animation_data.nla_tracks if track.name == + track_name and len(track.strips) > 0 and track.strips[0].action is not None]: + obj.data.animation_data.action = track.strips[0].action + + for mat in bpy.data.materials: + if not mat.node_tree: + continue + if mat.node_tree.animation_data: + mat.node_tree.animation_data.action = None + for track in [track for track in mat.node_tree.animation_data.nla_tracks if track.name == + track_name and len(track.strips) > 0 and track.strips[0].action is not None]: + mat.node_tree.animation_data.action = track.strips[0].action + + bpy.data.scenes[0].gltf2_animation_applied = self.index + return {'FINISHED'} + + +class SCENE_PT_gltf2_animation(bpy.types.Panel): + bl_label = "glTF Animations" + bl_space_type = 'DOPESHEET_EDITOR' + bl_region_type = 'UI' + bl_category = "glTF" + + @classmethod + def poll(self, context): + return bpy.context.preferences.addons['io_scene_gltf2'].preferences.animation_ui is True + + def draw(self, context): + layout = self.layout + row = layout.row() + + if len(bpy.data.scenes[0].gltf2_animation_tracks) > 0: + row.template_list( + "SCENE_UL_gltf2_animation_track", + "", + bpy.data.scenes[0], + "gltf2_animation_tracks", + bpy.data.scenes[0], + "gltf2_animation_active") + else: + row.label(text="No glTF Animation") + + +class GLTF2_weight(bpy.types.PropertyGroup): + val: bpy.props.FloatProperty(name="weight") + +################################### Filtering animation #################### + + +class SCENE_OT_gltf2_action_filter_refresh(bpy.types.Operator): + """Refresh list of actions""" + bl_idname = "scene.gltf2_action_filter_refresh" + bl_label = "Refresh action list" + bl_options = {'REGISTER'} + + @classmethod + def poll(self, context): + return True + + def execute(self, context): + for action in bpy.data.actions: + if id(action) in [id(i.action) for i in bpy.data.scenes[0].gltf_action_filter]: + continue + item = bpy.data.scenes[0].gltf_action_filter.add() + item.action = action + item.keep = True + + return {'FINISHED'} + + +class SCENE_UL_gltf2_filter_action(bpy.types.UIList): + def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index): + + action = item.action + layout.context_pointer_set("id", action) + + if self.layout_type in {'DEFAULT', 'COMPACT'}: + layout.prop(item.action, "name", text="", emboss=False) + layout.prop(item, "keep", text="", emboss=True) + + elif self.layout_type in {'GRID'}: + layout.alignment = 'CENTER' + + +def export_panel_animation_action_filter(layout, operator): + if 
operator.export_animation_mode not in ["ACTIONS", "ACTIVE_ACTIONS", "BROADCAST"]: + return + + header, body = layout.panel("GLTF_export_action_filter", default_closed=True) + header.use_property_split = False + header.prop(operator, "export_action_filter", text="") + header.label(text="Action Filter") + if body and operator.export_action_filter: + body.active = operator.export_animations and operator.export_action_filter + + row = body.row() + + if len(bpy.data.actions) > 0: + row.template_list( + "SCENE_UL_gltf2_filter_action", + "", + bpy.data.scenes[0], + "gltf_action_filter", + bpy.data.scenes[0], + "gltf_action_filter_active") + col = row.column() + row = col.column(align=True) + row.operator("scene.gltf2_action_filter_refresh", icon="FILE_REFRESH", text="") + else: + row.label(text="No Actions in .blend file") + +############################################################################### + + +def register(): + bpy.utils.register_class(NODE_OT_GLTF_SETTINGS) + bpy.types.NODE_MT_category_shader_output.append(add_gltf_settings_to_menu) + bpy.utils.register_class(SCENE_OT_gltf2_action_filter_refresh) + bpy.utils.register_class(SCENE_UL_gltf2_filter_action) + + +def variant_register(): + bpy.utils.register_class(SCENE_OT_gltf2_display_variant) + bpy.utils.register_class(SCENE_OT_gltf2_assign_to_variant) + bpy.utils.register_class(SCENE_OT_gltf2_reset_to_original) + bpy.utils.register_class(SCENE_OT_gltf2_assign_as_original) + bpy.utils.register_class(SCENE_OT_gltf2_remove_material_variant) + bpy.utils.register_class(gltf2_KHR_materials_variants_variant) + bpy.utils.register_class(gltf2_KHR_materials_variant_pointer) + bpy.utils.register_class(gltf2_KHR_materials_variants_primitive) + bpy.utils.register_class(gltf2_KHR_materials_variants_default_material) + bpy.utils.register_class(SCENE_UL_gltf2_variants) + bpy.utils.register_class(SCENE_PT_gltf2_variants) + bpy.utils.register_class(MESH_UL_gltf2_mesh_variants) + bpy.utils.register_class(MESH_PT_gltf2_mesh_variants) + bpy.utils.register_class(SCENE_OT_gltf2_variant_add) + bpy.utils.register_class(SCENE_OT_gltf2_variant_remove) + bpy.utils.register_class(SCENE_OT_gltf2_material_to_variant) + bpy.utils.register_class(SCENE_OT_gltf2_variant_slot_add) + bpy.types.Mesh.gltf2_variant_mesh_data = bpy.props.CollectionProperty(type=gltf2_KHR_materials_variants_primitive) + bpy.types.Mesh.gltf2_variant_default_materials = bpy.props.CollectionProperty( + type=gltf2_KHR_materials_variants_default_material) + bpy.types.Mesh.gltf2_variant_pointer = bpy.props.StringProperty() + bpy.types.Scene.gltf2_KHR_materials_variants_variants = bpy.props.CollectionProperty( + type=gltf2_KHR_materials_variants_variant) + bpy.types.Scene.gltf2_active_variant = bpy.props.IntProperty() + + +def unregister(): + bpy.utils.unregister_class(NODE_OT_GLTF_SETTINGS) + bpy.utils.unregister_class(SCENE_UL_gltf2_filter_action) + bpy.utils.unregister_class(SCENE_OT_gltf2_action_filter_refresh) + + +def variant_unregister(): + bpy.utils.unregister_class(SCENE_OT_gltf2_variant_add) + bpy.utils.unregister_class(SCENE_OT_gltf2_variant_remove) + bpy.utils.unregister_class(SCENE_OT_gltf2_material_to_variant) + bpy.utils.unregister_class(SCENE_OT_gltf2_variant_slot_add) + bpy.utils.unregister_class(SCENE_OT_gltf2_display_variant) + bpy.utils.unregister_class(SCENE_OT_gltf2_assign_to_variant) + bpy.utils.unregister_class(SCENE_OT_gltf2_reset_to_original) + bpy.utils.unregister_class(SCENE_OT_gltf2_assign_as_original) + 
bpy.utils.unregister_class(SCENE_OT_gltf2_remove_material_variant) + bpy.utils.unregister_class(SCENE_PT_gltf2_variants) + bpy.utils.unregister_class(SCENE_UL_gltf2_variants) + bpy.utils.unregister_class(MESH_PT_gltf2_mesh_variants) + bpy.utils.unregister_class(MESH_UL_gltf2_mesh_variants) + bpy.utils.unregister_class(gltf2_KHR_materials_variants_default_material) + bpy.utils.unregister_class(gltf2_KHR_materials_variants_primitive) + bpy.utils.unregister_class(gltf2_KHR_materials_variants_variant) + bpy.utils.unregister_class(gltf2_KHR_materials_variant_pointer) + + +def anim_ui_register(): + bpy.utils.register_class(GLTF2_weight) + bpy.utils.register_class(SCENE_OT_gltf2_animation_apply) + bpy.utils.register_class(gltf2_animation_NLATrackNames) + bpy.utils.register_class(SCENE_UL_gltf2_animation_track) + bpy.types.Scene.gltf2_animation_tracks = bpy.props.CollectionProperty(type=gltf2_animation_NLATrackNames) + bpy.types.Scene.gltf2_animation_active = bpy.props.IntProperty() + bpy.types.Scene.gltf2_animation_applied = bpy.props.IntProperty() + bpy.types.Object.gltf2_animation_rest = bpy.props.FloatVectorProperty(name="Rest", size=[4, 4], subtype="MATRIX") + bpy.types.Object.gltf2_animation_weight_rest = bpy.props.CollectionProperty(type=GLTF2_weight) + bpy.utils.register_class(SCENE_PT_gltf2_animation) + + +def anim_ui_unregister(): + bpy.utils.unregister_class(SCENE_PT_gltf2_animation) + del bpy.types.Scene.gltf2_animation_active + del bpy.types.Scene.gltf2_animation_tracks + del bpy.types.Scene.gltf2_animation_applied + del bpy.types.Object.gltf2_animation_rest + del bpy.types.Object.gltf2_animation_weight_rest + bpy.utils.unregister_class(SCENE_UL_gltf2_animation_track) + bpy.utils.unregister_class(gltf2_animation_NLATrackNames) + bpy.utils.unregister_class(SCENE_OT_gltf2_animation_apply) + bpy.utils.unregister_class(GLTF2_weight) diff --git a/scripts/addons_core/io_scene_gltf2/blender/com/gltf2_blender_utils.py b/scripts/addons_core/io_scene_gltf2/blender/com/gltf2_blender_utils.py new file mode 100644 index 00000000000..7a07f5e0d09 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/com/gltf2_blender_utils.py @@ -0,0 +1,70 @@ +# SPDX-FileCopyrightText: 2018-2024 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + + +def fast_structured_np_unique(arr, *args, **kwargs): + """ + np.unique optimized for structured arrays when a sorted result is not required. + + np.unique works through sorting, but sorting a structured array requires as many sorts as there are fields in the + structured dtype. + + By viewing the array as a single non-structured dtype that sorts according to its bytes, unique elements can be + found with a single sort. Since the values are viewed as a different type to their original, this means that the + returned array of unique values may not be sorted according to their original type. + + Float field caveats: + All elements of -0.0 in the input array will be replaced with 0.0 to ensure that both values are collapsed into one. + NaN values can have lots of different byte representations (e.g. signalling/quiet and custom payloads). Only the + duplicates of each unique byte representation will be collapsed into one. + + Nested structured dtypes are not supported. + The behaviour of structured dtypes with overlapping fields is undefined. 
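+ For example, passing np.array([(1, 2), (1, 2), (3, 4)], dtype=[('x', np.int32), ('y', np.int32)]) returns the two unique rows after a single byte-wise sort instead of one sort per field.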
+ """ + structured_dtype = arr.dtype + fields = structured_dtype.fields + if fields is None: + raise RuntimeError('%s is not a structured dtype' % structured_dtype) + + for field_name, (field_dtype, *_offset_and_optional_title) in fields.items(): + if field_dtype.subdtype is not None: + raise RuntimeError('Nested structured types are not supported in %s' % structured_dtype) + if field_dtype.kind == 'f': + # Replace all -0.0 in the array with 0.0 because -0.0 and 0.0 have different byte representations. + arr[field_name][arr[field_name] == -0.0] = 0.0 + elif field_dtype.kind not in "iuUSV": + # Signed integer, unsigned integer, unicode string, byte string (bytes) and raw bytes (void) can be left + # as they are. Everything else is unsupported. + raise RuntimeError('Unsupported structured field type %s for field %s' % (field_dtype, field_name)) + + structured_itemsize = structured_dtype.itemsize + + # Integer types sort the fastest, but are only available for specific itemsizes. + uint_dtypes_by_itemsize = {1: np.uint8, 2: np.uint16, 4: np.uint32, 8: np.uint64} + # Signed/unsigned makes no noticeable speed difference, but using unsigned will result in ordering according to + # individual bytes like the other, non-integer types. + if structured_itemsize in uint_dtypes_by_itemsize: + entire_structure_dtype = uint_dtypes_by_itemsize[structured_itemsize] + else: + # Construct a flexible size dtype with matching itemsize to the entire structured dtype. + # Should always be 4 because each character in a unicode string is UCS4. + str_itemsize = np.dtype((np.str_, 1)).itemsize + if structured_itemsize % str_itemsize == 0: + # Unicode strings seem to be slightly faster to sort than bytes. + entire_structure_dtype = np.dtype((np.str_, structured_itemsize // str_itemsize)) + else: + # Bytes seem to be slightly faster to sort than raw bytes (np.void). + entire_structure_dtype = np.dtype((np.bytes_, structured_itemsize)) + + result = np.unique(arr.view(entire_structure_dtype), *args, **kwargs) + + unique = result[0] if isinstance(result, tuple) else result + # View in the original dtype. 
+ unique = unique.view(arr.dtype) + if isinstance(result, tuple): + return (unique,) + result[1:] + else: + return unique diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/animation/fcurves/gltf2_blender_gather_fcurves_animation.py b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/fcurves/gltf2_blender_gather_fcurves_animation.py new file mode 100644 index 00000000000..38d50d13f6f --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/fcurves/gltf2_blender_gather_fcurves_animation.py @@ -0,0 +1,55 @@ +# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy +from .....io.com import gltf2_io +from .....io.exp.gltf2_io_user_extensions import export_user_extensions +from ....com.gltf2_blender_extras import generate_extras +from .gltf2_blender_gather_fcurves_channels import gather_animation_fcurves_channels + + +def gather_animation_fcurves( + obj_uuid: str, + blender_action: bpy.types.Action, + export_settings +): + + name = __gather_name(blender_action, export_settings) + + channels, to_be_sampled, extra_samplers = __gather_channels_fcurves(obj_uuid, blender_action, export_settings) + + animation = gltf2_io.Animation( + channels=channels, + extensions=None, + extras=__gather_extras(blender_action, export_settings), + name=name, + samplers=[] + ) + + if not animation.channels: + return None, to_be_sampled, extra_samplers + + blender_object = export_settings['vtree'].nodes[obj_uuid].blender_object + export_user_extensions('animation_gather_fcurve', export_settings, blender_object, blender_action) + + return animation, to_be_sampled, extra_samplers + + +def __gather_name(blender_action: bpy.types.Action, + export_settings + ) -> str: + return blender_action.name + + +def __gather_channels_fcurves( + obj_uuid: str, + blender_action: bpy.types.Action, + export_settings): + return gather_animation_fcurves_channels(obj_uuid, blender_action, export_settings) + + +def __gather_extras(blender_action, export_settings): + if export_settings['gltf_extras']: + return generate_extras(blender_action) + return None diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/animation/fcurves/gltf2_blender_gather_fcurves_channel_target.py b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/fcurves/gltf2_blender_gather_fcurves_channel_target.py new file mode 100644 index 00000000000..6828b9e3f34 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/fcurves/gltf2_blender_gather_fcurves_channel_target.py @@ -0,0 +1,53 @@ +# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy +import typing +from .....io.com import gltf2_io +from .....io.exp.gltf2_io_user_extensions import export_user_extensions +from ....com.gltf2_blender_conversion import get_target +from ...gltf2_blender_gather_cache import cached +from ...gltf2_blender_gather_joints import gather_joint_vnode + + +@cached +def gather_fcurve_channel_target( + obj_uuid: str, + channels: typing.Tuple[bpy.types.FCurve], + bone: typing.Optional[str], + export_settings +) -> gltf2_io.AnimationChannelTarget: + + animation_channel_target = gltf2_io.AnimationChannelTarget( + extensions=None, + extras=None, + node=__gather_node(obj_uuid, bone, export_settings), + path=__gather_path(channels, export_settings) + ) + + blender_object = export_settings['vtree'].nodes[obj_uuid].blender_object + export_user_extensions('animation_gather_fcurve_channel_target', export_settings, 
blender_object, bone) + + return animation_channel_target + + +def __gather_node(obj_uuid: str, + bone: typing.Union[str, None], + export_settings + ) -> gltf2_io.Node: + + if bone is not None: + return gather_joint_vnode(export_settings['vtree'].nodes[obj_uuid].bones[bone], export_settings) + else: + return export_settings['vtree'].nodes[obj_uuid].node + + +def __gather_path(channels: typing.Tuple[bpy.types.FCurve], + export_settings + ) -> str: + + # Note: channels has some None items only for SK if some SK are not animated, so keep a not None channel item + target = [c for c in channels if c is not None][0].data_path.split('.')[-1] + + return get_target(target) diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/animation/fcurves/gltf2_blender_gather_fcurves_channels.py b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/fcurves/gltf2_blender_gather_fcurves_channels.py new file mode 100644 index 00000000000..de4ea6e1a7b --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/fcurves/gltf2_blender_gather_fcurves_channels.py @@ -0,0 +1,377 @@ +# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy +import typing +from .....io.exp.gltf2_io_user_extensions import export_user_extensions +from .....blender.com.gltf2_blender_data_path import skip_sk +from .....io.com import gltf2_io +from ....exp.gltf2_blender_gather_cache import cached +from ....com.gltf2_blender_data_path import get_target_object_path, get_target_property_name, get_rotation_modes +from ....com.gltf2_blender_conversion import get_target, get_channel_from_target +from ...gltf2_blender_get import get_object_from_datapath +from .gltf2_blender_gather_fcurves_channel_target import gather_fcurve_channel_target +from .gltf2_blender_gather_fcurves_sampler import gather_animation_fcurves_sampler + + +@cached +def gather_animation_fcurves_channels( + obj_uuid: int, + blender_action: bpy.types.Action, + export_settings +): + + channels_to_perform, to_be_sampled, extra_channels_to_perform = get_channel_groups( + obj_uuid, blender_action, export_settings) + + custom_range = None + if blender_action.use_frame_range: + custom_range = (blender_action.frame_start, blender_action.frame_end) + + channels = [] + extra_samplers = [] + + for chan in [chan for chan in channels_to_perform.values() if len(chan['properties']) != 0]: + for channel_group in chan['properties'].values(): + channel = __gather_animation_fcurve_channel( + chan['obj_uuid'], channel_group, chan['bone'], custom_range, export_settings) + if channel is not None: + channels.append(channel) + + if export_settings['gltf_export_extra_animations']: + for chan in [chan for chan in extra_channels_to_perform.values() if len(chan['properties']) != 0]: + for channel_group_name, channel_group in chan['properties'].items(): + + # No glTF channel here, as we don't have any target + # Trying to retrieve sampler directly + sampler = __gather_sampler(obj_uuid, tuple(channel_group), None, custom_range, True, export_settings) + if sampler is not None: + extra_samplers.append((channel_group_name, sampler, "OBJECT", None)) + + return channels, to_be_sampled, extra_samplers + + +def get_channel_groups(obj_uuid: str, blender_action: bpy.types.Action, export_settings, no_sample_option=False): + # no_sample_option is used when we want to retrieve all SK channels, to be evaluate. 
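+ # Returns three values: fcurve channels grouped per target and property, a "to_be_sampled" list of (object uuid, target type, channel, optional bone name) tuples that cannot be exported from fcurves directly, and extra (custom property) channel groups.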
+ targets = {} + targets_extra = {} + + blender_object = export_settings['vtree'].nodes[obj_uuid].blender_object + + # When multiple rotation modes are detected, keep only the currently used one + multiple_rotation_mode_detected = {} + + # When both normal and delta are used --> Set to to_be_sampled list + to_be_sampled = [] # (object_uuid, type, prop, optional(bone.name)) + + for fcurve in blender_action.fcurves: + type_ = None + # In some invalid files, a channel has no keyframes at all; such a channel needs to be ignored + if len(fcurve.keyframe_points) == 0: + continue + try: + # example of target_property : location, rotation_quaternion, value + target_property = get_target_property_name(fcurve.data_path) + except: + export_settings['log'].warning( + "Invalid animation fcurve data path on action {}".format( + blender_action.name)) + continue + object_path = get_target_object_path(fcurve.data_path) + + # find the object affected by this action + # object_path : blank for blender_object itself, key_blocks[""] for SK, pose.bones[""] for bones + if not object_path: + if fcurve.data_path.startswith("["): + target = blender_object + type_ = "EXTRA" + else: + target = blender_object + type_ = "OBJECT" + else: + try: + target = get_object_from_datapath(blender_object, object_path) + + if blender_object.type == "ARMATURE" and fcurve.data_path.startswith("pose.bones["): + if target_property is not None: + if get_target(target_property) is not None: + type_ = "BONE" + else: + type_ = "EXTRA" + else: + type_ = "EXTRA" + + else: + type_ = "EXTRA" + if blender_object.type == "MESH" and object_path.startswith("key_blocks"): + shape_key = blender_object.data.shape_keys.path_resolve(object_path) + if skip_sk(blender_object.data.shape_keys.key_blocks, shape_key): + continue + target = blender_object.data.shape_keys + type_ = "SK" + except ValueError as e: + # if the object is a mesh and the action target path cannot be resolved, we know that this is a morph + # animation. + if blender_object.type == "MESH": + try: + shape_key = blender_object.data.shape_keys.path_resolve(object_path) + if skip_sk(blender_object.data.shape_keys.key_blocks, shape_key): + continue + target = blender_object.data.shape_keys + type_ = "SK" + except: + # Something is wrong, for example a bone animation is linked to a mesh object...
+ export_settings['log'].warning( + "Invalid animation fcurve data path on action {}".format( + blender_action.name)) + continue + else: + export_settings['log'].warning("Animation target {} not found".format(object_path)) + continue + + # Detect that object or bone are not multiple keyed for euler and quaternion + # Keep only the current rotation mode used by object + rotation, rotation_modes = get_rotation_modes(target_property) + if rotation and target.rotation_mode not in rotation_modes: + multiple_rotation_mode_detected[target] = True + continue + + if type_ == "EXTRA": + # No group by property, because we are going to export fcurve separately + # We are going to evaluate fcurve, so no check if need to be sampled + if target_property is None: + target_property = fcurve.data_path + if not target_property.startswith("pose.bones["): + target_property = fcurve.data_path + target_data = targets_extra.get(target, {}) + target_data['type'] = type_ + target_data['bone'] = target.name + target_data['obj_uuid'] = obj_uuid + target_properties = target_data.get('properties', {}) + channels = target_properties.get(target_property, []) + channels.append(fcurve) + target_properties[target_property] = channels + target_data['properties'] = target_properties + targets_extra[target] = target_data + continue + + # group channels by target object and affected property of the target + target_data = targets.get(target, {}) + target_data['type'] = type_ + target_data['obj_uuid'] = obj_uuid + target_data['bone'] = target.name if type_ == "BONE" else None + + target_properties = target_data.get('properties', {}) + channels = target_properties.get(target_property, []) + channels.append(fcurve) + target_properties[target_property] = channels + target_data['properties'] = target_properties + targets[target] = target_data + + for targ in multiple_rotation_mode_detected.keys(): + export_settings['log'].warning("Multiple rotation mode detected for {}".format(targ.name)) + + # Now that all curves are extracted, + # - check that there is no normal + delta transforms + # - check that each group can be exported not sampled + # - be sure that shapekeys curves are correctly sorted + + for obj, target_data in targets.items(): + properties = target_data['properties'].keys() + properties = [get_target(prop) for prop in properties] + if len(properties) != len(set(properties)): + new_properties = {} + # There are some transformation + delta transformation + # We can't use fcurve, so the property will be sampled + for prop in target_data['properties'].keys(): + if len([get_target(p) for p in target_data['properties'] if get_target(p) == get_target(prop)]) > 1: + # normal + delta + to_be_sampled.append((obj_uuid, target_data['type'], get_channel_from_target( + get_target(prop)), None)) # None, because no delta exists on Bones + else: + new_properties[prop] = target_data['properties'][prop] + + target_data['properties'] = new_properties + + # Check if the property can be exported without sampling + new_properties = {} + for prop in target_data['properties'].keys(): + if no_sample_option is False and needs_baking( + obj_uuid, target_data['properties'][prop], export_settings) is True: + to_be_sampled.append((obj_uuid, target_data['type'], get_channel_from_target( + get_target(prop)), target_data['bone'])) # bone can be None if not a bone :) + else: + new_properties[prop] = target_data['properties'][prop] + + target_data['properties'] = new_properties + + # Make sure sort is correct for shapekeys + if target_data['type'] == "SK": + 
for prop in target_data['properties'].keys(): + target_data['properties'][prop] = tuple( + __get_channel_group_sorted( + target_data['properties'][prop], + export_settings['vtree'].nodes[obj_uuid].blender_object)) + else: + for prop in target_data['properties'].keys(): + target_data['properties'][prop] = tuple(target_data['properties'][prop]) + + to_be_sampled = list(set(to_be_sampled)) + + return targets, to_be_sampled, targets_extra + + +def __get_channel_group_sorted(channels: typing.Tuple[bpy.types.FCurve], blender_object: bpy.types.Object): + # if this is shapekey animation, we need to sort in same order than shapekeys + # else, no need to sort + if blender_object.type == "MESH": + first_channel = channels[0] + object_path = get_target_object_path(first_channel.data_path) + if object_path: + if not blender_object.data.shape_keys: + # Something is wrong. Maybe the user assigned an armature action + # to a mesh object. Returning without sorting + return channels + + # This is shapekeys, we need to sort channels + shapekeys_idx = {} + cpt_sk = 0 + for sk in blender_object.data.shape_keys.key_blocks: + if skip_sk(blender_object.data.shape_keys.key_blocks, sk): + continue + shapekeys_idx[sk.name] = cpt_sk + cpt_sk += 1 + + # Note: channels will have some None items only for SK if some SK are not animated + idx_channel_mapping = [] + all_sorted_channels = [] + for sk_c in channels: + try: + sk_name = blender_object.data.shape_keys.path_resolve(get_target_object_path(sk_c.data_path)).name + idx = shapekeys_idx[sk_name] + idx_channel_mapping.append((shapekeys_idx[sk_name], sk_c)) + except: + # Something is wrong. For example, an armature action linked to a mesh object + continue + + existing_idx = dict(idx_channel_mapping) + for i in range(0, cpt_sk): + if i not in existing_idx.keys(): + all_sorted_channels.append(None) + else: + all_sorted_channels.append(existing_idx[i]) + + if all([i is None for i in all_sorted_channels]): # all channel in error, and some non keyed SK + return channels # This happen when an armature action is linked to a mesh object with non keyed SK + + return tuple(all_sorted_channels) + + # if not shapekeys, stay in same order, because order doesn't matter + return channels + + +def __gather_animation_fcurve_channel(obj_uuid: str, + channel_group: typing.Tuple[bpy.types.FCurve], + bone: typing.Optional[str], + custom_range: typing.Optional[set], + export_settings + ) -> typing.Union[gltf2_io.AnimationChannel, None]: + + __target = __gather_target(obj_uuid, channel_group, bone, export_settings) + if __target.path is not None: + sampler = __gather_sampler(obj_uuid, channel_group, bone, custom_range, False, export_settings) + + if sampler is None: + # After check, no need to animate this node for this channel + return None + + animation_channel = gltf2_io.AnimationChannel( + extensions=None, + extras=None, + sampler=sampler, + target=__target + ) + + blender_object = export_settings['vtree'].nodes[obj_uuid].blender_object + export_user_extensions('animation_gather_fcurve_channel_target', export_settings, blender_object, bone) + + return animation_channel + return None + + +def __gather_target(obj_uuid: str, + channel_group: typing.Tuple[bpy.types.FCurve], + bone: typing.Optional[str], + export_settings + ) -> gltf2_io.AnimationChannelTarget: + + return gather_fcurve_channel_target(obj_uuid, channel_group, bone, export_settings) + + +def __gather_sampler(obj_uuid: str, + channel_group: typing.Tuple[bpy.types.FCurve], + bone: typing.Optional[str], + custom_range: 
typing.Optional[set], + extra_mode: bool, + export_settings) -> gltf2_io.AnimationSampler: + + return gather_animation_fcurves_sampler(obj_uuid, channel_group, bone, custom_range, extra_mode, export_settings) + + +def needs_baking(obj_uuid: str, + channels: typing.Tuple[bpy.types.FCurve], + export_settings + ) -> bool: + """ + Check if baking is needed. + + Some blender animations need to be baked as they can not directly be expressed in glTF. + """ + def all_equal(lst): + return lst[1:] == lst[:-1] + + # Note: channels has some None items only for SK if some SK are not animated + # Sampling due to unsupported interpolation + interpolation = [c for c in channels if c is not None][0].keyframe_points[0].interpolation + if interpolation not in ["BEZIER", "LINEAR", "CONSTANT"]: + export_settings['log'].warning( + "Baking animation because of an unsupported interpolation method: {}".format(interpolation) + ) + return True + + if any(any(k.interpolation != interpolation for k in c.keyframe_points) for c in channels if c is not None): + # There are different interpolation methods in one action group + export_settings['log'].warning( + "Baking animation because there are keyframes with different " + "interpolation methods in one channel" + ) + return True + + if not all_equal([len(c.keyframe_points) for c in channels if c is not None]): + export_settings['log'].warning( + "Baking animation because the number of keyframes is not " + "equal for all channel tracks" + ) + return True + + if len([c for c in channels if c is not None][0].keyframe_points) <= 1: + # we need to bake to 'STEP', as at least two keyframes are required to interpolate + return True + + if not all_equal(list(zip([[k.co[0] for k in c.keyframe_points] for c in channels if c is not None]))): + # The channels have differently located keyframes + export_settings['log'].warning("Baking animation because of differently located keyframes in one channel") + return True + + if export_settings['vtree'].nodes[obj_uuid].blender_object.type == "ARMATURE": + animation_target = get_object_from_datapath( + export_settings['vtree'].nodes[obj_uuid].blender_object, [ + c for c in channels if c is not None][0].data_path) + if isinstance(animation_target, bpy.types.PoseBone): + if len(animation_target.constraints) != 0: + # Constraints such as IK act on the bone -> can not be represented in glTF atm + export_settings['log'].warning("Baking animation because of unsupported constraints acting on the bone") + return True + + return False diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/animation/fcurves/gltf2_blender_gather_fcurves_keyframes.py b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/fcurves/gltf2_blender_gather_fcurves_keyframes.py new file mode 100644 index 00000000000..64417d4f093 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/fcurves/gltf2_blender_gather_fcurves_keyframes.py @@ -0,0 +1,208 @@ +# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy +import typing +from .....blender.com.gltf2_blender_data_path import get_sk_exported +from ....com.gltf2_blender_data_path import get_target_object_path +from ...gltf2_blender_gather_cache import cached +from ..gltf2_blender_gather_keyframes import Keyframe + + +@cached +def gather_fcurve_keyframes( + obj_uuid: str, + channel_group: typing.Tuple[bpy.types.FCurve], + bone: typing.Optional[str], + custom_range: typing.Optional[set], + extra_mode: bool, + export_settings): + + keyframes 
= [] + + non_keyed_values = gather_non_keyed_values(obj_uuid, channel_group, bone, extra_mode, export_settings) + + # Just use the keyframes as they are specified in blender + # Note: channels has some None items only for SK if some SK are not animated + frames = [keyframe.co[0] for keyframe in [c for c in channel_group if c is not None][0].keyframe_points] + # some weird files have duplicate frame at same time, removed them + frames = sorted(set(frames)) + + if export_settings['gltf_negative_frames'] == "CROP": + frames = [f for f in frames if f >= 0] + + if export_settings['gltf_frame_range'] is True: + frames = [f for f in frames if f >= bpy.context.scene.frame_start and f <= bpy.context.scene.frame_end] + + if custom_range is not None: + frames = [f for f in frames if f >= custom_range[0] and f <= custom_range[1]] + + if len(frames) == 0: + return None + + for i, frame in enumerate(frames): + key = Keyframe(channel_group, frame, None) + key.value = [c.evaluate(frame) for c in channel_group if c is not None] + # Complete key with non keyed values, if needed + if len([c for c in channel_group if c is not None]) != key.get_target_len(): + complete_key(key, non_keyed_values) + + # compute tangents for cubic spline interpolation + if [c for c in channel_group if c is not None][0].keyframe_points[0].interpolation == "BEZIER": + # Construct the in tangent + if frame == frames[0]: + # start in-tangent should become all zero + key.set_first_tangent() + else: + # otherwise construct an in tangent coordinate from the keyframes control points. We intermediately + # use a point at t+1 to define the tangent. This allows the tangent control point to be transformed + # normally, but only works for locally linear transformation. The more non-linear a transform, the + # more imprecise this method is. + # We could use any other (v1, t1) for which (v1 - v0) / (t1 - t0) equals the tangent. By using t+1 + # for both in and out tangents, we guarantee that (even if there are errors or numerical imprecisions) + # symmetrical control points translate to symmetrical tangents. + # Note: I am not sure that linearity is never broken with quaternions and their normalization. + # Especially at sign swap it might occur that the value gets negated but the control point not. + # I have however not once encountered an issue with this. + key.in_tangent = [c.keyframe_points[i].co[1] + + (c.keyframe_points[i].handle_left[1] - + c.keyframe_points[i].co[1]) / + (c.keyframe_points[i].handle_left[0] - + c.keyframe_points[i].co[0]) for c in channel_group if c is not None] + # Construct the out tangent + if frame == frames[-1]: + # end out-tangent should become all zero + key.set_last_tangent() + else: + # otherwise construct an in tangent coordinate from the keyframes control points. + # This happens the same way how in tangents are handled above. 
+ key.out_tangent = [c.keyframe_points[i].co[1] + + (c.keyframe_points[i].handle_right[1] - + c.keyframe_points[i].co[1]) / + (c.keyframe_points[i].handle_right[0] - + c.keyframe_points[i].co[0]) for c in channel_group if c is not None] + + __complete_key_tangents(key, non_keyed_values) + + keyframes.append(key) + + return keyframes + + +def gather_non_keyed_values( + obj_uuid: str, + channel_group: typing.Tuple[bpy.types.FCurve], + bone: typing.Optional[str], + extra_mode: bool, + export_settings +) -> typing.Tuple[typing.Optional[float]]: + + if extra_mode is True: + # No need to check if there are non non keyed values, as we export fcurve independently + return [None] + + blender_object = export_settings['vtree'].nodes[obj_uuid].blender_object + + non_keyed_values = [] + + # Note: channels has some None items only for SK if some SK are not animated + if None not in channel_group: + # classic case for object TRS or bone TRS + # Or if all morph target are animated + + target = channel_group[0].data_path.split('.')[-1] + if target == "value": + # All morph targets are animated + return tuple([None] * len(channel_group)) + + indices = [c.array_index for c in channel_group] + indices.sort() + length = { + "delta_location": 3, + "delta_rotation_euler": 3, + "delta_rotation_quaternion": 4, + "delta_scale": 3, + "location": 3, + "rotation_axis_angle": 4, + "rotation_euler": 3, + "rotation_quaternion": 4, + "scale": 3, + "value": len(channel_group) + }.get(target) + + if length is None: + # This is not a known target + return () + + for i in range(0, length): + if i in indices: + non_keyed_values.append(None) + else: + if bone is None: + non_keyed_values.append({ + "delta_location": blender_object.delta_location, + "delta_rotation_euler": blender_object.delta_rotation_euler, + "delta_rotation_quaternion": blender_object.delta_rotation_quaternion, + "delta_scale": blender_object.delta_scale, + "location": blender_object.location, + "rotation_axis_angle": blender_object.rotation_axis_angle, + "rotation_euler": blender_object.rotation_euler, + "rotation_quaternion": blender_object.rotation_quaternion, + "scale": blender_object.scale + }[target][i]) + else: + # TODO, this is not working if the action is not active (NLA case for example) ? 
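+ # matrix_basis is the pose bone's transform relative to its rest pose (the animated local delta); decompose() returns (translation, rotation quaternion, scale).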
+ trans, rot, scale = blender_object.pose.bones[bone].matrix_basis.decompose() + non_keyed_values.append({ + "location": trans, + "rotation_axis_angle": rot, + "rotation_euler": rot, + "rotation_quaternion": rot, + "scale": scale + }[target][i]) + + return tuple(non_keyed_values) + + else: + # We are in case of morph target, where all targets are not animated + # So channels has some None items + first_channel = [c for c in channel_group if c is not None][0] + object_path = get_target_object_path(first_channel.data_path) + if object_path: + shapekeys_idx = {} + cpt_sk = 0 + for sk in get_sk_exported(blender_object.data.shape_keys.key_blocks): + shapekeys_idx[cpt_sk] = sk.name + cpt_sk += 1 + + for idx_c, channel in enumerate(channel_group): + if channel is None: + non_keyed_values.append(blender_object.data.shape_keys.key_blocks[shapekeys_idx[idx_c]].value) + else: + non_keyed_values.append(None) + + return tuple(non_keyed_values) + + +def complete_key(key: Keyframe, non_keyed_values: typing.Tuple[typing.Optional[float]]): + """ + Complete keyframe with non keyed values + """ + for i in range(0, key.get_target_len()): + if i in key.get_indices(): + continue # this is a keyed array_index or a SK animated + key.set_value_index(i, non_keyed_values[i]) + + +def __complete_key_tangents(key: Keyframe, non_keyed_values: typing.Tuple[typing.Optional[float]]): + """ + Complete keyframe with non keyed values for tangents + """ + for i in range(0, key.get_target_len()): + if i in key.get_indices(): + continue # this is a keyed array_index or a SK animated + if key.in_tangent is not None: + key.set_value_index_in(i, non_keyed_values[i]) + if key.out_tangent is not None: + key.set_value_index_out(i, non_keyed_values[i]) diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/animation/fcurves/gltf2_blender_gather_fcurves_sampler.py b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/fcurves/gltf2_blender_gather_fcurves_sampler.py new file mode 100644 index 00000000000..bae40a666f9 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/fcurves/gltf2_blender_gather_fcurves_sampler.py @@ -0,0 +1,231 @@ +# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy +import typing +import mathutils +from .....io.com import gltf2_io +from .....io.com import gltf2_io_constants +from .....blender.com.gltf2_blender_conversion import get_gltf_interpolation +from .....io.exp import gltf2_io_binary_data +from .....io.exp.gltf2_io_user_extensions import export_user_extensions +from ....com.gltf2_blender_data_path import get_target_property_name +from ....com import gltf2_blender_math +from ...gltf2_blender_gather_cache import cached +from ...gltf2_blender_gather_accessors import gather_accessor +from ...gltf2_blender_gather_tree import VExportNode +from .gltf2_blender_gather_fcurves_keyframes import gather_fcurve_keyframes + + +@cached +def gather_animation_fcurves_sampler( + obj_uuid: str, + channel_group: typing.Tuple[bpy.types.FCurve], + bone: typing.Optional[str], + custom_range: typing.Optional[set], + extra_mode: bool, + export_settings +) -> gltf2_io.AnimationSampler: + + # matrix_parent_inverse needed for fcurves? 
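+ # Overall flow: gather the raw keyframes first, then convert them below into the glTF sampler's
+ # input (times) and output (values) accessors.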
+ + keyframes = __gather_keyframes( + obj_uuid, + channel_group, + bone, + custom_range, + extra_mode, + export_settings) + + if keyframes is None: + # After check, no need to animate this node for this channel + return None + + # Now we are raw input/output, we need to convert to glTF data + input, output = __convert_keyframes(obj_uuid, channel_group, bone, keyframes, extra_mode, export_settings) + + sampler = gltf2_io.AnimationSampler( + extensions=None, + extras=None, + input=input, + interpolation=__gather_interpolation(channel_group, export_settings), + output=output + ) + + blender_object = export_settings['vtree'].nodes[obj_uuid].blender_object + export_user_extensions('animation_gather_fcurve_channel_sampler', export_settings, blender_object, bone) + + return sampler + + +@cached +def __gather_keyframes( + obj_uuid: str, + channel_group: typing.Tuple[bpy.types.FCurve], + bone: typing.Optional[str], + custom_range: typing.Optional[set], + extra_mode: bool, + export_settings +): + + return gather_fcurve_keyframes(obj_uuid, channel_group, bone, custom_range, extra_mode, export_settings) + + +def __convert_keyframes( + obj_uuid: str, + channel_group: typing.Tuple[bpy.types.FCurve], + bone_name: typing.Optional[str], + keyframes, + extra_mode: bool, + export_settings): + + times = [k.seconds for k in keyframes] + input = gather_accessor( + gltf2_io_binary_data.BinaryData.from_list(times, gltf2_io_constants.ComponentType.Float), + gltf2_io_constants.ComponentType.Float, + len(times), + tuple([max(times)]), + tuple([min(times)]), + gltf2_io_constants.DataType.Scalar, + export_settings) + + is_yup = export_settings['gltf_yup'] + + need_rotation_correction = ( + export_settings['gltf_cameras'] and export_settings['vtree'].nodes[obj_uuid].blender_type == VExportNode.CAMERA) or ( + export_settings['gltf_lights'] and export_settings['vtree'].nodes[obj_uuid].blender_type == VExportNode.LIGHT) + + target_datapath = [c for c in channel_group if c is not None][0].data_path + + if bone_name is not None: + bone = export_settings['vtree'].nodes[obj_uuid].blender_object.pose.bones[bone_name] + if bone.parent is None: + # bone at root of armature + axis_basis_change = mathutils.Matrix.Identity(4) + if is_yup: + axis_basis_change = mathutils.Matrix( + ((1.0, 0.0, 0.0, 0.0), + (0.0, 0.0, 1.0, 0.0), + (0.0, -1.0, 0.0, 0.0), + (0.0, 0.0, 0.0, 1.0))) + correction_matrix_local = axis_basis_change @ bone.bone.matrix_local + else: + # Bone is not at root of armature + # There are 2 cases : + parent_uuid = export_settings['vtree'].nodes[export_settings['vtree'] + .nodes[obj_uuid].bones[bone_name]].parent_uuid + if parent_uuid is not None and export_settings['vtree'].nodes[parent_uuid].blender_type == VExportNode.BONE: + # export bone is not at root of armature neither + blender_bone_parent = export_settings['vtree'].nodes[parent_uuid].blender_bone + correction_matrix_local = ( + blender_bone_parent.bone.matrix_local.inverted_safe() @ + bone.bone.matrix_local + ) + else: + # exported bone (after filter) is at root of armature + axis_basis_change = mathutils.Matrix.Identity(4) + if is_yup: + axis_basis_change = mathutils.Matrix( + ((1.0, 0.0, 0.0, 0.0), + (0.0, 0.0, 1.0, 0.0), + (0.0, -1.0, 0.0, 0.0), + (0.0, 0.0, 0.0, 1.0))) + correction_matrix_local = axis_basis_change + + transform = correction_matrix_local + + else: + if export_settings['vtree'].nodes[obj_uuid].blender_object.parent is not None: + matrix_parent_inverse = export_settings['vtree'].nodes[obj_uuid].blender_object.matrix_parent_inverse.copy( + 
).freeze() + else: + matrix_parent_inverse = mathutils.Matrix.Identity(4).freeze() + transform = matrix_parent_inverse + + values = [] + fps = (bpy.context.scene.render.fps * bpy.context.scene.render.fps_base) + for keyframe in keyframes: + + if extra_mode is True: + # Export as is, without trying to convert + keyframe_value = keyframe.value + if keyframe.in_tangent is not None: + keyframe_value = keyframe.in_tangent + keyframe_value + if keyframe.out_tangent is not None: + keyframe_value = keyframe_value + keyframe.out_tangent + values += keyframe_value + continue + + # Transform the data and build gltf control points + value = gltf2_blender_math.transform(keyframe.value, target_datapath, transform, need_rotation_correction) + if is_yup and bone_name is None: + value = gltf2_blender_math.swizzle_yup(value, target_datapath) + keyframe_value = gltf2_blender_math.mathutils_to_gltf(value) + + if keyframe.in_tangent is not None: + # we can directly transform the tangent as it currently is represented by a control point + in_tangent = gltf2_blender_math.transform( + keyframe.in_tangent, target_datapath, transform, need_rotation_correction) + if is_yup and bone_name is None: + in_tangent = gltf2_blender_math.swizzle_yup(in_tangent, target_datapath) + # the tangent in glTF is relative to the keyframe value and uses seconds + if not isinstance(value, list): + in_tangent = fps * (in_tangent - value) + else: + in_tangent = [fps * (in_tangent[i] - value[i]) for i in range(len(value))] + keyframe_value = gltf2_blender_math.mathutils_to_gltf(in_tangent) + keyframe_value # append + + if keyframe.out_tangent is not None: + # we can directly transform the tangent as it currently is represented by a control point + out_tangent = gltf2_blender_math.transform( + keyframe.out_tangent, target_datapath, transform, need_rotation_correction) + if is_yup and bone_name is None: + out_tangent = gltf2_blender_math.swizzle_yup(out_tangent, target_datapath) + # the tangent in glTF is relative to the keyframe value and uses seconds + if not isinstance(value, list): + out_tangent = fps * (out_tangent - value) + else: + out_tangent = [fps * (out_tangent[i] - value[i]) for i in range(len(value))] + keyframe_value = keyframe_value + gltf2_blender_math.mathutils_to_gltf(out_tangent) # append + + values += keyframe_value + + # store the keyframe data in a binary buffer + component_type = gltf2_io_constants.ComponentType.Float + if get_target_property_name(target_datapath) == "value": + # channels with 'weight' targets must have scalar accessors + data_type = gltf2_io_constants.DataType.Scalar + else: + data_type = gltf2_io_constants.DataType.vec_type_from_num(len(keyframes[0].value)) + + output = gltf2_io.Accessor( + buffer_view=gltf2_io_binary_data.BinaryData.from_list(values, component_type), + byte_offset=None, + component_type=component_type, + count=len(values) // gltf2_io_constants.DataType.num_elements(data_type), + extensions=None, + extras=None, + max=None, + min=None, + name=None, + normalized=None, + sparse=None, + type=data_type + ) + + return input, output + + +def __gather_interpolation( + channel_group: typing.Tuple[bpy.types.FCurve], + export_settings, +) -> str: + + # Note: channels has some None items only for SK if some SK are not animated + # Non-sampled keyframes implies that all keys are of the same type, and that the + # type is supported by glTF (because we checked in needs_baking). 
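+ # So it is enough to read the interpolation from the first keyframe of the first animated fcurve.
+ # (For reference, get_gltf_interpolation is expected to map e.g. Blender 'BEZIER' -> 'CUBICSPLINE'
+ # and 'LINEAR' -> 'LINEAR'; see gltf2_blender_conversion for the authoritative mapping.)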
+ blender_keyframe = [c for c in channel_group if c is not None][0].keyframe_points[0] + + # Select the interpolation method. + return get_gltf_interpolation(blender_keyframe.interpolation) diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/animation/gltf2_blender_gather_action.py b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/gltf2_blender_gather_action.py new file mode 100644 index 00000000000..f34a828c950 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/gltf2_blender_gather_action.py @@ -0,0 +1,740 @@ +# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy +import typing +from ....io.com import gltf2_io +from ....io.exp.gltf2_io_user_extensions import export_user_extensions +from ....blender.com.gltf2_blender_conversion import get_gltf_interpolation +from ...com.gltf2_blender_data_path import is_bone_anim_channel +from ...com.gltf2_blender_extras import generate_extras +from ..gltf2_blender_gather_cache import cached +from ..gltf2_blender_gather_tree import VExportNode +from .fcurves.gltf2_blender_gather_fcurves_animation import gather_animation_fcurves +from .sampled.armature.armature_action_sampled import gather_action_armature_sampled +from .sampled.armature.armature_channels import gather_sampled_bone_channel +from .sampled.object.gltf2_blender_gather_object_action_sampled import gather_action_object_sampled +from .sampled.shapekeys.gltf2_blender_gather_sk_action_sampled import gather_action_sk_sampled +from .sampled.object.gltf2_blender_gather_object_channels import gather_object_sampled_channels, gather_sampled_object_channel +from .sampled.shapekeys.gltf2_blender_gather_sk_channels import gather_sampled_sk_channel +from .gltf2_blender_gather_drivers import get_sk_drivers +from .gltf2_blender_gather_animation_utils import reset_bone_matrix, reset_sk_data, link_samplers, add_slide_data, merge_tracks_perform, bake_animation + + +def gather_actions_animations(export_settings): + + prepare_actions_range(export_settings) + + animations = [] + merged_tracks = {} + + vtree = export_settings['vtree'] + for obj_uuid in vtree.get_all_objects(): + + # Do not manage not exported objects + if vtree.nodes[obj_uuid].node is None: + if export_settings["gltf_armature_object_remove"] is True: + # Manage armature object, as this is the object that has the animation + if not vtree.nodes[obj_uuid].blender_object: + continue + else: + continue + + if export_settings['vtree'].nodes[obj_uuid].blender_type == VExportNode.COLLECTION: + continue + + animations_, merged_tracks = gather_action_animations(obj_uuid, merged_tracks, len(animations), export_settings) + animations += animations_ + + if export_settings['gltf_animation_mode'] == "ACTIVE_ACTIONS": + # Fake an animation with all animations of the scene + merged_tracks = {} + merged_tracks_name = 'Animation' + if(len(export_settings['gltf_nla_strips_merged_animation_name']) > 0): + merged_tracks_name = export_settings['gltf_nla_strips_merged_animation_name'] + merged_tracks[merged_tracks_name] = [] + for idx, animation in enumerate(animations): + merged_tracks[merged_tracks_name].append(idx) + + new_animations = merge_tracks_perform(merged_tracks, animations, export_settings) + + return new_animations + + +def prepare_actions_range(export_settings): + + track_slide = {} + + vtree = export_settings['vtree'] + for obj_uuid in vtree.get_all_objects(): + + if vtree.nodes[obj_uuid].blender_type == VExportNode.COLLECTION: + continue + + # Do not 
manage not exported objects + if vtree.nodes[obj_uuid].node is None: + if export_settings["gltf_armature_object_remove"] is True: + # Manage armature object, as this is the object that has the animation + if not vtree.nodes[obj_uuid].blender_object: + continue + else: + continue + + if obj_uuid not in export_settings['ranges']: + export_settings['ranges'][obj_uuid] = {} + + blender_actions = __get_blender_actions(obj_uuid, export_settings) + for blender_action, track, type_ in blender_actions: + + # What about frame_range bug for single keyframe animations ? 107030 + start_frame = int(blender_action.frame_range[0]) + end_frame = int(blender_action.frame_range[1]) + + if end_frame - start_frame == 1: + # To workaround Blender bug 107030, check manually + try: # Avoid crash in case of strange/buggy fcurves + start_frame = int(min([c.range()[0] for c in blender_action.fcurves])) + end_frame = int(max([c.range()[1] for c in blender_action.fcurves])) + except: + pass + + export_settings['ranges'][obj_uuid][blender_action.name] = {} + + # If some negative frame and crop -> set start at 0 + if start_frame < 0 and export_settings['gltf_negative_frames'] == "CROP": + start_frame = 0 + + if export_settings['gltf_frame_range'] is True: + start_frame = max(bpy.context.scene.frame_start, start_frame) + end_frame = min(bpy.context.scene.frame_end, end_frame) + + export_settings['ranges'][obj_uuid][blender_action.name]['start'] = start_frame + export_settings['ranges'][obj_uuid][blender_action.name]['end'] = end_frame + + if export_settings['gltf_negative_frames'] == "SLIDE": + if track is not None: + if not (track.startswith("NlaTrack") or track.startswith("[Action Stash]")): + if track not in track_slide.keys() or ( + track in track_slide.keys() and start_frame < track_slide[track]): + track_slide.update({track: start_frame}) + else: + if start_frame < 0: + add_slide_data(start_frame, obj_uuid, blender_action.name, export_settings) + else: + if export_settings['gltf_animation_mode'] == "ACTIVE_ACTIONS": + if None not in track_slide.keys() or ( + None in track_slide.keys() and start_frame < track_slide[None]): + track_slide.update({None: start_frame}) + else: + if start_frame < 0: + add_slide_data(start_frame, obj_uuid, blender_action.name, export_settings) + + if export_settings['gltf_anim_slide_to_zero'] is True and start_frame > 0: + if track is not None: + if not (track.startswith("NlaTrack") or track.startswith("[Action Stash]")): + if track not in track_slide.keys() or ( + track in track_slide.keys() and start_frame < track_slide[track]): + track_slide.update({track: start_frame}) + else: + add_slide_data(start_frame, obj_uuid, blender_action.name, export_settings) + else: + if export_settings['gltf_animation_mode'] == "ACTIVE_ACTIONS": + if None not in track_slide.keys() or ( + None in track_slide.keys() and start_frame < track_slide[None]): + track_slide.update({None: start_frame}) + else: + add_slide_data(start_frame, obj_uuid, blender_action.name, export_settings) + + if type_ == "SHAPEKEY" and export_settings['gltf_bake_animation']: + export_settings['ranges'][obj_uuid][obj_uuid] = {} + export_settings['ranges'][obj_uuid][obj_uuid]['start'] = bpy.context.scene.frame_start + export_settings['ranges'][obj_uuid][obj_uuid]['end'] = bpy.context.scene.frame_end + + # For baking drivers + if export_settings['vtree'].nodes[obj_uuid].blender_type == VExportNode.ARMATURE and export_settings['gltf_morph_anim'] is True: + obj_drivers = get_sk_drivers(obj_uuid, export_settings) + for obj_dr in 
obj_drivers: + if obj_dr not in export_settings['ranges']: + export_settings['ranges'][obj_dr] = {} + export_settings['ranges'][obj_dr][obj_uuid + "_" + blender_action.name] = {} + export_settings['ranges'][obj_dr][obj_uuid + "_" + blender_action.name]['start'] = start_frame + export_settings['ranges'][obj_dr][obj_uuid + "_" + blender_action.name]['end'] = end_frame + + if len(blender_actions) == 0 and export_settings['gltf_bake_animation']: + # No animation on this object + # In case of baking animation, we will use scene frame range + # Will be calculated later if max range. Can be set here if scene frame range + export_settings['ranges'][obj_uuid][obj_uuid] = {} + export_settings['ranges'][obj_uuid][obj_uuid]['start'] = bpy.context.scene.frame_start + export_settings['ranges'][obj_uuid][obj_uuid]['end'] = bpy.context.scene.frame_end + + # For baking drivers + if export_settings['vtree'].nodes[obj_uuid].blender_type == VExportNode.ARMATURE and export_settings['gltf_morph_anim'] is True: + obj_drivers = get_sk_drivers(obj_uuid, export_settings) + for obj_dr in obj_drivers: + if obj_dr not in export_settings['ranges']: + export_settings['ranges'][obj_dr] = {} + export_settings['ranges'][obj_dr][obj_uuid + "_" + obj_uuid] = {} + export_settings['ranges'][obj_dr][obj_uuid + "_" + + obj_uuid]['start'] = bpy.context.scene.frame_start + export_settings['ranges'][obj_dr][obj_uuid + "_" + obj_uuid]['end'] = bpy.context.scene.frame_end + + if (export_settings['gltf_negative_frames'] == "SLIDE" + or export_settings['gltf_anim_slide_to_zero'] is True) \ + and len(track_slide) > 0: + # Need to store animation slides + for obj_uuid in vtree.get_all_objects(): + + # Do not manage not exported objects + if vtree.nodes[obj_uuid].node is None: + if export_settings['gltf_armature_object_remove'] is True: + # Manage armature object, as this is the object that has the animation + if not vtree.nodes[obj_uuid].blender_object: + continue + else: + continue + + blender_actions = __get_blender_actions(obj_uuid, export_settings) + for blender_action, track, type_ in blender_actions: + if track in track_slide.keys(): + if export_settings['gltf_negative_frames'] == "SLIDE" and track_slide[track] < 0: + add_slide_data(track_slide[track], obj_uuid, blender_action.name, export_settings) + elif export_settings['gltf_anim_slide_to_zero'] is True: + add_slide_data(track_slide[track], obj_uuid, blender_action.name, export_settings) + + +def gather_action_animations(obj_uuid: int, + tracks: typing.Dict[str, + typing.List[int]], + offset: int, + export_settings) -> typing.Tuple[typing.List[gltf2_io.Animation], + typing.Dict[str, + typing.List[int]]]: + """ + Gather all animations which contribute to the objects property, and corresponding track names + + :param blender_object: The blender object which is animated + :param export_settings: + :return: A list of glTF2 animations and tracks + """ + animations = [] + + blender_object = export_settings['vtree'].nodes[obj_uuid].blender_object + + # Collect all 'actions' affecting this object. 
There is a direct mapping between blender actions and glTF animations + blender_actions = __get_blender_actions(obj_uuid, export_settings) + + # When object is not animated at all (no SK) + # We can create an animation for this object + if len(blender_actions) == 0: + animation = bake_animation(obj_uuid, obj_uuid, export_settings) + if animation is not None: + animations.append(animation) + + +# Keep current situation and prepare export + current_action = None + current_sk_action = None + current_world_matrix = None + if blender_object and blender_object.animation_data and blender_object.animation_data.action: + # There is an active action. Storing it, to be able to restore after switching all actions during export + current_action = blender_object.animation_data.action + elif len(blender_actions) != 0 and blender_object.animation_data is not None and blender_object.animation_data.action is None: + # No current action set, storing world matrix of object + current_world_matrix = blender_object.matrix_world.copy() + + if blender_object and blender_object.type == "MESH" \ + and blender_object.data is not None \ + and blender_object.data.shape_keys is not None \ + and blender_object.data.shape_keys.animation_data is not None \ + and blender_object.data.shape_keys.animation_data.action is not None: + current_sk_action = blender_object.data.shape_keys.animation_data.action + + # Remove any solo (starred) NLA track. Restored after export + solo_track = None + if blender_object and blender_object.animation_data: + for track in blender_object.animation_data.nla_tracks: + if track.is_solo: + solo_track = track + track.is_solo = False + break + + # Remove any tweak mode. Restore after export + if blender_object and blender_object.animation_data: + restore_tweak_mode = blender_object.animation_data.use_tweak_mode + + # Remove use of NLA. Restore after export + if blender_object and blender_object.animation_data: + current_use_nla = blender_object.animation_data.use_nla + blender_object.animation_data.use_nla = False + + # Try to disable all except armature in viewport, for performance + if export_settings['gltf_optimize_armature_disable_viewport'] \ + and export_settings['vtree'].nodes[obj_uuid].blender_object.type == "ARMATURE": + + # If the skinned mesh has driver(s), we can't disable it to bake armature. + need_to_enable_again = False + sk_drivers = get_sk_drivers(obj_uuid, export_settings) + if len(sk_drivers) == 0: + need_to_enable_again = True + # Before baking, disabling from viewport all meshes + for obj in [n.blender_object for n in export_settings['vtree'].nodes.values() if n.blender_type in + [VExportNode.OBJECT, VExportNode.ARMATURE, VExportNode.COLLECTION]]: + obj.hide_viewport = True + export_settings['vtree'].nodes[obj_uuid].blender_object.hide_viewport = False + else: + export_settings['log'].warning("Can't disable viewport because of drivers") + # We changed the option here, so we don't need to re-check it later, during + export_settings['gltf_optimize_armature_disable_viewport'] = False + + export_user_extensions('animation_switch_loop_hook', export_settings, blender_object, False) + +# Export + + # Export all collected actions. + for blender_action, track_name, on_type in blender_actions: + + # Set action as active, to be able to bake if needed + if on_type == "OBJECT": # Not for shapekeys! 
+ if blender_object.animation_data.action is None \ + or (blender_object.animation_data.action.name != blender_action.name): + if blender_object.animation_data.is_property_readonly('action'): + blender_object.animation_data.use_tweak_mode = False + try: + reset_bone_matrix(blender_object, export_settings) + export_user_extensions( + 'pre_animation_switch_hook', + export_settings, + blender_object, + blender_action, + track_name, + on_type) + blender_object.animation_data.action = blender_action + export_user_extensions( + 'post_animation_switch_hook', + export_settings, + blender_object, + blender_action, + track_name, + on_type) + except: + error = "Action is readonly. Please check NLA editor" + export_settings['log'].warning( + "Animation '{}' could not be exported. Cause: {}".format( + blender_action.name, error)) + continue + + if on_type == "SHAPEKEY": + if blender_object.data.shape_keys.animation_data.action is None \ + or (blender_object.data.shape_keys.animation_data.action.name != blender_action.name): + if blender_object.data.shape_keys.animation_data.is_property_readonly('action'): + blender_object.data.shape_keys.animation_data.use_tweak_mode = False + reset_sk_data(blender_object, blender_actions, export_settings) + export_user_extensions( + 'pre_animation_switch_hook', + export_settings, + blender_object, + blender_action, + track_name, + on_type) + blender_object.data.shape_keys.animation_data.action = blender_action + export_user_extensions( + 'post_animation_switch_hook', + export_settings, + blender_object, + blender_action, + track_name, + on_type) + + if export_settings['gltf_force_sampling'] is True: + if export_settings['vtree'].nodes[obj_uuid].blender_object.type == "ARMATURE": + animation, extra_samplers = gather_action_armature_sampled( + obj_uuid, blender_action, None, export_settings) + elif on_type == "OBJECT": + animation, extra_samplers = gather_action_object_sampled( + obj_uuid, blender_action, None, export_settings) + else: + animation = gather_action_sk_sampled(obj_uuid, blender_action, None, export_settings) + else: + # Not sampled + # This returns + # - animation on fcurves + # - fcurve that cannot be handled not sampled, to be sampled + # to_be_sampled is : (object_uuid , type , prop, optional(bone.name) ) + animation, to_be_sampled, extra_samplers = gather_animation_fcurves( + obj_uuid, blender_action, export_settings) + for (obj_uuid, type_, prop, bone) in to_be_sampled: + if type_ == "BONE": + channel = gather_sampled_bone_channel( + obj_uuid, + bone, + prop, + blender_action.name, + True, + get_gltf_interpolation("LINEAR"), + export_settings) + elif type_ == "OBJECT": + channel = gather_sampled_object_channel( + obj_uuid, prop, blender_action.name, True, get_gltf_interpolation("LINEAR"), export_settings) + elif type_ == "SK": + channel = gather_sampled_sk_channel(obj_uuid, blender_action.name, export_settings) + elif type_ == "EXTRA": + channel = None + else: + export_settings['log'].error("Type unknown. 
Should not happen") + + if animation is None and channel is not None: + # If all channels need to be sampled, no animation was created + # Need to create animation, and add channel + animation = gltf2_io.Animation( + channels=[channel], + extensions=None, + extras=__gather_extras(blender_action, export_settings), + name=blender_action.name, + samplers=[] + ) + else: + if channel is not None: + animation.channels.append(channel) + + # Add extra samplers + # Because this is not core glTF specification, you can add extra samplers using hook + if export_settings['gltf_export_extra_animations'] and len(extra_samplers) != 0: + export_user_extensions( + 'extra_animation_manage', + export_settings, + extra_samplers, + obj_uuid, + blender_object, + blender_action, + animation) + + # If we are in a SK animation, and we need to bake (if there also in TRS anim) + if len([a for a in blender_actions if a[2] == "OBJECT"]) == 0 and on_type == "SHAPEKEY": + if export_settings['gltf_bake_animation'] is True and export_settings['gltf_force_sampling'] is True: + # We also have to check if this is a skinned mesh, because we don't have to force animation baking on this case + # (skinned meshes TRS must be ignored, says glTF specification) + if export_settings['vtree'].nodes[obj_uuid].skin is None: + if obj_uuid not in export_settings['ranges'].keys(): + export_settings['ranges'][obj_uuid] = {} + export_settings['ranges'][obj_uuid][obj_uuid] = export_settings['ranges'][obj_uuid][blender_action.name] + channels, _ = gather_object_sampled_channels(obj_uuid, obj_uuid, export_settings) + if channels is not None: + if animation is None: + animation = gltf2_io.Animation( + channels=channels, + extensions=None, # as other animations + extras=None, # Because there is no animation to get extras from + name=blender_object.name, # Use object name as animation name + samplers=[] + ) + else: + animation.channels.extend(channels) + + if len([a for a in blender_actions if a[2] == "SHAPEKEY"]) == 0 \ + and export_settings['gltf_morph_anim'] \ + and blender_object.type == "MESH" \ + and blender_object.data is not None \ + and blender_object.data.shape_keys is not None: + if export_settings['gltf_bake_animation'] is True and export_settings['gltf_force_sampling'] is True: + # We need to check that this mesh is not driven by armature parent + # In that case, no need to bake, because animation is already baked by driven sk armature + ignore_sk = False + if export_settings['vtree'].nodes[obj_uuid].parent_uuid is not None \ + and export_settings['vtree'].nodes[export_settings['vtree'].nodes[obj_uuid].parent_uuid].blender_type == VExportNode.ARMATURE: + obj_drivers = get_sk_drivers(export_settings['vtree'].nodes[obj_uuid].parent_uuid, export_settings) + if obj_uuid in obj_drivers: + ignore_sk = True + + if ignore_sk is False: + if obj_uuid not in export_settings['ranges'].keys(): + export_settings['ranges'][obj_uuid] = {} + export_settings['ranges'][obj_uuid][obj_uuid] = export_settings['ranges'][obj_uuid][blender_action.name] + channel = gather_sampled_sk_channel(obj_uuid, obj_uuid, export_settings) + if channel is not None: + if animation is None: + animation = gltf2_io.Animation( + channels=[channel], + extensions=None, # as other animations + extras=None, # Because there is no animation to get extras from + name=blender_object.name, # Use object name as animation name + samplers=[] + ) + else: + animation.channels.append(channel) + + if animation is not None: + link_samplers(animation, export_settings) + 
animations.append(animation) + + # Store data for merging animation later + if track_name is not None: # Do not take into account animation not in NLA + # Do not take into account default NLA track names + if not (track_name.startswith("NlaTrack") or track_name.startswith("[Action Stash]")): + if track_name not in tracks.keys(): + tracks[track_name] = [] + tracks[track_name].append(offset + len(animations) - 1) # Store index of animation in animations + + +# Restoring current situation + + # Restore action status + # TODO: do this in a finally + if blender_object and blender_object.animation_data: + if blender_object.animation_data.action is not None: + if current_action is None: + # remove last exported action + reset_bone_matrix(blender_object, export_settings) + blender_object.animation_data.action = None + elif blender_object.animation_data.action.name != current_action.name: + # Restore action that was active at start of exporting + reset_bone_matrix(blender_object, export_settings) + blender_object.animation_data.action = current_action + if solo_track is not None: + solo_track.is_solo = True + blender_object.animation_data.use_tweak_mode = restore_tweak_mode + blender_object.animation_data.use_nla = current_use_nla + + if blender_object and blender_object.type == "MESH" \ + and blender_object.data is not None \ + and blender_object.data.shape_keys is not None \ + and blender_object.data.shape_keys.animation_data is not None: + reset_sk_data(blender_object, blender_actions, export_settings) + blender_object.data.shape_keys.animation_data.action = current_sk_action + + if blender_object and current_world_matrix is not None: + blender_object.matrix_world = current_world_matrix + + if export_settings['gltf_optimize_armature_disable_viewport'] \ + and export_settings['vtree'].nodes[obj_uuid].blender_object.type == "ARMATURE": + if need_to_enable_again is True: + # And now, restoring meshes in viewport + for node, obj in [(n, n.blender_object) for n in export_settings['vtree'].nodes.values() + if n.blender_type in [VExportNode.OBJECT, VExportNode.ARMATURE, VExportNode.COLLECTION]]: + obj.hide_viewport = node.default_hide_viewport + export_settings['vtree'].nodes[obj_uuid].blender_object.hide_viewport = export_settings['vtree'].nodes[obj_uuid].default_hide_viewport + + export_user_extensions('animation_switch_loop_hook', export_settings, blender_object, True) + + return animations, tracks + + +@cached +def __get_blender_actions(obj_uuid: str, + export_settings + ) -> typing.List[typing.Tuple[bpy.types.Action, str, str]]: + blender_actions = [] + blender_tracks = {} + action_on_type = {} + + blender_object = export_settings['vtree'].nodes[obj_uuid].blender_object + + export_user_extensions('pre_gather_actions_hook', export_settings, blender_object) + + if export_settings['gltf_animation_mode'] == "BROADCAST": + return __get_blender_actions_broadcast(obj_uuid, export_settings) + + if blender_object and blender_object.animation_data is not None: + # Collect active action. 
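+ # (Skipping it if it is excluded by the optional gltf_action_filter scene property.)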
+ if blender_object.animation_data.action is not None: + + # Check the action is not in list of actions to ignore + if hasattr(bpy.data.scenes[0], "gltf_action_filter") and id(blender_object.animation_data.action) in [ + id(item.action) for item in bpy.data.scenes[0].gltf_action_filter if item.keep is False]: + pass # We ignore this action + else: + blender_actions.append(blender_object.animation_data.action) + blender_tracks[blender_object.animation_data.action.name] = None + action_on_type[blender_object.animation_data.action.name] = "OBJECT" + + # Collect associated strips from NLA tracks. + if export_settings['gltf_animation_mode'] == "ACTIONS": + for track in blender_object.animation_data.nla_tracks: + # Multi-strip tracks do not export correctly yet (they need to be baked), + # so skip them for now and only write single-strip tracks. + non_muted_strips = [strip for strip in track.strips if strip.action is not None and strip.mute is False] + if track.strips is None or len(non_muted_strips) != 1: + export_settings['log'].warning( + "NLA track '{}' has {} strips, but only single-strip tracks are supported in 'actions' mode.".format( + track.name, len( + track.strips)), popup=True) + continue + for strip in non_muted_strips: + + # Check the action is not in list of actions to ignore + if hasattr(bpy.data.scenes[0], "gltf_action_filter") and id(strip.action) in [ + id(item.action) for item in bpy.data.scenes[0].gltf_action_filter if item.keep is False]: + continue # We ignore this action + + blender_actions.append(strip.action) + # Always set after possible active action -> None will be overwrite + blender_tracks[strip.action.name] = track.name + action_on_type[strip.action.name] = "OBJECT" + + # For caching, actions linked to SK must be after actions about TRS + if export_settings['gltf_morph_anim'] and blender_object and blender_object.type == "MESH" \ + and blender_object.data is not None \ + and blender_object.data.shape_keys is not None \ + and blender_object.data.shape_keys.animation_data is not None: + + if blender_object.data.shape_keys.animation_data.action is not None: + + # Check the action is not in list of actions to ignore + if hasattr(bpy.data.scenes[0], "gltf_action_filter") and id(blender_object.data.shape_keys.animation_data.action) in [ + id(item.action) for item in bpy.data.scenes[0].gltf_action_filter if item.keep is False]: + pass # We ignore this action + else: + blender_actions.append(blender_object.data.shape_keys.animation_data.action) + blender_tracks[blender_object.data.shape_keys.animation_data.action.name] = None + action_on_type[blender_object.data.shape_keys.animation_data.action.name] = "SHAPEKEY" + + if export_settings['gltf_animation_mode'] == "ACTIONS": + for track in blender_object.data.shape_keys.animation_data.nla_tracks: + # Multi-strip tracks do not export correctly yet (they need to be baked), + # so skip them for now and only write single-strip tracks. 
+ non_muted_strips = [strip for strip in track.strips if strip.action is not None and strip.mute is False] + if track.strips is None or len(non_muted_strips) != 1: + continue + for strip in non_muted_strips: + # Check the action is not in list of actions to ignore + if hasattr(bpy.data.scenes[0], "gltf_action_filter") and id(strip.action) in [ + id(item.action) for item in bpy.data.scenes[0].gltf_action_filter if item.keep is False]: + continue # We ignore this action + + blender_actions.append(strip.action) + # Always set after possible active action -> None will be overwrite + blender_tracks[strip.action.name] = track.name + action_on_type[strip.action.name] = "SHAPEKEY" + + # If there are only 1 armature, include all animations, even if not in NLA + # But only if armature has already some animation_data + # If not, we says that this armature is never animated, so don't add these additional actions + if export_settings['gltf_export_anim_single_armature'] is True: + if blender_object and blender_object.type == "ARMATURE" and blender_object.animation_data is not None: + if len(export_settings['vtree'].get_all_node_of_type(VExportNode.ARMATURE)) == 1: + # Keep all actions on objects (no Shapekey animation) + for act in [a for a in bpy.data.actions if a.id_root == "OBJECT"]: + # We need to check this is an armature action + # Checking that at least 1 bone is animated + if not __is_armature_action(act): + continue + # Check if this action is already taken into account + if act.name in blender_tracks.keys(): + continue + + # Check the action is not in list of actions to ignore + if hasattr(bpy.data.scenes[0], "gltf_action_filter") and id(act) in [id(item.action) + for item in bpy.data.scenes[0].gltf_action_filter if item.keep is False]: + continue # We ignore this action + + blender_actions.append(act) + blender_tracks[act.name] = None + action_on_type[act.name] = "OBJECT" + + # Use a class to get parameters, to be able to modify them + class GatherActionHookParameters: + def __init__(self, blender_actions, blender_tracks, action_on_type): + self.blender_actions = blender_actions + self.blender_tracks = blender_tracks + self.action_on_type = action_on_type + + gatheractionhookparams = GatherActionHookParameters(blender_actions, blender_tracks, action_on_type) + + export_user_extensions('gather_actions_hook', export_settings, blender_object, gatheractionhookparams) + + # Get params back from hooks + blender_actions = gatheractionhookparams.blender_actions + blender_tracks = gatheractionhookparams.blender_tracks + action_on_type = gatheractionhookparams.action_on_type + + # Remove duplicate actions. 
+ blender_actions = list(set(blender_actions)) + # sort animations alphabetically (case insensitive) so they have a defined order and match Blender's Action list + blender_actions.sort(key=lambda a: a.name.lower()) + + return [(blender_action, blender_tracks[blender_action.name], action_on_type[blender_action.name]) + for blender_action in blender_actions] + + +def __is_armature_action(blender_action) -> bool: + for fcurve in blender_action.fcurves: + if is_bone_anim_channel(fcurve.data_path): + return True + return False + + +def __gather_extras(blender_action, export_settings): + if export_settings['gltf_extras']: + return generate_extras(blender_action) + return None + + +def __get_blender_actions_broadcast(obj_uuid, export_settings): + blender_actions = [] + blender_tracks = {} + action_on_type = {} + + blender_object = export_settings['vtree'].nodes[obj_uuid].blender_object + + # Note : Like in FBX exporter: + # - Object with animation data will get all actions + # - Object without animation will not get any action + + # Collect all actions + for blender_action in bpy.data.actions: + if hasattr(bpy.data.scenes[0], "gltf_action_filter") and id(blender_action) in [ + id(item.action) for item in bpy.data.scenes[0].gltf_action_filter if item.keep is False]: + continue # We ignore this action + + # Keep all actions on objects (no Shapekey animation, No armature animation (on bones)) + if blender_action.id_root == "OBJECT": # TRS and Bone animations + if blender_object.animation_data is None: + continue + if blender_object and blender_object.type == "ARMATURE" and __is_armature_action(blender_action): + blender_actions.append(blender_action) + blender_tracks[blender_action.name] = None + action_on_type[blender_action.name] = "OBJECT" + elif blender_object.type == "MESH": + if not __is_armature_action(blender_action): + blender_actions.append(blender_action) + blender_tracks[blender_action.name] = None + action_on_type[blender_action.name] = "OBJECT" + elif blender_action.id_root == "KEY": + if blender_object.type != "MESH" or blender_object.data is None or blender_object.data.shape_keys is None or blender_object.data.shape_keys.animation_data is None: + continue + # Checking that the object has some SK and some animation on it + if blender_object is None: + continue + if blender_object.type != "MESH": + continue + if blender_object.data is None or blender_object.data.shape_keys is None: + continue + blender_actions.append(blender_action) + blender_tracks[blender_action.name] = None + action_on_type[blender_action.name] = "SHAPEKEY" + + # Use a class to get parameters, to be able to modify them + + class GatherActionHookParameters: + def __init__(self, blender_actions, blender_tracks, action_on_type): + self.blender_actions = blender_actions + self.blender_tracks = blender_tracks + self.action_on_type = action_on_type + + gatheractionhookparams = GatherActionHookParameters(blender_actions, blender_tracks, action_on_type) + + export_user_extensions('gather_actions_hook', export_settings, blender_object, gatheractionhookparams) + + # Get params back from hooks + blender_actions = gatheractionhookparams.blender_actions + blender_tracks = gatheractionhookparams.blender_tracks + action_on_type = gatheractionhookparams.action_on_type + + # Remove duplicate actions. 
+ blender_actions = list(set(blender_actions)) + # sort animations alphabetically (case insensitive) so they have a defined order and match Blender's Action list + blender_actions.sort(key=lambda a: a.name.lower()) + + return [(blender_action, blender_tracks[blender_action.name], action_on_type[blender_action.name]) + for blender_action in blender_actions] diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/animation/gltf2_blender_gather_animation_utils.py b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/gltf2_blender_gather_animation_utils.py new file mode 100644 index 00000000000..12de006b322 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/gltf2_blender_gather_animation_utils.py @@ -0,0 +1,291 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy +import typing +from mathutils import Matrix +from ....blender.com.gltf2_blender_data_path import get_sk_exported +from ....io.com import gltf2_io +from ....io.exp.gltf2_io_user_extensions import export_user_extensions +from ..gltf2_blender_gather_tree import VExportNode +from .sampled.armature.armature_action_sampled import gather_action_armature_sampled +from .sampled.object.gltf2_blender_gather_object_action_sampled import gather_action_object_sampled +from .sampled.shapekeys.gltf2_blender_gather_sk_channels import gather_sampled_sk_channel +from .sampled.data.gltf2_blender_gather_data_channels import gather_data_sampled_channels +from .gltf2_blender_gather_drivers import get_sk_drivers + + +def link_samplers(animation: gltf2_io.Animation, export_settings): + """ + Move animation samplers to their own list and store their indices at their previous locations. + + After gathering, samplers are stored in the channels properties of the animation and need to be moved + to their own list while storing an index into this list at the position where they previously were. 
+ This behaviour is similar to that of the glTFExporter that traverses all nodes + :param animation: + :param export_settings: + :return: + """ + # TODO: move this to some util module and update gltf2 exporter also + T = typing.TypeVar('T') + + def __append_unique_and_get_index(l: typing.List[T], item: T): + if item in l: + return l.index(item) + else: + index = len(l) + l.append(item) + return index + + for i, channel in enumerate(animation.channels): + animation.channels[i].sampler = __append_unique_and_get_index(animation.samplers, channel.sampler) + + +def reset_bone_matrix(blender_object, export_settings) -> None: + if export_settings['gltf_export_reset_pose_bones'] is False: + return + + # Only for armatures + if blender_object.type != "ARMATURE": + return + + # Remove current action if any + if blender_object.animation_data and blender_object.animation_data.action: + blender_object.animation_data.action = None + + # Resetting bones TRS to avoid to keep not keyed value on a future action set + for bone in blender_object.pose.bones: + bone.matrix_basis = Matrix() + + +def reset_sk_data(blender_object, blender_actions, export_settings) -> None: + # Using NLA for SK is not so common + # Reset to 0.0 will happen here only if there are at least 2 tracks to export + if export_settings['gltf_export_reset_sk_data'] is False: + return + + if len([i for i in blender_actions if i[2] == "SHAPEKEY"]) <= 1: + return + + if blender_object.type != "MESH": + return + + # Reset + for sk in get_sk_exported(blender_object.data.shape_keys.key_blocks): + sk.value = 0.0 + + +def add_slide_data(start_frame, uuid: int, key: str, export_settings, add_drivers=True): + + if uuid not in export_settings['slide'].keys(): + export_settings['slide'][uuid] = {} + export_settings['slide'][uuid][key] = start_frame + + # Add slide info for driver sk too + if add_drivers is True: + obj_drivers = get_sk_drivers(uuid, export_settings) + for obj_dr in obj_drivers: + if obj_dr not in export_settings['slide'].keys(): + export_settings['slide'][obj_dr] = {} + export_settings['slide'][obj_dr][uuid + "_" + key] = start_frame + + +def merge_tracks_perform(merged_tracks, animations, export_settings): + to_delete_idx = [] + for merged_anim_track in merged_tracks.keys(): + if len(merged_tracks[merged_anim_track]) < 2: + + # There is only 1 animation in the track + # If name of the track is not a default name, use this name for action + if len(merged_tracks[merged_anim_track]) != 0: + animations[merged_tracks[merged_anim_track][0]].name = merged_anim_track + + continue + + base_animation_idx = None + offset_sampler = 0 + + for idx, anim_idx in enumerate(merged_tracks[merged_anim_track]): + if idx == 0: + base_animation_idx = anim_idx + animations[anim_idx].name = merged_anim_track + already_animated = [] + for channel in animations[anim_idx].channels: + already_animated.append((channel.target.node, channel.target.path)) + continue + + to_delete_idx.append(anim_idx) + + # Merging extensions + # Provide a hook to handle extension merging since there is no way to know author intent + export_user_extensions( + 'merge_animation_extensions_hook', + export_settings, + animations[anim_idx], + animations[base_animation_idx]) + + # Merging extras + # Warning, some values can be overwritten if present in multiple merged animations + if animations[anim_idx].extras is not None: + for k in animations[anim_idx].extras.keys(): + if animations[base_animation_idx].extras is None: + animations[base_animation_idx].extras = {} + 
animations[base_animation_idx].extras[k] = animations[anim_idx].extras[k] + + offset_sampler = len(animations[base_animation_idx].samplers) + for sampler in animations[anim_idx].samplers: + animations[base_animation_idx].samplers.append(sampler) + + for channel in animations[anim_idx].channels: + if (channel.target.node, channel.target.path) in already_animated: + export_settings['log'].warning( + "Some strips have same channel animation ({}), on node {} !".format( + channel.target.path, channel.target.node.name)) + continue + animations[base_animation_idx].channels.append(channel) + animations[base_animation_idx].channels[-1].sampler = animations[base_animation_idx].channels[-1].sampler + offset_sampler + already_animated.append((channel.target.node, channel.target.path)) + + new_animations = [] + if len(to_delete_idx) != 0: + for idx, animation in enumerate(animations): + if idx in to_delete_idx: + continue + new_animations.append(animation) + else: + new_animations = animations + + # If some strips have same channel animations, we already ignored some. + # But if the channels was exactly the same, we already pick index of sampler, and we have a mix of samplers, and index of samplers, in animation.samplers + # So get back to list of objects only + # This can lead to unused samplers... but keep them, as, anyway, data are not exported properly + for anim in new_animations: + new_samplers = [] + for s in anim.samplers: + if type(s) == int: + new_samplers.append(anim.samplers[s]) + else: + new_samplers.append(s) + anim.samplers = new_samplers + + return new_animations + + +def bake_animation(obj_uuid: str, animation_key: str, export_settings, mode=None): + + # Bake situation does not export any extra animation channels, as we bake TRS + weights on Track or scene level, without direct + # Access to fcurve and action data + + # if there is no animation in file => no need to bake + if len(bpy.data.actions) == 0: + return None + + blender_object = export_settings['vtree'].nodes[obj_uuid].blender_object + + # No TRS animation are found for this object. 
+ # But we may need to bake + # (Only when force sampling is ON) + # If force sampling is OFF, can lead to inconsistent export anyway + if (export_settings['gltf_bake_animation'] is True + or export_settings['gltf_animation_mode'] == "NLA_TRACKS") \ + and blender_object and blender_object.type != "ARMATURE" and export_settings['gltf_force_sampling'] is True: + animation = None + # We also have to check if this is a skinned mesh, because we don't have to force animation baking on this case + # (skinned meshes TRS must be ignored, says glTF specification) + if export_settings['vtree'].nodes[obj_uuid].skin is None: + if mode is None or mode == "OBJECT": + animation, _ = gather_action_object_sampled(obj_uuid, None, animation_key, export_settings) + + # Need to bake sk only if not linked to a driver sk by parent armature + # In case of NLA track export, no baking of SK + if export_settings['gltf_morph_anim'] \ + and blender_object \ + and blender_object.type == "MESH" \ + and blender_object.data is not None \ + and blender_object.data.shape_keys is not None: + + ignore_sk = False + if export_settings['vtree'].nodes[obj_uuid].parent_uuid is not None \ + and export_settings['vtree'].nodes[export_settings['vtree'].nodes[obj_uuid].parent_uuid].blender_type == VExportNode.ARMATURE: + obj_drivers = get_sk_drivers(export_settings['vtree'].nodes[obj_uuid].parent_uuid, export_settings) + if obj_uuid in obj_drivers: + ignore_sk = True + + if mode == "OBJECT": + ignore_sk = True + + if ignore_sk is False: + channel = gather_sampled_sk_channel(obj_uuid, animation_key, export_settings) + if channel is not None: + if animation is None: + animation = gltf2_io.Animation( + channels=[channel], + extensions=None, # as other animations + extras=None, # Because there is no animation to get extras from + name=blender_object.name, # Use object name as animation name + samplers=[] + ) + else: + animation.channels.append(channel) + + if animation is not None and animation.channels: + link_samplers(animation, export_settings) + return animation + + elif (export_settings['gltf_bake_animation'] is True + or export_settings['gltf_animation_mode'] == "NLA_TRACKS") \ + and blender_object \ + and blender_object.type == "ARMATURE" \ + and mode is None or mode == "OBJECT": + # We need to bake all bones. 
Because some bone can have some constraints linking to + # some other armature bones, for example + + animation, _ = gather_action_armature_sampled(obj_uuid, None, animation_key, export_settings) + link_samplers(animation, export_settings) + if animation is not None: + return animation + return None + + +def bake_data_animation(blender_type_data, blender_id, animation_key, on_type, export_settings): + # if there is no animation in file => no need to bake + if len(bpy.data.actions) == 0: + return None + + total_channels = [] + animation = None + + if (export_settings['gltf_bake_animation'] is True + or export_settings['gltf_animation_mode'] == "NLA_TRACKS"): + + if blender_type_data == "materials": + blender_data_object = [i for i in bpy.data.materials if id(i) == blender_id][0] + elif blender_type_data == "cameras": + blender_data_object = [i for i in bpy.data.cameras if id(i) == blender_id][0] + elif blender_type_data == "lights": + blender_data_object = [i for i in bpy.data.lights if id(i) == blender_id][0] + else: + pass # Should not happen + + # Export now KHR_animation_pointer for materials / light / camera + for i in [a for a in export_settings['KHR_animation_pointer'][blender_type_data].keys() if a == blender_id]: + if len(export_settings['KHR_animation_pointer'][blender_type_data][i]['paths']) == 0: + continue + + channels = gather_data_sampled_channels(blender_type_data, i, animation_key, on_type, export_settings) + if channels is not None: + total_channels.extend(channels) + + if len(total_channels) > 0: + animation = gltf2_io.Animation( + channels=total_channels, + extensions=None, # as other animations + extras=None, # Because there is no animation to get extras from + name=blender_data_object.name, # Use object name as animation name + samplers=[] + ) + + if animation is not None and animation.channels: + link_samplers(animation, export_settings) + return animation diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/animation/gltf2_blender_gather_animations.py b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/gltf2_blender_gather_animations.py new file mode 100644 index 00000000000..cb5a70f7faf --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/gltf2_blender_gather_animations.py @@ -0,0 +1,22 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + + +from .gltf2_blender_gather_action import gather_actions_animations +from .gltf2_blender_gather_scene_animation import gather_scene_animations +from .gltf2_blender_gather_tracks import gather_tracks_animations + + +def gather_animations(export_settings): + + # Reinit stored data + export_settings['ranges'] = {} + export_settings['slide'] = {} + + if export_settings['gltf_animation_mode'] in ["ACTIVE_ACTIONS", "ACTIONS", "BROADCAST"]: + return gather_actions_animations(export_settings) + elif export_settings['gltf_animation_mode'] == "SCENE": + return gather_scene_animations(export_settings) + elif export_settings['gltf_animation_mode'] == "NLA_TRACKS": + return gather_tracks_animations(export_settings) diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/animation/gltf2_blender_gather_drivers.py b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/gltf2_blender_gather_drivers.py new file mode 100644 index 00000000000..22ff47522d3 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/gltf2_blender_gather_drivers.py @@ -0,0 +1,79 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# 
+# SPDX-License-Identifier: Apache-2.0 + +from ....blender.com.gltf2_blender_data_path import get_sk_exported, skip_sk +from ...com.gltf2_blender_data_path import get_target_object_path +from ..gltf2_blender_gather_cache import skdriverdiscovercache + + +@skdriverdiscovercache +def get_sk_drivers(blender_armature_uuid, export_settings): + + # If no SK are exported --> No driver animation to export + if export_settings['gltf_morph_anim'] is False: + return [] + + drivers = [] + + # Take into account skinned mesh, and mesh parented to a bone of the armature + children_list = export_settings['vtree'].nodes[blender_armature_uuid].children.copy() + for bone in export_settings['vtree'].get_all_bones(blender_armature_uuid): + children_list.extend(export_settings['vtree'].nodes[bone].children) + + for child_uuid in children_list: + + if export_settings['vtree'].nodes[child_uuid].blender_type == "BONE": + continue + + child = export_settings['vtree'].nodes[child_uuid].blender_object + + if not child.data: + continue + # child.data can be an armature - which has no shapekeys + if not hasattr(child.data, 'shape_keys'): + continue + if not child.data.shape_keys: + continue + if not child.data.shape_keys.animation_data: + continue + if not child.data.shape_keys.animation_data.drivers: + continue + if len(child.data.shape_keys.animation_data.drivers) <= 0: + continue + + shapekeys_idx = {} + cpt_sk = 0 + for sk in get_sk_exported(child.data.shape_keys.key_blocks): + shapekeys_idx[sk.name] = cpt_sk + cpt_sk += 1 + + # Note: channels will have some None items only for SK if some SK are not animated + idx_channel_mapping = [] + all_sorted_channels = [] + for sk_c in child.data.shape_keys.animation_data.drivers: + # Check if driver is valid. If not, ignore this driver channel + try: + # Check if driver is valid. 
+ # Try/Except is no more a suffisant check, starting with version Blender 3.0, + # Blender crashes when trying to resolve path on invalid driver + if not sk_c.is_valid: + continue + sk_name = child.data.shape_keys.path_resolve(get_target_object_path(sk_c.data_path)).name + except: + continue + if skip_sk(child.data.shape_keys.key_blocks, child.data.shape_keys.key_blocks[sk_name]): + continue + idx_channel_mapping.append((shapekeys_idx[sk_name], sk_c)) + existing_idx = dict(idx_channel_mapping) + for i in range(0, cpt_sk): + if i not in existing_idx.keys(): + all_sorted_channels.append(None) + else: + all_sorted_channels.append(existing_idx[i]) + + # Checks there are some driver on SK, and that there is not only invalid drivers + if len(all_sorted_channels) > 0 and not all([i is None for i in all_sorted_channels]): + drivers.append(child_uuid) + + return drivers diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/animation/gltf2_blender_gather_keyframes.py b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/gltf2_blender_gather_keyframes.py new file mode 100644 index 00000000000..a8eeac6ef23 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/gltf2_blender_gather_keyframes.py @@ -0,0 +1,127 @@ +# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import typing +import bpy +import mathutils +from ...com import gltf2_blender_math + + +class Keyframe: + def __init__(self, channels: typing.Tuple[bpy.types.FCurve], frame: float, bake_channel: typing.Union[str, None]): + self.seconds = frame / (bpy.context.scene.render.fps * bpy.context.scene.render.fps_base) + self.frame = frame + self.fps = (bpy.context.scene.render.fps * bpy.context.scene.render.fps_base) + self.__length_morph = 0 + # Note: channels has some None items only for SK if some SK are not animated + if bake_channel is None: + if not all([c is None for c in channels]): + self.target = [c for c in channels if c is not None][0].data_path.split('.')[-1] + if self.target != "value": + self.__indices = [c.array_index for c in channels] + else: + self.__indices = [i for i, c in enumerate(channels) if c is not None] + self.__length_morph = len(channels) + else: + # If all channels are None (baking evaluate SK case) + self.target = "value" + self.__indices = [] + self.__length_morph = len(channels) + for i in range(self.get_target_len()): + self.__indices.append(i) + + else: + if bake_channel == "value": + self.__length_morph = len(channels) + self.target = bake_channel + self.__indices = [] + for i in range(self.get_target_len()): + self.__indices.append(i) + + # Data holders for virtual properties + self.__value = None + self.__in_tangent = None + self.__out_tangent = None + + def get_target_len(self): + length = { + "delta_location": 3, + "delta_rotation_euler": 3, + "delta_rotation_quaternion": 4, + "delta_scale": 3, + "location": 3, + "rotation_axis_angle": 4, + "rotation_euler": 3, + "rotation_quaternion": 4, + "scale": 3, + "value": self.__length_morph + }.get(self.target, 1) + + return length + + def __set_indexed(self, value): + # Sometimes blender animations only reference a subset of components of a data target. 
Keyframe should always + # contain a complete Vector/ Quaternion --> use the array_index value of the keyframe to set components in such + # structures + # For SK, must contains all SK values + result = [0.0] * self.get_target_len() + for i, v in zip(self.__indices, value): + result[i] = v + return result + + def get_indices(self): + return self.__indices + + def set_value_index(self, idx, val): + self.__value[idx] = val + + def set_value_index_in(self, idx, val): + self.__in_tangent[idx] = val + + def set_value_index_out(self, idx, val): + self.__out_tangent[idx] = val + + def set_first_tangent(self): + self.__in_tangent = self.__value + + def set_last_tangent(self): + self.__out_tangent = self.__value + + @property + def value(self) -> typing.Union[mathutils.Vector, mathutils.Euler, mathutils.Quaternion, typing.List[float]]: + if self.target == "value": + return self.__value + return gltf2_blender_math.list_to_mathutils(self.__value, self.target) + + @value.setter + def value(self, value: typing.List[float]): + self.__value = self.__set_indexed(value) + + @value.setter + def value_total(self, value: typing.List[float]): + self.__value = value + + @property + def in_tangent(self) -> typing.Union[mathutils.Vector, mathutils.Euler, mathutils.Quaternion, typing.List[float]]: + if self.__in_tangent is None: + return None + if self.target == "value": + return self.__in_tangent + return gltf2_blender_math.list_to_mathutils(self.__in_tangent, self.target) + + @in_tangent.setter + def in_tangent(self, value: typing.List[float]): + self.__in_tangent = self.__set_indexed(value) + + @property + def out_tangent(self) -> typing.Union[mathutils.Vector, mathutils.Euler, mathutils.Quaternion, typing.List[float]]: + if self.__out_tangent is None: + return None + if self.target == "value": + return self.__out_tangent + return gltf2_blender_math.list_to_mathutils(self.__out_tangent, self.target) + + @out_tangent.setter + def out_tangent(self, value: typing.List[float]): + self.__out_tangent = self.__set_indexed(value) diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/animation/gltf2_blender_gather_scene_animation.py b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/gltf2_blender_gather_scene_animation.py new file mode 100644 index 00000000000..2df61a6fb9a --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/gltf2_blender_gather_scene_animation.py @@ -0,0 +1,231 @@ +# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy +from ....io.com import gltf2_io +from ...com.gltf2_blender_extras import generate_extras +from ..gltf2_blender_gather_tree import VExportNode +from .gltf2_blender_gather_drivers import get_sk_drivers +from .sampled.armature.armature_channels import gather_armature_sampled_channels +from .sampled.object.gltf2_blender_gather_object_channels import gather_object_sampled_channels +from .sampled.shapekeys.gltf2_blender_gather_sk_channels import gather_sk_sampled_channels +from .sampled.data.gltf2_blender_gather_data_channels import gather_data_sampled_channels +from .gltf2_blender_gather_animation_utils import link_samplers, add_slide_data + + +def gather_scene_animations(export_settings): + + # if there is no animation in file => no need to bake. Except if we are trying to bake GN instances + if len(bpy.data.actions) == 0 and export_settings['gltf_gn_mesh'] is False: + # TODO : get a better filter by checking we really have some GN instances... 
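+ # Nothing to bake: the file has no actions and Geometry Nodes instance export is disabled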
+ return [] + + total_channels = [] + animations = [] + + start_frame = bpy.context.scene.frame_start + end_frame = bpy.context.scene.frame_end + + # The following options has no impact: + # - We force sampling & baking + # - Export_frame_range --> Because this is the case for SCENE mode, because we bake all scene frame range + # - CROP or SLIDE --> Scene don't have negative frames + + # This mode will bake all objects like there are in the scene + vtree = export_settings['vtree'] + for obj_uuid in vtree.get_all_objects(): + + # Do not manage not exported objects + if vtree.nodes[obj_uuid].node is None: + if export_settings['gltf_armature_object_remove'] is True: + # Manage armature object, as this is the object that has the animation + if not vtree.nodes[obj_uuid].blender_object: + continue + else: + continue + + if export_settings['vtree'].nodes[obj_uuid].blender_type == VExportNode.COLLECTION: + continue + + # blender_object can be None for GN instances + blender_object = export_settings['vtree'].nodes[obj_uuid].blender_object + + export_settings['ranges'][obj_uuid] = {} + export_settings['ranges'][obj_uuid][obj_uuid] = {'start': start_frame, 'end': end_frame} + if blender_object and blender_object.type == "ARMATURE": + # Manage sk drivers + obj_drivers = get_sk_drivers(obj_uuid, export_settings) + for obj_dr in obj_drivers: + if obj_dr not in export_settings['ranges']: + export_settings['ranges'][obj_dr] = {} + export_settings['ranges'][obj_dr][obj_uuid + "_" + obj_uuid] = {} + export_settings['ranges'][obj_dr][obj_uuid + "_" + obj_uuid]['start'] = start_frame + export_settings['ranges'][obj_dr][obj_uuid + "_" + obj_uuid]['end'] = end_frame + + if export_settings['gltf_anim_slide_to_zero'] is True and start_frame > 0: + add_slide_data(start_frame, obj_uuid, obj_uuid, export_settings) + + # Perform baking animation export + + if blender_object and blender_object.type != "ARMATURE": + # We have to check if this is a skinned mesh, because we don't have to force animation baking on this case + if export_settings['vtree'].nodes[obj_uuid].skin is None: + channels, _ = gather_object_sampled_channels(obj_uuid, obj_uuid, export_settings) + if channels is not None: + total_channels.extend(channels) + if export_settings['gltf_morph_anim'] and blender_object.type == "MESH" \ + and blender_object.data is not None \ + and blender_object.data.shape_keys is not None: + + # We must ignore sk for meshes that are driven by armature parent + ignore_sk = False + if export_settings['vtree'].nodes[obj_uuid].parent_uuid is not None \ + and export_settings['vtree'].nodes[export_settings['vtree'].nodes[obj_uuid].parent_uuid].blender_type == VExportNode.ARMATURE: + obj_drivers = get_sk_drivers(export_settings['vtree'].nodes[obj_uuid].parent_uuid, export_settings) + if obj_uuid in obj_drivers: + ignore_sk = True + + if ignore_sk is False: + channels = gather_sk_sampled_channels(obj_uuid, obj_uuid, export_settings) + if channels is not None: + total_channels.extend(channels) + elif blender_object is None: + # This is GN instances + # Currently, not checking if this instance is skinned.... 
#TODO + channels, _ = gather_object_sampled_channels(obj_uuid, obj_uuid, export_settings) + if channels is not None: + total_channels.extend(channels) + else: + channels, _ = gather_armature_sampled_channels(obj_uuid, obj_uuid, export_settings) + if channels is not None: + total_channels.extend(channels) + + if export_settings['gltf_anim_scene_split_object'] is True: + if len(total_channels) > 0: + animation = gltf2_io.Animation( + channels=total_channels, + extensions=None, + extras=__gather_extras(blender_object, export_settings), + name=blender_object.name if blender_object else "GN Instance", + samplers=[] + ) + link_samplers(animation, export_settings) + animations.append(animation) + + total_channels = [] + + if export_settings['gltf_export_anim_pointer'] is True: + # Export now KHR_animation_pointer for materials + for mat in export_settings['KHR_animation_pointer']['materials'].keys(): + if len(export_settings['KHR_animation_pointer']['materials'][mat]['paths']) == 0: + continue + + blender_material = [m for m in bpy.data.materials if id(m) == mat][0] + + export_settings['ranges'][id(blender_material)] = {} + export_settings['ranges'][id(blender_material)][id(blender_material)] = { + 'start': start_frame, 'end': end_frame} + + if export_settings['gltf_anim_slide_to_zero'] is True and start_frame > 0: + add_slide_data(start_frame, mat, mat, export_settings, add_drivers=False) + + channels = gather_data_sampled_channels('materials', mat, mat, None, export_settings) + if channels is not None: + total_channels.extend(channels) + + if export_settings['gltf_anim_scene_split_object'] is True: + if len(total_channels) > 0: + animation = gltf2_io.Animation( + channels=total_channels, + extensions=None, + extras=__gather_extras(blender_material, export_settings), + name=blender_material.name, + samplers=[] + ) + link_samplers(animation, export_settings) + animations.append(animation) + + total_channels = [] + + # Export now KHR_animation_pointer for lights + for light in export_settings['KHR_animation_pointer']['lights'].keys(): + if len(export_settings['KHR_animation_pointer']['lights'][light]['paths']) == 0: + continue + + blender_light = [l for l in bpy.data.lights if id(l) == light][0] + + export_settings['ranges'][id(blender_light)] = {} + export_settings['ranges'][id(blender_light)][id(blender_light)] = {'start': start_frame, 'end': end_frame} + + if export_settings['gltf_anim_slide_to_zero'] is True and start_frame > 0: + add_slide_data(start_frame, light, light, export_settings, add_drivers=False) + + channels = gather_data_sampled_channels('lights', light, light, None, export_settings) + if channels is not None: + total_channels.extend(channels) + + if export_settings['gltf_anim_scene_split_object'] is True: + if len(total_channels) > 0: + animation = gltf2_io.Animation( + channels=total_channels, + extensions=None, + extras=__gather_extras(blender_light, export_settings), + name=blender_light.name, + samplers=[] + ) + link_samplers(animation, export_settings) + animations.append(animation) + + total_channels = [] + + # Export now KHR_animation_pointer for cameras + for cam in export_settings['KHR_animation_pointer']['cameras'].keys(): + if len(export_settings['KHR_animation_pointer']['cameras'][cam]['paths']) == 0: + continue + + blender_camera = [l for l in bpy.data.cameras if id(l) == cam][0] + + export_settings['ranges'][id(blender_camera)] = {} + export_settings['ranges'][id(blender_camera)][id(blender_camera)] = {'start': start_frame, 'end': end_frame} + + if 
export_settings['gltf_anim_slide_to_zero'] is True and start_frame > 0: + add_slide_data(start_frame, cam, cam, export_settings, add_drivers=False) + + channels = gather_data_sampled_channels('cameras', cam, cam, None, export_settings) + if channels is not None: + total_channels.extend(channels) + + if export_settings['gltf_anim_scene_split_object'] is True: + if len(total_channels) > 0: + animation = gltf2_io.Animation( + channels=total_channels, + extensions=None, + extras=__gather_extras(blender_camera, export_settings), + name=blender_camera.name, + samplers=[] + ) + link_samplers(animation, export_settings) + animations.append(animation) + + total_channels = [] + + if export_settings['gltf_anim_scene_split_object'] is False: + if len(total_channels) > 0: + animation = gltf2_io.Animation( + channels=total_channels, + extensions=None, + extras=__gather_extras(bpy.context.scene, export_settings), + name=bpy.context.scene.name, + samplers=[] + ) + link_samplers(animation, export_settings) + animations.append(animation) + + return animations + + +def __gather_extras(blender_asset, export_settings): + if export_settings['gltf_extras']: + return generate_extras(blender_asset) + return None diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/animation/gltf2_blender_gather_tracks.py b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/gltf2_blender_gather_tracks.py new file mode 100644 index 00000000000..6317c5bf92a --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/gltf2_blender_gather_tracks.py @@ -0,0 +1,718 @@ +# SPDX-FileCopyrightText: 2018-2023 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy +import typing +from ....io.com import gltf2_io +from ....io.exp.gltf2_io_user_extensions import export_user_extensions +from ..gltf2_blender_gather_cache import cached +from ..gltf2_blender_gather_tree import VExportNode +from .gltf2_blender_gather_animation_utils import merge_tracks_perform, bake_animation, bake_data_animation, add_slide_data, reset_bone_matrix, reset_sk_data +from .gltf2_blender_gather_drivers import get_sk_drivers +from .sampled.gltf2_blender_gather_animation_sampling_cache import get_cache_data + + +def gather_tracks_animations(export_settings): + + animations = [] + merged_tracks = {} + + vtree = export_settings['vtree'] + for obj_uuid in vtree.get_all_objects(): + + # Do not manage not exported objects + if vtree.nodes[obj_uuid].node is None: + if export_settings['gltf_armature_object_remove'] is True: + # Manage armature object, as this is the object that has the animation + if not vtree.nodes[obj_uuid].blender_object: + continue + else: + continue + + if export_settings['vtree'].nodes[obj_uuid].blender_type == VExportNode.COLLECTION: + continue + + animations_, merged_tracks = gather_track_animations(obj_uuid, merged_tracks, len(animations), export_settings) + animations += animations_ + + if export_settings['gltf_export_anim_pointer'] is True: + # Manage Material tracks (for KHR_animation_pointer) + for mat in export_settings['KHR_animation_pointer']['materials'].keys(): + animations_, merged_tracks = gather_data_track_animations( + 'materials', mat, merged_tracks, len(animations), export_settings) + animations += animations_ + + # Manage Cameras tracks (for KHR_animation_pointer) + for cam in export_settings['KHR_animation_pointer']['cameras'].keys(): + animations_, merged_tracks = gather_data_track_animations( + 'cameras', cam, merged_tracks, len(animations), export_settings) + animations += 
animations_ + + # Manage lights tracks (for KHR_animation_pointer) + for light in export_settings['KHR_animation_pointer']['lights'].keys(): + animations_, merged_tracks = gather_data_track_animations( + 'lights', light, merged_tracks, len(animations), export_settings) + animations += animations_ + + new_animations = merge_tracks_perform(merged_tracks, animations, export_settings) + + return new_animations + + +def gather_track_animations(obj_uuid: int, + tracks: typing.Dict[str, + typing.List[int]], + offset: int, + export_settings) -> typing.Tuple[typing.List[gltf2_io.Animation], + typing.Dict[str, + typing.List[int]]]: + + animations = [] + + # Bake situation does not export any extra animation channels, as we bake TRS + weights on Track or scene level, without direct + # Access to fcurve and action data + + blender_object = export_settings['vtree'].nodes[obj_uuid].blender_object + # Collect all tracks affecting this object. + blender_tracks = __get_blender_tracks(obj_uuid, export_settings) + + # If no tracks, return + # This will avoid to set / reset some data + if len(blender_tracks) == 0: + return animations, tracks + + # Keep current situation + current_action = None + current_sk_action = None + current_world_matrix = None + current_use_nla = None + current_use_nla_sk = None + restore_track_mute = {} + restore_track_mute["OBJECT"] = {} + restore_track_mute["SHAPEKEY"] = {} + + if blender_object.animation_data: + current_action = blender_object.animation_data.action + current_use_nla = blender_object.animation_data.use_nla + restore_tweak_mode = blender_object.animation_data.use_tweak_mode + current_world_matrix = blender_object.matrix_world.copy() + + if blender_object.type == "MESH" \ + and blender_object.data is not None \ + and blender_object.data.shape_keys is not None \ + and blender_object.data.shape_keys.animation_data is not None: + current_sk_action = blender_object.data.shape_keys.animation_data.action + current_use_nla_sk = blender_object.data.shape_keys.animation_data.use_nla + + # Prepare export for obj + solo_track = None + if blender_object.animation_data: + blender_object.animation_data.action = None + blender_object.animation_data.use_nla = True + # Remove any solo (starred) NLA track. Restored after export + for track in blender_object.animation_data.nla_tracks: + if track.is_solo: + solo_track = track + track.is_solo = False + break + + solo_track_sk = None + if blender_object.type == "MESH" \ + and blender_object.data is not None \ + and blender_object.data.shape_keys is not None \ + and blender_object.data.shape_keys.animation_data is not None: + # Remove any solo (starred) NLA track. 
Restored after export + for track in blender_object.data.shape_keys.animation_data.nla_tracks: + if track.is_solo: + solo_track_sk = track + track.is_solo = False + break + + # Mute all channels + for track_group in [b[0] for b in blender_tracks if b[2] == "OBJECT"]: + for track in track_group: + restore_track_mute["OBJECT"][track.idx] = blender_object.animation_data.nla_tracks[track.idx].mute + blender_object.animation_data.nla_tracks[track.idx].mute = True + for track_group in [b[0] for b in blender_tracks if b[2] == "SHAPEKEY"]: + for track in track_group: + restore_track_mute["SHAPEKEY"][track.idx] = blender_object.data.shape_keys.animation_data.nla_tracks[track.idx].mute + blender_object.data.shape_keys.animation_data.nla_tracks[track.idx].mute = True + + export_user_extensions('animation_track_switch_loop_hook', export_settings, blender_object, False) + + # Export + + # Export all collected tracks. + for bl_tracks, track_name, on_type in blender_tracks: + prepare_tracks_range(obj_uuid, bl_tracks, track_name, export_settings) + + if on_type == "OBJECT": + # Enable tracks + for track in bl_tracks: + export_user_extensions( + 'pre_animation_track_switch_hook', + export_settings, + blender_object, + track, + track_name, + on_type) + blender_object.animation_data.nla_tracks[track.idx].mute = False + export_user_extensions( + 'post_animation_track_switch_hook', + export_settings, + blender_object, + track, + track_name, + on_type) + else: + # Enable tracks + for track in bl_tracks: + export_user_extensions( + 'pre_animation_track_switch_hook', + export_settings, + blender_object, + track, + track_name, + on_type) + blender_object.data.shape_keys.animation_data.nla_tracks[track.idx].mute = False + export_user_extensions( + 'post_animation_track_switch_hook', + export_settings, + blender_object, + track, + track_name, + on_type) + + reset_bone_matrix(blender_object, export_settings) + if on_type == "SHAPEKEY": + reset_sk_data(blender_object, blender_tracks, export_settings) + + # Export animation + animation = bake_animation(obj_uuid, track_name, export_settings, mode=on_type) + get_cache_data.reset_cache() + if animation is not None: + animations.append(animation) + + # Store data for merging animation later + # Do not take into account default NLA track names + if not (track_name.startswith("NlaTrack") or track_name.startswith("[Action Stash]")): + if track_name not in tracks.keys(): + tracks[track_name] = [] + tracks[track_name].append(offset + len(animations) - 1) # Store index of animation in animations + + # Restoring muting + if on_type == "OBJECT": + for track in bl_tracks: + blender_object.animation_data.nla_tracks[track.idx].mute = True + else: + for track in bl_tracks: + blender_object.data.shape_keys.animation_data.nla_tracks[track.idx].mute = True + + # Restoring + if current_action is not None: + blender_object.animation_data.action = current_action + if current_sk_action is not None: + blender_object.data.shape_keys.animation_data.action = current_sk_action + if solo_track is not None: + solo_track.is_solo = True + if solo_track_sk is not None: + solo_track_sk.is_solo = True + if blender_object.animation_data: + blender_object.animation_data.use_nla = current_use_nla + blender_object.animation_data.use_tweak_mode = restore_tweak_mode + for track_group in [b[0] for b in blender_tracks if b[2] == "OBJECT"]: + for track in track_group: + blender_object.animation_data.nla_tracks[track.idx].mute = restore_track_mute["OBJECT"][track.idx] + if blender_object.type == "MESH" \ + and 
blender_object.data is not None \ + and blender_object.data.shape_keys is not None \ + and blender_object.data.shape_keys.animation_data is not None: + blender_object.data.shape_keys.animation_data.use_nla = current_use_nla_sk + for track_group in [b[0] for b in blender_tracks if b[2] == "SHAPEKEY"]: + for track in track_group: + blender_object.data.shape_keys.animation_data.nla_tracks[track.idx].mute = restore_track_mute["SHAPEKEY"][track.idx] + + blender_object.matrix_world = current_world_matrix + + export_user_extensions('animation_track_switch_loop_hook', export_settings, blender_object, True) + + return animations, tracks + + +@cached +def __get_blender_tracks(obj_uuid: str, export_settings): + + blender_object = export_settings['vtree'].nodes[obj_uuid].blender_object + export_user_extensions('pre_gather_tracks_hook', export_settings, blender_object) + + tracks, names, types = __get_nla_tracks_obj(obj_uuid, export_settings) + tracks_sk, names_sk, types_sk = __get_nla_tracks_sk(obj_uuid, export_settings) + + tracks.extend(tracks_sk) + names.extend(names_sk) + types.extend(types_sk) + + # Use a class to get parameters, to be able to modify them + class GatherTrackHookParameters: + def __init__(self, blender_tracks, blender_tracks_name, track_on_type): + self.blender_tracks = blender_tracks + self.blender_tracks_name = blender_tracks_name + self.track_on_type = track_on_type + + gathertrackhookparams = GatherTrackHookParameters(tracks, names, types) + + export_user_extensions('gather_tracks_hook', export_settings, blender_object, gathertrackhookparams) + + # Get params back from hooks + tracks = gathertrackhookparams.blender_tracks + names = gathertrackhookparams.blender_tracks_name + types = gathertrackhookparams.track_on_type + + return list(zip(tracks, names, types)) + + +class NLATrack: + def __init__(self, idx, frame_start, frame_end, default_solo, default_muted): + self.idx = idx + self.frame_start = frame_start + self.frame_end = frame_end + self.default_solo = default_solo + self.default_muted = default_muted + + +def __get_nla_tracks_obj(obj_uuid: str, export_settings): + + obj = export_settings['vtree'].nodes[obj_uuid].blender_object + + if not obj.animation_data: + return [], [], [] + if len(obj.animation_data.nla_tracks) == 0: + return [], [], [] + + exported_tracks = [] + + current_exported_tracks = [] + + for idx_track, track in enumerate(obj.animation_data.nla_tracks): + if len(track.strips) == 0: + continue + + stored_track = NLATrack( + idx_track, + track.strips[0].frame_start, + track.strips[-1].frame_end, + track.is_solo, + track.mute + ) + + # Keep tracks where some blending together + if any([strip.blend_type != 'REPLACE' for strip in track.strips]): + # There is some blending. Keeping with previous track + pass + else: + # The previous one(s) can go to the list, if any (not for first track) + if len(current_exported_tracks) != 0: + exported_tracks.append(current_exported_tracks) + current_exported_tracks = [] + current_exported_tracks.append(stored_track) + + # End of loop. 
Keep the last one(s) + exported_tracks.append(current_exported_tracks) + + track_names = [obj.animation_data.nla_tracks[tracks_group[0].idx].name for tracks_group in exported_tracks] + on_types = ['OBJECT'] * len(track_names) + return exported_tracks, track_names, on_types + + +def __get_nla_tracks_sk(obj_uuid: str, export_settings): + + obj = export_settings['vtree'].nodes[obj_uuid].blender_object + + if not obj.type == "MESH": + return [], [], [] + if obj.data is None: + return [], [], [] + if obj.data.shape_keys is None: + return [], [], [] + if not obj.data.shape_keys.animation_data: + return [], [], [] + if len(obj.data.shape_keys.animation_data.nla_tracks) == 0: + return [], [], [] + + exported_tracks = [] + + current_exported_tracks = [] + + for idx_track, track in enumerate(obj.data.shape_keys.animation_data.nla_tracks): + if len(track.strips) == 0: + continue + + stored_track = NLATrack( + idx_track, + track.strips[0].frame_start, + track.strips[-1].frame_end, + track.is_solo, + track.mute + ) + + # Keep tracks where some blending together + if any([strip.blend_type != 'REPLACE' for strip in track.strips]): + # There is some blending. Keeping with previous track + pass + else: + # The previous one(s) can go to the list, if any (not for first track) + if len(current_exported_tracks) != 0: + exported_tracks.append(current_exported_tracks) + current_exported_tracks = [] + current_exported_tracks.append(stored_track) + + # End of loop. Keep the last one(s) + exported_tracks.append(current_exported_tracks) + + track_names = [obj.data.shape_keys.animation_data.nla_tracks[tracks_group[0].idx].name for tracks_group in exported_tracks] + on_types = ['SHAPEKEY'] * len(track_names) + return exported_tracks, track_names, on_types + + +def prepare_tracks_range(obj_uuid, tracks, track_name, export_settings, with_driver=True): + + track_slide = {} + + for idx, btrack in enumerate(tracks): + frame_start = btrack.frame_start if idx == 0 else min(frame_start, btrack.frame_start) + frame_end = btrack.frame_end if idx == 0 else max(frame_end, btrack.frame_end) + + # If some negative frame and crop -> set start at 0 + if frame_start < 0 and export_settings['gltf_negative_frames'] == "CROP": + frame_start = 0 + + if export_settings['gltf_frame_range'] is True: + frame_start = max(bpy.context.scene.frame_start, frame_start) + frame_end = min(bpy.context.scene.frame_end, frame_end) + + export_settings['ranges'][obj_uuid] = {} + export_settings['ranges'][obj_uuid][track_name] = {} + export_settings['ranges'][obj_uuid][track_name]['start'] = int(frame_start) + export_settings['ranges'][obj_uuid][track_name]['end'] = int(frame_end) + + if export_settings['gltf_negative_frames'] == "SLIDE": + if not (track_name.startswith("NlaTrack") or track_name.startswith("[Action Stash]")): + if track_name not in track_slide.keys() or ( + track_name in track_slide.keys() and frame_start < track_slide[track_name]): + track_slide.update({track_name: frame_start}) + else: + if frame_start < 0: + add_slide_data(frame_start, obj_uuid, track_name, export_settings) + + if export_settings['gltf_anim_slide_to_zero'] is True and frame_start > 0: + if not (track_name.startswith("NlaTrack") or track_name.startswith("[Action Stash]")): + if track_name not in track_slide.keys() or ( + track_name in track_slide.keys() and frame_start < track_slide[track_name]): + track_slide.update({track_name: frame_start}) + else: + add_slide_data(frame_start, obj_uuid, track_name, export_settings) + + # For drivers + if with_driver is True: + if 
export_settings['vtree'].nodes[obj_uuid].blender_type == VExportNode.ARMATURE and export_settings['gltf_morph_anim'] is True: + obj_drivers = get_sk_drivers(obj_uuid, export_settings) + for obj_dr in obj_drivers: + if obj_dr not in export_settings['ranges']: + export_settings['ranges'][obj_dr] = {} + export_settings['ranges'][obj_dr][obj_uuid + "_" + track_name] = {} + export_settings['ranges'][obj_dr][obj_uuid + "_" + track_name]['start'] = frame_start + export_settings['ranges'][obj_dr][obj_uuid + "_" + track_name]['end'] = frame_end + + if (export_settings['gltf_negative_frames'] == "SLIDE" + or export_settings['gltf_anim_slide_to_zero'] is True) \ + and len(track_slide) > 0: + + if track_name in track_slide.keys(): + if export_settings['gltf_negative_frames'] == "SLIDE" and track_slide[track_name] < 0: + add_slide_data(track_slide[track_name], obj_uuid, track_name, export_settings) + elif export_settings['gltf_anim_slide_to_zero'] is True: + add_slide_data(track_slide[track_name], obj_uuid, track_name, export_settings) + + +def gather_data_track_animations( + blender_type_data: str, + blender_id: str, + tracks: typing.Dict[str, typing.List[int]], + offset: int, + export_settings) -> typing.Tuple[typing.List[gltf2_io.Animation], typing.Dict[str, typing.List[int]]]: + + animations = [] + + # Collect all tracks affecting this object. + blender_tracks = __get_data_blender_tracks(blender_type_data, blender_id, export_settings) + + if blender_type_data == "materials": + blender_data_object = [mat for mat in bpy.data.materials if id(mat) == blender_id][0] + elif blender_type_data == "cameras": + blender_data_object = [cam for cam in bpy.data.cameras if id(cam) == blender_id][0] + elif blender_type_data == "lights": + blender_data_object = [light for light in bpy.data.lights if id(light) == blender_id][0] + else: + pass # Should not happen + + # Keep current situation + current_action = None + current_nodetree_action = None + current_use_nla = None + current_use_nla_node_tree = None + restore_track_mute = {} + restore_track_mute["MATERIAL"] = {} + restore_track_mute["NODETREE"] = {} + restore_track_mute["LIGHT"] = {} + restore_track_mute["CAMERA"] = {} + + if blender_data_object.animation_data: + current_action = blender_data_object.animation_data.action + current_use_nla = blender_data_object.animation_data.use_nla + restore_tweak_mode = blender_data_object.animation_data.use_tweak_mode + + if blender_type_data in ["materials", "lights"] \ + and blender_data_object.node_tree is not None \ + and blender_data_object.node_tree.animation_data is not None: + current_nodetree_action = blender_data_object.node_tree.animation_data.action + current_use_nla_node_tree = blender_data_object.node_tree.animation_data.use_nla + + # Prepare export for obj + solo_track = None + if blender_data_object.animation_data: + blender_data_object.animation_data.action = None + blender_data_object.animation_data.use_nla = True + # Remove any solo (starred) NLA track. Restored after export + for track in blender_data_object.animation_data.nla_tracks: + if track.is_solo: + solo_track = track + track.is_solo = False + break + + solo_track_sk = None + if blender_type_data == ["materials", "lights"] \ + and blender_data_object.node_tree is not None \ + and blender_data_object.node_tree.animation_data is not None: + # Remove any solo (starred) NLA track. 
Restored after export + for track in blender_data_object.node_tree.animation_data.nla_tracks: + if track.is_solo: + solo_track_sk = track + track.is_solo = False + break + + # Mute all channels + if blender_type_data == "materials": + for track_group in [b[0] for b in blender_tracks if b[2] == "MATERIAL"]: + for track in track_group: + restore_track_mute["MATERIAL"][track.idx] = blender_data_object.animation_data.nla_tracks[track.idx].mute + blender_data_object.animation_data.nla_tracks[track.idx].mute = True + for track_group in [b[0] for b in blender_tracks if b[2] == "NODETREE"]: + for track in track_group: + restore_track_mute["NODETREE"][track.idx] = blender_data_object.node_tree.animation_data.nla_tracks[track.idx].mute + blender_data_object.node_tree.animation_data.nla_tracks[track.idx].mute = True + elif blender_type_data == "cameras": + for track_group in [b[0] for b in blender_tracks if b[2] == "CAMERA"]: + for track in track_group: + restore_track_mute["CAMERA"][track.idx] = blender_data_object.animation_data.nla_tracks[track.idx].mute + blender_data_object.animation_data.nla_tracks[track.idx].mute = True + elif blender_type_data == "lights": + for track_group in [b[0] for b in blender_tracks if b[2] == "LIGHT"]: + for track in track_group: + restore_track_mute["LIGHT"][track.idx] = blender_data_object.animation_data.nla_tracks[track.idx].mute + blender_data_object.animation_data.nla_tracks[track.idx].mute = True + for track_group in [b[0] for b in blender_tracks if b[2] == "NODETREE"]: + for track in track_group: + restore_track_mute["NODETREE"][track.idx] = blender_data_object.node_tree.animation_data.nla_tracks[track.idx].mute + blender_data_object.node_tree.animation_data.nla_tracks[track.idx].mute = True + + # Export + + # Export all collected tracks. 
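+ # For each collected track group: set its baked frame range, unmute only its NLA tracks,
+ # bake the sampled animation, then mute them again before restoring the original state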
+ for bl_tracks, track_name, on_type in blender_tracks: + prepare_tracks_range(blender_id, bl_tracks, track_name, export_settings, with_driver=False) + + if on_type in ["MATERIAL", "CAMERA", "LIGHT"]: + # Enable tracks + for track in bl_tracks: + blender_data_object.animation_data.nla_tracks[track.idx].mute = False + elif on_type == "NODETREE": + # Enable tracks + for track in bl_tracks: + blender_data_object.node_tree.animation_data.nla_tracks[track.idx].mute = False + + # Export animation + animation = bake_data_animation(blender_type_data, blender_id, track_name, on_type, export_settings) + get_cache_data.reset_cache() + if animation is not None: + animations.append(animation) + + # Store data for merging animation later + # Do not take into account default NLA track names + if not (track_name.startswith("NlaTrack") or track_name.startswith("[Action Stash]")): + if track_name not in tracks.keys(): + tracks[track_name] = [] + tracks[track_name].append(offset + len(animations) - 1) # Store index of animation in animations + + # Restoring muting + if on_type in ["MATERIAL", "CAMERA", "LIGHT"]: + for track in bl_tracks: + blender_data_object.animation_data.nla_tracks[track.idx].mute = True + elif on_type == "NODETREE": + for track in bl_tracks: + blender_data_object.node_tree.animation_data.nla_tracks[track.idx].mute = True + + # Restoring + if current_action is not None: + blender_data_object.animation_data.action = current_action + if current_nodetree_action is not None: + blender_data_object.node_tree.animation_data.action = current_nodetree_action + if solo_track is not None: + solo_track.is_solo = True + if solo_track_sk is not None: + solo_track_sk.is_solo = True + if blender_data_object.animation_data: + blender_data_object.animation_data.use_nla = current_use_nla + blender_data_object.animation_data.use_tweak_mode = restore_tweak_mode + if blender_type_data == "materials": + for track_group in [b[0] for b in blender_tracks if b[2] == "MATERIAL"]: + for track in track_group: + blender_data_object.animation_data.nla_tracks[track.idx].mute = restore_track_mute["MATERIAL"][track.idx] + elif blender_type_data == "cameras": + for track_group in [b[0] for b in blender_tracks if b[2] == "CAMERA"]: + for track in track_group: + blender_data_object.animation_data.nla_tracks[track.idx].mute = restore_track_mute["CAMERA"][track.idx] + elif blender_type_data == "lights": + for track_group in [b[0] for b in blender_tracks if b[2] == "LIGHT"]: + for track in track_group: + blender_data_object.animation_data.nla_tracks[track.idx].mute = restore_track_mute["LIGHT"][track.idx] + if blender_type_data in ["materials", "lights"] \ + and blender_data_object.node_tree is not None \ + and blender_data_object.node_tree.animation_data is not None: + blender_data_object.node_tree.animation_data.use_nla = current_use_nla_node_tree + for track_group in [b[0] for b in blender_tracks if b[2] == "NODETREE"]: + for track in track_group: + blender_data_object.node_tree.animation_data.nla_tracks[track.idx].mute = restore_track_mute["NODETREE"][track.idx] + + return animations, tracks + + +def __get_data_blender_tracks(blender_type_data, blender_id, export_settings): + tracks, names, types = __get_nla_tracks_material(blender_type_data, blender_id, export_settings) + if blender_type_data in ["materials", "lights"]: + tracks_tree, names_tree, types_tree = __get_nla_tracks_material_node_tree( + blender_type_data, blender_id, export_settings) + else: + tracks_tree, names_tree, types_tree = [], [], [] + + 
tracks.extend(tracks_tree) + names.extend(names_tree) + types.extend(types_tree) + + return list(zip(tracks, names, types)) + + +def __get_nla_tracks_material(blender_type_data, blender_id, export_settings): + if blender_type_data == "materials": + blender_data_object = [mat for mat in bpy.data.materials if id(mat) == blender_id][0] + elif blender_type_data == "cameras": + blender_data_object = [cam for cam in bpy.data.cameras if id(cam) == blender_id][0] + elif blender_type_data == "lights": + blender_data_object = [light for light in bpy.data.lights if id(light) == blender_id][0] + else: + pass # Should not happen + + if not blender_data_object.animation_data: + return [], [], [] + if len(blender_data_object.animation_data.nla_tracks) == 0: + return [], [], [] + + exported_tracks = [] + + current_exported_tracks = [] + + for idx_track, track in enumerate(blender_data_object.animation_data.nla_tracks): + if len(track.strips) == 0: + continue + + stored_track = NLATrack( + idx_track, + track.strips[0].frame_start, + track.strips[-1].frame_end, + track.is_solo, + track.mute + ) + + # Keep tracks where some blending together + if any([strip.blend_type != 'REPLACE' for strip in track.strips]): + # There is some blending. Keeping with previous track + pass + else: + # The previous one(s) can go to the list, if any (not for first track) + if len(current_exported_tracks) != 0: + exported_tracks.append(current_exported_tracks) + current_exported_tracks = [] + current_exported_tracks.append(stored_track) + + # End of loop. Keep the last one(s) + exported_tracks.append(current_exported_tracks) + + track_names = [blender_data_object.animation_data.nla_tracks[tracks_group[0].idx].name for tracks_group in exported_tracks] + if blender_type_data == "materials": + on_types = ['MATERIAL'] * len(track_names) + elif blender_type_data == "cameras": + on_types = ['CAMERA'] * len(track_names) + elif blender_type_data == "lights": + on_types = ['LIGHT'] * len(track_names) + else: + pass # Should not happen + return exported_tracks, track_names, on_types + + +def __get_nla_tracks_material_node_tree(blender_type_data, blender_id, export_settings): + if blender_type_data == "materials": + blender_object_data = [mat for mat in bpy.data.materials if id(mat) == blender_id][0] + elif blender_type_data == "lights": + blender_object_data = [light for light in bpy.data.lights if id(light) == blender_id][0] + + if not blender_object_data.node_tree: + return [], [], [] + if not blender_object_data.node_tree.animation_data: + return [], [], [] + if len(blender_object_data.node_tree.animation_data.nla_tracks) == 0: + return [], [], [] + + exported_tracks = [] + + current_exported_tracks = [] + + for idx_track, track in enumerate(blender_object_data.node_tree.animation_data.nla_tracks): + if len(track.strips) == 0: + continue + + stored_track = NLATrack( + idx_track, + track.strips[0].frame_start, + track.strips[-1].frame_end, + track.is_solo, + track.mute + ) + + # Keep tracks where some blending together + if any([strip.blend_type != 'REPLACE' for strip in track.strips]): + # There is some blending. Keeping with previous track + pass + else: + # The previous one(s) can go to the list, if any (not for first track) + if len(current_exported_tracks) != 0: + exported_tracks.append(current_exported_tracks) + current_exported_tracks = [] + current_exported_tracks.append(stored_track) + + # End of loop. 
Keep the last one(s) + exported_tracks.append(current_exported_tracks) + + track_names = [ + blender_object_data.node_tree.animation_data.nla_tracks[tracks_group[0].idx].name for tracks_group in exported_tracks] + on_types = ['NODETREE'] * len(track_names) + return exported_tracks, track_names, on_types diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/armature/armature_action_sampled.py b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/armature/armature_action_sampled.py new file mode 100644 index 00000000000..0ec6cec5365 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/armature/armature_action_sampled.py @@ -0,0 +1,88 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy +import typing +from ......io.exp.gltf2_io_user_extensions import export_user_extensions +from ......io.com import gltf2_io +from .....com.gltf2_blender_extras import generate_extras +from ...fcurves.gltf2_blender_gather_fcurves_sampler import gather_animation_fcurves_sampler +from .armature_channels import gather_armature_sampled_channels + + +def gather_action_armature_sampled(armature_uuid: str, + blender_action: typing.Optional[bpy.types.Action], + cache_key: str, + export_settings): + + blender_object = export_settings['vtree'].nodes[armature_uuid].blender_object + + name = __gather_name(blender_action, armature_uuid, cache_key, export_settings) + + try: + channels, extra_channels = __gather_channels( + armature_uuid, blender_action.name if blender_action else cache_key, export_settings) + animation = gltf2_io.Animation( + channels=channels, + extensions=None, + extras=__gather_extras(blender_action, export_settings), + name=name, + samplers=[] # We need to gather the samplers after gathering all channels --> populate this list in __link_samplers + ) + except RuntimeError as error: + export_settings['log'].warning("Animation '{}' could not be exported. 
Cause: {}".format(name, error)) + return None + + export_user_extensions('pre_gather_animation_hook', export_settings, animation, blender_action, blender_object) + + extra_samplers = [] + if export_settings['gltf_export_extra_animations']: + for chan in [chan for chan in extra_channels.values() if len(chan['properties']) != 0]: + for channel_group_name, channel_group in chan['properties'].items(): + + # No glTF channel here, as we don't have any target + # Trying to retrieve sampler directly + sampler = gather_animation_fcurves_sampler( + armature_uuid, tuple(channel_group), None, None, True, export_settings) + if sampler is not None: + extra_samplers.append((channel_group_name, sampler)) + + if not animation.channels: + return None, extra_samplers + + # To allow reuse of samplers in one animation : This will be done later, when we know all channels are here + + export_user_extensions( + 'gather_animation_hook', + export_settings, + animation, + blender_action, + blender_object) # For compatibility for older version + export_user_extensions('animation_action_armature_sampled', export_settings, + animation, blender_object, blender_action, cache_key) + + return animation, extra_samplers + + +def __gather_name(blender_action: bpy.types.Action, + armature_uuid: str, + cache_key: str, + export_settings + ) -> str: + if blender_action: + return blender_action.name + elif armature_uuid == cache_key: + return export_settings['vtree'].nodes[armature_uuid].blender_object.name + else: + return cache_key + + +def __gather_channels(armature_uuid, blender_action_name, export_settings) -> typing.List[gltf2_io.AnimationChannel]: + return gather_armature_sampled_channels(armature_uuid, blender_action_name, export_settings) + + +def __gather_extras(blender_action, export_settings): + if export_settings['gltf_extras']: + return generate_extras(blender_action) if blender_action else None + return None diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/armature/armature_channel_target.py b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/armature/armature_channel_target.py new file mode 100644 index 00000000000..f75c60f33f0 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/armature/armature_channel_target.py @@ -0,0 +1,54 @@ +# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +from ......io.exp.gltf2_io_user_extensions import export_user_extensions +from ......io.com import gltf2_io +from ....gltf2_blender_gather_cache import cached +from ....gltf2_blender_gather_joints import gather_joint_vnode + + +@cached +def gather_armature_sampled_channel_target( + armature_uuid: str, + bone: str, + channel: str, + export_settings +) -> gltf2_io.AnimationChannelTarget: + + blender_object = export_settings['vtree'].nodes[armature_uuid].blender_object + + animation_channel_target = gltf2_io.AnimationChannelTarget( + extensions=__gather_extensions(armature_uuid, bone, channel, export_settings), + extras=__gather_extras(armature_uuid, bone, channel, export_settings), + node=__gather_node(armature_uuid, bone, export_settings), + path=__gather_path(channel, export_settings) + ) + + export_user_extensions('gather_animation_bone_sampled_channel_target_hook', + export_settings, + blender_object, + bone, + channel) + + return animation_channel_target + + +def __gather_extensions(armature_uuid, bone, channel, export_settings): + return None + + +def __gather_extras(armature_uuid, bone, channel, 
export_settings): + return None + + +def __gather_node(armature_uuid, bone, export_settings): + return gather_joint_vnode(export_settings['vtree'].nodes[armature_uuid].bones[bone], export_settings) + + +def __gather_path(channel, export_settings): + return { + "location": "translation", + "rotation_quaternion": "rotation", + "scale": "scale" + }.get(channel) diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/armature/armature_channels.py b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/armature/armature_channels.py new file mode 100644 index 00000000000..903078d6a74 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/armature/armature_channels.py @@ -0,0 +1,216 @@ +# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy +import typing +from ......io.com import gltf2_io +from ......io.exp.gltf2_io_user_extensions import export_user_extensions +from .....com.gltf2_blender_conversion import get_gltf_interpolation +from .....com.gltf2_blender_conversion import get_target, get_channel_from_target +from ...fcurves.gltf2_blender_gather_fcurves_channels import get_channel_groups +from ...fcurves.gltf2_blender_gather_fcurves_channels import needs_baking +from ...gltf2_blender_gather_drivers import get_sk_drivers +from ..object.gltf2_blender_gather_object_channels import gather_sampled_object_channel +from ..shapekeys.gltf2_blender_gather_sk_channels import gather_sampled_sk_channel +from .armature_channel_target import gather_armature_sampled_channel_target +from .armature_sampler import gather_bone_sampled_animation_sampler + + +def gather_armature_sampled_channels(armature_uuid, blender_action_name, + export_settings) -> typing.List[gltf2_io.AnimationChannel]: + channels = [] + extra_channels = {} + + # Then bake all bones + bones_to_be_animated = [] + bones_uuid = export_settings["vtree"].get_all_bones(armature_uuid) + bones_to_be_animated = [ + export_settings["vtree"].nodes[b].blender_bone.name for b in bones_uuid if export_settings["vtree"].nodes[b].leaf_reference is None] + + # List of really animated bones is needed for optimization decision + list_of_animated_bone_channels = {} + if armature_uuid != blender_action_name and blender_action_name in bpy.data.actions: + # Not bake situation + channels_animated, to_be_sampled, extra_channels = get_channel_groups( + armature_uuid, bpy.data.actions[blender_action_name], export_settings) + for chan in [chan for chan in channels_animated.values() if chan['bone'] is not None]: + for prop in chan['properties'].keys(): + list_of_animated_bone_channels[(chan['bone'], get_channel_from_target(get_target(prop)))] = get_gltf_interpolation( + chan['properties'][prop][0].keyframe_points[0].interpolation) # Could be exported without sampling : keep interpolation + + for _, _, chan_prop, chan_bone in [chan for chan in to_be_sampled if chan[1] == "BONE"]: + list_of_animated_bone_channels[ + ( + chan_bone, + chan_prop, + ) + ] = get_gltf_interpolation("LINEAR") # if forced to be sampled, keep LINEAR interpolation + + for bone in bones_to_be_animated: + for p in ["location", "rotation_quaternion", "scale"]: + channel = gather_sampled_bone_channel( + armature_uuid, + bone, + p, + blender_action_name, + (bone, p) in list_of_animated_bone_channels.keys(), + list_of_animated_bone_channels[(bone, p)] if (bone, p) in list_of_animated_bone_channels.keys() else get_gltf_interpolation("LINEAR"), + export_settings) + if channel is not 
None: + channels.append(channel) + + # Retrieve animation on armature object itself, if any + # If armature is baked (no animation of armature), need to use all channels + if blender_action_name == armature_uuid or export_settings['gltf_animation_mode'] in ["SCENE", "NLA_TRACKS"]: + armature_channels = [] + else: + armature_channels = __gather_armature_object_channel( + armature_uuid, bpy.data.actions[blender_action_name], export_settings) + + for p in ["location", "rotation_quaternion", "scale"]: + armature_channel = gather_sampled_object_channel( + armature_uuid, + p, + blender_action_name, + p in [a[0] for a in armature_channels], + [c[1] for c in armature_channels if c[0] == p][0] if p in [a[0] for a in armature_channels] else "LINEAR", + export_settings + ) + + if armature_channel is not None: + channels.append(armature_channel) + + # Retrieve channels for drivers, if needed + drivers_to_manage = get_sk_drivers(armature_uuid, export_settings) + for obj_driver_uuid in drivers_to_manage: + channel = gather_sampled_sk_channel(obj_driver_uuid, armature_uuid + "_" + blender_action_name, export_settings) + if channel is not None: + channels.append(channel) + + return channels, extra_channels + + +def gather_sampled_bone_channel( + armature_uuid: str, + bone: str, + channel: str, + action_name: str, + node_channel_is_animated: bool, + node_channel_interpolation: str, + export_settings +): + + __target = __gather_target(armature_uuid, bone, channel, export_settings) + if __target.path is not None: + sampler = __gather_sampler( + armature_uuid, + bone, + channel, + action_name, + node_channel_is_animated, + node_channel_interpolation, + export_settings) + + if sampler is None: + # After check, no need to animate this node for this channel + return None + + animation_channel = gltf2_io.AnimationChannel( + extensions=None, + extras=None, + sampler=sampler, + target=__target + ) + + export_user_extensions('gather_animation_channel_hook', + export_settings, + animation_channel, + channel, + export_settings['vtree'].nodes[armature_uuid].blender_object, + bone, + action_name, + node_channel_is_animated + ) + + return animation_channel + return None + + +def __gather_target(armature_uuid: str, + bone: str, + channel: str, + export_settings + ) -> gltf2_io.AnimationChannelTarget: + + return gather_armature_sampled_channel_target( + armature_uuid, bone, channel, export_settings) + + +def __gather_sampler( + armature_uuid, + bone, + channel, + action_name, + node_channel_is_animated, + node_channel_interpolation, + export_settings): + return gather_bone_sampled_animation_sampler( + armature_uuid, + bone, + channel, + action_name, + node_channel_is_animated, + node_channel_interpolation, + export_settings + ) + + +def __gather_armature_object_channel(obj_uuid: str, blender_action, export_settings): + channels = [] + + channels_animated, to_be_sampled, extra_channels = get_channel_groups(obj_uuid, blender_action, export_settings) + # Remove all channel linked to bones, keep only directly object channels + channels_animated = [c for c in channels_animated.values() if c['type'] == "OBJECT"] + to_be_sampled = [c for c in to_be_sampled if c[1] == "OBJECT"] + + original_channels = [] + for c in channels_animated: + original_channels.extend([(prop, c['properties'][prop][0].keyframe_points[0].interpolation) + for prop in c['properties'].keys()]) + + for c, inter in original_channels: + channels.append( + ( + { + "location": "location", + "rotation_quaternion": "rotation_quaternion", + "rotation_euler": 
"rotation_quaternion", + "scale": "scale", + "delta_location": "location", + "delta_scale": "scale", + "delta_rotation_euler": "rotation_quaternion", + "delta_rotation_quaternion": "rotation_quaternion" + }.get(c), + get_gltf_interpolation(inter) + ) + ) + + for c in to_be_sampled: + channels.append( + ( + { + "location": "location", + "rotation_quaternion": "rotation_quaternion", + "rotation_euler": "rotation_quaternion", + "scale": "scale", + "delta_location": "location", + "delta_scale": "scale", + "delta_rotation_euler": "rotation_quaternion", + "delta_rotation_quaternion": "rotation_quaternion" + }.get(c[2]), + get_gltf_interpolation("LINEAR") # Forced to be sampled, so use LINEAR + ) + ) + + return channels diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/armature/armature_keyframes.py b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/armature/armature_keyframes.py new file mode 100644 index 00000000000..b94199eb327 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/armature/armature_keyframes.py @@ -0,0 +1,93 @@ +# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import typing +import numpy as np +from ....gltf2_blender_gather_cache import cached +from ...gltf2_blender_gather_keyframes import Keyframe +from ..gltf2_blender_gather_animation_sampling_cache import get_cache_data + + +@cached +def gather_bone_sampled_keyframes( + armature_uuid: str, + bone: str, + channel: str, + action_name: str, + node_channel_is_animated: bool, + export_settings +) -> typing.List[Keyframe]: + + start_frame = export_settings['ranges'][armature_uuid][action_name]['start'] + end_frame = export_settings['ranges'][armature_uuid][action_name]['end'] + + keyframes = [] + + frame = start_frame + step = export_settings['gltf_frame_step'] + + while frame <= end_frame: + key = Keyframe(None, frame, channel) + + mat = get_cache_data( + 'bone', + armature_uuid, + bone, + action_name, + frame, + step, + export_settings) + + trans, rot, scale = mat.decompose() + + key.value = { + "location": trans, + "rotation_quaternion": rot, + "scale": scale + }[channel] + + keyframes.append(key) + frame += step + + if len(keyframes) == 0: + # For example, option CROP negative frames, but all are negatives + return None + + if not export_settings['gltf_optimize_animation']: + # For bones, if all values are the same, keeping only if changing values, or if user want to keep data + if node_channel_is_animated is True: + return keyframes # Always keeping + else: + # baked bones + if export_settings['gltf_optimize_animation_keep_armature'] is False: + # Not keeping if not changing property + cst = fcurve_is_constant(keyframes) + return None if cst is True else keyframes + else: + # Keep data, as requested by user. 
We keep all samples, as user don't want to optimize + return keyframes + + else: + + # For armatures + # Check if all values are the same + # In that case, if there is no real keyframe on this channel for this given bone, + # We can ignore these keyframes + # if there are some fcurve, we can keep only 2 keyframes, first and last + cst = fcurve_is_constant(keyframes) + + if node_channel_is_animated is True: # fcurve on this bone for this property + # Keep animation, but keep only 2 keyframes if data are not changing + return [keyframes[0], keyframes[-1]] if cst is True and len(keyframes) >= 2 else keyframes + else: # bone is not animated (no fcurve) + # Not keeping if not changing property if user decided to not keep + if export_settings['gltf_optimize_animation_keep_armature'] is False: + return None if cst is True else keyframes + else: + # Keep at least 2 keyframes if data are not changing + return [keyframes[0], keyframes[-1]] if cst is True and len(keyframes) >= 2 else keyframes + + +def fcurve_is_constant(keyframes): + return all([j < 0.0001 for j in np.ptp([[k.value[i] for i in range(len(keyframes[0].value))] for k in keyframes], axis=0)]) diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/armature/armature_sampler.py b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/armature/armature_sampler.py new file mode 100644 index 00000000000..0edec680f33 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/armature/armature_sampler.py @@ -0,0 +1,231 @@ +# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy +import typing +import mathutils +from ......io.com import gltf2_io +from ......io.exp.gltf2_io_user_extensions import export_user_extensions +from ......io.com import gltf2_io_constants +from ......io.exp import gltf2_io_binary_data +from .....com import gltf2_blender_math +from ....gltf2_blender_gather_accessors import gather_accessor +from ....gltf2_blender_gather_cache import cached +from ....gltf2_blender_gather_tree import VExportNode +from .armature_keyframes import gather_bone_sampled_keyframes + + +@cached +def gather_bone_sampled_animation_sampler( + armature_uuid: str, + bone: str, + channel: str, + action_name: str, + node_channel_is_animated: bool, + node_channel_interpolation: str, + export_settings +): + + pose_bone = export_settings['vtree'].nodes[armature_uuid].blender_object.pose.bones[bone] + + keyframes = __gather_keyframes( + armature_uuid, + bone, + channel, + action_name, + node_channel_is_animated, + export_settings) + + if keyframes is None: + # After check, no need to animate this node for this channel + return None + + # Now we are raw input/output, we need to convert to glTF data + input, output = __convert_keyframes(armature_uuid, bone, channel, keyframes, action_name, export_settings) + + sampler = gltf2_io.AnimationSampler( + extensions=None, + extras=None, + input=input, + interpolation=__gather_interpolation( + node_channel_is_animated, + node_channel_interpolation, + keyframes, + export_settings), + output=output) + + export_user_extensions('gather_animation_sampler_hook', + export_settings, + sampler, + export_settings['vtree'].nodes[armature_uuid].blender_object, + pose_bone, + action_name, + node_channel_is_animated) + + return sampler + + +@cached +def __gather_keyframes( + armature_uuid: str, + bone: str, + channel: str, + action_name: str, + node_channel_is_animated: bool, + export_settings +): + + keyframes = 
gather_bone_sampled_keyframes( + armature_uuid, + bone, + channel, + action_name, + node_channel_is_animated, + export_settings + ) + + if keyframes is None: + # After check, no need to animation this node + return None + + return keyframes + + +def __convert_keyframes(armature_uuid, bone_name, channel, keyframes, action_name, export_settings): + + # Sliding can come from: + # - option SLIDE for negative frames + # - option to start animation at frame 0 for looping + if armature_uuid in export_settings['slide'].keys( + ) and action_name in export_settings['slide'][armature_uuid].keys(): + for k in keyframes: + k.frame += -export_settings['slide'][armature_uuid][action_name] + k.seconds = k.frame / (bpy.context.scene.render.fps * bpy.context.scene.render.fps_base) + + times = [k.seconds for k in keyframes] + input = gather_accessor( + gltf2_io_binary_data.BinaryData.from_list(times, gltf2_io_constants.ComponentType.Float), + gltf2_io_constants.ComponentType.Float, + len(times), + tuple([max(times)]), + tuple([min(times)]), + gltf2_io_constants.DataType.Scalar, + export_settings) + + is_yup = export_settings['gltf_yup'] + + bone = export_settings['vtree'].nodes[armature_uuid].blender_object.pose.bones[bone_name] + target_datapath = "pose.bones['" + bone_name + "']." + channel + + if bone.parent is None: + # bone at root of armature + axis_basis_change = mathutils.Matrix.Identity(4) + if is_yup: + axis_basis_change = mathutils.Matrix( + ((1.0, 0.0, 0.0, 0.0), + (0.0, 0.0, 1.0, 0.0), + (0.0, -1.0, 0.0, 0.0), + (0.0, 0.0, 0.0, 1.0))) + correction_matrix_local = axis_basis_change @ bone.bone.matrix_local + else: + # Bone is not at root of armature + # There are 2 cases : + parent_uuid = export_settings['vtree'].nodes[export_settings['vtree'] + .nodes[armature_uuid].bones[bone.name]].parent_uuid + if parent_uuid is not None and export_settings['vtree'].nodes[parent_uuid].blender_type == VExportNode.BONE: + # export bone is not at root of armature neither + blender_bone_parent = export_settings['vtree'].nodes[parent_uuid].blender_bone + correction_matrix_local = ( + blender_bone_parent.bone.matrix_local.inverted_safe() @ + bone.bone.matrix_local + ) + else: + # exported bone (after filter) is at root of armature + axis_basis_change = mathutils.Matrix.Identity(4) + if is_yup: + axis_basis_change = mathutils.Matrix( + ((1.0, 0.0, 0.0, 0.0), + (0.0, 0.0, 1.0, 0.0), + (0.0, -1.0, 0.0, 0.0), + (0.0, 0.0, 0.0, 1.0))) + correction_matrix_local = axis_basis_change + transform = correction_matrix_local + + values = [] + fps = (bpy.context.scene.render.fps * bpy.context.scene.render.fps_base) + for keyframe in keyframes: + # Transform the data and build gltf control points + value = gltf2_blender_math.transform(keyframe.value, target_datapath, transform, False) + keyframe_value = gltf2_blender_math.mathutils_to_gltf(value) + + if keyframe.in_tangent is not None: + # we can directly transform the tangent as it currently is represented by a control point + in_tangent = gltf2_blender_math.transform(keyframe.in_tangent, target_datapath, transform, False) + + # the tangent in glTF is relative to the keyframe value and uses seconds + if not isinstance(value, list): + in_tangent = fps * (in_tangent - value) + else: + in_tangent = [fps * (in_tangent[i] - value[i]) for i in range(len(value))] + keyframe_value = gltf2_blender_math.mathutils_to_gltf(in_tangent) + keyframe_value # append + + if keyframe.out_tangent is not None: + # we can directly transform the tangent as it currently is represented by a control point 
+ out_tangent = gltf2_blender_math.transform(keyframe.out_tangent, target_datapath, transform, False) + + # the tangent in glTF is relative to the keyframe value and uses seconds + if not isinstance(value, list): + out_tangent = fps * (out_tangent - value) + else: + out_tangent = [fps * (out_tangent[i] - value[i]) for i in range(len(value))] + keyframe_value = keyframe_value + gltf2_blender_math.mathutils_to_gltf(out_tangent) # append + + values += keyframe_value + + # store the keyframe data in a binary buffer + component_type = gltf2_io_constants.ComponentType.Float + data_type = gltf2_io_constants.DataType.vec_type_from_num(len(keyframes[0].value)) + + output = gltf2_io.Accessor( + buffer_view=gltf2_io_binary_data.BinaryData.from_list(values, component_type), + byte_offset=None, + component_type=component_type, + count=len(values) // gltf2_io_constants.DataType.num_elements(data_type), + extensions=None, + extras=None, + max=None, + min=None, + name=None, + normalized=None, + sparse=None, + type=data_type + ) + + return input, output + + +def __gather_interpolation(node_channel_is_animated, node_channel_interpolation, keyframes, export_settings): + + if len(keyframes) > 2: + # keep STEP as STEP, other become LINEAR + return { + "STEP": "STEP" + }.get(node_channel_interpolation, "LINEAR") + elif len(keyframes) == 1: + if node_channel_is_animated is False: + return "STEP" + elif node_channel_interpolation == "CUBICSPLINE": + return "LINEAR" # We can't have a single keyframe with CUBICSPLINE + else: + return node_channel_interpolation + else: + # If we only have 2 keyframes, set interpolation to STEP if baked + if node_channel_is_animated is False: + # baked => We have first and last keyframe + return "STEP" + else: + if keyframes[0].value == keyframes[1].value: + return "STEP" + else: + return "LINEAR" diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/data/gltf2_blender_gather_data_channel_target.py b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/data/gltf2_blender_gather_data_channel_target.py new file mode 100644 index 00000000000..a9fd74682a5 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/data/gltf2_blender_gather_data_channel_target.py @@ -0,0 +1,48 @@ +# SPDX-FileCopyrightText: 2018-2023 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +from ......io.com import gltf2_io +from ....gltf2_blender_gather_cache import cached + + +@cached +def gather_data_sampled_channel_target( + blender_type_data: str, + blender_id, + channel: str, + additional_key: str, # Used to differentiate between material / material node_tree + export_settings +) -> gltf2_io.AnimationChannelTarget: + + animation_channel_target = gltf2_io.AnimationChannelTarget( + extensions=__gather_extensions(blender_type_data, blender_id, channel, export_settings), + extras=__gather_extras(blender_type_data, blender_id, channel, export_settings), + node=__gather_node(blender_type_data, blender_id, export_settings), + path=__gather_path(blender_type_data, blender_id, channel, export_settings) + ) + + return animation_channel_target + + +def __gather_extensions(blender_type_data, blender_id, channel, export_settings): + return None + + +def __gather_extras(blender_type_data, blender_id, channel, export_settings): + return None + + +def __gather_node(blender_type_data, blender_id, export_settings): + if blender_type_data == "materials": + return export_settings['KHR_animation_pointer']['materials'][blender_id]['glTF_material'] 
+    elif blender_type_data == "lights":
+        return export_settings['KHR_animation_pointer']['lights'][blender_id]['glTF_light']
+    elif blender_type_data == "cameras":
+        return export_settings['KHR_animation_pointer']['cameras'][blender_id]['glTF_camera']
+    else:
+        pass  # This should never happen
+
+
+def __gather_path(blender_type_data, blender_id, channel, export_settings):
+    return export_settings['KHR_animation_pointer'][blender_type_data][blender_id]['paths'][channel]['path']
diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/data/gltf2_blender_gather_data_channels.py b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/data/gltf2_blender_gather_data_channels.py
new file mode 100644
index 00000000000..ae459f3c8de
--- /dev/null
+++ b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/data/gltf2_blender_gather_data_channels.py
@@ -0,0 +1,113 @@
+# SPDX-FileCopyrightText: 2018-2023 The glTF-Blender-IO authors
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import bpy
+import typing
+from ......io.com import gltf2_io
+from ......blender.com.gltf2_blender_conversion import get_gltf_interpolation
+from .gltf2_blender_gather_data_channel_target import gather_data_sampled_channel_target
+from .gltf2_blender_gather_data_sampler import gather_data_sampled_animation_sampler
+
+
+def gather_data_sampled_channels(blender_type_data, blender_id, blender_action_name,
+                                 additional_key, export_settings) -> typing.List[gltf2_io.AnimationChannel]:
+    channels = []
+
+    list_of_animated_data_channels = {}  # TODOPointer
+
+    baseColorFactor_alpha_merged_already_done = False
+    for path in export_settings['KHR_animation_pointer'][blender_type_data][blender_id]['paths'].keys():
+
+        # Do not manage alpha, as it will be managed by the baseColorFactor (merging Color and alpha)
+        if export_settings['KHR_animation_pointer'][blender_type_data][blender_id]['paths'][path][
+                'path'] == "/materials/XXX/pbrMetallicRoughness/baseColorFactor" and baseColorFactor_alpha_merged_already_done is True:
+            continue
+
+        channel = gather_sampled_data_channel(
+            blender_type_data,
+            blender_id,
+            path,
+            blender_action_name,
+            path in list_of_animated_data_channels.keys(),
+            list_of_animated_data_channels[path] if path in list_of_animated_data_channels.keys() else get_gltf_interpolation("LINEAR"),
+            additional_key,
+            export_settings)
+        if channel is not None:
+            channels.append(channel)
+
+        if export_settings['KHR_animation_pointer'][blender_type_data][blender_id]['paths'][path]['path'] == "/materials/XXX/pbrMetallicRoughness/baseColorFactor":
+            baseColorFactor_alpha_merged_already_done = True
+
+    return channels
+
+
+def gather_sampled_data_channel(
+        blender_type_data: str,
+        blender_id: str,
+        channel: str,
+        action_name: str,
+        node_channel_is_animated: bool,
+        node_channel_interpolation: str,
+        additional_key: str,  # Used to differentiate between material / material node_tree
+        export_settings
+):
+
+    __target = __gather_target(blender_type_data, blender_id, channel, additional_key, export_settings)
+    if __target.path is not None:
+        sampler = __gather_sampler(
+            blender_type_data,
+            blender_id,
+            channel,
+            action_name,
+            node_channel_is_animated,
+            node_channel_interpolation,
+            additional_key,
+            export_settings)
+
+        if sampler is None:
+            # After check, no need to animate this node for this channel
+            return None
+
+        animation_channel = gltf2_io.AnimationChannel(
+            extensions=None,
+            extras=None,
+            sampler=sampler,
+            target=__target
+        )
+
+        return animation_channel
+    return None
+
+
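+# Note: the two private helpers below are thin wrappers that simply delegate to
+# gather_data_sampled_channel_target() and gather_data_sampled_animation_sampler().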
+def __gather_target( + blender_type_data: str, + blender_id: str, + channel: str, + additional_key: str, # Used to differentiate between material / material node_tree + export_settings +) -> gltf2_io.AnimationChannelTarget: + + return gather_data_sampled_channel_target( + blender_type_data, blender_id, channel, additional_key, export_settings) + + +def __gather_sampler( + blender_type_data, + blender_id, + channel, + action_name, + node_channel_is_animated, + node_channel_interpolation, + additional_key, + export_settings): + return gather_data_sampled_animation_sampler( + blender_type_data, + blender_id, + channel, + action_name, + node_channel_is_animated, + node_channel_interpolation, + additional_key, + export_settings + ) diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/data/gltf2_blender_gather_data_keyframes.py b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/data/gltf2_blender_gather_data_keyframes.py new file mode 100644 index 00000000000..22bef0f4196 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/data/gltf2_blender_gather_data_keyframes.py @@ -0,0 +1,142 @@ +# SPDX-FileCopyrightText: 2018-2023 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import typing +import math +import numpy as np +import bpy +from .....com.gltf2_blender_conversion import PBR_WATTS_TO_LUMENS +from ....gltf2_blender_gather_cache import cached +from ...gltf2_blender_gather_keyframes import Keyframe +from ..gltf2_blender_gather_animation_sampling_cache import get_cache_data + + +@cached +def gather_data_sampled_keyframes( + blender_type_data: str, + blender_id, + channel, + action_name, + node_channel_is_animated: bool, + additional_key, # Used to differentiate between material / material node_tree + export_settings): + + start_frame = export_settings['ranges'][blender_id][action_name]['start'] + end_frame = export_settings['ranges'][blender_id][action_name]['end'] + + keyframes = [] + + frame = start_frame + step = export_settings['gltf_frame_step'] + while frame <= end_frame: + + # Retrieve length of data to export + if export_settings['KHR_animation_pointer'][blender_type_data][blender_id]['paths'][channel]['path'] != "/materials/XXX/pbrMetallicRoughness/baseColorFactor": + length = export_settings['KHR_animation_pointer'][blender_type_data][blender_id]['paths'][channel]['length'] + else: + length = 4 + + key = Keyframe([None] * length, frame, 'value') + + value = get_cache_data( + 'value', + blender_id, + channel, + action_name, + frame, + step, + export_settings + ) + + # Convert data if needed + if blender_type_data == "materials": + if "attenuationDistance" in export_settings['KHR_animation_pointer']['materials'][blender_id]['paths'][channel]['path']: + value = 1.0 / value if value != 0.0 else 1e13 + + if export_settings['KHR_animation_pointer']['materials'][blender_id]['paths'][channel]['path'] == "/materials/XXX/occlusionTexture/strength": + if export_settings['KHR_animation_pointer']['materials'][blender_id]['paths'][channel]['reverse'] is True: + value = 1.0 - value + + if export_settings['KHR_animation_pointer']['materials'][blender_id]['paths'][channel]['path'] == "/materials/XXX/emissiveFactor": + # We need to retrieve the strength of the emissive too + strength = get_cache_data( + 'value', + blender_id, + export_settings['KHR_animation_pointer']['materials'][blender_id]['paths'][channel]['strength_channel'], + action_name, + frame, + step, + export_settings + ) + + value = [f * strength 
for f in value] + if any([i > 1.0 for i in value or []]): + # Clamp to range [0,1] + # Official glTF clamp to range [0,1] + # If we are outside, we need to use extension KHR_materials_emissive_strength + strength = max(value) + value = [f / strength for f in value] + else: + pass # Don't need to do anything, as we are in the range [0,1] + + if export_settings['KHR_animation_pointer']['materials'][blender_id]['paths'][channel][ + 'path'] == "/materials/XXX/extensions/KHR_materials_emissive_strength/emissiveStrength": + # We need to retrieve the emissive factor + factor = get_cache_data( + 'value', + blender_id, + export_settings['KHR_animation_pointer']['materials'][blender_id]['paths'][channel]['factor_channel'], + action_name, + frame, + step, + export_settings + ) + + factor = [f * value for f in factor] + if any([i > 1.0 for i in factor or []]): + # Clamp to range [0,1] + # Official glTF clamp to range [0,1] + # If we are outside, we need to use extension KHR_materials_emissive_strength + value = max(factor) + else: + value = 1.0 # no need to have an emissiveStrength extension for this frame + + # For specularFactor and specularColorFactor, we already multiplied it by 2.0, and clamp it to 1.0 (and adapt specularColor accordingly) + # This is done in cache retrieval + + elif blender_type_data == "lights": + if export_settings['KHR_animation_pointer']['lights'][blender_id]['paths'][channel]['path'] == "/extensions/KHR_lights_punctual/lights/XXX/intensity": + # Lights need conversion in case quadratic_falloff_node is used, for intensity + if 'quadratic_falloff_node' in channel: + value /= (math.pi * 4.0) + + if export_settings['gltf_lighting_mode'] == 'SPEC' \ + and export_settings['KHR_animation_pointer']['lights'][blender_id]['paths'][channel]['lamp_type'] != "SUN": + value *= PBR_WATTS_TO_LUMENS + + if export_settings['KHR_animation_pointer']['lights'][blender_id]['paths'][channel]['path'] == "/extensions/KHR_lights_punctual/lights/XXX/spot.outerConeAngle": + value *= 0.5 + + # innerConeAngle is handled in cache retrieval, as it requires spot_size and spot_blend + + # Camera yvof is calculated in cache retrieval, as it requires sensor_fit, angle, aspect ratio + + key.value_total = value + keyframes.append(key) + frame += step + + if len(keyframes) == 0: + # For example, option CROP negative frames, but all are negatives + return None + + cst = fcurve_is_constant(keyframes) + return None if cst is True else keyframes + + +def fcurve_is_constant(keyframes): + if type(keyframes[0].value).__name__ == "float": + return all([j < 0.0001 for j in np.ptp([[k.value] for k in keyframes], axis=0)]) + else: + return all([j < 0.0001 for j in np.ptp([[k.value[i] + for i in range(len(keyframes[0].value))] for k in keyframes], axis=0)]) diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/data/gltf2_blender_gather_data_sampler.py b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/data/gltf2_blender_gather_data_sampler.py new file mode 100644 index 00000000000..b0e6bf5c983 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/data/gltf2_blender_gather_data_sampler.py @@ -0,0 +1,136 @@ +# SPDX-FileCopyrightText: 2018-2023 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy +import typing +from ......io.com import gltf2_io +from ......io.exp import gltf2_io_binary_data +from ......io.com import gltf2_io_constants +from ....gltf2_blender_gather_cache import cached +from ....gltf2_blender_gather_accessors 
import gather_accessor +from .gltf2_blender_gather_data_keyframes import gather_data_sampled_keyframes + + +@cached +def gather_data_sampled_animation_sampler( + blender_type_data: str, + blender_id: str, + channel: str, + action_name: str, + node_channel_is_animated: bool, + node_channel_interpolation: str, + additional_key: str, # Used to differentiate between material / material node_tree + export_settings +): + + keyframes = __gather_keyframes( + blender_type_data, + blender_id, + channel, + action_name, + node_channel_is_animated, + additional_key, + export_settings) + + if keyframes is None: + # After check, no need to animate this node for this channel + return None + + # Now we are raw input/output, we need to convert to glTF data + input, output = __convert_keyframes(blender_type_data, blender_id, channel, keyframes, action_name, export_settings) + + sampler = gltf2_io.AnimationSampler(extensions=None, extras=None, input=input, interpolation=__gather_interpolation( + blender_type_data, node_channel_is_animated, node_channel_interpolation, keyframes, export_settings), output=output) + + return sampler + + +def __gather_keyframes( + blender_type_data, + blender_id, + channel, + action_name, + node_channel_is_animated, + additional_key, # Used to differentiate between material / material node_tree + export_settings): + + keyframes = gather_data_sampled_keyframes( + blender_type_data, + blender_id, + channel, + action_name, + node_channel_is_animated, + additional_key, + export_settings + ) + + if keyframes is None: + # After check, no need to animation this node + return None + + return keyframes + + +def __convert_keyframes(blender_type_data, blender_id, channel, keyframes, action_name, export_settings): + + # Sliding can come from: + # - option SLIDE for negative frames + # - option to start animation at frame 0 for looping + if blender_id in export_settings['slide'].keys() and action_name in export_settings['slide'][blender_id].keys(): + for k in keyframes: + k.frame += -export_settings['slide'][blender_id][action_name] + k.seconds = k.frame / bpy.context.scene.render.fps + + times = [k.seconds for k in keyframes] + input = gather_accessor( + gltf2_io_binary_data.BinaryData.from_list(times, gltf2_io_constants.ComponentType.Float), + gltf2_io_constants.ComponentType.Float, + len(times), + tuple([max(times)]), + tuple([min(times)]), + gltf2_io_constants.DataType.Scalar, + export_settings) + + values = [] + for keyframe in keyframes: + keyframe_value = __convert_to_gltf(keyframe.value) + values += keyframe_value + + # store the keyframe data in a binary buffer + component_type = gltf2_io_constants.ComponentType.Float + if type(keyframes[0].value).__name__ != "float": + data_type = gltf2_io_constants.DataType.vec_type_from_num(len(keyframes[0].value)) + else: + data_type = gltf2_io_constants.DataType.vec_type_from_num(1) + + output = gltf2_io.Accessor( + buffer_view=gltf2_io_binary_data.BinaryData.from_list(values, component_type), + byte_offset=None, + component_type=component_type, + count=len(values) // gltf2_io_constants.DataType.num_elements(data_type), + extensions=None, + extras=None, + max=None, + min=None, + name=None, + normalized=None, + sparse=None, + type=data_type + ) + + return input, output + + +def __gather_interpolation( + blender_type_data, + node_channel_is_animated, + node_channel_interpolation, + keyframes, + export_settings): + # TODOPointer + return 'LINEAR' + + +def __convert_to_gltf(value): + return value if type(value).__name__ != "float" else [value] diff 
--git a/scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/gltf2_blender_gather_animation_sampling_cache.py b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/gltf2_blender_gather_animation_sampling_cache.py new file mode 100644 index 00000000000..db8c4099a44 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/gltf2_blender_gather_animation_sampling_cache.py @@ -0,0 +1,662 @@ +# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import mathutils +import bpy +import typing +from .....blender.com.gltf2_blender_data_path import get_sk_exported +from .....blender.com.gltf2_blender_conversion import inverted_trs_mapping_node, texture_transform_blender_to_gltf, yvof_blender_to_gltf +from ...gltf2_blender_gather_cache import datacache +from ...gltf2_blender_gather_tree import VExportNode +from ..gltf2_blender_gather_drivers import get_sk_drivers + +# Warning : If you change some parameter here, need to be changed in cache system + + +@datacache +def get_cache_data(path: str, + blender_obj_uuid: str, + bone: typing.Optional[str], + action_name: str, + current_frame: int, + step: int, + export_settings, + only_gather_provided=False + ): + + data = {} + + min_, max_ = get_range(blender_obj_uuid, action_name, export_settings) + + if only_gather_provided: + # If object is not in vtree, this is a material or light for pointers + obj_uuids = [blender_obj_uuid] if blender_obj_uuid in export_settings['vtree'].nodes.keys() else [] + else: + obj_uuids = [uid for (uid, n) in export_settings['vtree'].nodes.items() + if n.blender_type not in [VExportNode.BONE]] + + # For TRACK mode, we reset cache after each track export, so we don't need to keep others objects + if export_settings['gltf_animation_mode'] in "NLA_TRACKS": + # If object is not in vtree, this is a material or light for pointers + obj_uuids = [blender_obj_uuid] if blender_obj_uuid in export_settings['vtree'].nodes.keys() else [] + + # If there is only 1 object to cache, we can disable viewport for other objects (for performance) + # This can be on these cases: + # - TRACK mode + # - Only one object to cache (but here, no really useful for performance) + # - Action mode, where some object have multiple actions + # - For this case, on first call, we will cache active action for all objects + # - On next calls, we will cache only the action of current object, so we can disable viewport for others + # For armature : We already checked that we can disable viewport (in case + # of drivers, this is currently not possible) + + need_to_enable_again = False + if export_settings['gltf_optimize_armature_disable_viewport'] is True and len(obj_uuids) == 1: + need_to_enable_again = True + # Before baking, disabling from viewport all meshes + for obj in [n.blender_object for n in export_settings['vtree'].nodes.values() if n.blender_type in + [VExportNode.OBJECT, VExportNode.ARMATURE, VExportNode.COLLECTION]]: + if obj is None: + continue + obj.hide_viewport = True + export_settings['vtree'].nodes[obj_uuids[0]].blender_object.hide_viewport = False + + depsgraph = bpy.context.evaluated_depsgraph_get() + + frame = min_ + while frame <= max_: + bpy.context.scene.frame_set(int(frame)) + current_instance = {} # For GN instances, we are going to track instances by their order in instance iterator + + object_caching(data, obj_uuids, current_instance, action_name, frame, depsgraph, export_settings) + + # KHR_animation_pointer caching for materials, 
lights, cameras + if export_settings['gltf_export_anim_pointer'] is True: + material_nodetree_caching(data, action_name, frame, export_settings) + material_caching(data, action_name, frame, export_settings) + light_nodetree_caching(data, action_name, frame, export_settings) + camera_caching(data, action_name, frame, export_settings) + + frame += step + + # And now, restoring meshes in viewport + for node, obj in [(n, n.blender_object) for n in export_settings['vtree'].nodes.values() if n.blender_type in + [VExportNode.OBJECT, VExportNode.ARMATURE, VExportNode.COLLECTION]]: + obj.hide_viewport = node.default_hide_viewport + + return data + +# For perf, we may be more precise, and get a list of ranges to be exported that include all needed frames + + +def get_range(obj_uuid, key, export_settings): + if export_settings['gltf_animation_mode'] in ["NLA_TRACKS"]: + return export_settings['ranges'][obj_uuid][key]['start'], export_settings['ranges'][obj_uuid][key]['end'] + else: + min_ = None + max_ = None + for obj in export_settings['ranges'].keys(): + for anim in export_settings['ranges'][obj].keys(): + if min_ is None or min_ > export_settings['ranges'][obj][anim]['start']: + min_ = export_settings['ranges'][obj][anim]['start'] + if max_ is None or max_ < export_settings['ranges'][obj][anim]['end']: + max_ = export_settings['ranges'][obj][anim]['end'] + return min_, max_ + + +def initialize_data_dict(data, key1, key2, key3, key4): + # No check on key1, this is already done before calling this function + if key2 not in data[key1].keys(): + data[key1][key2] = {} + data[key1][key2][key3] = {} + data[key1][key2][key3][key4] = {} + + +def material_caching(data, action_name, frame, export_settings): + for mat in export_settings['KHR_animation_pointer']['materials'].keys(): + if len(export_settings['KHR_animation_pointer']['materials'][mat]['paths']) == 0: + continue + + blender_material = [m for m in bpy.data.materials if id(m) == mat] + if len(blender_material) == 0: + # This is not a material from Blender (coming from Geometry Node for example, so no animation on it) + continue + else: + blender_material = blender_material[0] + if mat not in data.keys(): + data[mat] = {} + + if blender_material and blender_material.animation_data and blender_material.animation_data.action \ + and export_settings['gltf_animation_mode'] in ["ACTIVE_ACTIONS", "ACTIONS"]: + key1, key2, key3 = mat, blender_material.animation_data.action.name, "value" + elif export_settings['gltf_animation_mode'] in ["NLA_TRACKS"]: + key1, key2, key3 = mat, action_name, "value" + else: + # case of baking materials (scene export). 
+ # There is no animation, so use id as key + key1, key2, key3 = mat, mat, "value" + + if key2 not in data[key1].keys(): + data[key1][key2] = {} + data[key1][key2][key3] = {} + + for path in export_settings['KHR_animation_pointer']['materials'][mat]['paths'].keys(): + data[key1][key2][key3][path] = {} + + for path in export_settings['KHR_animation_pointer']['materials'][mat]['paths'].keys(): + + if path.startswith("node_tree"): + continue + + val = blender_material.path_resolve(path) + if type(val).__name__ == "float": + data[key1][key2][key3][path][frame] = val + else: + data[key1][key2][key3][path][frame] = list(val) + + +def material_nodetree_caching(data, action_name, frame, export_settings): + # After caching objects, caching materials, for KHR_animation_pointer + for mat in export_settings['KHR_animation_pointer']['materials'].keys(): + if len(export_settings['KHR_animation_pointer']['materials'][mat]['paths']) == 0: + continue + + blender_material = [m for m in bpy.data.materials if id(m) == mat] + if len(blender_material) == 0: + # This is not a material from Blender (coming from Geometry Node for example, so no animation on it) + continue + else: + blender_material = blender_material[0] + if mat not in data.keys(): + data[mat] = {} + + if blender_material.node_tree and blender_material.node_tree.animation_data and blender_material.node_tree.animation_data.action \ + and export_settings['gltf_animation_mode'] in ["ACTIVE_ACTIONS", "ACTIONS"]: + + key1, key2, key3 = mat, blender_material.node_tree.animation_data.action.name, "value" + elif export_settings['gltf_animation_mode'] in ["NLA_TRACKS"]: + key1, key2, key3 = mat, action_name, "value" + else: + # case of baking materials (scene export). + # There is no animation, so use id as key + + key1, key2, key3 = mat, mat, "value" + + if key2 not in data[key1].keys(): + data[key1][key2] = {} + data[key1][key2][key3] = {} + for path in export_settings['KHR_animation_pointer']['materials'][mat]['paths'].keys(): + data[key1][key2][key3][path] = {} + + baseColorFactor_alpha_merged_already_done = False + for path in export_settings['KHR_animation_pointer']['materials'][mat]['paths'].keys(): + + if not path.startswith("node_tree"): + continue + + # Manage special case where we merge baseColorFactor and alpha + if export_settings['KHR_animation_pointer']['materials'][mat]['paths'][path]['path'] == "/materials/XXX/pbrMetallicRoughness/baseColorFactor" \ + and export_settings['KHR_animation_pointer']['materials'][mat]['paths'][path]['length'] == 3: + if baseColorFactor_alpha_merged_already_done is True: + continue + val_color = blender_material.path_resolve(path) + data_color = list(val_color)[:export_settings['KHR_animation_pointer'] + ['materials'][mat]['paths'][path]['length']] + if export_settings['KHR_animation_pointer']['materials'][mat]['paths'][path]['additional_path'] is not None: + val_alpha = blender_material.path_resolve( + export_settings['KHR_animation_pointer']['materials'][mat]['paths'][path]['additional_path']) + else: + val_alpha = 1.0 + data[key1][key2][key3][path][frame] = data_color + [val_alpha] + baseColorFactor_alpha_merged_already_done = True + # Manage special case where we merge baseColorFactor and alpha + elif export_settings['KHR_animation_pointer']['materials'][mat]['paths'][path]['path'] == "/materials/XXX/pbrMetallicRoughness/baseColorFactor" \ + and export_settings['KHR_animation_pointer']['materials'][mat]['paths'][path]['length'] == 1: + if baseColorFactor_alpha_merged_already_done is True: + continue + 
val_alpha = blender_material.path_resolve(path) + if export_settings['KHR_animation_pointer']['materials'][mat]['paths'][path]['additional_path'] is not None: + val_color = blender_material.path_resolve( + export_settings['KHR_animation_pointer']['materials'][mat]['paths'][path]['additional_path']) + data_color = list(val_color)[:export_settings['KHR_animation_pointer'] + ['materials'][mat]['paths']['additional_path']['length']] + else: + data_color = [1.0, 1.0, 1.0] + data[key1][key2][key3][path][frame] = data_color + [val_alpha] + baseColorFactor_alpha_merged_already_done = True + + # Manage special case for KHR_texture_transform offset, that needs + # rotation and scale too (and not only translation) + elif "KHR_texture_transform" in export_settings['KHR_animation_pointer']['materials'][mat]['paths'][path]['path'] \ + and export_settings['KHR_animation_pointer']['materials'][mat]['paths'][path]['path'].endswith("offset"): + + val_offset = blender_material.path_resolve(path) + rotation_path = [ + i for i in export_settings['KHR_animation_pointer']['materials'][mat]['paths'].keys() if export_settings['KHR_animation_pointer']['materials'][mat]['paths'][i]['path'].rsplit( + "/", + 1)[0] == export_settings['KHR_animation_pointer']['materials'][mat]['paths'][path]['path'].rsplit( + "/", + 1)[0] and export_settings['KHR_animation_pointer']['materials'][mat]['paths'][i]['path'].rsplit( + "/", + 1)[1] == "rotation"][0] + val_rotation = blender_material.path_resolve(rotation_path) + scale_path = [ + i for i in export_settings['KHR_animation_pointer']['materials'][mat]['paths'].keys() if export_settings['KHR_animation_pointer']['materials'][mat]['paths'][i]['path'].rsplit( + "/", + 1)[0] == export_settings['KHR_animation_pointer']['materials'][mat]['paths'][path]['path'].rsplit( + "/", + 1)[0] and export_settings['KHR_animation_pointer']['materials'][mat]['paths'][i]['path'].rsplit( + "/", + 1)[1] == "scale"][0] + val_scale = blender_material.path_resolve(scale_path) + + mapping_transform = {} + mapping_transform["offset"] = [val_offset[0], val_offset[1]] + mapping_transform["rotation"] = val_rotation + mapping_transform["scale"] = [val_scale[0], val_scale[1]] + + if export_settings['KHR_animation_pointer']['materials'][mat]['paths'][path]['vector_type'] == "TEXTURE": + mapping_transform = inverted_trs_mapping_node(mapping_transform) + if mapping_transform is None: + # Can not be converted to TRS, so ... keeping default values + export_settings['log'].warning( + "Can not convert texture transform to TRS. 
Keeping default values.") + mapping_transform = {} + mapping_transform["offset"] = [0.0, 0.0] + mapping_transform["rotation"] = 0.0 + mapping_transform["scale"] = [1.0, 1.0] + elif export_settings['KHR_animation_pointer']['materials'][mat]['paths'][path]['vector_type'] == "VECTOR": + # Vectors don't get translated + mapping_transform["offset"] = [0, 0] + + texture_transform = texture_transform_blender_to_gltf(mapping_transform) + + data[key1][key2][key3][path][frame] = texture_transform['offset'] + data[key1][key2][key3][rotation_path][frame] = texture_transform['rotation'] + data[key1][key2][key3][scale_path][frame] = texture_transform['scale'] + if export_settings['KHR_animation_pointer']['materials'][mat]['paths'][path]['vector_type'] != "VECTOR": + # Already handled by offset + continue + else: + val = blender_material.path_resolve(path) + mapping_transform = {} + mapping_transform["offset"] = [0, 0] # Placeholder, not needed + mapping_transform["rotation"] = val + mapping_transform["scale"] = [1, 1] # Placeholder, not needed + texture_transform = texture_transform_blender_to_gltf(mapping_transform) + data[key1][key2][key3][path][frame] = texture_transform['rotation'] + elif "KHR_texture_transform" in export_settings['KHR_animation_pointer']['materials'][mat]['paths'][path]['path'] \ + and export_settings['KHR_animation_pointer']['materials'][mat]['paths'][path]['path'].endswith("scale"): + if export_settings['KHR_animation_pointer']['materials'][mat]['paths'][path]['vector_type'] != "VECTOR": + # Already handled by offset + continue + else: + val = blender_material.path_resolve(path) + mapping_transform = {} + mapping_transform["offset"] = [0, 0] # Placeholder, not needed + mapping_transform["rotation"] = 0.0 # Placeholder, not needed + mapping_transform["scale"] = [val[0], val[1]] + texture_transform = texture_transform_blender_to_gltf(mapping_transform) + data[key1][key2][key3][path][frame] = texture_transform['rotation'] + + # Manage special cases for specularFactor & specularColorFactor + elif export_settings['KHR_animation_pointer']['materials'][mat]['paths'][path]['path'] == "/materials/XXX/extensions/KHR_materials_specular/specularFactor": + val = blender_material.path_resolve(path) + val = val * 2.0 + if val > 1.0: + fac = val + val = 1.0 + else: + fac = 1.0 + + data[key1][key2][key3][path][frame] = val + + # Retrieve specularColorFactor + colorfactor_path = [ + i for i in export_settings['KHR_animation_pointer']['materials'][mat]['paths'].keys() if export_settings['KHR_animation_pointer']['materials'][mat]['paths'][i]['path'].rsplit( + "/", + 1)[0] == export_settings['KHR_animation_pointer']['materials'][mat]['paths'][path]['path'].rsplit( + "/", + 1)[0] and export_settings['KHR_animation_pointer']['materials'][mat]['paths'][i]['path'].rsplit( + "/", + 1)[1] == "specularColorFactor"][0] + val_colorfactor = blender_material.path_resolve(colorfactor_path) + if fac > 1.0: + val_colorfactor = [i * fac for i in val_colorfactor] + data[key1][key2][key3][colorfactor_path][frame] = val_colorfactor + elif export_settings['KHR_animation_pointer']['materials'][mat]['paths'][path]['path'] == "/materials/XXX/extensions/KHR_materials_specular/specularColorFactor": + # Already handled by specularFactor + continue + + # Classic case + else: + val = blender_material.path_resolve(path) + if type(val).__name__ == "float": + data[key1][key2][key3][path][frame] = val + else: + data[key1][key2][key3][path][frame] = list(val)[ + 
:export_settings['KHR_animation_pointer']['materials'][mat]['paths'][path]['length']] + + +def armature_caching(data, obj_uuid, blender_obj, action_name, frame, export_settings): + bones = export_settings['vtree'].get_all_bones(obj_uuid) + if blender_obj.animation_data and blender_obj.animation_data.action \ + and export_settings['gltf_animation_mode'] in ["ACTIVE_ACTIONS", "ACTIONS", "BROADCAST"]: + key1, key2, key3 = obj_uuid, blender_obj.animation_data.action.name, "bone" + elif blender_obj.animation_data \ + and export_settings['gltf_animation_mode'] in ["NLA_TRACKS"]: + key1, key2, key3 = obj_uuid, action_name, "bone" + else: + key1, key2, key3 = obj_uuid, obj_uuid, "bone" + + if key3 not in data[key1][key2].keys(): + data[key1][key2][key3] = {} + + for bone_uuid in [bone for bone in bones if export_settings['vtree'].nodes[bone].leaf_reference is None]: + blender_bone = export_settings['vtree'].nodes[bone_uuid].blender_bone + + if export_settings['vtree'].nodes[bone_uuid].parent_uuid is not None and export_settings['vtree'].nodes[ + export_settings['vtree'].nodes[bone_uuid].parent_uuid].blender_type == VExportNode.BONE: + blender_bone_parent = export_settings['vtree'].nodes[export_settings['vtree'] + .nodes[bone_uuid].parent_uuid].blender_bone + rest_mat = blender_bone_parent.bone.matrix_local.inverted_safe() @ blender_bone.bone.matrix_local + matrix = rest_mat.inverted_safe() @ blender_bone_parent.matrix.inverted_safe() @ blender_bone.matrix + else: + if blender_bone.parent is None: + matrix = blender_bone.bone.matrix_local.inverted_safe() @ blender_bone.matrix + else: + # Bone has a parent, but in export, after filter, is at root of armature + matrix = blender_bone.matrix.copy() + + # Because there is no armature object, we need to apply the TRS of armature to the root bone + if export_settings['gltf_armature_object_remove'] is True: + matrix = matrix @ blender_obj.matrix_world + + if blender_bone.name not in data[key1][key2][key3].keys(): + data[key1][key2][key3][blender_bone.name] = {} + data[key1][key2][key3][blender_bone.name][frame] = matrix + + +def object_caching(data, obj_uuids, current_instance, action_name, frame, depsgraph, export_settings): + for obj_uuid in obj_uuids: + blender_obj = export_settings['vtree'].nodes[obj_uuid].blender_object + if blender_obj is None: # GN instance + if export_settings['vtree'].nodes[obj_uuid].parent_uuid not in current_instance.keys(): + current_instance[export_settings['vtree'].nodes[obj_uuid].parent_uuid] = 0 + + # TODO: we may want to avoid looping on all objects, but an accurate filter must be found + + # calculate local matrix + if export_settings['vtree'].nodes[obj_uuid].parent_uuid is None: + parent_mat = mathutils.Matrix.Identity(4).freeze() + else: + if export_settings['vtree'].nodes[export_settings['vtree'].nodes[obj_uuid].parent_uuid].blender_type not in [ + VExportNode.BONE]: + if export_settings['vtree'].nodes[export_settings['vtree'] + .nodes[obj_uuid].parent_uuid].blender_type != VExportNode.COLLECTION: + parent_mat = export_settings['vtree'].nodes[export_settings['vtree'] + .nodes[obj_uuid].parent_uuid].blender_object.matrix_world + else: + parent_mat = export_settings['vtree'].nodes[export_settings['vtree'] + .nodes[obj_uuid].parent_uuid].matrix_world + else: + # Object animated is parented to a bone + blender_bone = export_settings['vtree'].nodes[export_settings['vtree'] + .nodes[obj_uuid].parent_bone_uuid].blender_bone + armature_object = export_settings['vtree'].nodes[export_settings['vtree'] + 
.nodes[export_settings['vtree'].nodes[obj_uuid].parent_bone_uuid].armature].blender_object + axis_basis_change = mathutils.Matrix( + ((1.0, 0.0, 0.0, 0.0), (0.0, 0.0, 1.0, 0.0), (0.0, -1.0, 0.0, 0.0), (0.0, 0.0, 0.0, 1.0))) + + parent_mat = armature_object.matrix_world @ blender_bone.matrix @ axis_basis_change + + # For object inside collection (at root), matrix world is already expressed regarding collection parent + if export_settings['vtree'].nodes[obj_uuid].parent_uuid is not None and export_settings['vtree'].nodes[ + export_settings['vtree'].nodes[obj_uuid].parent_uuid].blender_type == VExportNode.INST_COLLECTION: + parent_mat = mathutils.Matrix.Identity(4).freeze() + + if blender_obj: + if export_settings['vtree'].nodes[obj_uuid].blender_type != VExportNode.COLLECTION: + mat = parent_mat.inverted_safe() @ blender_obj.matrix_world + else: + mat = parent_mat.inverted_safe() + else: + eval = export_settings['vtree'].nodes[export_settings['vtree'].nodes[obj_uuid].parent_uuid].blender_object.evaluated_get( + depsgraph) + cpt_inst = 0 + for inst in depsgraph.object_instances: # use only as iterator + if inst.parent == eval: + if current_instance[export_settings['vtree'].nodes[obj_uuid].parent_uuid] == cpt_inst: + mat = inst.matrix_world.copy() + current_instance[export_settings['vtree'].nodes[obj_uuid].parent_uuid] += 1 + break + cpt_inst += 1 + + if obj_uuid not in data.keys(): + data[obj_uuid] = {} + + if export_settings['vtree'].nodes[obj_uuid].blender_type != VExportNode.COLLECTION: + if blender_obj and blender_obj.animation_data and blender_obj.animation_data.action \ + and export_settings['gltf_animation_mode'] in ["ACTIVE_ACTIONS", "ACTIONS", "BROADCAST"]: + key1, key2, key3, key4 = obj_uuid, blender_obj.animation_data.action.name, "matrix", None + elif export_settings['gltf_animation_mode'] in ["NLA_TRACKS"]: + key1, key2, key3, key4 = obj_uuid, action_name, "matrix", None + else: + # case of baking object. 
+ # There is no animation, so use uuid of object as key + key1, key2, key3, key4 = obj_uuid, obj_uuid, "matrix", None + else: + key1, key2, key3, key4 = obj_uuid, obj_uuid, "matrix", None + initialize_data_dict(data, key1, key2, key3, key4) + data[key1][key2][key3][key4][frame] = mat + + # Store data for all bones, if object is an armature + + if blender_obj and blender_obj.type == "ARMATURE": + armature_caching(data, obj_uuid, blender_obj, action_name, frame, export_settings) + + elif blender_obj is None: # GN instances + # case of baking object, for GN instances + # There is no animation, so use uuid of object as key + key1, key2, key3, key4 = obj_uuid, obj_uuid, "matrix", None + initialize_data_dict(data, key1, key2, key3, key4) + data[key1][key2][key3][key4][frame] = mat + + # Check SK animation here, as we are caching data + # This will avoid to have to do it again when exporting SK animation + cache_sk = False + if export_settings['gltf_morph_anim'] and blender_obj and blender_obj.type == "MESH" \ + and blender_obj.data is not None \ + and blender_obj.data.shape_keys is not None \ + and blender_obj.data.shape_keys.animation_data is not None \ + and blender_obj.data.shape_keys.animation_data.action is not None \ + and export_settings['gltf_animation_mode'] in ["ACTIVE_ACTIONS", "ACTIONS", "BROADCAST"]: + + key1, key2, key3, key4 = obj_uuid, blender_obj.data.shape_keys.animation_data.action.name, "sk", None + cache_sk = True + + elif export_settings['gltf_morph_anim'] and blender_obj and blender_obj.type == "MESH" \ + and blender_obj.data is not None \ + and blender_obj.data.shape_keys is not None \ + and blender_obj.data.shape_keys.animation_data is not None \ + and export_settings['gltf_animation_mode'] in ["NLA_TRACKS"]: + + key1, key2, key3, key4 = obj_uuid, action_name, "sk", None + cache_sk = True + + elif export_settings['gltf_morph_anim'] and blender_obj and blender_obj.type == "MESH" \ + and blender_obj.data is not None \ + and blender_obj.data.shape_keys is not None: + key1, key2, key3, key4 = obj_uuid, obj_uuid, "sk", None + cache_sk = True + + if cache_sk: + initialize_data_dict(data, key1, key2, key3, key4) + if key3 not in data[key1][key2].keys(): + data[key1][key2][key3] = {} + data[key1][key2][key3][key4] = {} + data[key1][key2][key3][key4][frame] = [ + k.value for k in get_sk_exported( + blender_obj.data.shape_keys.key_blocks)] + cache_sk = False + + # caching driver sk meshes + # This will avoid to have to do it again when exporting SK animation + if blender_obj and blender_obj.type == "ARMATURE": + sk_drivers = get_sk_drivers(obj_uuid, export_settings) + for dr_obj in sk_drivers: + cache_sk = False + driver_object = export_settings['vtree'].nodes[dr_obj].blender_object + if dr_obj not in data.keys(): + data[dr_obj] = {} + if blender_obj.animation_data and blender_obj.animation_data.action \ + and export_settings['gltf_animation_mode'] in ["ACTIVE_ACTIONS", "ACTIONS", "BROADCAST"]: + key1, key2, key3, key4 = dr_obj, obj_uuid + "_" + blender_obj.animation_data.action.name, "sk", None + cache_sk = True + elif blender_obj.animation_data \ + and export_settings['gltf_animation_mode'] in ["NLA_TRACKS"]: + key1, key2, key3, key4 = dr_obj, obj_uuid + "_" + action_name, "sk", None + cache_sk = True + else: + key1, key2, key3, key4 = dr_obj, obj_uuid + "_" + obj_uuid, "sk", None + cache_sk = True + + if cache_sk: + initialize_data_dict(data, key1, key2, key3, key4) + data[key1][key2][key3][key4][frame] = [ + k.value for k in get_sk_exported( + 
driver_object.data.shape_keys.key_blocks)] + cache_sk = False + + +def light_nodetree_caching(data, action_name, frame, export_settings): + # After caching materials, caching lights, for KHR_animation_pointer + for light in export_settings['KHR_animation_pointer']['lights'].keys(): + if len(export_settings['KHR_animation_pointer']['lights'][light]['paths']) == 0: + continue + + blender_light = [m for m in bpy.data.lights if id(m) == light][0] + if light not in data.keys(): + data[light] = {} + + if blender_light.node_tree and blender_light.node_tree.animation_data and blender_light.node_tree.animation_data.action \ + and export_settings['gltf_animation_mode'] in ["ACTIVE_ACTIONS", "ACTIONS"]: + key1, key2, key3 = light, blender_light.node_tree.animation_data.action.name, "value" + elif export_settings['gltf_animation_mode'] in ["NLA_TRACKS"]: + key1, key2, key3 = light, action_name, "value" + else: + # case of baking materials (scene export). + # There is no animation, so use id as key + key1, key2, key3 = light, light, "value" + + if key2 not in data[key1].keys(): + data[key1][key2] = {} + data[key1][key2][key3] = {} + for path in export_settings['KHR_animation_pointer']['lights'][light]['paths'].keys(): + data[key1][key2][key3][path] = {} + + for path in export_settings['KHR_animation_pointer']['lights'][light]['paths'].keys(): + val = blender_light.path_resolve(path) + if type(val).__name__ == "float": + data[key1][key2][key3][path][frame] = val + else: + data[key1][key2][key3][path][frame] = list(val) + + +def light_caching(data, action_name, frame, export_settings): + # After caching materials, caching lights, for KHR_animation_pointer + for light in export_settings['KHR_animation_pointer']['lights'].keys(): + if len(export_settings['KHR_animation_pointer']['lights'][light]['paths']) == 0: + continue + + blender_light = [m for m in bpy.data.lights if id(m) == light][0] + if light not in data.keys(): + data[light] = {} + + if blender_light and blender_light.animation_data and blender_light.animation_data.action \ + and export_settings['gltf_animation_mode'] in ["ACTIVE_ACTIONS", "ACTIONS"]: + key1, key2, key3 = light, blender_light.animation_data.action.name, "value" + elif export_settings['gltf_animation_mode'] in ["NLA_TRACKS"]: + key1, key2, key3 = light, action_name, "value" + else: + # case of baking materials (scene export). 
+ # There is no animation, so use id as key + key1, key2, key3 = light, light, "value" + + if key2 not in data[key1].keys(): + data[key1][key2] = {} + data[key1][key2][key3] = {} + for path in export_settings['KHR_animation_pointer']['lights'][light]['paths'].keys(): + data[key1][key2][key3][path] = {} + + for path in export_settings['KHR_animation_pointer']['lights'][light]['paths'].keys(): + # Manage special case for innerConeAngle because it requires spot_size & spot_blend + if export_settings['KHR_animation_pointer']['lights'][light]['paths'][path]['path'] == "/extensions/KHR_lights_punctual/lights/XXX/spot.innerConeAngle": + val = blender_light.path_resolve(path) + val_size = blender_light.path_resolve( + export_settings['KHR_animation_pointer']['lights'][light]['paths'][path]['additional_path']) + data[key1][key2][key3][path][frame] = (val_size * 0.5) - ((val_size * 0.5) * val) + else: + # classic case + val = blender_light.path_resolve(path) + if type(val).__name__ == "float": + data[key1][key2][key3][path][frame] = val + else: + # When color is coming from a node, it is 4 values (RGBA), so need to convert it to 3 values (RGB) + if export_settings['KHR_animation_pointer']['lights'][light]['paths'][path]['length'] == 3 and len( + val) == 4: + val = val[:3] + data[key1][key2][key3][path][frame] = list(val) + + +def camera_caching(data, action_name, frame, export_settings): + # After caching lights, caching cameras, for KHR_animation_pointer + for cam in export_settings['KHR_animation_pointer']['cameras'].keys(): + if len(export_settings['KHR_animation_pointer']['cameras'][cam]['paths']) == 0: + continue + + blender_camera = [m for m in bpy.data.cameras if id(m) == cam][0] + if cam not in data.keys(): + data[cam] = {} + + if blender_camera and blender_camera.animation_data and blender_camera.animation_data.action \ + and export_settings['gltf_animation_mode'] in ["ACTIVE_ACTIONS", "ACTIONS"]: + key1, key2, key3 = cam, blender_camera.animation_data.action.name, "value" + elif export_settings['gltf_animation_mode'] in ["NLA_TRACKS"]: + key1, key2, key3 = cam, action_name, "value" + else: + # case of baking materials (scene export). 
+ # There is no animation, so use id as key + key1, key2, key3 = cam, cam, "value" + + if key2 not in data[key1].keys(): + data[key1][key2] = {} + data[key1][key2][key3] = {} + for path in export_settings['KHR_animation_pointer']['cameras'][cam]['paths'].keys(): + data[key1][key2][key3][path] = {} + + for path in export_settings['KHR_animation_pointer']['cameras'][cam]['paths'].keys(): + _render = bpy.context.scene.render + width = _render.pixel_aspect_x * _render.resolution_x + height = _render.pixel_aspect_y * _render.resolution_y + del _render + # Manage special case for yvof because it requires sensor_fit, aspect ratio, angle + if export_settings['KHR_animation_pointer']['cameras'][cam]['paths'][path]['path'] == "/cameras/XXX/perspective/yfov": + val = yvof_blender_to_gltf(blender_camera.angle, width, height, blender_camera.sensor_fit) + data[key1][key2][key3][path][frame] = val + # Manage special case for xmag because it requires ortho_scale & scene data + elif export_settings['KHR_animation_pointer']['cameras'][cam]['paths'][path]['path'] == "/cameras/XXX/orthographic/xmag": + val = blender_camera.ortho_scale + data[key1][key2][key3][path][frame] = val * (width / max(width, height)) / 2.0 + # Manage special case for ymag because it requires ortho_scale & scene data + elif export_settings['KHR_animation_pointer']['cameras'][cam]['paths'][path]['path'] == "/cameras/XXX/orthographic/ymag": + val = blender_camera.ortho_scale + data[key1][key2][key3][path][frame] = val * (height / max(width, height)) / 2.0 + else: + # classic case + val = blender_camera.path_resolve(path) + if type(val).__name__ == "float": + data[key1][key2][key3][path][frame] = val + else: + data[key1][key2][key3][path][frame] = list(val) diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/object/gltf2_blender_gather_object_action_sampled.py b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/object/gltf2_blender_gather_object_action_sampled.py new file mode 100644 index 00000000000..1a8e6821aa6 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/object/gltf2_blender_gather_object_action_sampled.py @@ -0,0 +1,78 @@ +# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy +import typing +from ......io.com import gltf2_io +from ......io.exp.gltf2_io_user_extensions import export_user_extensions +from .....com.gltf2_blender_extras import generate_extras +from ...fcurves.gltf2_blender_gather_fcurves_sampler import gather_animation_fcurves_sampler +from .gltf2_blender_gather_object_channels import gather_object_sampled_channels + + +def gather_action_object_sampled(object_uuid: str, + blender_action: typing.Optional[bpy.types.Action], + cache_key: str, + export_settings): + + extra_samplers = [] + + # If no animation in file, no need to bake + if len(bpy.data.actions) == 0: + return None, extra_samplers + + channels, extra_channels = __gather_channels( + object_uuid, blender_action.name if blender_action else cache_key, export_settings) + animation = gltf2_io.Animation( + channels=channels, + extensions=None, + extras=__gather_extras(blender_action, export_settings), + name=__gather_name(object_uuid, blender_action, cache_key, export_settings), + samplers=[] + ) + + if export_settings['gltf_export_extra_animations']: + for chan in [chan for chan in extra_channels.values() if len(chan['properties']) != 0]: + for channel_group_name, channel_group in chan['properties'].items(): + + # No glTF 
channel here, as we don't have any target + # Trying to retrieve sampler directly + sampler = gather_animation_fcurves_sampler( + object_uuid, tuple(channel_group), None, None, True, export_settings) + if sampler is not None: + extra_samplers.append((channel_group_name, sampler, "OBJECT", None)) + + if not animation.channels: + return None, extra_samplers + + blender_object = export_settings['vtree'].nodes[object_uuid].blender_object + export_user_extensions( + 'animation_action_object_sampled', + export_settings, + animation, + blender_object, + blender_action, + cache_key) + + return animation, extra_samplers + + +def __gather_name(object_uuid: str, blender_action: typing.Optional[bpy.types.Action], cache_key: str, export_settings): + if blender_action: + return blender_action.name + elif cache_key == object_uuid: + return export_settings['vtree'].nodes[object_uuid].blender_object.name + else: + return cache_key + + +def __gather_channels(object_uuid: str, blender_action_name: str, + export_settings) -> typing.List[gltf2_io.AnimationChannel]: + return gather_object_sampled_channels(object_uuid, blender_action_name, export_settings) + + +def __gather_extras(blender_action, export_settings): + if export_settings['gltf_extras']: + return generate_extras(blender_action) if blender_action else None + return None diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/object/gltf2_blender_gather_object_channel_target.py b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/object/gltf2_blender_gather_object_channel_target.py new file mode 100644 index 00000000000..c400f69ba5f --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/object/gltf2_blender_gather_object_channel_target.py @@ -0,0 +1,51 @@ +# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +from ......io.exp.gltf2_io_user_extensions import export_user_extensions +from ......io.com import gltf2_io +from ....gltf2_blender_gather_cache import cached + + +@cached +def gather_object_sampled_channel_target( + obj_uuid: str, + channel: str, + export_settings +) -> gltf2_io.AnimationChannelTarget: + + blender_object = export_settings['vtree'].nodes[obj_uuid].blender_object + + animation_channel_target = gltf2_io.AnimationChannelTarget( + extensions=__gather_extensions(obj_uuid, channel, export_settings), + extras=__gather_extras(obj_uuid, channel, export_settings), + node=__gather_node(obj_uuid, export_settings), + path=__gather_path(channel, export_settings) + ) + + export_user_extensions('gather_animation_object_sampled_channel_target_hook', + export_settings, + blender_object, + channel) + + return animation_channel_target + + +def __gather_extensions(armature_uuid, channel, export_settings): + return None + + +def __gather_extras(armature_uuid, channel, export_settings): + return None + + +def __gather_node(obj_uuid: str, export_settings): + return export_settings['vtree'].nodes[obj_uuid].node + + +def __gather_path(channel, export_settings): + return { + "location": "translation", + "rotation_quaternion": "rotation", + "scale": "scale" + }.get(channel) diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/object/gltf2_blender_gather_object_channels.py b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/object/gltf2_blender_gather_object_channels.py new file mode 100644 index 00000000000..038d0973b84 --- /dev/null +++ 
b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/object/gltf2_blender_gather_object_channels.py @@ -0,0 +1,125 @@ +# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy +import typing +from ......io.com import gltf2_io +from ......io.exp.gltf2_io_user_extensions import export_user_extensions +from ......blender.com.gltf2_blender_conversion import get_gltf_interpolation +from .....com.gltf2_blender_conversion import get_target, get_channel_from_target +from ....gltf2_blender_gather_cache import cached +from ...fcurves.gltf2_blender_gather_fcurves_channels import get_channel_groups +from .gltf2_blender_gather_object_sampler import gather_object_sampled_animation_sampler +from .gltf2_blender_gather_object_channel_target import gather_object_sampled_channel_target + + +def gather_object_sampled_channels(object_uuid: str, blender_action_name: str, + export_settings) -> typing.List[gltf2_io.AnimationChannel]: + channels = [] + extra_channels = {} + + # Bake situation does not export any extra animation channels, as we bake TRS + weights on Track or scene level, without direct + # Access to fcurve and action data + + list_of_animated_channels = {} + if object_uuid != blender_action_name and blender_action_name in bpy.data.actions: + # Not bake situation + channels_animated, to_be_sampled, extra_channels = get_channel_groups( + object_uuid, bpy.data.actions[blender_action_name], export_settings) + for chan in [chan for chan in channels_animated.values() if chan['bone'] is None]: + for prop in chan['properties'].keys(): + list_of_animated_channels[get_channel_from_target(get_target(prop))] = get_gltf_interpolation( + chan['properties'][prop][0].keyframe_points[0].interpolation) # Could be exported without sampling : keep interpolation + + for _, _, chan_prop, _ in [chan for chan in to_be_sampled if chan[1] == "OBJECT"]: + list_of_animated_channels[chan_prop] = get_gltf_interpolation( + "LINEAR") # if forced to be sampled, keep LINEAR interpolation + + for p in ["location", "rotation_quaternion", "scale"]: + channel = gather_sampled_object_channel( + object_uuid, + p, + blender_action_name, + p in list_of_animated_channels.keys(), + list_of_animated_channels[p] if p in list_of_animated_channels.keys() else get_gltf_interpolation("LINEAR"), + export_settings + ) + if channel is not None: + channels.append(channel) + + blender_object = export_settings['vtree'].nodes[object_uuid].blender_object + export_user_extensions('animation_gather_object_channel', export_settings, blender_object, blender_action_name) + + return channels if len(channels) > 0 else None, extra_channels + + +@cached +def gather_sampled_object_channel( + obj_uuid: str, + channel: str, + action_name: str, + node_channel_is_animated: bool, + node_channel_interpolation: str, + export_settings +): + + __target = __gather_target(obj_uuid, channel, export_settings) + if __target.path is not None: + sampler = __gather_sampler( + obj_uuid, + channel, + action_name, + node_channel_is_animated, + node_channel_interpolation, + export_settings) + + if sampler is None: + # After check, no need to animate this node for this channel + return None + + animation_channel = gltf2_io.AnimationChannel( + extensions=None, + extras=None, + sampler=sampler, + target=__target + ) + + export_user_extensions('gather_animation_channel_hook', + export_settings, + animation_channel, + channel, + export_settings['vtree'].nodes[obj_uuid].blender_object, + node_channel_is_animated + ) 
+ + return animation_channel + return None + + +def __gather_target( + obj_uuid: str, + channel: str, + export_settings +): + + return gather_object_sampled_channel_target( + obj_uuid, channel, export_settings) + + +def __gather_sampler( + obj_uuid: str, + channel: str, + action_name: str, + node_channel_is_animated: bool, + node_channel_interpolation: str, + export_settings): + + return gather_object_sampled_animation_sampler( + obj_uuid, + channel, + action_name, + node_channel_is_animated, + node_channel_interpolation, + export_settings + ) diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/object/gltf2_blender_gather_object_keyframes.py b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/object/gltf2_blender_gather_object_keyframes.py new file mode 100644 index 00000000000..0295127fa42 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/object/gltf2_blender_gather_object_keyframes.py @@ -0,0 +1,86 @@ +# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +from ....gltf2_blender_gather_tree import VExportNode +from ....gltf2_blender_gather_cache import cached +from ...gltf2_blender_gather_keyframes import Keyframe +from ..gltf2_blender_gather_animation_sampling_cache import get_cache_data + + +@cached +def gather_object_sampled_keyframes( + obj_uuid: str, + channel: str, + action_name: str, + node_channel_is_animated: bool, + export_settings +): + + start_frame = export_settings['ranges'][obj_uuid][action_name]['start'] + end_frame = export_settings['ranges'][obj_uuid][action_name]['end'] + + keyframes = [] + + frame = start_frame + step = export_settings['gltf_frame_step'] + + while frame <= end_frame: + key = Keyframe(None, frame, channel) + + mat = get_cache_data( + 'matrix', + obj_uuid, + None, + action_name, + frame, + step, + export_settings) + + trans, rot, sca = mat.decompose() + key.value_total = { + "location": trans, + "rotation_quaternion": rot, + "scale": sca, + }[channel] + + keyframes.append(key) + frame += step + + if len(keyframes) == 0: + # For example, option CROP negative frames, but all are negatives + return None + + if not export_settings['gltf_optimize_animation']: + # For objects, if all values are the same, keeping only if changing values, or if user want to keep data + if node_channel_is_animated is True: + return keyframes # Always keeping + else: + # baked object + if export_settings['gltf_optimize_animation_keep_object'] is False: + # Not keeping if not changing property + cst = fcurve_is_constant(keyframes) + return None if cst is True else keyframes + else: + # Keep data, as requested by user. 
We keep all samples, as user don't want to optimize + return keyframes + + else: + + # For objects, if all values are the same, we keep only first and last + cst = fcurve_is_constant(keyframes) + if node_channel_is_animated is True: + return [keyframes[0], keyframes[-1]] if cst is True and len(keyframes) >= 2 else keyframes + else: + # baked object + # Not keeping if not changing property if user decided to not keep + if export_settings['gltf_optimize_animation_keep_object'] is False: + return None if cst is True else keyframes + else: + # Keep at least 2 keyframes if data are not changing + return [keyframes[0], keyframes[-1]] if cst is True and len(keyframes) >= 2 else keyframes + + +def fcurve_is_constant(keyframes): + return all([j < 0.0001 for j in np.ptp([[k.value[i] for i in range(len(keyframes[0].value))] for k in keyframes], axis=0)]) diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/object/gltf2_blender_gather_object_sampler.py b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/object/gltf2_blender_gather_object_sampler.py new file mode 100644 index 00000000000..89d108d9426 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/object/gltf2_blender_gather_object_sampler.py @@ -0,0 +1,171 @@ +# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy +import mathutils +from ......io.com import gltf2_io +from ......io.com import gltf2_io_constants +from ......io.exp import gltf2_io_binary_data +from ......io.exp.gltf2_io_user_extensions import export_user_extensions +from .....com.gltf2_blender_data_path import get_target_object_path +from .....com import gltf2_blender_math +from ....gltf2_blender_gather_tree import VExportNode +from ....gltf2_blender_gather_cache import cached +from ....gltf2_blender_gather_accessors import gather_accessor +from .gltf2_blender_gather_object_keyframes import gather_object_sampled_keyframes + + +@cached +def gather_object_sampled_animation_sampler( + obj_uuid: str, + channel: str, + action_name: str, + node_channel_is_animated: bool, + node_channel_interpolation: str, + export_settings +): + + keyframes = __gather_keyframes( + obj_uuid, + channel, + action_name, + node_channel_is_animated, + export_settings) + + if keyframes is None: + # After check, no need to animate this node for this channel + return None + + # Now we are raw input/output, we need to convert to glTF data + input, output = __convert_keyframes(obj_uuid, channel, keyframes, action_name, export_settings) + + sampler = gltf2_io.AnimationSampler( + extensions=None, + extras=None, + input=input, + interpolation=__gather_interpolation( + node_channel_is_animated, + node_channel_interpolation, + keyframes, + export_settings), + output=output) + + blender_object = export_settings['vtree'].nodes[obj_uuid].blender_object + export_user_extensions('animation_gather_object_sampler', export_settings, blender_object, action_name) + + return sampler + + +def __gather_keyframes( + obj_uuid: str, + channel: str, + action_name: str, + node_channel_is_animated: bool, + export_settings +): + + keyframes = gather_object_sampled_keyframes( + obj_uuid, + channel, + action_name, + node_channel_is_animated, + export_settings + ) + + return keyframes + + +def __convert_keyframes(obj_uuid: str, channel: str, keyframes, action_name: str, export_settings): + + # Sliding can come from: + # - option SLIDE for negative frames + # - option to start animation at frame 0 for looping 
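As a rough illustration of the slide offset applied below (hypothetical numbers, not taken from this patch): at 24 fps with fps_base 1.0, a keyframe sampled at frame 58 with a recorded slide of 10 frames is shifted to frame 48 and exported at 48 / (24 * 1.0) = 2.0 seconds; with no slide it would have been exported at 58 / 24, roughly 2.417 seconds.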
+ if obj_uuid in export_settings['slide'].keys() and action_name in export_settings['slide'][obj_uuid].keys(): + for k in keyframes: + k.frame += -export_settings['slide'][obj_uuid][action_name] + k.seconds = k.frame / (bpy.context.scene.render.fps * bpy.context.scene.render.fps_base) + + times = [k.seconds for k in keyframes] + input = gather_accessor( + gltf2_io_binary_data.BinaryData.from_list(times, gltf2_io_constants.ComponentType.Float), + gltf2_io_constants.ComponentType.Float, + len(times), + tuple([max(times)]), + tuple([min(times)]), + gltf2_io_constants.DataType.Scalar, + export_settings) + + is_yup = export_settings['gltf_yup'] + + object_path = get_target_object_path(channel) + transform = mathutils.Matrix.Identity(4) + + need_rotation_correction = ( + export_settings['gltf_cameras'] and export_settings['vtree'].nodes[obj_uuid].blender_type == VExportNode.CAMERA) or ( + export_settings['gltf_lights'] and export_settings['vtree'].nodes[obj_uuid].blender_type == VExportNode.LIGHT) + + values = [] + fps = (bpy.context.scene.render.fps * bpy.context.scene.render.fps_base) + for keyframe in keyframes: + + # Transform the data and build gltf control points + value = gltf2_blender_math.transform(keyframe.value, channel, transform, need_rotation_correction) + if is_yup: + value = gltf2_blender_math.swizzle_yup(value, channel) + keyframe_value = gltf2_blender_math.mathutils_to_gltf(value) + + # No tangents when baking, we are using LINEAR interpolation + + values += keyframe_value + + # store the keyframe data in a binary buffer + component_type = gltf2_io_constants.ComponentType.Float + data_type = gltf2_io_constants.DataType.vec_type_from_num(len(keyframes[0].value)) + + output = gltf2_io.Accessor( + buffer_view=gltf2_io_binary_data.BinaryData.from_list(values, component_type), + byte_offset=None, + component_type=component_type, + count=len(values) // gltf2_io_constants.DataType.num_elements(data_type), + extensions=None, + extras=None, + max=None, + min=None, + name=None, + normalized=None, + sparse=None, + type=data_type + ) + + return input, output + + +def __gather_interpolation( + node_channel_is_animated: bool, + node_channel_interpolation: str, + keyframes, + export_settings): + + if len(keyframes) > 2: + # keep STEP as STEP, other become LINEAR + return { + "STEP": "STEP" + }.get(node_channel_interpolation, "LINEAR") + elif len(keyframes) == 1: + if node_channel_is_animated is False: + return "STEP" + elif node_channel_interpolation == "CUBICSPLINE": + return "LINEAR" # We can't have a single keyframe with CUBICSPLINE + else: + return node_channel_interpolation + else: + # If we only have 2 keyframes, set interpolation to STEP if baked + if node_channel_is_animated is False: + # baked => We have first and last keyframe + return "STEP" + else: + if keyframes[0].value == keyframes[1].value: + return "STEP" + else: + return "LINEAR" diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/shapekeys/gltf2_blender_gather_sk_action_sampled.py b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/shapekeys/gltf2_blender_gather_sk_action_sampled.py new file mode 100644 index 00000000000..cfc714dd1e9 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/shapekeys/gltf2_blender_gather_sk_action_sampled.py @@ -0,0 +1,62 @@ +# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy +import typing +from ......io.exp.gltf2_io_user_extensions import 
export_user_extensions +from ......io.com import gltf2_io +from .....com.gltf2_blender_extras import generate_extras +from .gltf2_blender_gather_sk_channels import gather_sk_sampled_channels + + +def gather_action_sk_sampled(object_uuid: str, + blender_action: typing.Optional[bpy.types.Action], + cache_key: str, + export_settings): + + # If no animation in file, no need to bake + if len(bpy.data.actions) == 0: + return None + + animation = gltf2_io.Animation( + channels=__gather_channels(object_uuid, blender_action.name if blender_action else cache_key, export_settings), + extensions=None, + extras=__gather_extras(blender_action, export_settings), + name=__gather_name(object_uuid, blender_action, cache_key, export_settings), + samplers=[] + ) + + if not animation.channels: + return None + + blender_object = export_settings['vtree'].nodes[object_uuid].blender_object + export_user_extensions( + 'animation_action_sk_sampled', + export_settings, + animation, + blender_object, + blender_action, + cache_key) + + return animation + + +def __gather_name(object_uuid: str, blender_action: typing.Optional[bpy.types.Action], cache_key: str, export_settings): + if blender_action: + return blender_action.name + elif object_uuid == cache_key: + return export_settings['vtree'].nodes[object_uuid].blender_object.name + else: + return cache_key + + +def __gather_channels(object_uuid: str, blender_action_name: str, + export_settings) -> typing.List[gltf2_io.AnimationChannel]: + return gather_sk_sampled_channels(object_uuid, blender_action_name, export_settings) + + +def __gather_extras(blender_action, export_settings): + if export_settings['gltf_extras']: + return generate_extras(blender_action) if blender_action else None + return None diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/shapekeys/gltf2_blender_gather_sk_channel_target.py b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/shapekeys/gltf2_blender_gather_sk_channel_target.py new file mode 100644 index 00000000000..19ad610c7cf --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/shapekeys/gltf2_blender_gather_sk_channel_target.py @@ -0,0 +1,38 @@ +# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +from ......io.com import gltf2_io +from ......io.exp.gltf2_io_user_extensions import export_user_extensions +from ....gltf2_blender_gather_cache import cached + + +@cached +def gather_sk_sampled_channel_target( + obj_uuid: str, + export_settings +) -> gltf2_io.AnimationChannelTarget: + + animation_channel_target = gltf2_io.AnimationChannelTarget( + extensions=__gather_extensions(obj_uuid, export_settings), + extras=__gather_extras(obj_uuid, export_settings), + node=__gather_node(obj_uuid, export_settings), + path='weights' + ) + + blender_object = export_settings['vtree'].nodes[obj_uuid].blender_object + export_user_extensions('animation_action_sk_sampled_target', export_settings, blender_object) + + return animation_channel_target + + +def __gather_extensions(armature_uuid, export_settings): + return None + + +def __gather_extras(armature_uuid, export_settings): + return None + + +def __gather_node(obj_uuid: str, export_settings): + return export_settings['vtree'].nodes[obj_uuid].node diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/shapekeys/gltf2_blender_gather_sk_channels.py b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/shapekeys/gltf2_blender_gather_sk_channels.py new 
file mode 100644 index 00000000000..e588934d6a7 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/shapekeys/gltf2_blender_gather_sk_channels.py @@ -0,0 +1,75 @@ +# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +from ......io.exp.gltf2_io_user_extensions import export_user_extensions +from ......io.com import gltf2_io +from ....gltf2_blender_gather_cache import cached +from .gltf2_blender_gather_sk_channel_target import gather_sk_sampled_channel_target +from .gltf2_blender_gather_sk_sampler import gather_sk_sampled_animation_sampler + + +def gather_sk_sampled_channels( + object_uuid: str, + blender_action_name: str, + export_settings): + + # Only 1 channel when exporting shape keys + + channels = [] + + channel = gather_sampled_sk_channel( + object_uuid, + blender_action_name, + export_settings + ) + + if channel is not None: + channels.append(channel) + + blender_object = export_settings['vtree'].nodes[object_uuid].blender_object + export_user_extensions('animation_gather_sk_channels', export_settings, blender_object, blender_action_name) + + return channels if len(channels) > 0 else None + + +@cached +def gather_sampled_sk_channel( + obj_uuid: str, + action_name: str, + export_settings +): + + __target = __gather_target(obj_uuid, export_settings) + if __target.path is not None: + sampler = __gather_sampler(obj_uuid, action_name, export_settings) + + if sampler is None: + # After check, no need to animate this node for this channel + return None + + animation_channel = gltf2_io.AnimationChannel( + extensions=None, + extras=None, + sampler=sampler, + target=__target + ) + + blender_object = export_settings['vtree'].nodes[obj_uuid].blender_object + export_user_extensions('animation_gather_sk_channel', export_settings, blender_object, action_name) + + return animation_channel + return None + + +def __gather_target(obj_uuid: str, export_settings): + return gather_sk_sampled_channel_target( + obj_uuid, export_settings) + + +def __gather_sampler(obj_uuid: str, action_name: str, export_settings): + return gather_sk_sampled_animation_sampler( + obj_uuid, + action_name, + export_settings + ) diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/shapekeys/gltf2_blender_gather_sk_keyframes.py b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/shapekeys/gltf2_blender_gather_sk_keyframes.py new file mode 100644 index 00000000000..7f86648b232 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/shapekeys/gltf2_blender_gather_sk_keyframes.py @@ -0,0 +1,109 @@ +# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy +import typing +import numpy as np +from ......blender.com.gltf2_blender_data_path import get_sk_exported +from ....gltf2_blender_gather_cache import cached +from ...gltf2_blender_gather_keyframes import Keyframe +from ...fcurves.gltf2_blender_gather_fcurves_channels import get_channel_groups +from ...fcurves.gltf2_blender_gather_fcurves_keyframes import gather_non_keyed_values +from ..gltf2_blender_gather_animation_sampling_cache import get_cache_data + + +@cached +def gather_sk_sampled_keyframes(obj_uuid, + action_name, + export_settings): + + start_frame = export_settings['ranges'][obj_uuid][action_name]['start'] + end_frame = export_settings['ranges'][obj_uuid][action_name]['end'] + + keyframes = [] + + frame = start_frame + step = 
export_settings['gltf_frame_step'] + blender_obj = export_settings['vtree'].nodes[obj_uuid].blender_object + + if export_settings['gltf_optimize_armature_disable_viewport'] is True: + # Using this option, we miss the drivers :( + # No solution exists for now. In the future, we should be able to copy a driver + if action_name in bpy.data.actions: + channel_group, _ = get_channel_groups( + obj_uuid, bpy.data.actions[action_name], export_settings, no_sample_option=True) + elif blender_obj.data.shape_keys.animation_data and blender_obj.data.shape_keys.animation_data.action: + channel_group, _ = get_channel_groups( + obj_uuid, blender_obj.data.shape_keys.animation_data.action, export_settings, no_sample_option=True) + else: + channel_group = {} + channels = [None] * len(get_sk_exported(blender_obj.data.shape_keys.key_blocks)) + + # One day, if we will be able to bake drivers or evaluate it the right + # way, we can add here the driver fcurves + + for chan in channel_group.values(): + channels = chan['properties']['value'] + break + + non_keyed_values = gather_non_keyed_values(obj_uuid, channels, None, export_settings) + + while frame <= end_frame: + key = Keyframe(channels, frame, None) + key.value = [c.evaluate(frame) for c in channels if c is not None] + # Complete key with non keyed values, if needed + if len([c for c in channels if c is not None]) != key.get_target_len(): + complete_key(key, non_keyed_values) + + keyframes.append(key) + frame += step + + else: + # Full bake, we will go frame by frame. This can take time (more than using evaluate) + + while frame <= end_frame: + key = Keyframe([None] * (len(get_sk_exported(blender_obj.data.shape_keys.key_blocks))), frame, 'value') + key.value_total = get_cache_data( + 'sk', + obj_uuid, + None, + action_name, + frame, + step, + export_settings + ) + + keyframes.append(key) + frame += step + + if len(keyframes) == 0: + # For example, option CROP negative frames, but all are negatives + return None + + # In case SK has only basis + if any([len(k.value) == 0 for k in keyframes]): + return None + + if not export_settings['gltf_optimize_animation']: + return keyframes + + # For sk, if all values are the same, we keep only first and last + cst = fcurve_is_constant(keyframes) + return [keyframes[0], keyframes[-1]] if cst is True and len(keyframes) >= 2 else keyframes + + +def fcurve_is_constant(keyframes): + return all([j < 0.0001 for j in np.ptp([[k.value[i] for i in range(len(keyframes[0].value))] for k in keyframes], axis=0)]) + +# TODO de-duplicate, but import issue??? 
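A minimal sketch of what the fcurve_is_constant() helper above computes, using made-up sample values: it arranges the sampled values as a frames-by-channels array, takes the per-channel peak-to-peak range with numpy, and treats the curve as constant when every range stays below the 1e-4 tolerance.

    import numpy as np

    # Hypothetical shape-key values sampled at three frames, two exported channels.
    samples = [[0.5, 0.25],
               [0.5, 0.25],
               [0.50000005, 0.25]]
    per_channel_range = np.ptp(samples, axis=0)               # max - min for each channel
    is_constant = all(r < 0.0001 for r in per_channel_range)  # True: keep only first/last keyframe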
+ + +def complete_key(key: Keyframe, non_keyed_values: typing.Tuple[typing.Optional[float]]): + """ + Complete keyframe with non keyed values + """ + for i in range(0, key.get_target_len()): + if i in key.get_indices(): + continue # this is a keyed array_index or a SK animated + key.set_value_index(i, non_keyed_values[i]) diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/shapekeys/gltf2_blender_gather_sk_sampler.py b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/shapekeys/gltf2_blender_gather_sk_sampler.py new file mode 100644 index 00000000000..f48a447f344 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/shapekeys/gltf2_blender_gather_sk_sampler.py @@ -0,0 +1,112 @@ +# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy +from ......io.com import gltf2_io, gltf2_io_constants +from ......io.exp import gltf2_io_binary_data +from ......io.exp.gltf2_io_user_extensions import export_user_extensions +from .....com.gltf2_blender_math import mathutils_to_gltf +from ....gltf2_blender_gather_accessors import gather_accessor +from .gltf2_blender_gather_sk_keyframes import gather_sk_sampled_keyframes + + +def gather_sk_sampled_animation_sampler( + obj_uuid, + action_name, + export_settings +): + + keyframes = __gather_keyframes( + obj_uuid, + action_name, + export_settings) + + if keyframes is None: + # After check, no need to animate this node for this channel + return None + + # Now we are raw input/output, we need to convert to glTF data + input, output = __convert_keyframes(obj_uuid, keyframes, action_name, export_settings) + + sampler = gltf2_io.AnimationSampler( + extensions=None, + extras=None, + input=input, + interpolation=__gather_interpolation(export_settings), + output=output + ) + + blender_object = export_settings['vtree'].nodes[obj_uuid].blender_object + export_user_extensions('animation_gather_sk_channels', export_settings, blender_object, action_name) + + return sampler + + +def __gather_keyframes( + obj_uuid, + action_name, + export_settings): + + keyframes = gather_sk_sampled_keyframes( + obj_uuid, + action_name, + export_settings + ) + + if keyframes is None: + # After check, no need to animation this node + return None + + return keyframes + + +def __convert_keyframes(obj_uuid, keyframes, action_name: str, export_settings): + + # Sliding can come from: + # - option SLIDE for negative frames + # - option to start animation at frame 0 for looping + if obj_uuid in export_settings['slide'].keys() and action_name in export_settings['slide'][obj_uuid].keys(): + for k in keyframes: + k.frame += -export_settings['slide'][obj_uuid][action_name] + k.seconds = k.frame / (bpy.context.scene.render.fps * bpy.context.scene.render.fps_base) + + times = [k.seconds for k in keyframes] + input = gather_accessor( + gltf2_io_binary_data.BinaryData.from_list(times, gltf2_io_constants.ComponentType.Float), + gltf2_io_constants.ComponentType.Float, + len(times), + tuple([max(times)]), + tuple([min(times)]), + gltf2_io_constants.DataType.Scalar, + export_settings) + + values = [] + for keyframe in keyframes: + keyframe_value = mathutils_to_gltf(keyframe.value) + values += keyframe_value + + component_type = gltf2_io_constants.ComponentType.Float + data_type = gltf2_io_constants.DataType.Scalar + + output = gltf2_io.Accessor( + buffer_view=gltf2_io_binary_data.BinaryData.from_list(values, component_type), + byte_offset=None, + component_type=component_type, + 
count=len(values) // gltf2_io_constants.DataType.num_elements(data_type), + extensions=None, + extras=None, + max=None, + min=None, + name=None, + normalized=None, + sparse=None, + type=data_type + ) + + return input, output + + +def __gather_interpolation(export_settings): + # TODO: check if the SK was animated with CONSTANT + return 'LINEAR' diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_export.py b/scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_export.py new file mode 100755 index 00000000000..40ac95ce241 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_export.py @@ -0,0 +1,402 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import os +import subprocess +import time + +import bpy +import sys +import traceback + +from ...io.exp import gltf2_io_export +from ...io.exp import gltf2_io_draco_compression_extension +from ...io.exp.gltf2_io_user_extensions import export_user_extensions +from ..com import gltf2_blender_json +from . import gltf2_blender_gather +from .gltf2_blender_gltf2_exporter import GlTF2Exporter + + +def save(context, export_settings): + """Start the glTF 2.0 export and saves to content either to a .gltf or .glb file.""" + if bpy.context.active_object is not None: + if bpy.context.active_object.mode != "OBJECT": # For linked object, you can't force OBJECT mode + bpy.ops.object.mode_set(mode='OBJECT') + + original_frame = bpy.context.scene.frame_current + if not export_settings['gltf_current_frame']: + bpy.context.scene.frame_set(0) + + __notify_start(context, export_settings) + start_time = time.time() + pre_export_callbacks = export_settings["pre_export_callbacks"] + for callback in pre_export_callbacks: + callback(export_settings) + + json, buffer = __export(export_settings) + + post_export_callbacks = export_settings["post_export_callbacks"] + for callback in post_export_callbacks: + callback(export_settings) + __write_file(json, buffer, export_settings) + + end_time = time.time() + __notify_end(context, end_time - start_time, export_settings) + + if not export_settings['gltf_current_frame']: + bpy.context.scene.frame_set(int(original_frame)) + + return {'FINISHED'} + + +def __export(export_settings): + exporter = GlTF2Exporter(export_settings) + __gather_gltf(exporter, export_settings) + buffer = __create_buffer(exporter, export_settings) + exporter.finalize_images() + + export_user_extensions('gather_gltf_extensions_hook', export_settings, exporter.glTF) + exporter.traverse_extensions() + + # Detect extensions that are animated + # If they are not animated, we can remove the extension if it is empty (all default values), and if default values don't change the shader + # But if they are animated, we need to keep the extension, even if it is empty + __detect_animated_extensions(exporter.glTF.to_dict(), export_settings) + + # now that addons possibly add some fields in json, we can fix if needed + # Also deleting no more needed extensions, based on what we detected above + json = __fix_json(exporter.glTF.to_dict(), export_settings) + + # IOR is a special case where we need to export only if some other extensions are used + __check_ior(json, export_settings) + + # Volum is a special case where we need to export only if transmission is used + __check_volume(json, export_settings) + + __manage_extension_declaration(json, export_settings) + + # We need to run it again, as we can now have some "extensions" dict that are empty + # Or 
extensionsUsed / extensionsRequired that are empty + # (because we removed some extensions) + json = __fix_json(json, export_settings) + + # Convert additional data if needed + if export_settings['gltf_unused_textures'] is True: + additional_json_textures = __fix_json([i.to_dict() + for i in exporter.additional_data.additional_textures], export_settings) + + # Now that we have the final json, we can add the additional data + # We can not do that for all people, because we don't want this extra to become "a standard" + # So let's use the "extras" field filled by a user extension + + export_user_extensions('gather_gltf_additional_textures_hook', export_settings, json, additional_json_textures) + + # if len(additional_json_textures) > 0: + # if json.get('extras') is None: + # json['extras'] = {} + # json['extras']['additionalTextures'] = additional_json_textures + + return json, buffer + + +def __check_ior(json, export_settings): + if 'materials' not in json.keys(): + return + for mat in json['materials']: + if 'extensions' not in mat.keys(): + continue + if 'KHR_materials_ior' not in mat['extensions'].keys(): + continue + # We keep IOR only if some other extensions are used + # And because we may have deleted some extensions, we need to check again + need_to_export_ior = [ + 'KHR_materials_transmission', + 'KHR_materials_volume', + 'KHR_materials_specular' + ] + + if not any([e in mat['extensions'].keys() for e in need_to_export_ior]): + del mat['extensions']['KHR_materials_ior'] + + # Check if we need to keep the extension declaration + ior_found = False + for mat in json['materials']: + if 'extensions' not in mat.keys(): + continue + if 'KHR_materials_ior' not in mat['extensions'].keys(): + continue + ior_found = True + break + if not ior_found: + export_settings['gltf_need_to_keep_extension_declaration'] = [ + e for e in export_settings['gltf_need_to_keep_extension_declaration'] if e != 'KHR_materials_ior'] + + +def __check_volume(json, export_settings): + if 'materials' not in json.keys(): + return + for mat in json['materials']: + if 'extensions' not in mat.keys(): + continue + if 'KHR_materials_volume' not in mat['extensions'].keys(): + continue + # We keep volume only if transmission is used + # And because we may have deleted some extensions, we need to check again + if 'KHR_materials_transmission' not in mat['extensions'].keys(): + del mat['extensions']['KHR_materials_volume'] + + # Check if we need to keep the extension declaration + volume_found = False + for mat in json['materials']: + if 'extensions' not in mat.keys(): + continue + if 'KHR_materials_volume' not in mat['extensions'].keys(): + continue + volume_found = True + break + if not volume_found: + export_settings['gltf_need_to_keep_extension_declaration'] = [ + e for e in export_settings['gltf_need_to_keep_extension_declaration'] if e != 'KHR_materials_volume'] + + +def __detect_animated_extensions(obj, export_settings): + export_settings['gltf_animated_extensions'] = [] + export_settings['gltf_need_to_keep_extension_declaration'] = [] + if 'animations' not in obj.keys(): + return + for anim in obj['animations']: + if 'extensions' in anim.keys(): + for channel in anim['channels']: + if not channel['target']['path'] == "pointer": + continue + pointer = channel['target']['extensions']['KHR_animation_pointer']['pointer'] + if "/KHR" not in pointer: + continue + tab = pointer.split("/") + tab = [i for i in tab if i.startswith("KHR_")] + if len(tab) == 0: + continue + if tab[-1] not in 
export_settings['gltf_animated_extensions']: + export_settings['gltf_animated_extensions'].append(tab[-1]) + + +def __manage_extension_declaration(json, export_settings): + if 'extensionsUsed' in json.keys(): + new_ext_used = [] + for ext in json['extensionsUsed']: + if ext not in export_settings['gltf_need_to_keep_extension_declaration']: + continue + new_ext_used.append(ext) + json['extensionsUsed'] = new_ext_used + if 'extensionsRequired' in json.keys(): + new_ext_required = [] + for ext in json['extensionsRequired']: + if ext not in export_settings['gltf_need_to_keep_extension_declaration']: + continue + new_ext_required.append(ext) + json['extensionsRequired'] = new_ext_required + + +def __gather_gltf(exporter, export_settings): + active_scene_idx, scenes, animations = gltf2_blender_gather.gather_gltf2(export_settings) + + unused_skins = export_settings['vtree'].get_unused_skins() + + if export_settings['gltf_draco_mesh_compression']: + gltf2_io_draco_compression_extension.encode_scene_primitives(scenes, export_settings) + exporter.add_draco_extension() + + export_user_extensions('gather_gltf_hook', export_settings, active_scene_idx, scenes, animations) + + for idx, scene in enumerate(scenes): + exporter.add_scene(scene, idx == active_scene_idx, export_settings=export_settings) + for animation in animations: + exporter.add_animation(animation) + exporter.manage_gpu_instancing_nodes(export_settings) + exporter.traverse_unused_skins(unused_skins) + exporter.traverse_additional_textures() + exporter.traverse_additional_images() + + +def __create_buffer(exporter, export_settings): + buffer = bytes() + if export_settings['gltf_format'] == 'GLB': + buffer = exporter.finalize_buffer(export_settings['gltf_filedirectory'], is_glb=True) + else: + if export_settings['gltf_format'] == 'GLTF_EMBEDDED': + exporter.finalize_buffer(export_settings['gltf_filedirectory']) + else: + exporter.finalize_buffer(export_settings['gltf_filedirectory'], + export_settings['gltf_binaryfilename']) + + return buffer + + +def __postprocess_with_gltfpack(export_settings): + + gltfpack_binary_file_path = bpy.context.preferences.addons['io_scene_gltf2'].preferences.gltfpack_path_ui + + gltf_file_path = export_settings['gltf_filepath'] + gltf_file_base = os.path.splitext(os.path.basename(gltf_file_path))[0] + gltf_file_extension = os.path.splitext(os.path.basename(gltf_file_path))[1] + gltf_file_directory = os.path.dirname(gltf_file_path) + gltf_output_file_directory = os.path.join(gltf_file_directory, "gltfpacked") + if (os.path.exists(gltf_output_file_directory) is False): + os.makedirs(gltf_output_file_directory) + + gltf_input_file_path = gltf_file_path + gltf_output_file_path = os.path.join(gltf_output_file_directory, gltf_file_base + gltf_file_extension) + + options = [] + + if (export_settings['gltf_gltfpack_tc']): + options.append("-tc") + + if (export_settings['gltf_gltfpack_tq']): + options.append("-tq") + options.append(f"{export_settings['gltf_gltfpack_tq']}") + + if (export_settings['gltf_gltfpack_si'] != 1.0): + options.append("-si") + options.append(f"{export_settings['gltf_gltfpack_si']}") + + if (export_settings['gltf_gltfpack_sa']): + options.append("-sa") + + if (export_settings['gltf_gltfpack_slb']): + options.append("-slb") + + if (export_settings['gltf_gltfpack_noq']): + options.append("-noq") + else: + options.append("-vp") + options.append(f"{export_settings['gltf_gltfpack_vp']}") + options.append("-vt") + options.append(f"{export_settings['gltf_gltfpack_vt']}") + options.append("-vn") + 
options.append(f"{export_settings['gltf_gltfpack_vn']}") + options.append("-vc") + options.append(f"{export_settings['gltf_gltfpack_vc']}") + + match export_settings['gltf_gltfpack_vpi']: + case "Integer": + options.append("-vpi") + case "Normalized": + options.append("-vpn") + case "Floating-point": + options.append("-vpf") + + parameters = [] + parameters.append("-i") + parameters.append(gltf_input_file_path) + parameters.append("-o") + parameters.append(gltf_output_file_path) + + try: + subprocess.run([gltfpack_binary_file_path] + options + parameters, check=True) + except subprocess.CalledProcessError as e: + export_settings['log'].error("Calling gltfpack was not successful") + + +def __fix_json(obj, export_settings): + # TODO: move to custom JSON encoder + fixed = obj + if isinstance(obj, dict): + fixed = {} + for key, value in obj.items(): + if key == 'extras' and value is not None: + fixed[key] = value + continue + if not __should_include_json_value(key, value, export_settings): + continue + fixed[key] = __fix_json(value, export_settings) + elif isinstance(obj, list): + fixed = [] + for value in obj: + fixed.append(__fix_json(value, export_settings)) + elif isinstance(obj, float): + # force floats to int, if they are integers (prevent INTEGER_WRITTEN_AS_FLOAT validator warnings) + if int(obj) == obj: + return int(obj) + return fixed + + +def __should_include_json_value(key, value, export_settings): + allowed_empty_collections = ["KHR_materials_unlit"] + allowed_empty_collections_if_animated = \ + [ + "KHR_materials_specular", + "KHR_materials_clearcoat", + "KHR_texture_transform", + "KHR_materials_emissive_strength", + "KHR_materials_ior", + # "KHR_materials_iridescence", + "KHR_materials_sheen", + "KHR_materials_specular", + "KHR_materials_transmission", + "KHR_materials_volume", + "KHR_lights_punctual", + "KHR_materials_anisotropy" + ] + + if value is None: + return False + elif __is_empty_collection(value) and key not in allowed_empty_collections: + # Empty collection is not allowed, except if it is animated + if key in allowed_empty_collections_if_animated: + if key in export_settings['gltf_animated_extensions']: + # There is an animation, so we can keep this empty collection, and store + # that this extension declaration needs to be kept + export_settings['gltf_need_to_keep_extension_declaration'].append(key) + return True + else: + # There is no animation, so we will not keep this empty collection + return False + # We can't have this empty collection, because it can't be animated + return False + elif not __is_empty_collection(value): + if key.startswith("KHR_") or key.startswith("EXT_"): + export_settings['gltf_need_to_keep_extension_declaration'].append(key) + elif __is_empty_collection(value) and key in allowed_empty_collections: + # We can have this empty collection for this extension. 
So keeping it, and + # store that this extension declaration needs to be kept + export_settings['gltf_need_to_keep_extension_declaration'].append(key) + return True + + +def __is_empty_collection(value): + return (isinstance(value, dict) or isinstance(value, list)) and len(value) == 0 + + +def __write_file(json, buffer, export_settings): + try: + gltf2_io_export.save_gltf( + json, + export_settings, + gltf2_blender_json.BlenderJSONEncoder, + buffer) + if (export_settings['gltf_use_gltfpack']): + __postprocess_with_gltfpack(export_settings) + + except AssertionError as e: + _, _, tb = sys.exc_info() + traceback.print_tb(tb) # Fixed format + tb_info = traceback.extract_tb(tb) + for tbi in tb_info: + filename, line, func, text = tbi + export_settings['log'].error('An error occurred on line {} in statement {}'.format(line, text)) + export_settings['log'].error(str(e)) + raise e + + +def __notify_start(context, export_settings): + export_settings['log'].info('Starting glTF 2.0 export') + context.window_manager.progress_begin(0, 100) + context.window_manager.progress_update(0) + + +def __notify_end(context, elapsed, export_settings): + export_settings['log'].info('Finished glTF 2.0 export in {} s'.format(elapsed)) + context.window_manager.progress_end() + print() diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather.py b/scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather.py new file mode 100755 index 00000000000..07adc30e59b --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather.py @@ -0,0 +1,126 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy + +from ...io.com import gltf2_io +from ...io.exp.gltf2_io_user_extensions import export_user_extensions +from ..com.gltf2_blender_extras import generate_extras +from .gltf2_blender_gather_cache import cached +from . import gltf2_blender_gather_nodes +from . import gltf2_blender_gather_joints +from . import gltf2_blender_gather_tree +from .animation.sampled.object.gltf2_blender_gather_object_keyframes import get_cache_data +from .animation.gltf2_blender_gather_animations import gather_animations + + +def gather_gltf2(export_settings): + """ + Gather glTF properties from the current state of blender. + + :return: list of scene graphs to be added to the glTF export + """ + scenes = [] + animations = [] # unfortunately animations in gltf2 are just as 'root' as scenes. 
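In the exported JSON, scenes and animations end up as sibling top-level arrays (e.g. {"scenes": [...], "animations": [...]}), which is why both are collected here in the same pass.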
+ active_scene = None + store_user_scene = bpy.context.scene + scenes_to_export = bpy.data.scenes if export_settings['gltf_active_scene'] is False else [ + scene for scene in bpy.data.scenes if scene.name == store_user_scene.name] + for blender_scene in scenes_to_export: + scenes.append(__gather_scene(blender_scene, export_settings)) + if export_settings['gltf_animations']: + # resetting object cache + get_cache_data.reset_cache() + animations += gather_animations(export_settings) + if bpy.context.scene.name == store_user_scene.name: + active_scene = len(scenes) - 1 + + # restore user scene + bpy.context.window.scene = store_user_scene + return active_scene, scenes, animations + + +@cached +def __gather_scene(blender_scene, export_settings): + scene = gltf2_io.Scene( + extensions=None, + extras=__gather_extras(blender_scene, export_settings), + name=blender_scene.name, + nodes=[] + ) + + # Initialize some data needed for animation pointer + export_settings['KHR_animation_pointer'] = {} + export_settings['KHR_animation_pointer']['materials'] = {} + export_settings['KHR_animation_pointer']['lights'] = {} + export_settings['KHR_animation_pointer']['cameras'] = {} + + vtree = gltf2_blender_gather_tree.VExportTree(export_settings) + vtree.construct(blender_scene) + vtree.search_missing_armature() # In case armature are no parented correctly + if export_settings['gltf_armature_object_remove'] is True: + vtree.check_if_we_can_remove_armature() # Check if we can remove the armatures objects + + export_user_extensions('vtree_before_filter_hook', export_settings, vtree) + + # Now, we can filter tree if needed + vtree.filter() + + vtree.bake_armature_bone_list() # Used in case we remove the armature. Doing it after filter, as filter can remove some bones + + if export_settings['gltf_flatten_bones_hierarchy'] is True: + vtree.break_bone_hierarchy() + if export_settings['gltf_flatten_obj_hierarchy'] is True: + vtree.break_obj_hierarchy() + + vtree.variants_reset_to_original() + + export_user_extensions('vtree_after_filter_hook', export_settings, vtree) + + export_settings['vtree'] = vtree + + # If we don't remove armature object, we can't have bones directly at root of scene + # So looping only on root nodes, as they are all nodes, not bones + if export_settings['gltf_armature_object_remove'] is False: + for r in [vtree.nodes[r] for r in vtree.roots]: + node = gltf2_blender_gather_nodes.gather_node( + r, export_settings) + if node is not None: + scene.nodes.append(node) + else: + # If we remove armature objects, we can have bone at root of scene + armature_root_joints = {} + for r in [vtree.nodes[r] for r in vtree.roots]: + # Classic Object/node case + if r.blender_type != gltf2_blender_gather_tree.VExportNode.BONE: + node = gltf2_blender_gather_nodes.gather_node( + r, export_settings) + if node is not None: + scene.nodes.append(node) + else: + # We can have bone are root of scene because we remove the armature object + # and the armature was at root of scene + node = gltf2_blender_gather_joints.gather_joint_vnode( + r.uuid, export_settings) + if node is not None: + scene.nodes.append(node) + if r.armature not in armature_root_joints.keys(): + armature_root_joints[r.armature] = [] + armature_root_joints[r.armature].append(node) + + # Manage objects parented to bones, now we go through all root objects + for k, v in armature_root_joints.items(): + gltf2_blender_gather_nodes.get_objects_parented_to_bones(k, v, export_settings) + + vtree.add_neutral_bones() + + export_user_extensions('gather_scene_hook', 
export_settings, scene, blender_scene) + + return scene + + +def __gather_extras(blender_object, export_settings): + if export_settings['gltf_extras']: + return generate_extras(blender_object) + return None diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_accessors.py b/scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_accessors.py new file mode 100644 index 00000000000..f02012398e2 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_accessors.py @@ -0,0 +1,189 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +from ...io.com import gltf2_io +from ...io.com import gltf2_io_constants +from ...io.exp import gltf2_io_binary_data +from .gltf2_blender_gather_cache import cached + + +@cached +def gather_accessor(buffer_view: gltf2_io_binary_data.BinaryData, + component_type: gltf2_io_constants.ComponentType, + count, + max, + min, + type: gltf2_io_constants.DataType, + export_settings) -> gltf2_io.Accessor: + return gltf2_io.Accessor( + buffer_view=buffer_view, + byte_offset=None, + component_type=component_type, + count=count, + extensions=None, + extras=None, + max=list(max) if max is not None else None, + min=list(min) if min is not None else None, + name=None, + normalized=None, + sparse=None, + type=type + ) + + +def array_to_accessor( + array, + export_settings, + component_type, + data_type, + include_max_and_min=False, + sparse_type=None, + normalized=None, +): + + # Not trying to check if sparse is better + if sparse_type is None: + + buffer_view = gltf2_io_binary_data.BinaryData( + array.tobytes(), + gltf2_io_constants.BufferViewTarget.ARRAY_BUFFER, + ) + + amax = None + amin = None + if include_max_and_min: + amax = np.amax(array, axis=0).tolist() + amin = np.amin(array, axis=0).tolist() + + return gltf2_io.Accessor( + buffer_view=buffer_view, + byte_offset=None, + component_type=component_type, + count=len(array), + extensions=None, + extras=None, + max=amax, + min=amin, + name=None, + normalized=normalized, + sparse=None, + type=data_type, + ) + + # Trying to check if sparse is better (if user want it) + buffer_view = None + sparse = None + + try_sparse = False + if sparse_type == "SK": + try_sparse = export_settings['gltf_try_sparse_sk'] + + if try_sparse: + sparse, omit_sparse = __try_sparse_accessor(array) + else: + omit_sparse = False + if not sparse and omit_sparse is False: + buffer_view = gltf2_io_binary_data.BinaryData( + array.tobytes(), + gltf2_io_constants.BufferViewTarget.ARRAY_BUFFER, + ) + elif omit_sparse is True: + if sparse_type == "SK" and export_settings['gltf_try_omit_sparse_sk'] is True: + sparse = None # sparse will be None, buffer_view too + + amax = None + amin = None + if include_max_and_min: + amax = np.amax(array, axis=0).tolist() + amin = np.amin(array, axis=0).tolist() + + return gltf2_io.Accessor( + buffer_view=buffer_view, + byte_offset=None, + component_type=component_type, + count=len(array), + extensions=None, + extras=None, + max=amax, + min=amin, + name=None, + normalized=None, + sparse=sparse, + type=data_type, + ) + + +def __try_sparse_accessor(array): + """ + Returns an AccessorSparse for array, or None if + writing a dense accessor would be better. 
+ Return True if we can omit sparse accessor + """ + + omit_sparse = False + + # Find indices of non-zero elements + nonzero_indices = np.where(np.any(array, axis=1))[0] + + # For all-zero arrays, omitting sparse entirely is legal but poorly + # supported, so force nonzero_indices to be nonempty. + if len(nonzero_indices) == 0: + omit_sparse = True + nonzero_indices = np.array([0]) + + # How big of indices do we need? + if nonzero_indices[-1] <= 255: + indices_type = gltf2_io_constants.ComponentType.UnsignedByte + elif nonzero_indices[-1] <= 65535: + indices_type = gltf2_io_constants.ComponentType.UnsignedShort + else: + indices_type = gltf2_io_constants.ComponentType.UnsignedInt + + # Cast indices to appropiate type (if needed) + nonzero_indices = nonzero_indices.astype( + gltf2_io_constants.ComponentType.to_numpy_dtype(indices_type), + copy=False, + ) + + # Calculate size if we don't use sparse + one_elem_size = len(array[:1].tobytes()) + dense_size = len(array) * one_elem_size + + # Calculate approximate size if we do use sparse + indices_size = ( + len(nonzero_indices[:1].tobytes()) * + len(nonzero_indices) + ) + values_size = len(nonzero_indices) * one_elem_size + json_increase = 170 # sparse makes the JSON about this much bigger + penalty = 64 # further penalty avoids sparse in marginal cases + sparse_size = indices_size + values_size + json_increase + penalty + + if sparse_size >= dense_size: + return None, omit_sparse + + return gltf2_io.AccessorSparse( + count=len(nonzero_indices), + extensions=None, + extras=None, + indices=gltf2_io.AccessorSparseIndices( + buffer_view=gltf2_io_binary_data.BinaryData( + nonzero_indices.tobytes() + ), + byte_offset=None, + component_type=indices_type, + extensions=None, + extras=None, + ), + values=gltf2_io.AccessorSparseValues( + buffer_view=gltf2_io_binary_data.BinaryData( + array[nonzero_indices].tobytes() + ), + byte_offset=None, + extensions=None, + extras=None, + ), + ), omit_sparse diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_cache.py b/scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_cache.py new file mode 100755 index 00000000000..249666e8a31 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_cache.py @@ -0,0 +1,153 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import functools + + +def cached_by_key(key): + """ + Decorates functions whose result should be cached. Use it like: + @cached_by_key(key=...) + def func(..., export_settings): + ... + The decorated function, func, must always take an "export_settings" arg + (the cache is stored here). + The key argument to the decorator is a function that computes the key to + cache on. It is passed all the arguments to func. 
+ """ + def inner(func): + @functools.wraps(func) + def wrapper_cached(*args, **kwargs): + if kwargs.get("export_settings"): + export_settings = kwargs["export_settings"] + else: + export_settings = args[-1] + + cache_key = key(*args, **kwargs) + + # invalidate cache if export settings have changed + if not hasattr(func, "__export_settings") or export_settings != func.__export_settings: + func.__cache = {} + func.__export_settings = export_settings + # use or fill cache + if cache_key in func.__cache: + return func.__cache[cache_key] + else: + result = func(*args, **kwargs) + func.__cache[cache_key] = result + return result + + return wrapper_cached + + return inner + + +def default_key(*args, **kwargs): + """ + Default cache key for @cached functions. + Cache on all arguments (except export_settings). + """ + assert len(args) >= 2 and 0 <= len(kwargs) <= 1, "Wrong signature for cached function" + cache_key_args = args + # make a shallow copy of the keyword arguments so that 'export_settings' can be removed + cache_key_kwargs = dict(kwargs) + if kwargs.get("export_settings"): + del cache_key_kwargs["export_settings"] + else: + cache_key_args = args[:-1] + + cache_key = () + for i in cache_key_args: + cache_key += (i,) + for i in cache_key_kwargs.values(): + cache_key += (i,) + + return cache_key + + +def cached(func): + return cached_by_key(key=default_key)(func) + + +def datacache(func): + + def reset_all_cache(): + func.__cache = {} + + func.reset_cache = reset_all_cache + + @functools.wraps(func) + def wrapper_objectcache(*args, **kwargs): + + # 0 : path + # 1 : object_uuid + # 2 : bone (can be, of course, None for path other than 'bone') + # 3 : action_name + # 4 : current_frame + # 5 : step + # 6 : export_settings + # only_gather_provided : only_gather_provided + + cache_key_args = args + cache_key_args = args[:-1] + + if not hasattr(func, "__cache"): + func.reset_cache() + + # object is not cached yet + if cache_key_args[1] not in func.__cache.keys(): + result = func(*args) + func.__cache = result + # Here are the key used: result[obj_uuid][action_name][path][bone][frame] + return result[cache_key_args[1]][cache_key_args[3]][cache_key_args[0]][cache_key_args[2]][cache_key_args[4]] + # object is in cache, but not this action + # We need to not erase other actions of this object + elif cache_key_args[3] not in func.__cache[cache_key_args[1]].keys(): + result = func(*args, only_gather_provided=True) + # The result can contains multiples animations, in case this is an armature with drivers + # Need to create all newly retrieved animations + func.__cache.update(result) + # Here are the key used: result[obj_uuid][action_name][path][bone][frame] + return result[cache_key_args[1]][cache_key_args[3]][cache_key_args[0]][cache_key_args[2]][cache_key_args[4]] + # all is already cached + else: + # Here are the key used: result[obj_uuid][action_name][path][bone][frame] + return func.__cache[cache_key_args[1]][cache_key_args[3] + ][cache_key_args[0]][cache_key_args[2]][cache_key_args[4]] + return wrapper_objectcache + + +# TODO: replace "cached" with "unique" in all cases where the caching is functional and not only for performance reasons +call_or_fetch = cached +unique = cached + + +def skdriverdiscovercache(func): + + def reset_cache_skdriverdiscovercache(): + func.__current_armature_uuid = None + func.__skdriverdiscover = {} + + func.reset_cache = reset_cache_skdriverdiscovercache + + @functools.wraps(func) + def wrapper_skdriverdiscover(*args, **kwargs): + + # 0 : armature_uuid + # 1 : 
export_settings + + cache_key_args = args + cache_key_args = args[:-1] + + if not hasattr(func, "__current_armature_uuid") or func.__current_armature_uuid is None: + func.reset_cache() + + if cache_key_args[0] != func.__current_armature_uuid: + result = func(*args) + func.__skdriverdiscover[cache_key_args[0]] = result + func.__current_armature_uuid = cache_key_args[0] + return result + else: + return func.__skdriverdiscover[cache_key_args[0]] + return wrapper_skdriverdiscover diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_cameras.py b/scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_cameras.py new file mode 100755 index 00000000000..3cffa531a64 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_cameras.py @@ -0,0 +1,152 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy +import math +from ...io.com import gltf2_io +from ...blender.com.gltf2_blender_conversion import yvof_blender_to_gltf +from ...io.exp.gltf2_io_user_extensions import export_user_extensions +from ..com.gltf2_blender_extras import generate_extras +from .gltf2_blender_gather_cache import cached + + +@cached +def gather_camera(blender_camera, export_settings): + if not __filter_camera(blender_camera, export_settings): + return None + + export_settings['current_paths'] = {} # For KHR_animation_pointer + + camera = gltf2_io.Camera( + extensions=__gather_extensions(blender_camera, export_settings), + extras=__gather_extras(blender_camera, export_settings), + name=__gather_name(blender_camera, export_settings), + orthographic=__gather_orthographic(blender_camera, export_settings), + perspective=__gather_perspective(blender_camera, export_settings), + type=__gather_type(blender_camera, export_settings) + ) + + export_user_extensions('gather_camera_hook', export_settings, camera, blender_camera) + + return camera + + +def __filter_camera(blender_camera, export_settings): + return bool(__gather_type(blender_camera, export_settings)) + + +def __gather_extensions(blender_camera, export_settings): + return None + + +def __gather_extras(blender_camera, export_settings): + if export_settings['gltf_extras']: + return generate_extras(blender_camera) + return None + + +def __gather_name(blender_camera, export_settings): + return blender_camera.name + + +def __gather_orthographic(blender_camera, export_settings): + if __gather_type(blender_camera, export_settings) == "orthographic": + orthographic = gltf2_io.CameraOrthographic( + extensions=None, + extras=None, + xmag=None, + ymag=None, + zfar=None, + znear=None + ) + + _render = bpy.context.scene.render + scene_x = _render.resolution_x * _render.pixel_aspect_x + scene_y = _render.resolution_y * _render.pixel_aspect_y + scene_square = max(scene_x, scene_y) + del _render + + # `Camera().ortho_scale` (and also FOV FTR) maps to the maximum of either image width or image height— This is the box that gets shown from camera view with the checkbox `.show_sensor = True`. 
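+        # Illustrative example (values assumed, not taken from any particular
+        # scene): for a 1920x1080 render with square pixels and
+        # ortho_scale = 10.0, scene_square = 1920, so
+        # xmag = 10.0 * (1920 / 1920) / 2 = 5.0 and
+        # ymag = 10.0 * (1080 / 1920) / 2 = 2.8125.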
+ + orthographic.xmag = blender_camera.ortho_scale * (scene_x / scene_square) / 2 + orthographic.ymag = blender_camera.ortho_scale * (scene_y / scene_square) / 2 + + orthographic.znear = blender_camera.clip_start + orthographic.zfar = blender_camera.clip_end + + # Store data for KHR_animation_pointer + path_ = {} + path_['length'] = 1 + path_['path'] = "/cameras/XXX/orthographic/xmag" + export_settings['current_paths']['ortho_scale_x'] = path_ + + path_ = {} + path_['length'] = 1 + path_['path'] = "/cameras/XXX/orthographic/ymag" + export_settings['current_paths']['ortho_scale_y'] = path_ + + path_ = {} + path_['length'] = 1 + path_['path'] = "/cameras/XXX/orthographic/zfar" + export_settings['current_paths']['clip_end'] = path_ + + path_ = {} + path_['length'] = 1 + path_['path'] = "/cameras/XXX/orthographic/znear" + export_settings['current_paths']['clip_start'] = path_ + + return orthographic + return None + + +def __gather_perspective(blender_camera, export_settings): + if __gather_type(blender_camera, export_settings) == "perspective": + perspective = gltf2_io.CameraPerspective( + aspect_ratio=None, + extensions=None, + extras=None, + yfov=None, + zfar=None, + znear=None + ) + + _render = bpy.context.scene.render + width = _render.pixel_aspect_x * _render.resolution_x + height = _render.pixel_aspect_y * _render.resolution_y + perspective.aspect_ratio = width / height + del _render + + perspective.yfov = yvof_blender_to_gltf(blender_camera.angle, width, height, blender_camera.sensor_fit) + + perspective.znear = blender_camera.clip_start + perspective.zfar = blender_camera.clip_end + + path_ = {} + path_['length'] = 1 + path_['path'] = "/cameras/XXX/perspective/zfar" + export_settings['current_paths']['clip_end'] = path_ + + path_ = {} + path_['length'] = 1 + path_['path'] = "/cameras/XXX/perspective/znear" + export_settings['current_paths']['clip_start'] = path_ + + path_ = {} + path_['length'] = 1 + path_['path'] = "/cameras/XXX/perspective/yfov" + path_['sensor_fit'] = 'sensor_fit' + export_settings['current_paths']['angle'] = path_ + + # aspect ratio is not animatable in blender + + return perspective + return None + + +def __gather_type(blender_camera, export_settings): + if blender_camera.type == 'PERSP': + return "perspective" + elif blender_camera.type == 'ORTHO': + return "orthographic" + return None diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_joints.py b/scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_joints.py new file mode 100755 index 00000000000..0f3b25f3763 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_joints.py @@ -0,0 +1,110 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +from mathutils import Quaternion, Vector +from ...io.com import gltf2_io +from ...io.exp.gltf2_io_user_extensions import export_user_extensions +from ..com.gltf2_blender_extras import generate_extras +from .gltf2_blender_gather_cache import cached +from . 
import gltf2_blender_gather_tree + + +# TODO these 3 functions move to shared file +def __convert_swizzle_location(loc, export_settings): + """Convert a location from Blender coordinate system to glTF coordinate system.""" + if export_settings['gltf_yup']: + return Vector((loc[0], loc[2], -loc[1])) + else: + return Vector((loc[0], loc[1], loc[2])) + + +def __convert_swizzle_rotation(rot, export_settings): + """ + Convert a quaternion rotation from Blender coordinate system to glTF coordinate system. + + 'w' is still at first position. + """ + if export_settings['gltf_yup']: + return Quaternion((rot[0], rot[1], rot[3], -rot[2])) + else: + return Quaternion((rot[0], rot[1], rot[2], rot[3])) + + +def __convert_swizzle_scale(scale, export_settings): + """Convert a scale from Blender coordinate system to glTF coordinate system.""" + if export_settings['gltf_yup']: + return Vector((scale[0], scale[2], scale[1])) + else: + return Vector((scale[0], scale[1], scale[2])) + + +@cached +def gather_joint_vnode(vnode, export_settings): + """ + Generate a glTF2 node from a blender bone, as joints in glTF2 are simply nodes. + + :param blender_bone: a blender PoseBone + :param export_settings: the settings for this export + :return: a glTF2 node (acting as a joint) + """ + vtree = export_settings['vtree'] + blender_bone = vtree.nodes[vnode].blender_bone + + if export_settings['gltf_armature_object_remove'] is True: + if vtree.nodes[vnode].parent_uuid is not None: + mat = vtree.nodes[vtree.nodes[vnode].parent_uuid].matrix_world.inverted_safe( + ) @ vtree.nodes[vnode].matrix_world + else: + mat = vtree.nodes[vnode].matrix_world + else: + mat = vtree.nodes[vtree.nodes[vnode].parent_uuid].matrix_world.inverted_safe() @ vtree.nodes[vnode].matrix_world + + trans, rot, sca = mat.decompose() + + trans = __convert_swizzle_location(trans, export_settings) + rot = __convert_swizzle_rotation(rot, export_settings) + sca = __convert_swizzle_scale(sca, export_settings) + + translation, rotation, scale = (None, None, None) + if trans[0] != 0.0 or trans[1] != 0.0 or trans[2] != 0.0: + translation = [trans[0], trans[1], trans[2]] + if rot[0] != 1.0 or rot[1] != 0.0 or rot[2] != 0.0 or rot[3] != 0.0: + rotation = [rot[1], rot[2], rot[3], rot[0]] + if sca[0] != 1.0 or sca[1] != 1.0 or sca[2] != 1.0: + scale = [sca[0], sca[1], sca[2]] + + # traverse into children + children = [] + + for bone_uuid in [c for c in vtree.nodes[vnode].children if vtree.nodes[c].blender_type == + gltf2_blender_gather_tree.VExportNode.BONE]: + children.append(gather_joint_vnode(bone_uuid, export_settings)) + + # finally add to the joints array containing all the joints in the hierarchy + node = gltf2_io.Node( + camera=None, + children=children, + extensions=None, + extras=__gather_extras(blender_bone, export_settings), + matrix=None, + mesh=None, + name=blender_bone.name if vtree.nodes[vnode].leaf_reference is None else vtree.nodes[vtree.nodes[vnode].leaf_reference].blender_bone.name + '_leaf', + rotation=rotation, + scale=scale, + skin=None, + translation=translation, + weights=None + ) + + export_user_extensions('gather_joint_hook', export_settings, node, blender_bone) + + vtree.nodes[vnode].node = node + + return node + + +def __gather_extras(blender_bone, export_settings): + if export_settings['gltf_extras']: + return generate_extras(blender_bone.bone) + return None diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_light_spots.py b/scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_light_spots.py 
new file mode 100644 index 00000000000..a6407905870 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_light_spots.py @@ -0,0 +1,47 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +from typing import Optional +from ...io.com import gltf2_io_lights_punctual + + +def gather_light_spot(blender_lamp, export_settings) -> Optional[gltf2_io_lights_punctual.LightSpot]: + + if not __filter_light_spot(blender_lamp, export_settings): + return None + + spot = gltf2_io_lights_punctual.LightSpot( + inner_cone_angle=__gather_inner_cone_angle(blender_lamp, export_settings), + outer_cone_angle=__gather_outer_cone_angle(blender_lamp, export_settings) + ) + return spot + + +def __filter_light_spot(blender_lamp, _) -> bool: + if blender_lamp.type != "SPOT": + return False + + return True + + +def __gather_inner_cone_angle(blender_lamp, export_settings) -> Optional[float]: + angle = blender_lamp.spot_size * 0.5 + + path_ = {} + path_['length'] = 1 + path_['path'] = "/extensions/KHR_lights_punctual/lights/XXX/spot.innerConeAngle" + path_['additional_path'] = "spot_size" + export_settings['current_paths']["spot_blend"] = path_ + + return angle - angle * blender_lamp.spot_blend + + +def __gather_outer_cone_angle(blender_lamp, export_settings) -> Optional[float]: + + path_ = {} + path_['length'] = 1 + path_['path'] = "/extensions/KHR_lights_punctual/lights/XXX/spot.outerConeAngle" + export_settings['current_paths']["spot_size"] = path_ + + return blender_lamp.spot_size * 0.5 diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_lights.py b/scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_lights.py new file mode 100644 index 00000000000..0f26c9a8247 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_lights.py @@ -0,0 +1,196 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy +import math +from typing import Optional, List, Dict, Any +from ...io.com import gltf2_io_lights_punctual +from ...io.com import gltf2_io_debug +from ..com.gltf2_blender_extras import generate_extras +from ..com.gltf2_blender_conversion import PBR_WATTS_TO_LUMENS +from ..com.gltf2_blender_default import LIGHTS +from .gltf2_blender_gather_cache import cached +from . 
import gltf2_blender_gather_light_spots +from .material import gltf2_blender_search_node_tree + + +@cached +def gather_lights_punctual(blender_lamp, export_settings) -> Optional[Dict[str, Any]]: + + export_settings['current_paths'] = {} # For KHR_animation_pointer + + if not __filter_lights_punctual(blender_lamp, export_settings): + return None + + light = gltf2_io_lights_punctual.Light( + color=__gather_color(blender_lamp, export_settings), + intensity=__gather_intensity(blender_lamp, export_settings), + spot=__gather_spot(blender_lamp, export_settings), + type=__gather_type(blender_lamp, export_settings), + range=__gather_range(blender_lamp, export_settings), + name=__gather_name(blender_lamp, export_settings), + extensions=__gather_extensions(blender_lamp, export_settings), + extras=__gather_extras(blender_lamp, export_settings) + ) + + return light.to_dict() + + +def __filter_lights_punctual(blender_lamp, export_settings) -> bool: + if blender_lamp.type in ["HEMI", "AREA"]: + export_settings['log'].warning("Unsupported light source {}".format(blender_lamp.type)) + return False + + return True + + +def __gather_color(blender_lamp, export_settings) -> Optional[List[float]]: + emission_node = __get_cycles_emission_node(blender_lamp) + if emission_node is not None: + + # Store data for KHR_animation_pointer + path_ = {} + path_['length'] = 3 + path_['path'] = "/extensions/KHR_lights_punctual/lights/XXX/color" + export_settings['current_paths']["node_tree." + emission_node.inputs["Color"].path_from_id() + + ".default_value"] = path_ + + return list(emission_node.inputs["Color"].default_value)[:3] + + # Store data for KHR_animation_pointer + path_ = {} + path_['length'] = 3 + path_['path'] = "/extensions/KHR_lights_punctual/lights/XXX/color" + export_settings['current_paths']['color'] = path_ + + return list(blender_lamp.color) + + +def __gather_intensity(blender_lamp, export_settings) -> Optional[float]: + emission_node = __get_cycles_emission_node(blender_lamp) + if emission_node is not None: + if blender_lamp.type != 'SUN': + # When using cycles, the strength should be influenced by a LightFalloff node + result = gltf2_blender_search_node_tree.from_socket( + gltf2_blender_search_node_tree.NodeSocket(emission_node.inputs.get("Strength"), blender_lamp.node_tree), + gltf2_blender_search_node_tree.FilterByType(bpy.types.ShaderNodeLightFalloff) + ) + if result: + quadratic_falloff_node = result[0].shader_node + emission_strength = quadratic_falloff_node.inputs["Strength"].default_value / (math.pi * 4.0) + + # Store data for KHR_animation_pointer + path_ = {} + path_['length'] = 1 + path_['path'] = "/extensions/KHR_lights_punctual/lights/XXX/intensity" + path_['lamp_type'] = blender_lamp.type + export_settings['current_paths']["node_tree." + + quadratic_falloff_node.inputs["Strength"].path_from_id() + + ".default_value"] = path_ + + else: + export_settings['log'].warning('No quadratic light falloff node attached to emission strength property') + + path_ = {} + path_['length'] = 1 + path_['path'] = "/extensions/KHR_lights_punctual/lights/XXX/intensity" + path_['lamp_type'] = blender_lamp.type + export_settings['current_paths']["energy"] = path_ + + emission_strength = blender_lamp.energy + else: + emission_strength = emission_node.inputs["Strength"].default_value + + path_ = {} + path_['length'] = 1 + path_['path'] = "/extensions/KHR_lights_punctual/lights/XXX/intensity" + path_['lamp_type'] = blender_lamp.type + export_settings['current_paths']["node_tree." 
+ + emission_node.inputs["Strength"].path_from_id() + + ".default_value"] = path_ + + else: + emission_strength = blender_lamp.energy + + path_ = {} + path_['length'] = 1 + path_['path'] = "/extensions/KHR_lights_punctual/lights/XXX/intensity" + path_['lamp_type'] = blender_lamp.type + export_settings['current_paths']["energy"] = path_ + + if export_settings['gltf_lighting_mode'] == 'RAW': + return emission_strength + else: + # Assume at this point the computed strength is still in the appropriate + # watt-related SI unit, which if everything up to here was done with + # physical basis it hopefully should be. + if blender_lamp.type == 'SUN': # W/m^2 in Blender to lm/m^2 for GLTF/KHR_lights_punctual. + emission_luminous = emission_strength + else: + # Other than directional, only point and spot lamps are supported by GLTF. + # In Blender, points are omnidirectional W, and spots are specified as if they're points. + # Point and spot should both be lm/r^2 in GLTF. + emission_luminous = emission_strength / (4 * math.pi) + if export_settings['gltf_lighting_mode'] == 'SPEC': + emission_luminous *= PBR_WATTS_TO_LUMENS + elif export_settings['gltf_lighting_mode'] == 'COMPAT': + pass # Just so we have an exhaustive tree to catch bugged values. + else: + raise ValueError(export_settings['gltf_lighting_mode']) + return emission_luminous + + +def __gather_spot(blender_lamp, export_settings) -> Optional[gltf2_io_lights_punctual.LightSpot]: + if blender_lamp.type == "SPOT": + return gltf2_blender_gather_light_spots.gather_light_spot(blender_lamp, export_settings) + return None + + +def __gather_type(blender_lamp, _) -> str: + return LIGHTS[blender_lamp.type] + + +def __gather_range(blender_lamp, export_settings) -> Optional[float]: + if blender_lamp.use_custom_distance: + + path_ = {} + path_['length'] = 1 + path_['path'] = "/extensions/KHR_lights_punctual/lights/XXX/range" + export_settings['current_paths']["cutoff_distance"] = path_ + + return blender_lamp.cutoff_distance + return None + + +def __gather_name(blender_lamp, export_settings) -> Optional[str]: + return blender_lamp.name + + +def __gather_extensions(blender_lamp, export_settings) -> Optional[dict]: + return None + + +def __gather_extras(blender_lamp, export_settings) -> Optional[Any]: + if export_settings['gltf_extras']: + return generate_extras(blender_lamp) + return None + + +def __get_cycles_emission_node(blender_lamp) -> Optional[bpy.types.ShaderNodeEmission]: + if blender_lamp.use_nodes and blender_lamp.node_tree: + for currentNode in blender_lamp.node_tree.nodes: + is_shadernode_output = isinstance(currentNode, bpy.types.ShaderNodeOutputLight) + if is_shadernode_output: + if not currentNode.is_active_output: + continue + result = gltf2_blender_search_node_tree.from_socket( + gltf2_blender_search_node_tree.NodeSocket( + currentNode.inputs.get("Surface"), + blender_lamp.node_tree), + gltf2_blender_search_node_tree.FilterByType( + bpy.types.ShaderNodeEmission)) + if not result: + continue + return result[0].shader_node + return None diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_mesh.py b/scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_mesh.py new file mode 100755 index 00000000000..e9f47bb991b --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_mesh.py @@ -0,0 +1,160 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy +from typing import Optional, Dict, List, Any, Tuple +from 
...io.com import gltf2_io +from ...blender.com.gltf2_blender_data_path import get_sk_exported +from ...io.exp.gltf2_io_user_extensions import export_user_extensions +from ..com.gltf2_blender_extras import generate_extras +from . import gltf2_blender_gather_primitives +from .gltf2_blender_gather_cache import cached_by_key + + +def get_mesh_cache_key(blender_mesh, + blender_object, + vertex_groups, + modifiers, + materials, + original_mesh, + export_settings): + # Use id of original mesh + # Do not use bpy.types that can be unhashable + # Do not use mesh name, that can be not unique (when linked) + + # If materials are not exported, no need to cache by material + if export_settings['gltf_materials'] is None: + mats = None + else: + mats = tuple(id(m) if m is not None else None for m in materials) + + # TODO check what is really needed for modifiers + + mesh_to_id_cache = blender_mesh if original_mesh is None else original_mesh + return ( + (id(mesh_to_id_cache),), + (modifiers,), + mats + ) + + +@cached_by_key(key=get_mesh_cache_key) +def gather_mesh(blender_mesh: bpy.types.Mesh, + uuid_for_skined_data, + vertex_groups: bpy.types.VertexGroups, + modifiers: Optional[bpy.types.ObjectModifiers], + materials: Tuple[bpy.types.Material], + original_mesh: bpy.types.Mesh, + export_settings + ) -> Optional[gltf2_io.Mesh]: + if not __filter_mesh(blender_mesh, vertex_groups, modifiers, export_settings): + return None + + mesh = gltf2_io.Mesh( + extensions=__gather_extensions( + blender_mesh, vertex_groups, modifiers, export_settings), extras=__gather_extras( + blender_mesh, vertex_groups, modifiers, export_settings), name=__gather_name( + blender_mesh, vertex_groups, modifiers, export_settings), weights=__gather_weights( + blender_mesh, vertex_groups, modifiers, export_settings), primitives=__gather_primitives( + blender_mesh, uuid_for_skined_data, vertex_groups, modifiers, materials, export_settings), ) + + if len(mesh.primitives) == 0: + export_settings['log'].warning("Mesh '{}' has no primitives and will be omitted.".format(mesh.name)) + return None + + blender_object = None + if uuid_for_skined_data: + blender_object = export_settings['vtree'].nodes[uuid_for_skined_data].blender_object + + export_user_extensions('gather_mesh_hook', + export_settings, + mesh, + blender_mesh, + blender_object, + vertex_groups, + modifiers, + materials) + + return mesh + + +def __filter_mesh(blender_mesh: bpy.types.Mesh, + vertex_groups: bpy.types.VertexGroups, + modifiers: Optional[bpy.types.ObjectModifiers], + export_settings + ) -> bool: + return True + + +def __gather_extensions(blender_mesh: bpy.types.Mesh, + vertex_groups: bpy.types.VertexGroups, + modifiers: Optional[bpy.types.ObjectModifiers], + export_settings + ) -> Any: + return None + + +def __gather_extras(blender_mesh: bpy.types.Mesh, + vertex_groups: bpy.types.VertexGroups, + modifiers: Optional[bpy.types.ObjectModifiers], + export_settings + ) -> Optional[Dict[Any, Any]]: + + extras = {} + + if export_settings['gltf_extras']: + extras = generate_extras(blender_mesh) or {} + + # Not for GN Instances + if export_settings['gltf_morph'] and blender_mesh.shape_keys and blender_mesh.users != 0: + morph_max = len(blender_mesh.shape_keys.key_blocks) - 1 + if morph_max > 0: + extras['targetNames'] = [k.name for k in get_sk_exported(blender_mesh.shape_keys.key_blocks)] + + if extras: + return extras + + return None + + +def __gather_name(blender_mesh: bpy.types.Mesh, + vertex_groups: bpy.types.VertexGroups, + modifiers: Optional[bpy.types.ObjectModifiers], + 
export_settings + ) -> str: + return blender_mesh.name + + +def __gather_primitives(blender_mesh: bpy.types.Mesh, + uuid_for_skined_data, + vertex_groups: bpy.types.VertexGroups, + modifiers: Optional[bpy.types.ObjectModifiers], + materials: Tuple[bpy.types.Material], + export_settings + ) -> List[gltf2_io.MeshPrimitive]: + return gltf2_blender_gather_primitives.gather_primitives(blender_mesh, + uuid_for_skined_data, + vertex_groups, + modifiers, + materials, + export_settings) + + +def __gather_weights(blender_mesh: bpy.types.Mesh, + vertex_groups: bpy.types.VertexGroups, + modifiers: Optional[bpy.types.ObjectModifiers], + export_settings + ) -> Optional[List[float]]: + if not export_settings['gltf_morph'] or not blender_mesh.shape_keys: + return None + + # Not for GN Instances + if blender_mesh.users == 0: + return None + + morph_max = len(blender_mesh.shape_keys.key_blocks) - 1 + if morph_max <= 0: + return None + + return [k.value for k in get_sk_exported(blender_mesh.shape_keys.key_blocks)] diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_nodes.py b/scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_nodes.py new file mode 100755 index 00000000000..c6165340228 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_nodes.py @@ -0,0 +1,530 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import math +import bpy +from mathutils import Matrix, Quaternion, Vector + +from ...io.com import gltf2_io +from ...io.com import gltf2_io_extensions +from ...io.exp.gltf2_io_user_extensions import export_user_extensions +from ..com.gltf2_blender_extras import generate_extras +from ..com.gltf2_blender_default import LIGHTS +from ..com import gltf2_blender_math +from . import gltf2_blender_gather_tree +from . import gltf2_blender_gather_skins +from . import gltf2_blender_gather_cameras +from . import gltf2_blender_gather_mesh +from . import gltf2_blender_gather_joints +from . 
import gltf2_blender_gather_lights +from .gltf2_blender_gather_tree import VExportNode + + +def gather_node(vnode, export_settings): + + blender_object = vnode.blender_object + + skin = gather_skin(vnode.uuid, export_settings) + if skin is not None: + vnode.skin = skin + + # Hook to check if we should export mesh or not (force it to None) + + class GltfHookNodeMesh: + def __init__(self): + self.export_mesh = True + + gltf_hook_node_mesh = GltfHookNodeMesh() + + export_user_extensions('gather_node_mesh_hook', export_settings, gltf_hook_node_mesh, blender_object) + if gltf_hook_node_mesh.export_mesh is True: + mesh = __gather_mesh(vnode, blender_object, export_settings) + else: + mesh = None + + node = gltf2_io.Node( + camera=__gather_camera(vnode, export_settings), + children=__gather_children(vnode, export_settings), + extensions=__gather_extensions(vnode, export_settings), + extras=__gather_extras(blender_object, export_settings), + matrix=__gather_matrix(blender_object, export_settings), + mesh=mesh, + name=__gather_name(blender_object, export_settings), + rotation=None, + scale=None, + skin=skin, + translation=None, + weights=__gather_weights(blender_object, export_settings) + ) + + # If node mesh is skined, transforms should be ignored at import, so no need to set them here + if node.skin is None: + node.translation, node.rotation, node.scale = __gather_trans_rot_scale(vnode, export_settings) + + export_user_extensions('gather_node_hook', export_settings, node, blender_object) + + vnode.node = node + + return node + + +def __gather_camera(vnode, export_settings): + if not vnode.blender_object: + return + if vnode.blender_type == VExportNode.COLLECTION: + return None + if vnode.blender_object.type != 'CAMERA': + return None + + cam = gltf2_blender_gather_cameras.gather_camera(vnode.blender_object.data, export_settings) + + if len(export_settings['current_paths']) > 0: + export_settings['KHR_animation_pointer']['cameras'][id(vnode.blender_object.data)] = {} + export_settings['KHR_animation_pointer']['cameras'][id( + vnode.blender_object.data)]['paths'] = export_settings['current_paths'].copy() + export_settings['KHR_animation_pointer']['cameras'][id(vnode.blender_object.data)]['glTF_camera'] = cam + + export_settings['current_paths'] = {} + + return cam + + +def __gather_children(vnode, export_settings): + children = [] + + vtree = export_settings['vtree'] + + armature_object_uuid = None + + # Standard Children / Collection + if export_settings['gltf_armature_object_remove'] is False: + for c in [vtree.nodes[c] + for c in vnode.children if vtree.nodes[c].blender_type != gltf2_blender_gather_tree.VExportNode.BONE]: + node = gather_node(c, export_settings) + if node is not None: + children.append(node) + else: + root_joints = [] + for c in [vtree.nodes[c] for c in vnode.children]: + if c.blender_type != gltf2_blender_gather_tree.VExportNode.BONE: + node = gather_node(c, export_settings) + if node is not None: + children.append(node) + else: + # We come here because armature was remove, and bone can be a child of any object + joint = gltf2_blender_gather_joints.gather_joint_vnode(c.uuid, export_settings) + children.append(joint) + armature_object_uuid = c.armature + root_joints.append(joint) + + # Now got all bone children (that are root joints), we can get object parented to bones + + # Armature --> Retrieve Blender bones + # This can't happen if we remove the Armature Object + if vnode.blender_type == gltf2_blender_gather_tree.VExportNode.ARMATURE: + armature_object_uuid = vnode.uuid + 
root_joints = [] + root_bones_uuid = export_settings['vtree'].get_root_bones_uuid(vnode.uuid) + for bone_uuid in root_bones_uuid: + joint = gltf2_blender_gather_joints.gather_joint_vnode(bone_uuid, export_settings) + children.append(joint) + root_joints.append(joint) + + if vnode.blender_type == gltf2_blender_gather_tree.VExportNode.ARMATURE \ + or armature_object_uuid is not None: + + # Object parented to bones + get_objects_parented_to_bones(armature_object_uuid, root_joints, export_settings) + + return children + + +def get_objects_parented_to_bones(armature_object_uuid, root_joints, export_settings): + vtree = export_settings['vtree'] + direct_bone_children = [] + for n in [vtree.nodes[i] for i in vtree.get_all_bones(armature_object_uuid)]: + direct_bone_children.extend([c for c in n.children if vtree.nodes[c].blender_type != + gltf2_blender_gather_tree.VExportNode.BONE]) + + for child in direct_bone_children: # List of object that are parented to bones + # find parent joint + parent_joint = __find_parent_joint(root_joints, vtree.nodes[child].blender_object.parent_bone) + if not parent_joint: + continue + child_node = gather_node(vtree.nodes[child], export_settings) + if child_node is None: + continue + + mat = vtree.nodes[vtree.nodes[child].parent_bone_uuid].matrix_world.inverted_safe( + ) @ vtree.nodes[child].matrix_world + loc, rot_quat, scale = mat.decompose() + + trans = __convert_swizzle_location(loc, export_settings) + rot = __convert_swizzle_rotation(rot_quat, export_settings) + sca = __convert_swizzle_scale(scale, export_settings) + + translation, rotation, scale = (None, None, None) + if trans[0] != 0.0 or trans[1] != 0.0 or trans[2] != 0.0: + translation = [trans[0], trans[1], trans[2]] + if rot[0] != 1.0 or rot[1] != 0.0 or rot[2] != 0.0 or rot[3] != 0.0: + rotation = [rot[1], rot[2], rot[3], rot[0]] + if sca[0] != 1.0 or sca[1] != 1.0 or sca[2] != 1.0: + scale = [sca[0], sca[1], sca[2]] + + child_node.translation = translation + child_node.rotation = rotation + child_node.scale = scale + + parent_joint.children.append(child_node) + + +def __find_parent_joint(joints, name): + for joint in joints: + if joint.name == name: + return joint + parent_joint = __find_parent_joint(joint.children, name) + if parent_joint: + return parent_joint + return None + + +def __gather_extensions(vnode, export_settings): + blender_object = vnode.blender_object + extensions = {} + + blender_lamp = None + if vnode.blender_type == VExportNode.COLLECTION: + return None + + if export_settings["gltf_lights"] and vnode.blender_type == VExportNode.INSTANCE and vnode.data is not None: + if vnode.data.type in LIGHTS: + blender_lamp = vnode.data + elif export_settings["gltf_lights"] and blender_object is not None and (blender_object.type == "LAMP" or blender_object.type == "LIGHT"): + blender_lamp = blender_object.data + + if blender_lamp is not None: + light = gltf2_blender_gather_lights.gather_lights_punctual( + blender_lamp, + export_settings + ) + if light is not None: + light_extension = gltf2_io_extensions.ChildOfRootExtension( + name="KHR_lights_punctual", + path=["lights"], + extension=light + ) + extensions["KHR_lights_punctual"] = gltf2_io_extensions.Extension( + name="KHR_lights_punctual", + extension={ + "light": light_extension + } + ) + if len(export_settings['current_paths']) > 0: + export_settings['KHR_animation_pointer']['lights'][id(blender_lamp)] = {} + export_settings['KHR_animation_pointer']['lights'][id( + blender_lamp)]['paths'] = export_settings['current_paths'].copy() + 
export_settings['KHR_animation_pointer']['lights'][id(blender_lamp)]['glTF_light'] = light_extension + + export_settings['current_paths'] = {} + + return extensions if extensions else None + + +def __gather_extras(blender_object, export_settings): + if export_settings['gltf_extras']: + return generate_extras(blender_object) + return None + + +def __gather_matrix(blender_object, export_settings): + # return blender_object.matrix_local + return [] + + +def __gather_mesh(vnode, blender_object, export_settings): + if vnode.blender_type == VExportNode.COLLECTION: + return None + if blender_object and blender_object.type in ['CURVE', 'SURFACE', 'FONT']: + return __gather_mesh_from_nonmesh(blender_object, export_settings) + if blender_object is None and type(vnode.data).__name__ not in ["Mesh"]: + return None # TODO + if blender_object is None: + # GN instance + blender_mesh = vnode.data + # Keep materials from the tmp mesh, but if no material, keep from object + materials = tuple(mat for mat in blender_mesh.materials) + if len(materials) == 1 and materials[0] is None: + materials = tuple(ms.material for ms in vnode.original_object.material_slots) + + uuid_for_skined_data = None + modifiers = None + + if blender_mesh is None: + return None + + else: + if blender_object.type != "MESH": + return None + # For duplis instancer, when show is off -> export as empty + if vnode.force_as_empty is True: + return None + # Be sure that object is valid (no NaN for example) + res = blender_object.data.validate() + if res is True: + export_settings['log'].warning("Mesh " + blender_object.data.name + + " is not valid, and may be exported wrongly") + + modifiers = blender_object.modifiers + if len(modifiers) == 0: + modifiers = None + + if export_settings['gltf_apply']: + if modifiers is None: # If no modifier, use original mesh, it will instance all shared mesh in a single glTF mesh + blender_mesh = blender_object.data + # Keep materials from object, as no modifiers are applied, so no risk that + # modifiers changed them + materials = tuple(ms.material for ms in blender_object.material_slots) + else: + armature_modifiers = {} + if export_settings['gltf_skins']: + # temporarily disable Armature modifiers if exporting skins + for idx, modifier in enumerate(blender_object.modifiers): + if modifier.type == 'ARMATURE': + armature_modifiers[idx] = modifier.show_viewport + modifier.show_viewport = False + + depsgraph = bpy.context.evaluated_depsgraph_get() + blender_mesh_owner = blender_object.evaluated_get(depsgraph) + blender_mesh = blender_mesh_owner.to_mesh(preserve_all_data_layers=True, depsgraph=depsgraph) + for prop in blender_object.data.keys(): + blender_mesh[prop] = blender_object.data[prop] + + if export_settings['gltf_skins']: + # restore Armature modifiers + for idx, show_viewport in armature_modifiers.items(): + blender_object.modifiers[idx].show_viewport = show_viewport + + # Keep materials from the newly created tmp mesh, but if no materials, keep from object + materials = tuple(mat for mat in blender_mesh.materials) + if len(materials) == 1 and materials[0] is None: + materials = tuple(ms.material for ms in blender_object.material_slots) + + else: + blender_mesh = blender_object.data + if not export_settings['gltf_skins']: + modifiers = None + else: + # Check if there is an armature modidier + if len([mod for mod in blender_object.modifiers if mod.type == "ARMATURE"]) == 0: + modifiers = None + + # Keep materials from object, as no modifiers are applied, so no risk that + # modifiers changed them + 
materials = tuple(ms.material for ms in blender_object.material_slots) + + # retrieve armature + # Because mesh data will be transforms to skeleton space, + # we can't instantiate multiple object at different location, skined by same armature + uuid_for_skined_data = None + if export_settings['gltf_skins']: + for idx, modifier in enumerate(blender_object.modifiers): + if modifier.type == 'ARMATURE': + uuid_for_skined_data = vnode.uuid + + result = gltf2_blender_gather_mesh.gather_mesh(blender_mesh, + uuid_for_skined_data, + blender_object.vertex_groups if blender_object else None, + modifiers, + materials, + None, + export_settings) + + if export_settings['gltf_apply'] and modifiers is not None: + blender_mesh_owner.to_mesh_clear() + + return result + + +def __gather_mesh_from_nonmesh(blender_object, export_settings): + """Handles curves, surfaces, text, etc.""" + needs_to_mesh_clear = False + try: + # Convert to a mesh + try: + if export_settings['gltf_apply']: + depsgraph = bpy.context.evaluated_depsgraph_get() + blender_mesh_owner = blender_object.evaluated_get(depsgraph) + blender_mesh = blender_mesh_owner.to_mesh(preserve_all_data_layers=True, depsgraph=depsgraph) + # TODO: do we need preserve_all_data_layers? + + else: + blender_mesh_owner = blender_object + blender_mesh = blender_mesh_owner.to_mesh() + + # In some cases (for example curve with single vertice), no blender_mesh is created (without crash) + if blender_mesh is None: + return None + + except Exception: + return None + + needs_to_mesh_clear = True + + materials = tuple([ms.material for ms in blender_object.material_slots if ms.material is not None]) + modifiers = None + blender_object_for_skined_data = None + + result = gltf2_blender_gather_mesh.gather_mesh(blender_mesh, + blender_object_for_skined_data, + blender_object.vertex_groups, + modifiers, + materials, + blender_object.data, + export_settings) + + finally: + if needs_to_mesh_clear: + blender_mesh_owner.to_mesh_clear() + + return result + + +def __gather_name(blender_object, export_settings): + + new_name = blender_object.name if blender_object else "GN Instance" + + class GltfHookName: + def __init__(self, name): + self.name = name + gltf_hook_name = GltfHookName(new_name) + + export_user_extensions('gather_node_name_hook', export_settings, gltf_hook_name, blender_object) + return gltf_hook_name.name + + +def __gather_trans_rot_scale(vnode, export_settings): + if vnode.parent_uuid is None: + # No parent, so matrix is world matrix + trans, rot, sca = vnode.matrix_world.decompose() + else: + # calculate local matrix + if export_settings['vtree'].nodes[vnode.parent_uuid].skin is None: + trans, rot, sca = ( + export_settings['vtree'].nodes[vnode.parent_uuid].matrix_world.inverted_safe() @ vnode.matrix_world).decompose() + else: + # But ... 
if parent has skin, the parent TRS are not taken into account, so don't get local from parent, but from armature + # It also depens if skined mesh is parented to armature or not + if export_settings['vtree'].nodes[vnode.parent_uuid].parent_uuid is not None and export_settings['vtree'].nodes[ + export_settings['vtree'].nodes[vnode.parent_uuid].parent_uuid].blender_type == VExportNode.ARMATURE: + trans, rot, sca = (export_settings['vtree'].nodes[export_settings['vtree'].nodes[vnode.parent_uuid].armature].matrix_world.inverted_safe( + ) @ vnode.matrix_world).decompose() + else: + trans, rot, sca = vnode.matrix_world.decompose() + + # make sure the rotation is normalized + rot.normalize() + + trans = __convert_swizzle_location(trans, export_settings) + rot = __convert_swizzle_rotation(rot, export_settings) + sca = __convert_swizzle_scale(sca, export_settings) + + if vnode.blender_object and vnode.blender_type != VExportNode.COLLECTION and vnode.blender_object.instance_type == 'COLLECTION' and vnode.blender_object.instance_collection: + offset = -__convert_swizzle_location( + vnode.blender_object.instance_collection.instance_offset, export_settings) + + s = Matrix.Diagonal(sca).to_4x4() + r = rot.to_matrix().to_4x4() + t = Matrix.Translation(trans).to_4x4() + o = Matrix.Translation(offset).to_4x4() + m = t @ r @ s @ o + + trans = m.translation + + translation, rotation, scale = (None, None, None) + trans[0], trans[1], trans[2] = gltf2_blender_math.round_if_near( + trans[0], 0.0), gltf2_blender_math.round_if_near( + trans[1], 0.0), gltf2_blender_math.round_if_near( + trans[2], 0.0) + rot[0], rot[1], rot[2], rot[3] = gltf2_blender_math.round_if_near( + rot[0], 1.0), gltf2_blender_math.round_if_near( + rot[1], 0.0), gltf2_blender_math.round_if_near( + rot[2], 0.0), gltf2_blender_math.round_if_near( + rot[3], 0.0) + sca[0], sca[1], sca[2] = gltf2_blender_math.round_if_near( + sca[0], 1.0), gltf2_blender_math.round_if_near( + sca[1], 1.0), gltf2_blender_math.round_if_near( + sca[2], 1.0) + if trans[0] != 0.0 or trans[1] != 0.0 or trans[2] != 0.0: + translation = [trans[0], trans[1], trans[2]] + if rot[0] != 1.0 or rot[1] != 0.0 or rot[2] != 0.0 or rot[3] != 0.0: + rotation = [rot[1], rot[2], rot[3], rot[0]] + if sca[0] != 1.0 or sca[1] != 1.0 or sca[2] != 1.0: + scale = [sca[0], sca[1], sca[2]] + return translation, rotation, scale + + +def gather_skin(vnode, export_settings): + + if export_settings['vtree'].nodes[vnode].blender_type == VExportNode.COLLECTION: + return None + + blender_object = export_settings['vtree'].nodes[vnode].blender_object + modifiers = {m.type: m for m in blender_object.modifiers} if blender_object else {} + if "ARMATURE" not in modifiers or modifiers["ARMATURE"].object is None: + return None + + # no skin needed when the modifier is linked without having a vertex group + if len(blender_object.vertex_groups) == 0: + return None + + # check if any vertices in the mesh are part of a vertex group + depsgraph = bpy.context.evaluated_depsgraph_get() + blender_mesh_owner = blender_object.evaluated_get(depsgraph) + blender_mesh = blender_mesh_owner.to_mesh(preserve_all_data_layers=True, depsgraph=depsgraph) + if not any(vertex.groups is not None and len(vertex.groups) > 0 for vertex in blender_mesh.vertices): + return None + + # Prevent infinite recursive error. 
A mesh can't have an Armature modifier + # and be bone parented to a bone of this armature + # In that case, ignore the armature modifier, keep only the bone parenting + if blender_object.parent is not None \ + and blender_object.parent_type == 'BONE' \ + and blender_object.parent.name == modifiers["ARMATURE"].object.name: + + return None + + # Skins and meshes must be in the same glTF node, which is different from how blender handles armatures + return gltf2_blender_gather_skins.gather_skin(export_settings['vtree'].nodes[vnode].armature, export_settings) + + +def __gather_weights(blender_object, export_settings): + return None + + +def __convert_swizzle_location(loc, export_settings): + """Convert a location from Blender coordinate system to glTF coordinate system.""" + if export_settings['gltf_yup']: + return Vector((loc[0], loc[2], -loc[1])) + else: + return Vector((loc[0], loc[1], loc[2])) + + +def __convert_swizzle_rotation(rot, export_settings): + """ + Convert a quaternion rotation from Blender coordinate system to glTF coordinate system. + + 'w' is still at first position. + """ + if export_settings['gltf_yup']: + return Quaternion((rot[0], rot[1], rot[3], -rot[2])) + else: + return Quaternion((rot[0], rot[1], rot[2], rot[3])) + + +def __convert_swizzle_scale(scale, export_settings): + """Convert a scale from Blender coordinate system to glTF coordinate system.""" + if export_settings['gltf_yup']: + return Vector((scale[0], scale[2], scale[1])) + else: + return Vector((scale[0], scale[1], scale[2])) diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_primitive_attributes.py b/scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_primitive_attributes.py new file mode 100755 index 00000000000..6f302e33584 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_primitive_attributes.py @@ -0,0 +1,222 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +from math import ceil + +from ...io.com import gltf2_io, gltf2_io_constants, gltf2_io_debug +from ...io.exp import gltf2_io_binary_data +from ...io.exp.gltf2_io_user_extensions import export_user_extensions +from .gltf2_blender_gather_accessors import array_to_accessor + + +def gather_primitive_attributes(blender_primitive, export_settings): + """ + Gathers the attributes, such as POSITION, NORMAL, TANGENT, and all custom attributes from a blender primitive + + :return: a dictionary of attributes + """ + attributes = {} + + # loop on each attribute extracted + # for skinning, all linked attributes (WEIGHTS_ and JOINTS_) need to be calculated + # in one shot (because of normalization), so we need to check that it is called only once. 
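+    # For illustration only (attribute names assumed, following the glTF naming
+    # used below): a skinned primitive typically carries paired keys such as
+    # "JOINTS_0"/"WEIGHTS_0" (and "JOINTS_1"/"WEIGHTS_1" when more than 4
+    # influences per vertex are exported). The first such key reaching
+    # __gather_attribute() triggers __gather_skins(), which emits all sets at
+    # once; skin_done then makes the loop skip the remaining JOINTS_/WEIGHTS_ keys.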
+ + skin_done = False + + for attribute in blender_primitive["attributes"]: + if (attribute.startswith("JOINTS_") or attribute.startswith("WEIGHTS_")) and skin_done is True: + continue + if attribute.startswith("MORPH_"): + continue # Target for morphs will be managed later + attributes.update(__gather_attribute(blender_primitive, attribute, export_settings)) + if (attribute.startswith("JOINTS_") or attribute.startswith("WEIGHTS_")): + skin_done = True + + return attributes + + +def __gather_skins(blender_primitive, export_settings): + attributes = {} + + if not export_settings['gltf_skins']: + return attributes + + # Retrieve max set index + max_bone_set_index = 0 + while blender_primitive["attributes"].get( + 'JOINTS_' + + str(max_bone_set_index)) and blender_primitive["attributes"].get( + 'WEIGHTS_' + + str(max_bone_set_index)): + max_bone_set_index += 1 + max_bone_set_index -= 1 + + # Here, a set represents a group of 4 weights. + # So max_bone_set_index value: + # if -1 => No weights + # if 0 => Max 4 weights + # if 1 => Max 8 weights + # etc... + + # If no skinning + if max_bone_set_index < 0: + return attributes + + # Retrieve the wanted by user max set index + if export_settings['gltf_all_vertex_influences']: + wanted_max_bone_set_index = max_bone_set_index + else: + wanted_max_bone_set_index = ceil(export_settings['gltf_vertex_influences_nb'] / 4) - 1 + + # No need to create a set with only zero if user asked more than requested group set. + if wanted_max_bone_set_index > max_bone_set_index: + wanted_max_bone_set_index = max_bone_set_index + + # Set warning, for the case where there are more group of 4 weights needed + # Warning for the case where we are in the same group, will be done later + # (for example, 3 weights needed, but 2 wanted by user) + if max_bone_set_index > wanted_max_bone_set_index: + export_settings['log'].warning( + "There are more than {} joint vertex influences." + "The {} with highest weight will be used (and normalized).".format( + export_settings['gltf_vertex_influences_nb'], + export_settings['gltf_vertex_influences_nb'])) + + # Take into account only the first set of 4 weights + max_bone_set_index = wanted_max_bone_set_index + + # Convert weights to numpy arrays, and setting joints + weight_arrs = [] + for s in range(0, max_bone_set_index + 1): + + weight_id = 'WEIGHTS_' + str(s) + weight = blender_primitive["attributes"][weight_id] + weight = np.array(weight, dtype=np.float32) + weight = weight.reshape(len(weight) // 4, 4) + + # Set warning for the case where we are in the same group, will be done later (for example, 3 weights needed, but 2 wanted by user) + # And then, remove no more needed weights + if s == max_bone_set_index and not export_settings['gltf_all_vertex_influences']: + # Check how many to remove + to_remove = (wanted_max_bone_set_index + 1) * 4 - export_settings['gltf_vertex_influences_nb'] + if to_remove > 0: + warning_done = False + for i in range(0, to_remove): + idx = 4 - 1 - i + if not all(weight[:, idx]): + if warning_done is False: + export_settings['log'].warning( + "There are more than {} joint vertex influences." 
+ "The {} with highest weight will be used (and normalized).".format( + export_settings['gltf_vertex_influences_nb'], + export_settings['gltf_vertex_influences_nb'])) + warning_done = True + weight[:, idx] = 0.0 + + weight_arrs.append(weight) + + # joints + joint_id = 'JOINTS_' + str(s) + internal_joint = blender_primitive["attributes"][joint_id] + component_type = gltf2_io_constants.ComponentType.UnsignedShort + if max(internal_joint) < 256: + component_type = gltf2_io_constants.ComponentType.UnsignedByte + joints = np.array(internal_joint, dtype=gltf2_io_constants.ComponentType.to_numpy_dtype(component_type)) + joints = joints.reshape(-1, 4) + + if s == max_bone_set_index and not export_settings['gltf_all_vertex_influences']: + # Check how many to remove + to_remove = (wanted_max_bone_set_index + 1) * 4 - export_settings['gltf_vertex_influences_nb'] + if to_remove > 0: + for i in range(0, to_remove): + idx = 4 - 1 - i + joints[:, idx] = 0.0 + + joint = array_to_accessor( + joints, + export_settings, + component_type, + data_type=gltf2_io_constants.DataType.Vec4, + ) + attributes[joint_id] = joint + + # Sum weights for each vertex + for s in range(0, max_bone_set_index + 1): + sums = weight_arrs[s].sum(axis=1) + if s == 0: + weight_total = sums + else: + weight_total += sums + + # Normalize weights so they sum to 1 + weight_total = weight_total.reshape(-1, 1) + for s in range(0, max_bone_set_index + 1): + weight_id = 'WEIGHTS_' + str(s) + weight_arrs[s] /= weight_total + + weight = array_to_accessor( + weight_arrs[s], + export_settings, + component_type=gltf2_io_constants.ComponentType.Float, + data_type=gltf2_io_constants.DataType.Vec4, + ) + attributes[weight_id] = weight + + return attributes + + +def __gather_attribute(blender_primitive, attribute, export_settings): + data = blender_primitive["attributes"][attribute] + + include_max_and_mins = { + "POSITION": True + } + + if (attribute.startswith("_") or attribute.startswith("COLOR_") + ) and blender_primitive["attributes"][attribute]['component_type'] == gltf2_io_constants.ComponentType.UnsignedShort: + # Byte Color vertex color, need to normalize + + data['data'] *= 65535 + data['data'] += 0.5 # bias for rounding + data['data'] = data['data'].astype(np.uint16) + + export_user_extensions('gather_attribute_change', export_settings, attribute, data, True) + + return { + attribute: gltf2_io.Accessor( + buffer_view=gltf2_io_binary_data.BinaryData( + data['data'].tobytes(), + gltf2_io_constants.BufferViewTarget.ARRAY_BUFFER), + byte_offset=None, + component_type=data['component_type'], + count=len( + data['data']), + extensions=None, + extras=None, + max=None, + min=None, + name=None, + normalized=True, + sparse=None, + type=data['data_type'], + )} + + elif attribute.startswith("JOINTS_") or attribute.startswith("WEIGHTS_"): + return __gather_skins(blender_primitive, export_settings) + + else: + + export_user_extensions('gather_attribute_change', export_settings, attribute, data, False) + + return { + attribute: array_to_accessor( + data['data'], + export_settings, + component_type=data['component_type'], + data_type=data['data_type'], + include_max_and_min=include_max_and_mins.get(attribute, False), + normalized=data.get('normalized') + ) + } diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_primitives.py b/scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_primitives.py new file mode 100755 index 00000000000..0f75222954e --- /dev/null +++ 
b/scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_primitives.py @@ -0,0 +1,351 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy +from typing import List, Optional, Tuple +import numpy as np +from ...io.com import gltf2_io, gltf2_io_constants, gltf2_io_extensions +from ...blender.com.gltf2_blender_data_path import get_sk_exported +from ...io.exp import gltf2_io_binary_data +from .gltf2_blender_gather_cache import cached, cached_by_key +from . import gltf2_blender_gather_primitives_extract +from . import gltf2_blender_gather_primitive_attributes +from .gltf2_blender_gather_accessors import gather_accessor, array_to_accessor +from .material.gltf2_blender_gather_materials import get_final_material, gather_material, get_base_material, get_material_from_idx +from .material.extensions import gltf2_blender_gather_materials_variants + + +@cached +def gather_primitive_cache_key( + blender_mesh, + uuid_for_skined_data, + vertex_groups, + modifiers, + materials, + export_settings): + + # Use id of mesh + # Do not use bpy.types that can be unhashable + # Do not use mesh name, that can be not unique (when linked) + + # TODO check what is really needed for modifiers + + return ( + (id(blender_mesh),), + (modifiers,), + tuple(id(m) if m is not None else None for m in materials) + ) + + +@cached_by_key(key=gather_primitive_cache_key) +def gather_primitives( + blender_mesh: bpy.types.Mesh, + uuid_for_skined_data, + vertex_groups: bpy.types.VertexGroups, + modifiers: Optional[bpy.types.ObjectModifiers], + materials: Tuple[bpy.types.Material], + export_settings +) -> List[gltf2_io.MeshPrimitive]: + """ + Extract the mesh primitives from a blender object + + :return: a list of glTF2 primitives + """ + primitives = [] + + blender_primitives, addional_materials_udim = __gather_cache_primitives( + materials, blender_mesh, uuid_for_skined_data, vertex_groups, modifiers, export_settings) + + for internal_primitive, udim_material in zip(blender_primitives, addional_materials_udim): + + if udim_material is None: # classic case, not an udim material + # We already call this function, in order to retrieve uvmap info, if any + # So here, only the cache will be used + base_material, material_info = get_base_material(internal_primitive['material'], materials, export_settings) + + # Now, we can retrieve the real material, by checking attributes and active maps + blender_mat = get_material_from_idx(internal_primitive['material'], materials, export_settings) + material = get_final_material( + blender_mesh, + blender_mat, + internal_primitive['uvmap_attributes_index'], + base_material, + material_info["uv_info"], + export_settings) + else: + # UDIM case + base_material, material_info, unique_material_id = udim_material + material = get_final_material( + blender_mesh, + unique_material_id, + internal_primitive['uvmap_attributes_index'], + base_material, + material_info["uv_info"], + export_settings) + + primitive = gltf2_io.MeshPrimitive( + attributes=internal_primitive['attributes'], + extensions=__gather_extensions( + blender_mesh, + internal_primitive['material'], + internal_primitive['uvmap_attributes_index'], + export_settings), + extras=None, + indices=internal_primitive['indices'], + material=material, + mode=internal_primitive['mode'], + targets=internal_primitive['targets']) + primitives.append(primitive) + + return primitives + + +@cached +def get_primitive_cache_key( + materials, + blender_mesh, + uuid_for_skined_data, + 
vertex_groups, + modifiers, + export_settings): + + # Use id of mesh + # Do not use bpy.types that can be unhashable + # Do not use mesh name, that can be not unique (when linked) + # Do not use materials here + + # TODO check what is really needed for modifiers + + return ( + (id(blender_mesh),), + (modifiers,) + ) + + +@cached_by_key(key=get_primitive_cache_key) +def __gather_cache_primitives( + materials, + blender_mesh: bpy.types.Mesh, + uuid_for_skined_data, + vertex_groups: bpy.types.VertexGroups, + modifiers: Optional[bpy.types.ObjectModifiers], + export_settings +) -> List[dict]: + """ + Gather parts that are identical for instances, i.e. excluding materials + """ + primitives = [] + + blender_primitives, additional_materials_udim, shared_attributes = gltf2_blender_gather_primitives_extract.extract_primitives( + materials, blender_mesh, uuid_for_skined_data, vertex_groups, modifiers, export_settings) + + if shared_attributes is not None: + + if len(blender_primitives) > 0: + shared = {} + shared["attributes"] = shared_attributes + + attributes = __gather_attributes(shared, blender_mesh, modifiers, export_settings) + targets = __gather_targets(shared, blender_mesh, modifiers, export_settings) + + for internal_primitive in blender_primitives: + if internal_primitive.get('mode') is None: + + primitive = { + "attributes": attributes, + "indices": __gather_indices(internal_primitive, blender_mesh, modifiers, export_settings), + "mode": internal_primitive.get('mode'), + "material": internal_primitive.get('material'), + "targets": targets, + "uvmap_attributes_index": internal_primitive.get('uvmap_attributes_index') + } + + else: + # Edges & points, no shared attributes + primitive = { + "attributes": __gather_attributes(internal_primitive, blender_mesh, modifiers, export_settings), + "indices": __gather_indices(internal_primitive, blender_mesh, modifiers, export_settings), + "mode": internal_primitive.get('mode'), + "material": internal_primitive.get('material'), + "targets": __gather_targets(internal_primitive, blender_mesh, modifiers, export_settings), + "uvmap_attributes_index": internal_primitive.get('uvmap_attributes_index') + } + primitives.append(primitive) + + else: + + for internal_primitive in blender_primitives: + primitive = { + "attributes": __gather_attributes(internal_primitive, blender_mesh, modifiers, export_settings), + "indices": __gather_indices(internal_primitive, blender_mesh, modifiers, export_settings), + "mode": internal_primitive.get('mode'), + "material": internal_primitive.get('material'), + "targets": __gather_targets(internal_primitive, blender_mesh, modifiers, export_settings), + "uvmap_attributes_index": internal_primitive.get('uvmap_attributes_index') + } + primitives.append(primitive) + + return primitives, additional_materials_udim + + +def __gather_indices(blender_primitive, blender_mesh, modifiers, export_settings): + indices = blender_primitive.get('indices') + if indices is None: + return None + + # NOTE: Values used by some graphics APIs as "primitive restart" values are disallowed. + # Specifically, the values 65535 (in UINT16) and 4294967295 (in UINT32) cannot be used as indices. 
+ # https://github.com/KhronosGroup/glTF/issues/1142 + # https://github.com/KhronosGroup/glTF/pull/1476/files + # Also, UINT8 mode is not supported: + # https://github.com/KhronosGroup/glTF/issues/1471 + max_index = indices.max() + if max_index < 65535: + component_type = gltf2_io_constants.ComponentType.UnsignedShort + indices = indices.astype(np.uint16, copy=False) + elif max_index < 4294967295: + component_type = gltf2_io_constants.ComponentType.UnsignedInt + indices = indices.astype(np.uint32, copy=False) + else: + export_settings['log'].error( + 'A mesh contains too many vertices (' + + str(max_index) + + ') and needs to be split before export.') + return None + + element_type = gltf2_io_constants.DataType.Scalar + binary_data = gltf2_io_binary_data.BinaryData( + indices.tobytes(), bufferViewTarget=gltf2_io_constants.BufferViewTarget.ELEMENT_ARRAY_BUFFER) + return gather_accessor( + binary_data, + component_type, + len(indices), + None, + None, + element_type, + export_settings + ) + + +def __gather_attributes(blender_primitive, blender_mesh, modifiers, export_settings): + return gltf2_blender_gather_primitive_attributes.gather_primitive_attributes(blender_primitive, export_settings) + + +def __gather_targets(blender_primitive, blender_mesh, modifiers, export_settings): + if export_settings['gltf_morph']: + targets = [] + if blender_mesh.shape_keys is not None: + morph_index = 0 + for blender_shape_key in get_sk_exported(blender_mesh.shape_keys.key_blocks): + + target_position_id = 'MORPH_POSITION_' + str(morph_index) + target_normal_id = 'MORPH_NORMAL_' + str(morph_index) + target_tangent_id = 'MORPH_TANGENT_' + str(morph_index) + + if blender_primitive["attributes"].get(target_position_id) is not None: + target = {} + internal_target_position = blender_primitive["attributes"][target_position_id]["data"] + target["POSITION"] = array_to_accessor( + internal_target_position, + export_settings, + component_type=gltf2_io_constants.ComponentType.Float, + data_type=gltf2_io_constants.DataType.Vec3, + include_max_and_min=True, + sparse_type='SK' + ) + + if export_settings['gltf_normals'] \ + and export_settings['gltf_morph_normal'] \ + and blender_primitive["attributes"].get(target_normal_id) is not None: + + internal_target_normal = blender_primitive["attributes"][target_normal_id]["data"] + target['NORMAL'] = array_to_accessor( + internal_target_normal, + export_settings, + component_type=gltf2_io_constants.ComponentType.Float, + data_type=gltf2_io_constants.DataType.Vec3, + sparse_type='SK' + ) + + if export_settings['gltf_tangents'] \ + and export_settings['gltf_morph_tangent'] \ + and blender_primitive["attributes"].get(target_tangent_id) is not None: + internal_target_tangent = blender_primitive["attributes"][target_tangent_id]["data"] + target['TANGENT'] = array_to_accessor( + internal_target_tangent, + export_settings, + component_type=gltf2_io_constants.ComponentType.Float, + data_type=gltf2_io_constants.DataType.Vec3, + sparse_type='SK' + ) + targets.append(target) + morph_index += 1 + return targets + return None + + +def __gather_extensions(blender_mesh, + material_idx: int, + attr_indices: dict, + export_settings): + extensions = {} + + if bpy.context.preferences.addons['io_scene_gltf2'].preferences.KHR_materials_variants_ui is False: + return None + + if bpy.data.scenes[0].get('gltf2_KHR_materials_variants_variants') is None: + return None + if len(bpy.data.scenes[0]['gltf2_KHR_materials_variants_variants']) == 0: + return None + + # Material idx is the slot idx. 
Retrieve associated variant, if any + mapping = [] + for i in [v for v in blender_mesh.gltf2_variant_mesh_data if v.material_slot_index == material_idx]: + variants = [] + for idx, v in enumerate(i.variants): + if v.variant.variant_idx in [o.variant.variant_idx for o in i.variants[:idx]]: + # Avoid duplicates + continue + vari = gltf2_blender_gather_materials_variants.gather_variant(v.variant.variant_idx, export_settings) + if vari is not None: + variant_extension = gltf2_io_extensions.ChildOfRootExtension( + name="KHR_materials_variants", + path=["variants"], + extension=vari + ) + variants.append(variant_extension) + if len(variants) > 0: + if i.material: + export_settings['current_paths'] = {} # Used for KHR_animation_pointer. + base_material, material_info = gather_material( + i.material, + export_settings + ) + else: + # empty slot + base_material = None + + if base_material is not None: + # Now, we can retrieve the real material, by checking attributes and active maps + mat = get_final_material( + blender_mesh, + i.material, + attr_indices, + base_material, + material_info["uv_info"], + export_settings) + else: + mat = None + + mapping.append({'material': mat, 'variants': variants}) + + if len(mapping) > 0: + extensions["KHR_materials_variants"] = gltf2_io_extensions.Extension( + name="KHR_materials_variants", + extension={ + "mappings": mapping + } + ) + + return extensions if extensions else None diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_primitives_extract.py b/scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_primitives_extract.py new file mode 100644 index 00000000000..fbe7da4d78a --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_primitives_extract.py @@ -0,0 +1,1435 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +from copy import deepcopy +from mathutils import Vector +from ...blender.com.gltf2_blender_data_path import get_sk_exported +from ...io.com.gltf2_io_constants import ROUNDING_DIGIT +from ...io.exp.gltf2_io_user_extensions import export_user_extensions +from ...io.com import gltf2_io_constants +from ..com import gltf2_blender_conversion +from ..com.gltf2_blender_utils import fast_structured_np_unique +from .material.gltf2_blender_gather_materials import get_base_material, get_material_from_idx, get_active_uvmap_index, get_new_material_texture_shared +from .material.gltf2_blender_gather_texture_info import gather_udim_texture_info +from . 
import gltf2_blender_gather_skins + + +def extract_primitives( + materials, + blender_mesh, + uuid_for_skined_data, + blender_vertex_groups, + modifiers, + export_settings): + """Extract primitives from a mesh.""" + export_settings['log'].info("Extracting primitive: " + blender_mesh.name) + + primitive_creator = PrimitiveCreator( + materials, + blender_mesh, + uuid_for_skined_data, + blender_vertex_groups, + modifiers, + export_settings) + primitive_creator.prepare_data() + primitive_creator.define_attributes() + primitive_creator.create_dots_data_structure() + primitive_creator.populate_dots_data() + primitive_creator.primitive_split() + primitive_creator.manage_material_info() # UVMap & Vertex Color + if export_settings['gltf_shared_accessors'] is False: + return primitive_creator.primitive_creation_not_shared(), primitive_creator.additional_materials, None + else: + return primitive_creator.primitive_creation_shared() + + +class PrimitiveCreator: + def __init__( + self, + materials, + blender_mesh, + uuid_for_skined_data, + blender_vertex_groups, + modifiers, + export_settings): + self.blender_mesh = blender_mesh + self.uuid_for_skined_data = uuid_for_skined_data + self.blender_vertex_groups = blender_vertex_groups + self.modifiers = modifiers + self.materials = materials + self.export_settings = export_settings + + @classmethod + def apply_mat_to_all(cls, matrix, vectors): + """Given matrix m and vectors [v1,v2,...], computes [m@v1,m@v2,...]""" + # Linear part + m = matrix.to_3x3() if len(matrix) == 4 else matrix + res = np.matmul(vectors, np.array(m.transposed())) + # Translation part + if len(matrix) == 4: + res += np.array(matrix.translation) + return res + + @classmethod + def normalize_vecs(cls, vectors): + norms = np.linalg.norm(vectors, axis=1, keepdims=True) + np.divide(vectors, norms, out=vectors, where=norms != 0) + + @classmethod + def zup2yup(cls, array): + # x,y,z -> x,z,-y + array[:, [1, 2]] = array[:, [2, 1]] # x,z,y + array[:, 2] *= -1 # x,z,-y + + def prepare_data(self): + self.blender_object = None + if self.uuid_for_skined_data: + self.blender_object = self.export_settings['vtree'].nodes[self.uuid_for_skined_data].blender_object + + self.use_normals = self.export_settings['gltf_normals'] + + self.use_tangents = False + if self.use_normals and self.export_settings['gltf_tangents']: + if self.blender_mesh.uv_layers.active and len(self.blender_mesh.uv_layers) > 0: + try: + self.blender_mesh.calc_tangents() + self.use_tangents = True + except Exception: + self.export_settings['log'].warning( + "{}: Could not calculate tangents. 
Please try to triangulate the mesh first.".format( + self.blender_mesh.name), popup=True) + + self.tex_coord_max = 0 + if self.export_settings['gltf_texcoords']: + if self.blender_mesh.uv_layers.active: + self.tex_coord_max = len(self.blender_mesh.uv_layers) + + self.use_morph_normals = self.use_normals and self.export_settings['gltf_morph_normal'] + self.use_morph_tangents = self.use_morph_normals and self.use_tangents and self.export_settings[ + 'gltf_morph_tangent'] + + self.use_materials = self.export_settings['gltf_materials'] + + self.blender_attributes = [] + + # Check if we have to export skin + self.armature = None + self.skin = None + if self.export_settings['gltf_skins']: + if self.modifiers is not None: + modifiers_dict = {m.type: m for m in self.modifiers} + if "ARMATURE" in modifiers_dict: + modifier = modifiers_dict["ARMATURE"] + self.armature = modifier.object + + # Skin must be ignored if the object is parented to a bone of the armature + # (This creates an infinite recursive error) + # So ignoring skin in that case + is_child_of_arma = ( + self.armature and + self.blender_object and + self.blender_object.parent_type == "BONE" and + self.blender_object.parent.name == self.armature.name + ) + if is_child_of_arma: + self.armature = None + + if self.armature: + self.skin = gltf2_blender_gather_skins.gather_skin( + self.export_settings['vtree'].nodes[self.uuid_for_skined_data].armature, self.export_settings) + if not self.skin: + self.armature = None + + self.key_blocks = [] + # List of SK that are going to be exported, actually + # We need to check if we are in a GN Instance, because for GN instances, it seems that shape keys are preserved, + # even if we apply modifiers + # (For classic objects, shape keys are not preserved if we apply modifiers) + # We can check it by checking if the mesh is used by a user + if self.blender_mesh.shape_keys and self.export_settings['gltf_morph'] and self.blender_mesh.users != 0: + self.key_blocks = get_sk_exported(self.blender_mesh.shape_keys.key_blocks) + + # Fetch vert positions and bone data (joint,weights) + + self.locs = None + self.morph_locs = None + self.__get_positions() + + if self.skin: + self.__get_bone_data() + if self.need_neutral_bone is True: + # Need to create a fake joint at root of armature + # In order to assign not assigned vertices to it + # But for now, this is not yet possible, we need to wait the armature node is created + # Just store this, to be used later + armature_uuid = self.export_settings['vtree'].nodes[self.uuid_for_skined_data].armature + self.export_settings['vtree'].nodes[armature_uuid].need_neutral_bone = True + + def define_attributes(self): + + class KeepAttribute: + def __init__(self, attr_name): + self.attr_name = attr_name + self.keep = attr_name.startswith("_") + + # Manage attributes + for blender_attribute_index, blender_attribute in enumerate(self.blender_mesh.attributes): + + attr = {} + attr['blender_attribute_index'] = blender_attribute_index + attr['blender_name'] = blender_attribute.name + attr['blender_domain'] = blender_attribute.domain + attr['blender_data_type'] = blender_attribute.data_type + + # For now, we don't export edge data, because I need to find how to + # get from edge data to dots data + if attr['blender_domain'] == "EDGE": + continue + + # Some type are not exportable (example : String) + if gltf2_blender_conversion.get_component_type(blender_attribute.data_type) is None or \ + gltf2_blender_conversion.get_data_type(blender_attribute.data_type) is None: + + continue + + # 
Custom attributes + # Keep only attributes that starts with _ + # As Blender create lots of attributes that are internal / not needed are + # as duplicated of standard glTF accessors (position, uv, + # material_index...) + if self.export_settings['gltf_attributes'] is False: + continue + # Check if there is an extension that want to keep this attribute, or change the exported name + keep_attribute = KeepAttribute(blender_attribute.name) + + export_user_extensions('gather_attribute_keep', self.export_settings, keep_attribute) + + if keep_attribute.keep is False: + continue + + attr['gltf_attribute_name'] = keep_attribute.attr_name.upper() + attr['get'] = self.get_function() + + # Seems we sometime can have name collision about attributes + # Avoid crash and ignoring one of duplicated attribute name + if attr['gltf_attribute_name'] in [a['gltf_attribute_name'] for a in self.blender_attributes]: + self.export_settings['log'].warning( + 'Attribute collision name: ' + + blender_attribute.name + + ", ignoring one of them") + continue + + self.blender_attributes.append(attr) + + # Manage POSITION + attr = {} + attr['blender_data_type'] = 'FLOAT_VECTOR' + attr['blender_domain'] = 'POINT' + attr['gltf_attribute_name'] = 'POSITION' + attr['set'] = self.set_function() + attr['skip_getting_to_dots'] = True + self.blender_attributes.append(attr) + + # Manage NORMALS + if self.use_normals: + attr = {} + attr['blender_data_type'] = 'FLOAT_VECTOR' + attr['blender_domain'] = 'CORNER' + attr['gltf_attribute_name'] = 'NORMAL' + attr['gltf_attribute_name_morph'] = 'MORPH_NORMAL_' + attr['get'] = self.get_function() + self.blender_attributes.append(attr) + + # Manage uvs TEX_COORD_x + for tex_coord_i in range(self.tex_coord_max): + attr = {} + attr['blender_data_type'] = 'FLOAT2' + attr['blender_domain'] = 'CORNER' + attr['gltf_attribute_name'] = 'TEXCOORD_' + str(tex_coord_i) + attr['get'] = self.get_function() + self.blender_attributes.append(attr) + + # Manage TANGENT + if self.use_tangents: + attr = {} + attr['blender_data_type'] = 'FLOAT_VECTOR_4' + attr['blender_domain'] = 'CORNER' + attr['gltf_attribute_name'] = 'TANGENT' + attr['get'] = self.get_function() + self.blender_attributes.append(attr) + + # Manage MORPH_POSITION_x + for morph_i, vs in enumerate(self.morph_locs): + attr = {} + attr['blender_attribute_index'] = morph_i + attr['blender_data_type'] = 'FLOAT_VECTOR' + attr['blender_domain'] = 'POINT' + attr['gltf_attribute_name'] = 'MORPH_POSITION_' + str(morph_i) + attr['skip_getting_to_dots'] = True + attr['set'] = self.set_function() + self.blender_attributes.append(attr) + + # Manage MORPH_NORMAL_x + if self.use_morph_normals: + attr = {} + attr['blender_attribute_index'] = morph_i + attr['blender_data_type'] = 'FLOAT_VECTOR' + attr['blender_domain'] = 'CORNER' + attr['gltf_attribute_name'] = 'MORPH_NORMAL_' + str(morph_i) + # No get function is set here, because data are set from NORMALS + self.blender_attributes.append(attr) + + # Manage MORPH_TANGENT_x + # This is a particular case, where we need to have the following data already calculated + # - NORMAL + # - MORPH_NORMAL + # - TANGENT + # So, the following needs to be AFTER the 3 others. 
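+ # Sketch of a finished descriptor for this case (field values are only
+ # illustrative): the MORPH_TANGENT_0 entry carries
+ #   {'gltf_attribute_name': 'MORPH_TANGENT_0',
+ #    'gltf_attribute_name_normal': 'NORMAL',
+ #    'gltf_attribute_name_morph_normal': 'MORPH_NORMAL_0',
+ #    'gltf_attribute_name_tangent': 'TANGENT',
+ #    'skip_getting_to_dots': True, 'set': <setting_function>}
+ # so that __set_morph_tangent_attribute can look up the three arrays computed
+ # by the entries above.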
+ if self.use_morph_tangents: + attr = {} + attr['blender_attribute_index'] = morph_i + attr['blender_data_type'] = 'FLOAT_VECTOR' + attr['blender_domain'] = 'CORNER' + attr['gltf_attribute_name'] = 'MORPH_TANGENT_' + str(morph_i) + attr['gltf_attribute_name_normal'] = "NORMAL" + attr['gltf_attribute_name_morph_normal'] = "MORPH_NORMAL_" + str(morph_i) + attr['gltf_attribute_name_tangent'] = "TANGENT" + attr['skip_getting_to_dots'] = True + attr['set'] = self.set_function() + self.blender_attributes.append(attr) + + for attr in self.blender_attributes: + attr['len'] = gltf2_blender_conversion.get_data_length(attr['blender_data_type']) + attr['type'] = gltf2_blender_conversion.get_numpy_type(attr['blender_data_type']) + + # Now we have all attribtues, we can change order if we want + # Note that the glTF specification doesn't say anything about order + # Attributes are defined only by name + # But if user want it in a particular order, he can use this hook to perform it + export_user_extensions('gather_attributes_change', self.export_settings, self.blender_attributes) + + def create_dots_data_structure(self): + # Now that we get all attributes that are going to be exported, create numpy array that will store them + dot_fields = [('vertex_index', np.uint32)] + if self.export_settings['gltf_loose_edges']: + dot_fields_edges = [('vertex_index', np.uint32)] + if self.export_settings['gltf_loose_points']: + dot_fields_points = [('vertex_index', np.uint32)] + for attr in self.blender_attributes: + if 'skip_getting_to_dots' in attr: + continue + for i in range(attr['len']): + dot_fields.append((attr['gltf_attribute_name'] + str(i), attr['type'])) + if attr['blender_domain'] != 'POINT': + continue + if self.export_settings['gltf_loose_edges']: + dot_fields_edges.append((attr['gltf_attribute_name'] + str(i), attr['type'])) + if self.export_settings['gltf_loose_points']: + dot_fields_points.append((attr['gltf_attribute_name'] + str(i), attr['type'])) + + # In Blender there is both per-vert data, like position, and also per-loop + # (loop=corner-of-poly) data, like normals or UVs. glTF only has per-vert + # data, so we need to split Blender verts up into potentially-multiple glTF + # verts. + # + # First, we'll collect a "dot" for every loop: a struct that stores all the + # attributes at that loop, namely the vertex index (which determines all + # per-vert data), and all the per-loop data like UVs, etc. + # + # Each unique dot will become one unique glTF vert. 
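+ # Worked micro-example of the idea above (hypothetical mesh): a cube corner
+ # vertex shared by three faces with three different UVs yields three loops
+ # whose dots differ only in their TEXCOORD_0 fields, so it exports as three
+ # glTF vertices; if those loops carried identical UVs and normals, the later
+ # deduplication would collapse them back into a single glTF vertex.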
+ + self.dots = np.empty(len(self.blender_mesh.loops), dtype=np.dtype(dot_fields)) + + # Find loose edges + if self.export_settings['gltf_loose_edges']: + loose_edges = [e for e in self.blender_mesh.edges if e.is_loose] + self.blender_idxs_edges = [vi for e in loose_edges for vi in e.vertices] + self.blender_idxs_edges = np.array(self.blender_idxs_edges, dtype=np.uint32) + + self.dots_edges = np.empty(len(self.blender_idxs_edges), dtype=np.dtype(dot_fields_edges)) + self.dots_edges['vertex_index'] = self.blender_idxs_edges + + # Find loose points + if self.export_settings['gltf_loose_points']: + verts_in_edge = set(vi for e in self.blender_mesh.edges for vi in e.vertices) + self.blender_idxs_points = [ + vi for vi, _ in enumerate(self.blender_mesh.vertices) + if vi not in verts_in_edge + ] + self.blender_idxs_points = np.array(self.blender_idxs_points, dtype=np.uint32) + + self.dots_points = np.empty(len(self.blender_idxs_points), dtype=np.dtype(dot_fields_points)) + self.dots_points['vertex_index'] = self.blender_idxs_points + + def populate_dots_data(self): + corner_vertex_indices = gltf2_blender_conversion.get_attribute( + self.blender_mesh.attributes, '.corner_vert', 'INT', 'CORNER') + if corner_vertex_indices: + vidxs = np.empty(len(self.blender_mesh.loops), dtype=np.intc) + corner_vertex_indices.data.foreach_get('value', vidxs) + self.dots['vertex_index'] = vidxs + del vidxs + + for attr in self.blender_attributes: + if 'skip_getting_to_dots' in attr: + continue + if 'get' not in attr: + continue + attr['get'](attr) + + def primitive_split(self): + # Calculate triangles and sort them into primitives. + + try: + self.blender_mesh.calc_loop_triangles() + loop_indices = np.empty(len(self.blender_mesh.loop_triangles) * 3, dtype=np.uint32) + self.blender_mesh.loop_triangles.foreach_get('loops', loop_indices) + except: + # For some not valid meshes, we can't get loops without errors + # We already displayed a Warning message after validate() check, so here + # we can return without a new one + self.prim_indices = {} + return + + self.prim_indices = {} # maps material index to TRIANGLES-style indices into dots + + if self.use_materials == "NONE": # Only for None. For placeholder and export, keep primitives + # Put all vertices into one primitive + self.prim_indices[-1] = loop_indices + + else: + # Bucket by material index. + + tri_material_idxs = np.empty(len(self.blender_mesh.loop_triangles), dtype=np.uint32) + self.blender_mesh.loop_triangles.foreach_get('material_index', tri_material_idxs) + loop_material_idxs = np.repeat(tri_material_idxs, 3) # material index for every loop + unique_material_idxs = np.unique(tri_material_idxs) + del tri_material_idxs + + for material_idx in unique_material_idxs: + self.prim_indices[material_idx] = loop_indices[loop_material_idxs == material_idx] + + def manage_material_info(self): + # If user defined UVMap as a custom attribute, we need to add it/them in the dots structure and populate data + # So we need to get, for each material, what are these custom attribute + # No choice : We need to retrieve materials here. 
Anyway, this will be baked, and next call will be quick + # We also need to shuffle Vertex Color data if needed + + new_prim_indices = {} + self.additional_materials = [] # In case of UDIM + + self.uvmap_attribute_list = [] # Initialize here, in case we don't have any triangle primitive + + materials_use_vc = None + warning_already_displayed = False + for material_idx in self.prim_indices.keys(): + base_material, material_info = get_base_material(material_idx, self.materials, self.export_settings) + + # UVMaps + self.uvmap_attribute_list = list( + set([i['value'] for i in material_info["uv_info"].values() if 'type' in i.keys() and i['type'] == "Attribute"])) + + additional_fields = [] + for attr in self.uvmap_attribute_list: + if attr + str(0) not in self.dots.dtype.names: # In case user exports custom attributes, we may have it already + additional_fields.append((attr + str(0), gltf2_blender_conversion.get_numpy_type('FLOAT2'))) + additional_fields.append((attr + str(1), gltf2_blender_conversion.get_numpy_type('FLOAT2'))) + + if len(additional_fields) > 0: + new_dt = np.dtype(self.dots.dtype.descr + additional_fields) + dots = np.zeros(self.dots.shape, dtype=new_dt) + for f in self.dots.dtype.names: + dots[f] = self.dots[f] + + # Now we need to get data and populate + for attr in self.uvmap_attribute_list: + if attr + str(0) not in self.dots.dtype.names: # In case user exports custom attributes, we may have it already + # Vector in custom Attributes are Vector2 or Vector3 (but keeping only the first two data) + if self.blender_mesh.attributes[attr].data_type == "FLOAT_VECTOR": + data = np.empty(len(self.blender_mesh.loops) * + 3, gltf2_blender_conversion.get_numpy_type('FLOAT2')) + self.blender_mesh.attributes[attr].data.foreach_get('vector', data) + data = data.reshape(-1, 3) + data = data[:, :2] + elif self.blender_mesh.attributes[attr].data_type == "FLOAT2": + data = np.empty(len(self.blender_mesh.loops) * + 2, gltf2_blender_conversion.get_numpy_type('FLOAT2')) + self.blender_mesh.attributes[attr].data.foreach_get('vector', data) + data = data.reshape(-1, 2) + else: + self.export_settings['log'].warning( + 'We are not managing this case (UVMap as custom attribute for unknown type)') + continue + # Blender UV space -> glTF UV space + # u,v -> u,1-v + data[:, 1] *= -1 + data[:, 1] += 1 + + dots[attr + '0'] = data[:, 0] + dots[attr + '1'] = data[:, 1] + del data + + if len(additional_fields) > 0: + self.dots = dots + + # There are multiple case to take into account for VC + + # The simplier test is when no vertex color are used + if material_info['vc_info']['color_type'] is None and material_info['vc_info']['alpha_type'] is None: + # Nothing to do + pass + + elif material_info['vc_info']['color_type'] is None and material_info['vc_info']['alpha_type'] is not None: + self.export_settings['log'].warning('We are not managing this case (Vertex Color alpha without color)') + + else: + vc_color_name = None + vc_alpha_name = None + if material_info['vc_info']['color_type'] == "name": + vc_color_name = material_info['vc_info']['color'] + elif material_info['vc_info']['color_type'] == "active": + # Get active (render) Vertex Color + if self.blender_mesh.color_attributes.render_color_index != -1: + vc_color_name = self.blender_mesh.color_attributes[self.blender_mesh.color_attributes.render_color_index].name + + if material_info['vc_info']['alpha_type'] == "name": + vc_alpha_name = material_info['vc_info']['alpha'] + elif material_info['vc_info']['alpha_type'] == "active": + # Get active (render) 
Vertex Color + if self.blender_mesh.color_attributes.render_color_index != -1: + vc_alpha_name = self.blender_mesh.color_attributes[self.blender_mesh.color_attributes.render_color_index].name + + if vc_color_name is not None: + + vc_key = "" + vc_key += vc_color_name if vc_color_name is not None else "" + vc_key += vc_alpha_name if vc_alpha_name is not None else "" + + if materials_use_vc is not None and materials_use_vc != vc_key: + if warning_already_displayed is False: + self.export_settings['log'].warning( + 'glTF specification does not allow this case (multiple materials with different Vertex Color)') + warning_already_displayed = True + materials_use_vc = vc_key + + elif materials_use_vc is None: + materials_use_vc = vc_key + + # We need to check if we need to add alpha + add_alpha = vc_alpha_name is not None + mat = get_material_from_idx(material_idx, self.materials, self.export_settings) + add_alpha = mat is not None and add_alpha and not ( + mat.blend_method is None or mat.blend_method == 'OPAQUE') + # Manage Vertex Color (RGB and Alpha if needed) + self.__manage_color_attribute(vc_color_name, vc_alpha_name if add_alpha else None) + else: + pass # Using the same Vertex Color + + ##### UDIM ##### + + if len(material_info['udim_info'].keys()) == 0: + new_prim_indices[material_idx] = self.prim_indices[material_idx] + self.additional_materials.append(None) + continue + + # We have some UDIM for some texture of this material + # We need to split the mesh into multiple primitives + # We manage only case where all texture are using the same UVMap + # And where UDIM have exactly the same number of tiles (TODO to check?) + + # So, retrieve all uvmaps used by this material + all_uvmaps = {} + for tex in material_info['udim_info'].keys(): + if material_info['uv_info'][tex]['type'] == "Active": + index_uvmap = get_active_uvmap_index(self.blender_mesh) + uvmap_name = "TEXCOORD_" + str(index_uvmap) + elif material_info['uv_info'][tex]['type'] == "Fixed": + index_uvmap = self.blender_mesh.uv_layers.find(material_info['uv_info'][tex]['value']) + if index_uvmap < 0: + # Using active index + index_uvmap = get_active_uvmap_index(self.blender_mesh) + uvmap_name = "TEXCOORD_" + str(index_uvmap) + else: # Attribute + uvmap_name = material_info['uv_info'][tex]['value'] + all_uvmaps[tex] = uvmap_name + + if len(set(all_uvmaps.values())) > 1: + self.export_settings['log'].warning('We are not managing this case (multiple UVMap for UDIM)') + new_prim_indices[material_idx] = self.prim_indices[material_idx] + self.additional_materials.append(None) + continue + + self.export_settings['log'].info('Splitting UDIM tiles into different primitives/materials') + # Retrieve UDIM images + tex = list(material_info['udim_info'].keys())[0] + image = material_info['udim_info'][tex]['image'] + + new_material_index = len(self.prim_indices.keys()) + + # Get UVMap used for UDIM + uvmap_name = all_uvmaps[list(all_uvmaps.keys())[0]] + + # Retrieve tiles number + tiles = [t.number for t in image.tiles] + u_tiles = max([int(str(t)[3:]) for t in tiles]) + v_tiles = max([int(str(t)[2:3]) for t in tiles]) + 1 + + # We are now going to split the mesh into multiple primitives, based on tiles + # We need to create a new primitive for each tile + + for u in range(u_tiles): + for v in range(v_tiles): + + if u != u_tiles - 1 and v != v_tiles - 1: + indices = np.where((self.dots[uvmap_name + '0'] >= u) & (self.dots[uvmap_name + '0'] < (u + 1)) & ( + self.dots[uvmap_name + '1'] <= (1 - v)) & (self.dots[uvmap_name + '1'] > 1 - (v + 1)))[0] + 
elif u == u_tiles - 1 and v != v_tiles - 1: + indices = np.where((self.dots[uvmap_name + '0'] >= u) & (self.dots[uvmap_name + '0'] <= (u + 1)) & ( + self.dots[uvmap_name + '1'] <= (1 - v)) & (self.dots[uvmap_name + '1'] > 1 - (v + 1)))[0] + elif u != u_tiles - 1 and v == v_tiles - 1: + indices = np.where((self.dots[uvmap_name + '0'] >= u) & (self.dots[uvmap_name + '0'] < (u + 1)) & ( + self.dots[uvmap_name + '1'] <= (1 - v)) & (self.dots[uvmap_name + '1'] >= 1 - (v + 1)))[0] + else: + indices = np.where((self.dots[uvmap_name + '0'] >= u) & (self.dots[uvmap_name + '0'] <= (u + 1)) & ( + self.dots[uvmap_name + '1'] <= (1 - v)) & (self.dots[uvmap_name + '1'] >= 1 - (v + 1)))[0] + + # Reset UVMap to 0-1 : reset to Blener UVMAP => slide to 0-1 => go to glTF UVMap + self.dots[uvmap_name + '1'][indices] -= 1 + self.dots[uvmap_name + '1'][indices] *= -1 + self.dots[uvmap_name + '0'][indices] -= u + self.dots[uvmap_name + '1'][indices] -= v + self.dots[uvmap_name + '1'][indices] *= -1 + self.dots[uvmap_name + '1'][indices] += 1 + + # Now, get every triangle, and check that it belongs to this tile + # Assume that we can check only the first vertex of each triangle (=> No + # management of triangle on multiple tiles) + new_triangle_indices = [] + for idx, i in enumerate(self.prim_indices[material_idx]): + if idx % 3 == 0 and i in indices: + new_triangle_indices.append(self.prim_indices[material_idx][idx]) + new_triangle_indices.append(self.prim_indices[material_idx][idx + 1]) + new_triangle_indices.append(self.prim_indices[material_idx][idx + 2]) + new_prim_indices[new_material_index] = np.array(new_triangle_indices, dtype=np.uint32) + new_material_index += 1 + + # Now we have to create a new material for this tile + # This will be the existing material, but with new textures + # We need to duplicate the material, and add these new textures + new_material = deepcopy(base_material) + get_new_material_texture_shared(base_material, new_material) + + for tex in material_info['udim_info'].keys(): + new_tex = gather_udim_texture_info( + material_info['udim_info'][tex]['sockets'][0], + material_info['udim_info'][tex]['sockets'], + { + 'tile': "10" + str(v) + str(u + 1), + 'image': material_info['udim_info'][tex]['image'] + }, + tex, + self.export_settings) + + if tex == "baseColorTexture": + new_material.pbr_metallic_roughness.base_color_texture = new_tex + elif tex == "normalTexture": + new_material.normal_texture = new_tex + elif tex == "emissiveTexture": + new_material.emissive_texture = new_tex + elif tex == "metallicRoughnessTexture": + new_material.pbr_metallic_roughness.metallic_roughness_texture = new_tex + elif tex == "occlusionTexture": + new_material.occlusion_texture = new_tex + elif tex == "clearcoatTexture": + new_material.extensions["KHR_materials_clearcoat"].extension['clearcoatTexture'] = new_tex + elif tex == "clearcoatRoughnessTexture": + new_material.extensions["KHR_materials_clearcoat"].extension['clearcoatRoughnessTexture'] = new_tex + elif tex == "clearcoatNormalTexture": + new_material.extensions["KHR_materials_clearcoat"].extension['clearcoatNormalTexture'] = new_tex + elif tex == "sheenColorTexture": + new_material.extensions["KHR_materials_sheen"].extension['sheenColorTexture'] = new_tex + elif tex == "sheenRoughnessTexture": + new_material.extensions["KHR_materials_sheen"].extension['sheenRoughnessTexture'] = new_tex + elif tex == "transmissionTexture": + new_material.extensions["KHR_materials_transmission"].extension['transmissionTexture'] = new_tex + elif tex == 
"thicknessTexture": + new_material.extensions["KHR_materials_volume"].extension['thicknessTexture'] = new_tex + elif tex == "specularTexture": + new_material.extensions["KHR_materials_specular"].extension['specularTexture'] = new_tex + elif tex == "specularColorTexture": + new_material.extensions["KHR_materials_specular"].extension['specularColorTexture'] = new_tex + elif tex == "anisotropyTexture": + new_material.extensions["KHR_materials_anisotropy"].extension['anisotropyTexture'] = new_tex + else: + self.export_settings['log'].warning( + 'We are not managing this case (UDIM for {})'.format(tex)) + + self.additional_materials.append( + (new_material, material_info, int(str(id(base_material)) + str(u) + str(v)))) + + self.prim_indices = new_prim_indices + + def primitive_creation_shared(self): + primitives = [] + self.dots, shared_dot_indices = fast_structured_np_unique(self.dots, return_inverse=True) + + self.blender_idxs = self.dots['vertex_index'] + + self.attributes = {} + + next_texcoor_idx = self.tex_coord_max + uvmap_attributes_index = {} + for attr in self.uvmap_attribute_list: + res = np.empty((len(self.dots), 2), dtype=gltf2_blender_conversion.get_numpy_type('FLOAT2')) + for i in range(2): + res[:, i] = self.dots[attr + str(i)] + + self.attributes["TEXCOORD_" + str(next_texcoor_idx)] = {} + self.attributes["TEXCOORD_" + str(next_texcoor_idx)]["data"] = res + self.attributes["TEXCOORD_" + str(next_texcoor_idx) + ]["component_type"] = gltf2_io_constants.ComponentType.Float + self.attributes["TEXCOORD_" + str(next_texcoor_idx)]["data_type"] = gltf2_io_constants.DataType.Vec2 + uvmap_attributes_index[attr] = next_texcoor_idx + next_texcoor_idx += 1 + + for attr in self.blender_attributes: + if 'set' in attr: + attr['set'](attr) + else: + self.__set_regular_attribute(self.dots, attr) + + if self.skin: + joints = [[] for _ in range(self.num_joint_sets)] + weights = [[] for _ in range(self.num_joint_sets)] + + for vi in self.blender_idxs: + bones = self.vert_bones[vi] + for j in range(0, 4 * self.num_joint_sets): + if j < len(bones): + joint, weight = bones[j] + else: + joint, weight = 0, 0.0 + joints[j // 4].append(joint) + weights[j // 4].append(weight) + + for i, (js, ws) in enumerate(zip(joints, weights)): + self.attributes['JOINTS_%d' % i] = js + self.attributes['WEIGHTS_%d' % i] = ws + + for material_idx, dot_indices in self.prim_indices.items(): + indices = shared_dot_indices[dot_indices] + + if len(indices) == 0: + continue + + primitives.append({ + # No attribute here, as they are shared accross all primitives + 'indices': indices, + 'material': material_idx, + 'uvmap_attributes_index': uvmap_attributes_index + }) + + # Manage edges & points primitives. + # One for edges, one for points + # No material for them, so only one primitive for each + has_triangle_primitive = len(primitives) != 0 + primitives.extend(self.primitive_creation_edges_and_points()) + + self.export_settings['log'].info('Primitives created: %d' % len(primitives)) + + return primitives, [None] * len(primitives), self.attributes if has_triangle_primitive else None + + def primitive_creation_not_shared(self): + primitives = [] + + for material_idx, dot_indices in self.prim_indices.items(): + # Extract just dots used by this primitive, deduplicate them, and + # calculate indices into this deduplicated list. 
+ self.prim_dots = self.dots[dot_indices] + self.prim_dots, indices = fast_structured_np_unique(self.prim_dots, return_inverse=True) + + if len(self.prim_dots) == 0: + continue + + # Now just move all the data for prim_dots into attribute arrays + + self.attributes = {} + + self.blender_idxs = self.prim_dots['vertex_index'] + + for attr in self.blender_attributes: + if 'set' in attr: + attr['set'](attr) + else: # Regular case + self.__set_regular_attribute(self.prim_dots, attr) + + next_texcoor_idx = self.tex_coord_max + uvmap_attributes_index = {} + for attr in self.uvmap_attribute_list: + res = np.empty((len(self.prim_dots), 2), dtype=gltf2_blender_conversion.get_numpy_type('FLOAT2')) + for i in range(2): + res[:, i] = self.prim_dots[attr + str(i)] + + self.attributes["TEXCOORD_" + str(next_texcoor_idx)] = {} + self.attributes["TEXCOORD_" + str(next_texcoor_idx)]["data"] = res + self.attributes["TEXCOORD_" + + str(next_texcoor_idx)]["component_type"] = gltf2_io_constants.ComponentType.Float + self.attributes["TEXCOORD_" + str(next_texcoor_idx)]["data_type"] = gltf2_io_constants.DataType.Vec2 + uvmap_attributes_index[attr] = next_texcoor_idx + next_texcoor_idx += 1 + + if self.skin: + joints = [[] for _ in range(self.num_joint_sets)] + weights = [[] for _ in range(self.num_joint_sets)] + + for vi in self.blender_idxs: + bones = self.vert_bones[vi] + for j in range(0, 4 * self.num_joint_sets): + if j < len(bones): + joint, weight = bones[j] + else: + joint, weight = 0, 0.0 + joints[j // 4].append(joint) + weights[j // 4].append(weight) + + for i, (js, ws) in enumerate(zip(joints, weights)): + self.attributes['JOINTS_%d' % i] = js + self.attributes['WEIGHTS_%d' % i] = ws + + primitives.append({ + 'attributes': self.attributes, + 'indices': indices, + 'material': material_idx, + 'uvmap_attributes_index': uvmap_attributes_index + }) + + # Manage edges & points primitives. 
+ # One for edges, one for points + # No material for them, so only one primitive for each + primitives.extend(self.primitive_creation_edges_and_points()) + + self.export_settings['log'].info('Primitives created: %d' % len(primitives)) + + return primitives + + def primitive_creation_edges_and_points(self): + primitives_edges_points = [] + + if self.export_settings['gltf_loose_edges']: + + if self.blender_idxs_edges.shape[0] > 0: + # Export one glTF vert per unique Blender vert in a loose edge + self.blender_idxs = self.blender_idxs_edges + dots_edges, indices = fast_structured_np_unique(self.dots_edges, return_inverse=True) + self.blender_idxs = np.unique(self.blender_idxs_edges) + + self.attributes_edges_points = {} + + for attr in self.blender_attributes: + if attr['blender_domain'] != 'POINT': + continue + if 'set' in attr: + attr['set'](attr, edges_points=True) + else: + res = np.empty((len(dots_edges), attr['len']), dtype=attr['type']) + for i in range(attr['len']): + res[:, i] = dots_edges[attr['gltf_attribute_name'] + str(i)] + self.attributes_edges_points[attr['gltf_attribute_name']] = {} + self.attributes_edges_points[attr['gltf_attribute_name']]["data"] = res + self.attributes_edges_points[attr['gltf_attribute_name']]["component_type"] = gltf2_blender_conversion.get_component_type( + attr['blender_data_type']) + self.attributes_edges_points[attr['gltf_attribute_name']]["data_type"] = gltf2_blender_conversion.get_data_type( + attr['blender_data_type']) + + if self.skin: + joints = [[] for _ in range(self.num_joint_sets)] + weights = [[] for _ in range(self.num_joint_sets)] + + for vi in self.blender_idxs: + bones = self.vert_bones[vi] + for j in range(0, 4 * self.num_joint_sets): + if j < len(bones): + joint, weight = bones[j] + else: + joint, weight = 0, 0.0 + joints[j // 4].append(joint) + weights[j // 4].append(weight) + + for i, (js, ws) in enumerate(zip(joints, weights)): + self.attributes_edges_points['JOINTS_%d' % i] = js + self.attributes_edges_points['WEIGHTS_%d' % i] = ws + + primitives_edges_points.append({ + 'attributes': self.attributes_edges_points, + 'indices': indices, + 'mode': 1, # LINES + 'material': 0, + 'uvmap_attributes_index': {} + }) + self.additional_materials.append(None) + + if self.export_settings['gltf_loose_points']: + + if self.blender_idxs_points.shape[0] > 0: + self.blender_idxs = self.blender_idxs_points + + self.attributes_edges_points = {} + + for attr in self.blender_attributes: + if attr['blender_domain'] != 'POINT': + continue + if 'set' in attr: + attr['set'](attr, edges_points=True) + else: + res = np.empty((len(self.blender_idxs), attr['len']), dtype=attr['type']) + for i in range(attr['len']): + res[:, i] = self.dots_points[attr['gltf_attribute_name'] + str(i)] + self.attributes_edges_points[attr['gltf_attribute_name']] = {} + self.attributes_edges_points[attr['gltf_attribute_name']]["data"] = res + self.attributes_edges_points[attr['gltf_attribute_name']]["component_type"] = gltf2_blender_conversion.get_component_type( + attr['blender_data_type']) + self.attributes_edges_points[attr['gltf_attribute_name']]["data_type"] = gltf2_blender_conversion.get_data_type( + attr['blender_data_type']) + + if self.skin: + joints = [[] for _ in range(self.num_joint_sets)] + weights = [[] for _ in range(self.num_joint_sets)] + + for vi in self.blender_idxs: + bones = self.vert_bones[vi] + for j in range(0, 4 * self.num_joint_sets): + if j < len(bones): + joint, weight = bones[j] + else: + joint, weight = 0, 0.0 + joints[j // 4].append(joint) + 
weights[j // 4].append(weight) + + for i, (js, ws) in enumerate(zip(joints, weights)): + self.attributes_edges_points['JOINTS_%d' % i] = js + self.attributes_edges_points['WEIGHTS_%d' % i] = ws + + primitives_edges_points.append({ + 'attributes': self.attributes_edges_points, + 'mode': 0, # POINTS + 'material': 0, + 'uvmap_attributes_index': {} + }) + self.additional_materials.append(None) + + return primitives_edges_points + +################################## Get ################################################## + + def __get_positions(self): + self.locs = np.empty(len(self.blender_mesh.vertices) * 3, dtype=np.float32) + if self.key_blocks: + source = self.key_blocks[0].relative_key.points + foreach_attribute = 'co' + else: + position_attribute = gltf2_blender_conversion.get_attribute( + self.blender_mesh.attributes, 'position', 'FLOAT_VECTOR', 'POINT') + source = position_attribute.data if position_attribute else None + foreach_attribute = 'vector' + if source: + source.foreach_get(foreach_attribute, self.locs) + self.locs = self.locs.reshape(len(self.blender_mesh.vertices), 3) + + self.morph_locs = [] + for key_block in self.key_blocks: + vs = np.empty(len(self.blender_mesh.vertices) * 3, dtype=np.float32) + key_block.points.foreach_get('co', vs) + vs = vs.reshape(len(self.blender_mesh.vertices), 3) + self.morph_locs.append(vs) + + # Transform for skinning + if self.armature and self.blender_object: + # apply_matrix = armature.matrix_world.inverted_safe() @ blender_object.matrix_world + # loc_transform = armature.matrix_world @ apply_matrix + + loc_transform = self.blender_object.matrix_world + self.locs[:] = PrimitiveCreator.apply_mat_to_all(loc_transform, self.locs) + for vs in self.morph_locs: + vs[:] = PrimitiveCreator.apply_mat_to_all(loc_transform, vs) + + # glTF stores deltas in morph targets + for vs in self.morph_locs: + vs -= self.locs + # Some invalid mesh can have NaN value in SK, so replace them by 0, avoid crash + np.nan_to_num(vs, copy=False) + + if self.export_settings['gltf_yup']: + PrimitiveCreator.zup2yup(self.locs) + for vs in self.morph_locs: + PrimitiveCreator.zup2yup(vs) + + def get_function(self): + + def getting_function(attr): + if attr['gltf_attribute_name'].startswith("_"): + self.__get_layer_attribute(attr) + elif attr['gltf_attribute_name'].startswith("TEXCOORD_"): + self.__get_uvs_attribute(int(attr['gltf_attribute_name'].split("_")[-1]), attr) + elif attr['gltf_attribute_name'] == "NORMAL": + self.__get_normal_attribute(attr) + elif attr['gltf_attribute_name'] == "TANGENT": + self.__get_tangent_attribute(attr) + + return getting_function + + def __manage_color_attribute(self, attr_name, attr_name_alpha): + blender_color_idx = self.blender_mesh.color_attributes.find(attr_name) + if blender_color_idx < 0: + return None + + # Add COLOR_0 in dots data + + attr = self.blender_mesh.color_attributes[blender_color_idx] + + # Get data + data_dots, data_dots_edges, data_dots_points = self.__get_color_attribute_data(attr) + + # Get data for alpha if needed + if attr_name_alpha is not None and attr_name_alpha != attr_name: + blender_alpha_idx = self.blender_mesh.color_attributes.find(attr_name_alpha) + if blender_alpha_idx >= 0: + attr_alpha = self.blender_mesh.color_attributes[blender_alpha_idx] + data_dots_alpha, data_dots_edges_alpha, data_dots_points_alpha = self.__get_color_attribute_data( + attr_alpha) + # Merging data + data_dots[:, 3] = data_dots_alpha[:, 3] + if data_dots_edges is not None: + data_dots_edges[:, 3] = data_dots_edges_alpha[:, 3] + if 
data_dots_points is not None: + data_dots_points[:, 3] = data_dots_points_alpha[:, 3] + + # Check if we need to get alpha (the 4th channel) here + max_index = 4 if attr_name_alpha is not None else 3 + + # Add this data to dots structure + additional_fields = [] + for i in range(max_index): + # Must calculate the type of the field : FLOAT_COLOR or BYTE_COLOR + additional_fields.append( + ('COLOR_0' + str(i), + gltf2_blender_conversion.get_numpy_type( + 'FLOAT_COLOR' if max_index == 3 else 'BYTE_COLOR'))) + + if self.export_settings['gltf_loose_edges']: + additional_fields_edges = [] + for i in range(max_index): + # Must calculate the type of the field : FLOAT_COLOR or BYTE_COLOR + additional_fields_edges.append( + ('COLOR_0' + str(i), + gltf2_blender_conversion.get_numpy_type( + 'FLOAT_COLOR' if max_index == 3 else 'BYTE_COLOR'))) + + new_dt = np.dtype(self.dots_edges.dtype.descr + additional_fields_edges) + dots_edges = np.zeros(self.dots_edges.shape, dtype=new_dt) + for f in self.dots_edges.dtype.names: + dots_edges[f] = self.dots_edges[f] + + self.dots_edges = dots_edges + + if self.export_settings['gltf_loose_points']: + additional_fields_points = [] + for i in range(max_index): + # Must calculate the type of the field : FLOAT_COLOR or BYTE_COLOR + additional_fields_points.append( + ('COLOR_0' + str(i), + gltf2_blender_conversion.get_numpy_type( + 'FLOAT_COLOR' if max_index == 3 else 'BYTE_COLOR'))) + + new_dt = np.dtype(self.dots_points.dtype.descr + additional_fields_points) + dots_points = np.zeros(self.dots_points.shape, dtype=new_dt) + for f in self.dots_points.dtype.names: + dots_points[f] = self.dots_points[f] + + self.dots_points = dots_points + + # Keep the existing custom attribute + # Data will be exported twice, one for COLOR_O, one for the custom attribute + new_dt = np.dtype(self.dots.dtype.descr + additional_fields) + dots = np.zeros(self.dots.shape, dtype=new_dt) + for f in self.dots.dtype.names: + dots[f] = self.dots[f] + + self.dots = dots + + # colors are already linear, no need to switch color space + for i in range(max_index): + self.dots['COLOR_0' + str(i)] = data_dots[:, i] + if self.export_settings['gltf_loose_edges'] and attr.domain == "POINT": + self.dots_edges['COLOR_0' + str(i)] = data_dots_edges[:, i] + if self.export_settings['gltf_loose_points'] and attr.domain == "POINT": + self.dots_points['COLOR_0' + str(i)] = data_dots_points[:, i] + + # Add COLOR_0 in attribute list + attr_color_0 = {} + attr_color_0['blender_data_type'] = 'FLOAT_COLOR' if max_index == 3 else 'BYTE_COLOR' + attr_color_0['blender_domain'] = attr.domain + attr_color_0['gltf_attribute_name'] = 'COLOR_0' + attr_color_0['len'] = max_index # 3 or 4, depending if we have alpha + attr_color_0['type'] = gltf2_blender_conversion.get_numpy_type(attr_color_0['blender_data_type']) + attr_color_0['component_type'] = gltf2_blender_conversion.get_component_type(attr_color_0['blender_data_type']) + attr_color_0['data_type'] = gltf2_io_constants.DataType.Vec3 if max_index == 3 else gltf2_io_constants.DataType.Vec4 + + self.blender_attributes.append(attr_color_0) + + def __get_color_attribute_data(self, attr): + data_dots_edges = None + data_dots_points = None + + if attr.domain == "POINT": + colors = np.empty(len(self.blender_mesh.vertices) * 4, dtype=np.float32) + elif attr.domain == "CORNER": + colors = np.empty(len(self.blender_mesh.loops) * 4, dtype=np.float32) + attr.data.foreach_get('color', colors) + if attr.domain == "POINT": + colors = colors.reshape(-1, 4) + data_dots = 
colors[self.dots['vertex_index']] + if self.export_settings['gltf_loose_edges']: + data_dots_edges = colors[self.dots_edges['vertex_index']] + if self.export_settings['gltf_loose_points']: + data_dots_points = colors[self.dots_points['vertex_index']] + + elif attr.domain == "CORNER": + colors = colors.reshape(-1, 4) + data_dots = colors + + del colors + + return data_dots, data_dots_edges, data_dots_points + + def __get_layer_attribute(self, attr): + if attr['blender_domain'] in ['CORNER']: + data = np.empty(len(self.blender_mesh.loops) * attr['len'], dtype=attr['type']) + elif attr['blender_domain'] in ['POINT']: + data = np.empty(len(self.blender_mesh.vertices) * attr['len'], dtype=attr['type']) + elif attr['blender_domain'] in ['EDGE']: + data = np.empty(len(self.blender_mesh.edges) * attr['len'], dtype=attr['type']) + elif attr['blender_domain'] in ['FACE']: + data = np.empty(len(self.blender_mesh.polygons) * attr['len'], dtype=attr['type']) + else: + self.export_settings['log'].error("domain not known") + + if attr['blender_data_type'] == "BYTE_COLOR": + self.blender_mesh.attributes[attr['blender_attribute_index']].data.foreach_get('color', data) + data = data.reshape(-1, attr['len']) + elif attr['blender_data_type'] == "INT8": + self.blender_mesh.attributes[attr['blender_attribute_index']].data.foreach_get('value', data) + data = data.reshape(-1, attr['len']) + elif attr['blender_data_type'] == "FLOAT2": + self.blender_mesh.attributes[attr['blender_attribute_index']].data.foreach_get('vector', data) + data = data.reshape(-1, attr['len']) + elif attr['blender_data_type'] == "BOOLEAN": + self.blender_mesh.attributes[attr['blender_attribute_index']].data.foreach_get('value', data) + data = data.reshape(-1, attr['len']) + elif attr['blender_data_type'] == "STRING": + self.blender_mesh.attributes[attr['blender_attribute_index']].data.foreach_get('value', data) + data = data.reshape(-1, attr['len']) + elif attr['blender_data_type'] == "FLOAT_COLOR": + self.blender_mesh.attributes[attr['blender_attribute_index']].data.foreach_get('color', data) + data = data.reshape(-1, attr['len']) + elif attr['blender_data_type'] == "FLOAT_VECTOR": + self.blender_mesh.attributes[attr['blender_attribute_index']].data.foreach_get('vector', data) + data = data.reshape(-1, attr['len']) + elif attr['blender_data_type'] == "FLOAT_VECTOR_4": # Specific case for tangent + pass + elif attr['blender_data_type'] == "INT": + self.blender_mesh.attributes[attr['blender_attribute_index']].data.foreach_get('value', data) + data = data.reshape(-1, attr['len']) + elif attr['blender_data_type'] == "FLOAT": + self.blender_mesh.attributes[attr['blender_attribute_index']].data.foreach_get('value', data) + data = data.reshape(-1, attr['len']) + else: + self.export_settings['log'].error("blender type not found " + attr['blender_data_type']) + + if attr['blender_domain'] in ['CORNER']: + for i in range(attr['len']): + self.dots[attr['gltf_attribute_name'] + str(i)] = data[:, i] + elif attr['blender_domain'] in ['POINT']: + if attr['len'] > 1: + data = data.reshape(-1, attr['len']) + data_dots = data[self.dots['vertex_index']] + if self.export_settings['gltf_loose_edges']: + data_dots_edges = data[self.dots_edges['vertex_index']] + if self.export_settings['gltf_loose_points']: + data_dots_points = data[self.dots_points['vertex_index']] + for i in range(attr['len']): + self.dots[attr['gltf_attribute_name'] + str(i)] = data_dots[:, i] + if self.export_settings['gltf_loose_edges']: + self.dots_edges[attr['gltf_attribute_name'] + 
str(i)] = data_dots_edges[:, i] + if self.export_settings['gltf_loose_points']: + self.dots_points[attr['gltf_attribute_name'] + str(i)] = data_dots_points[:, i] + elif attr['blender_domain'] in ['EDGE']: + # No edge attribute exports + pass + elif attr['blender_domain'] in ['FACE']: + if attr['len'] > 1: + data = data.reshape(-1, attr['len']) + # data contains face attribute, and is len(faces) long + # We need to dispatch these len(faces) attribute in each dots lines + data_attr = np.empty(self.dots.shape[0] * attr['len'], dtype=attr['type']) + data_attr = data_attr.reshape(-1, attr['len']) + for idx, poly in enumerate(self.blender_mesh.polygons): + data_attr[list(poly.loop_indices)] = data[idx] + data_attr = data_attr.reshape(-1, attr['len']) + for i in range(attr['len']): + self.dots[attr['gltf_attribute_name'] + str(i)] = data_attr[:, i] + + else: + self.export_settings['log'].error("domain not known") + + def __get_uvs_attribute(self, blender_uv_idx, attr): + layer = self.blender_mesh.uv_layers[blender_uv_idx] + uvs = np.empty(len(self.blender_mesh.loops) * 2, dtype=np.float32) + layer.uv.foreach_get('vector', uvs) + uvs = uvs.reshape(len(self.blender_mesh.loops), 2) + + # Blender UV space -> glTF UV space + # u,v -> u,1-v + uvs[:, 1] *= -1 + uvs[:, 1] += 1 + + self.dots[attr['gltf_attribute_name'] + '0'] = uvs[:, 0] + self.dots[attr['gltf_attribute_name'] + '1'] = uvs[:, 1] + del uvs + + def __get_normals(self): + """Get normal for each loop.""" + key_blocks = self.key_blocks if self.use_morph_normals else [] + if key_blocks: + self.normals = key_blocks[0].relative_key.normals_split_get() + self.normals = np.array(self.normals, dtype=np.float32) + else: + self.normals = np.empty(len(self.blender_mesh.loops) * 3, dtype=np.float32) + self.blender_mesh.corner_normals.foreach_get('vector', self.normals) + + self.normals = self.normals.reshape(len(self.blender_mesh.loops), 3) + + self.normals = np.round(self.normals, ROUNDING_DIGIT) + # Force normalization of normals in case some normals are not (why ?) + PrimitiveCreator.normalize_vecs(self.normals) + + self.morph_normals = [] + for key_block in key_blocks: + ns = np.array(key_block.normals_split_get(), dtype=np.float32) + ns = ns.reshape(len(self.blender_mesh.loops), 3) + ns = np.round(ns, ROUNDING_DIGIT) + self.morph_normals.append(ns) + + # Transform for skinning + if self.armature and self.blender_object: + apply_matrix = (self.armature.matrix_world.inverted_safe() @ self.blender_object.matrix_world) + apply_matrix = apply_matrix.to_3x3().inverted_safe().transposed() + normal_transform = self.armature.matrix_world.to_3x3() @ apply_matrix + + self.normals[:] = PrimitiveCreator.apply_mat_to_all(normal_transform, self.normals) + PrimitiveCreator.normalize_vecs(self.normals) + for ns in self.morph_normals: + ns[:] = PrimitiveCreator.apply_mat_to_all(normal_transform, ns) + PrimitiveCreator.normalize_vecs(ns) + + for ns in [self.normals, *self.morph_normals]: + # Replace zero normals with the unit UP vector. + # Seems to happen sometimes with degenerate tris? 
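+ # Example of the mask below (hypothetical values): for
+ # ns = [[0.0, 0.0, 0.0], [0.3, 0.4, 0.5]], ~ns.any(axis=1) is [True, False],
+ # so only the degenerate all-zero row has its third component set to 1 and is
+ # exported as the +Z (Blender up) unit vector before the Y-up conversion.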
+ is_zero = ~ns.any(axis=1) + ns[is_zero, 2] = 1 + + # glTF stores deltas in morph targets + for ns in self.morph_normals: + ns -= self.normals + + if self.export_settings['gltf_yup']: + PrimitiveCreator.zup2yup(self.normals) + for ns in self.morph_normals: + PrimitiveCreator.zup2yup(ns) + + def __get_normal_attribute(self, attr): + self.__get_normals() + self.dots[attr['gltf_attribute_name'] + "0"] = self.normals[:, 0] + self.dots[attr['gltf_attribute_name'] + "1"] = self.normals[:, 1] + self.dots[attr['gltf_attribute_name'] + "2"] = self.normals[:, 2] + + if self.use_morph_normals: + for morph_i, ns in enumerate(self.morph_normals): + self.dots[attr['gltf_attribute_name_morph'] + str(morph_i) + "0"] = ns[:, 0] + self.dots[attr['gltf_attribute_name_morph'] + str(morph_i) + "1"] = ns[:, 1] + self.dots[attr['gltf_attribute_name_morph'] + str(morph_i) + "2"] = ns[:, 2] + del self.normals + del self.morph_normals + + def __get_tangent_attribute(self, attr): + self.__get_tangents() + self.dots[attr['gltf_attribute_name'] + "0"] = self.tangents[:, 0] + self.dots[attr['gltf_attribute_name'] + "1"] = self.tangents[:, 1] + self.dots[attr['gltf_attribute_name'] + "2"] = self.tangents[:, 2] + del self.tangents + self.__get_bitangent_signs() + self.dots[attr['gltf_attribute_name'] + "3"] = self.signs + del self.signs + + def __get_tangents(self): + """Get an array of the tangent for each loop.""" + self.tangents = np.empty(len(self.blender_mesh.loops) * 3, dtype=np.float32) + self.blender_mesh.loops.foreach_get('tangent', self.tangents) + self.tangents = self.tangents.reshape(len(self.blender_mesh.loops), 3) + self.tangents = np.round(self.tangents, ROUNDING_DIGIT) + + # Transform for skinning + if self.armature and self.blender_object: + apply_matrix = self.armature.matrix_world.inverted_safe() @ self.blender_object.matrix_world + tangent_transform = apply_matrix.to_quaternion().to_matrix() + self.tangents = PrimitiveCreator.apply_mat_to_all(tangent_transform, self.tangents) + PrimitiveCreator.normalize_vecs(self.tangents) + self.tangents = np.round(self.tangents, ROUNDING_DIGIT) + + if self.export_settings['gltf_yup']: + PrimitiveCreator.zup2yup(self.tangents) + + def __get_bitangent_signs(self): + self.signs = np.empty(len(self.blender_mesh.loops), dtype=np.float32) + self.blender_mesh.loops.foreach_get('bitangent_sign', self.signs) + + # Transform for skinning + if self.armature and self.blender_object: + # Bitangent signs should flip when handedness changes + # TODO: confirm + apply_matrix = self.armature.matrix_world.inverted_safe() @ self.blender_object.matrix_world + tangent_transform = apply_matrix.to_quaternion().to_matrix() + flipped = tangent_transform.determinant() < 0 + if flipped: + self.signs *= -1 + + # No change for Zup -> Yup + + def __get_bone_data(self): + + self.need_neutral_bone = False + min_influence = 0.0001 + + joint_name_to_index = {joint.name: index for index, joint in enumerate(self.skin.joints)} + group_to_joint = [joint_name_to_index.get(g.name) for g in self.blender_vertex_groups] + + # List of (joint, weight) pairs for each vert + self.vert_bones = [] + max_num_influences = 0 + + for vertex in self.blender_mesh.vertices: + bones = [] + if vertex.groups: + for group_element in vertex.groups: + weight = group_element.weight + if weight <= min_influence: + continue + try: + joint = group_to_joint[group_element.group] + except Exception: + continue + if joint is None: + continue + bones.append((joint, weight)) + bones.sort(key=lambda x: x[1], reverse=True) + if not 
bones: + # Is not assign to any bone + bones = ((len(self.skin.joints), 1.0),) # Assign to a joint that will be created later + self.need_neutral_bone = True + self.vert_bones.append(bones) + if len(bones) > max_num_influences: + max_num_influences = len(bones) + + # How many joint sets do we need? 1 set = 4 influences + self.num_joint_sets = (max_num_influences + 3) // 4 + +##################################### Set ################################### + def set_function(self): + + def setting_function(attr, edges_points=False): + if attr['gltf_attribute_name'] == "POSITION": + self.__set_positions_attribute(attr, edges_points=edges_points) + elif attr['gltf_attribute_name'].startswith("MORPH_POSITION_"): + self.__set_morph_locs_attribute(attr, edges_points=edges_points) + elif attr['gltf_attribute_name'].startswith("MORPH_TANGENT_"): + self.__set_morph_tangent_attribute(attr, edges_points=edges_points) + + return setting_function + + def __set_positions_attribute(self, attr, edges_points=False): + if edges_points is False: + self.attributes[attr['gltf_attribute_name']] = {} + self.attributes[attr['gltf_attribute_name']]["data"] = self.locs[self.blender_idxs] + self.attributes[attr['gltf_attribute_name']]["data_type"] = gltf2_io_constants.DataType.Vec3 + self.attributes[attr['gltf_attribute_name']]["component_type"] = gltf2_io_constants.ComponentType.Float + else: + self.attributes_edges_points[attr['gltf_attribute_name']] = {} + self.attributes_edges_points[attr['gltf_attribute_name']]["data"] = self.locs[self.blender_idxs] + self.attributes_edges_points[attr['gltf_attribute_name']]["data_type"] = gltf2_io_constants.DataType.Vec3 + self.attributes_edges_points[attr['gltf_attribute_name'] + ]["component_type"] = gltf2_io_constants.ComponentType.Float + + def __set_morph_locs_attribute(self, attr, edges_points=False): + if edges_points is False: + self.attributes[attr['gltf_attribute_name']] = {} + self.attributes[attr['gltf_attribute_name'] + ]["data"] = self.morph_locs[attr['blender_attribute_index']][self.blender_idxs] + else: + self.attributes_edges_points[attr['gltf_attribute_name']] = {} + self.attributes_edges_points[attr['gltf_attribute_name'] + ]["data"] = self.morph_locs[attr['blender_attribute_index']][self.blender_idxs] + + def __set_morph_tangent_attribute(self, attr, edges_points=False): + # Morph tangent are after these 3 others, so, they are already calculated + self.normals = self.attributes[attr['gltf_attribute_name_normal']]["data"] + self.morph_normals = self.attributes[attr['gltf_attribute_name_morph_normal']]["data"] + self.tangents = self.attributes[attr['gltf_attribute_name_tangent']]["data"] + + self.__calc_morph_tangents() + if edges_points is False: + self.attributes[attr['gltf_attribute_name']] = {} + self.attributes[attr['gltf_attribute_name']]["data"] = self.morph_tangents + else: + self.attributes_edges_points[attr['gltf_attribute_name']] = {} + self.attributes_edges_points[attr['gltf_attribute_name']]["data"] = self.morph_tangents + + def __calc_morph_tangents(self): + # TODO: check if this works + self.morph_tangents = np.empty((len(self.normals), 3), dtype=np.float32) + + for i in range(len(self.normals)): + n = Vector(self.normals[i]) + morph_n = n + Vector(self.morph_normals[i]) # convert back to non-delta + t = Vector(self.tangents[i, :3]) + + rotation = morph_n.rotation_difference(n) + + t_morph = Vector(t) + t_morph.rotate(rotation) + self.morph_tangents[i] = t_morph - t # back to delta + + def __set_regular_attribute(self, dots, attr): + res = 
np.empty((len(dots), attr['len']), dtype=attr['type']) + for i in range(attr['len']): + res[:, i] = dots[attr['gltf_attribute_name'] + str(i)] + self.attributes[attr['gltf_attribute_name']] = {} + self.attributes[attr['gltf_attribute_name']]["data"] = res + if attr['gltf_attribute_name'] == "NORMAL": + self.attributes[attr['gltf_attribute_name']]["component_type"] = gltf2_io_constants.ComponentType.Float + self.attributes[attr['gltf_attribute_name']]["data_type"] = gltf2_io_constants.DataType.Vec3 + elif attr['gltf_attribute_name'] == "TANGENT": + self.attributes[attr['gltf_attribute_name']]["component_type"] = gltf2_io_constants.ComponentType.Float + self.attributes[attr['gltf_attribute_name']]["data_type"] = gltf2_io_constants.DataType.Vec4 + elif attr['gltf_attribute_name'].startswith('TEXCOORD_'): + self.attributes[attr['gltf_attribute_name']]["component_type"] = gltf2_io_constants.ComponentType.Float + self.attributes[attr['gltf_attribute_name']]["data_type"] = gltf2_io_constants.DataType.Vec2 + elif attr['gltf_attribute_name'].startswith('COLOR_'): + # This is already managed, we only have to copy + self.attributes[attr['gltf_attribute_name']]["component_type"] = attr['component_type'] + self.attributes[attr['gltf_attribute_name']]["data_type"] = attr['data_type'] + else: + self.attributes[attr['gltf_attribute_name'] + ]["component_type"] = gltf2_blender_conversion.get_component_type(attr['blender_data_type']) + self.attributes[attr['gltf_attribute_name'] + ]["data_type"] = gltf2_blender_conversion.get_data_type(attr['blender_data_type']) diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_sampler.py b/scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_sampler.py new file mode 100755 index 00000000000..5e454184f4b --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_sampler.py @@ -0,0 +1,187 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy +from ...io.com import gltf2_io +from ...io.exp.gltf2_io_user_extensions import export_user_extensions +from ...io.com.gltf2_io_constants import TextureFilter, TextureWrap +from .gltf2_blender_gather_cache import cached +from .material.gltf2_blender_search_node_tree import previous_node, previous_socket, get_const_from_socket, NodeSocket + + +@cached +def gather_sampler(blender_shader_node: bpy.types.Node, group_path_str, export_settings): + # reconstruct group_path from group_path_str + sep_item = "##~~gltf-sep~~##" + sep_inside_item = "##~~gltf-inside-sep~~##" + group_path = [] + tab = group_path_str.split(sep_item) + if len(tab) > 0: + group_path.append(bpy.data.materials[tab[0]]) + for idx, i in enumerate(tab[1:]): + subtab = i.split(sep_inside_item) + if idx == 0: + group_path.append(bpy.data.materials[tab[0]].node_tree.nodes[subtab[1]]) + else: + group_path.append(bpy.data.node_groups[subtab[0]].nodes[subtab[1]]) + + wrap_s, wrap_t = __gather_wrap(blender_shader_node, group_path, export_settings) + + sampler = gltf2_io.Sampler( + extensions=__gather_extensions(blender_shader_node, export_settings), + extras=__gather_extras(blender_shader_node, export_settings), + mag_filter=__gather_mag_filter(blender_shader_node, export_settings), + min_filter=__gather_min_filter(blender_shader_node, export_settings), + name=__gather_name(blender_shader_node, export_settings), + wrap_s=wrap_s, + wrap_t=wrap_t, + ) + + export_user_extensions('gather_sampler_hook', export_settings, sampler, 
blender_shader_node) + + if not sampler.extensions and not sampler.extras and not sampler.name: + return __sampler_by_value( + sampler.mag_filter, + sampler.min_filter, + sampler.wrap_s, + sampler.wrap_t, + export_settings, + ) + + return sampler + + +@cached +def __sampler_by_value(mag_filter, min_filter, wrap_s, wrap_t, export_settings): + # @cached function to dedupe samplers with the same settings. + return gltf2_io.Sampler( + extensions=None, + extras=None, + mag_filter=mag_filter, + min_filter=min_filter, + name=None, + wrap_s=wrap_s, + wrap_t=wrap_t, + ) + + +def __gather_extensions(blender_shader_node, export_settings): + return None + + +def __gather_extras(blender_shader_node, export_settings): + return None + + +def __gather_mag_filter(blender_shader_node, export_settings): + if blender_shader_node.interpolation == 'Closest': + return TextureFilter.Nearest + return TextureFilter.Linear + + +def __gather_min_filter(blender_shader_node, export_settings): + if blender_shader_node.interpolation == 'Closest': + return TextureFilter.NearestMipmapNearest + return TextureFilter.LinearMipmapLinear + + +def __gather_name(blender_shader_node, export_settings): + return None + + +def __gather_wrap(blender_shader_node, group_path, export_settings): + # First gather from the Texture node + if blender_shader_node.extension == 'EXTEND': + wrap_s = TextureWrap.ClampToEdge + elif blender_shader_node.extension == 'CLIP': + # Not possible in glTF, but ClampToEdge is closest + wrap_s = TextureWrap.ClampToEdge + elif blender_shader_node.extension == 'MIRROR': + wrap_s = TextureWrap.MirroredRepeat + else: + wrap_s = TextureWrap.Repeat + wrap_t = wrap_s + + # Starting with Blender 3.5, MIRROR is an extension option of the Image Texture node, + # so this manual UV wrapping trick is no longer useful for MIRROR x MIRROR. + # It still works for old files, and is still needed for heterogeneous samplers, + # like MIRROR x REPEAT, for example. + # Take manual wrapping into account + result = detect_manual_uv_wrapping(blender_shader_node, group_path) + if result: + if result['wrap_s'] is not None: + wrap_s = result['wrap_s'] + if result['wrap_t'] is not None: + wrap_t = result['wrap_t'] + + # Omit if both are repeat + if (wrap_s, wrap_t) == (TextureWrap.Repeat, TextureWrap.Repeat): + wrap_s, wrap_t = None, None + + return wrap_s, wrap_t + + +def detect_manual_uv_wrapping(blender_shader_node, group_path): + # Detects UV wrapping done using math nodes. This is for emulating wrap + # modes Blender doesn't support. It looks like + # + # next_socket => [Sep XYZ] => [Wrap S] => [Comb XYZ] => blender_shader_node + # => [Wrap T] => + # + # The [Wrap _] blocks are either math nodes (e.g. PINGPONG for mirrored + # repeat), or can be omitted. + # + # Returns None if not detected. Otherwise a dict containing the wrap + # mode in each direction (or None), and next_socket.
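+ # A MATH node set to PINGPONG with scale 1 folds the coordinate back and forth over [0, 1], + # which matches MIRRORED_REPEAT sampling; a MATH node set to WRAP with min 0 and max 1 takes + # the coordinate modulo 1, which matches plain REPEAT. Any other setup is not recognized.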
+ result = {} + + comb = previous_node(NodeSocket(blender_shader_node.inputs['Vector'], group_path)) + if comb.node is None or comb.node.type != 'COMBXYZ': + return None + + for soc in ['X', 'Y']: + node = previous_node(NodeSocket(comb.node.inputs[soc], comb.group_path)) + if node.node is None: + return None + + if node.node.type == 'SEPXYZ': + # Passed through without change + wrap = None + prev_socket = previous_socket(NodeSocket(comb.node.inputs[soc], comb.group_path)) + elif node.node.type == 'MATH': + # Math node applies a manual wrap + if (node.node.operation == 'PINGPONG' and get_const_from_socket(NodeSocket( + node.node.inputs[1], node.group_path), kind='VALUE')[0] == 1.0): # scale = 1 + wrap = TextureWrap.MirroredRepeat + elif (node.node.operation == 'WRAP' and + # min = 0 + get_const_from_socket(NodeSocket(node.node.inputs[1], node.group_path), kind='VALUE')[0] == 0.0 and + get_const_from_socket(NodeSocket(node.node.inputs[2], node.group_path), kind='VALUE')[0] == 1.0): # max = 1 + wrap = TextureWrap.Repeat + else: + return None + + prev_socket = previous_socket(NodeSocket(node.node.inputs[0], node.group_path)) + else: + return None + + if prev_socket.socket is None: + return None + prev_node = prev_socket.socket.node + if prev_node.type != 'SEPXYZ': + return None + # Make sure X goes to X, etc. + if prev_socket.socket.name != soc: + return None + # Make sure both attach to the same SeparateXYZ node + if soc == 'X': + sep = prev_node + else: + if sep != prev_node: + return None + + result['wrap_s' if soc == 'X' else 'wrap_t'] = wrap + + result['next_socket'] = NodeSocket(sep.inputs[0], prev_socket.group_path) + return result diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_skins.py b/scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_skins.py new file mode 100755 index 00000000000..ecf8cc0c76d --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_skins.py @@ -0,0 +1,148 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import mathutils +from ...io.com import gltf2_io, gltf2_io_constants +from ...io.exp import gltf2_io_binary_data +from ...io.exp.gltf2_io_user_extensions import export_user_extensions +from . import gltf2_blender_gather_accessors +from . import gltf2_blender_gather_joints +from .gltf2_blender_gather_tree import VExportNode +from .gltf2_blender_gather_cache import cached + + +@cached +def gather_skin(armature_uuid, export_settings): + """ + Gather armatures, bones etc into a glTF2 skin object. 
+ + :param armature_uuid: the uuid of the armature node in the export tree + :param export_settings: + :return: a glTF2 skin object + """ + + if armature_uuid not in export_settings['vtree'].nodes: + # User filtered the objects to export, keeping the skinned mesh but not the armature + return None + + blender_armature_object = export_settings['vtree'].nodes[armature_uuid].blender_object + + if not __filter_skin(blender_armature_object, export_settings): + return None + + skin = gltf2_io.Skin( + extensions=__gather_extensions(blender_armature_object, export_settings), + extras=__gather_extras(blender_armature_object, export_settings), + inverse_bind_matrices=__gather_inverse_bind_matrices(armature_uuid, export_settings), + joints=__gather_joints(armature_uuid, export_settings), + name=__gather_name(blender_armature_object, export_settings), + skeleton=__gather_skeleton(blender_armature_object, export_settings) + ) + + # If the armature is not exported, joints will be empty. + # Do not construct a skin in that case + if len(skin.joints) == 0: + return None + + export_user_extensions('gather_skin_hook', export_settings, skin, blender_armature_object) + + return skin + + +def __filter_skin(blender_armature_object, export_settings): + if not export_settings['gltf_skins']: + return False + if blender_armature_object.type != 'ARMATURE' or len(blender_armature_object.pose.bones) == 0: + return False + + return True + + +def __gather_extensions(blender_armature_object, export_settings): + return None + + +def __gather_extras(blender_armature_object, export_settings): + return None + + +def __gather_inverse_bind_matrices(armature_uuid, export_settings): + + blender_armature_object = export_settings['vtree'].nodes[armature_uuid].blender_object + + axis_basis_change = mathutils.Matrix.Identity(4) + if export_settings['gltf_yup']: + axis_basis_change = mathutils.Matrix( + ((1.0, 0.0, 0.0, 0.0), (0.0, 0.0, 1.0, 0.0), (0.0, -1.0, 0.0, 0.0), (0.0, 0.0, 0.0, 1.0))) + + # store matrix_world of armature in case we need to add a neutral bone + export_settings['vtree'].nodes[armature_uuid].matrix_world_armature = blender_armature_object.matrix_world.copy() + + bones_uuid = export_settings['vtree'].get_all_bones(armature_uuid) + + def __collect_matrices(bone): + inverse_bind_matrix = ( + axis_basis_change @ + ( + blender_armature_object.matrix_world @ + bone.bone.matrix_local + ) + ).inverted_safe() + matrices.append(inverse_bind_matrix) + + matrices = [] + for b in bones_uuid: + if export_settings['vtree'].nodes[b].leaf_reference is None: + __collect_matrices(blender_armature_object.pose.bones[export_settings['vtree'].nodes[b].blender_bone.name]) + else: + inverse_bind_matrix = ( + axis_basis_change @ + ( + blender_armature_object.matrix_world @ + export_settings['vtree'].nodes[export_settings['vtree'].nodes[b].leaf_reference].matrix_world_tail + ) + ).inverted_safe() + matrices.append(inverse_bind_matrix) # Leaf bone + + # Flatten the matrices (column-major, as glTF expects) + inverse_matrices = [] + for matrix in matrices: + for column in range(0, 4): + for row in range(0, 4): + inverse_matrices.append(matrix[row][column]) + + binary_data = gltf2_io_binary_data.BinaryData.from_list(inverse_matrices, gltf2_io_constants.ComponentType.Float) + return gltf2_blender_gather_accessors.gather_accessor( + binary_data, + gltf2_io_constants.ComponentType.Float, + len(inverse_matrices) // gltf2_io_constants.DataType.num_elements(gltf2_io_constants.DataType.Mat4), + None, + None, + gltf2_io_constants.DataType.Mat4, + export_settings + ) + + +def 
__gather_joints(armature_uuid, export_settings): + + all_armature_children = export_settings['vtree'].nodes[armature_uuid].children + root_bones_uuid = [ + c for c in all_armature_children if export_settings['vtree'].nodes[c].blender_type == VExportNode.BONE] + + # Create bone nodes + for root_bone_uuid in root_bones_uuid: + gltf2_blender_gather_joints.gather_joint_vnode(root_bone_uuid, export_settings) + + bones_uuid = export_settings['vtree'].get_all_bones(armature_uuid) + joints = [export_settings['vtree'].nodes[b].node for b in bones_uuid] + return joints + + +def __gather_name(blender_armature_object, export_settings): + return blender_armature_object.name + + +def __gather_skeleton(blender_armature_object, export_settings): + # In the future support the result of https://github.com/KhronosGroup/glTF/pull/1195 + return None diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_tree.py b/scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_tree.py new file mode 100644 index 00000000000..1cfe6374fbd --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_tree.py @@ -0,0 +1,904 @@ +# SPDX-FileCopyrightText: 2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy +import uuid +import numpy as np +from mathutils import Quaternion, Matrix +from ...io.exp.gltf2_io_user_extensions import export_user_extensions +from ...io.com import gltf2_io +from ...io.imp.gltf2_io_binary import BinaryData +from ...io.com import gltf2_io_constants +from ...io.exp import gltf2_io_binary_data +from ..com.gltf2_blender_default import BLENDER_GLTF_SPECIAL_COLLECTION +from . import gltf2_blender_gather_accessors +from .gltf2_blender_gather_joints import gather_joint_vnode + + +class VExportNode: + + OBJECT = 1 + ARMATURE = 2 + BONE = 3 + LIGHT = 4 + CAMERA = 5 + COLLECTION = 6 + INSTANCE = 7 # For instances of GN + + INSTANCIER = 8 + NOT_INSTANCIER = 9 + INST_COLLECTION = 7 + + # Parent type, to be set on child regarding its parent + NO_PARENT = 54 + PARENT_OBJECT = 50 + PARENT_BONE = 51 + PARENT_BONE_RELATIVE = 52 + PARENT_ROOT_BONE = 53 + PARENT_BONE_BONE = 55 + + # Children type + # Is used to split instance collection into 2 categories: + CHILDREN_REAL = 90 + CHILDREN_IS_IN_COLLECTION = 91 + + def __init__(self): + self.children = [] + self.children_type = {} # Used for children of instance collection + self.blender_type = None + self.matrix_world = None + self.parent_type = None + + self.blender_object = None + self.blender_bone = None + self.leaf_reference = None # For leaf bones only + + self.default_hide_viewport = False # Need to store the default value for meshes in case of animation baking on armature + + self.force_as_empty = False # Used for instancer display + + # Only for bone/bone and object parented to bone + self.parent_bone_uuid = None + + # Only for bones + self.use_deform = None + + # Only for armature + self.bones = {} + + # For deformed object + self.armature = None # for deformed object and for bone + self.skin = None + + # glTF + self.node = None + + # For mesh instance data of GN instances + self.data = None + self.materials = None + + self.is_instancier = VExportNode.NOT_INSTANCIER + + def add_child(self, uuid): + self.children.append(uuid) + + def set_blender_data(self, blender_object, blender_bone): + self.blender_object = blender_object + self.blender_bone = blender_bone + + def recursive_display(self, tree, mode): + if mode == "simple": + for c in self.children: + print( + 
tree.nodes[c].uuid, + self.blender_object.name if self.blender_object is not None else "GN" + + self.data.name, + "/", + self.blender_bone.name if self.blender_bone else "", + "-->", + tree.nodes[c].blender_object.name if tree.nodes[c].blender_object else "GN" + + tree.nodes[c].data.name, + "/", + tree.nodes[c].blender_bone.name if tree.nodes[c].blender_bone else "") + tree.nodes[c].recursive_display(tree, mode) + + +class VExportTree: + def __init__(self, export_settings): + self.nodes = {} + self.roots = [] + + self.export_settings = export_settings + + self.tree_troncated = False + + self.axis_basis_change = Matrix.Identity(4) + if self.export_settings['gltf_yup']: + self.axis_basis_change = Matrix( + ((1.0, 0.0, 0.0, 0.0), (0.0, 0.0, 1.0, 0.0), (0.0, -1.0, 0.0, 0.0), (0.0, 0.0, 0.0, 1.0))) + + def add_node(self, node): + self.nodes[node.uuid] = node + + def add_children(self, uuid_parent, uuid_child): + self.nodes[uuid_parent].add_child(uuid_child) + + def construct(self, blender_scene): + bpy.context.window.scene = blender_scene + depsgraph = bpy.context.evaluated_depsgraph_get() + + # Gather parent/children information once, as calling bobj.children is + # very expensive operation : takes O(len(bpy.data.objects)) time. + # TODO : In case of full collection export, we should add children / collection in the same way + blender_children = dict() + for bobj in bpy.data.objects: + bparent = bobj.parent + blender_children.setdefault(bobj, []) + blender_children.setdefault(bparent, []).append(bobj) + + if self.export_settings['gltf_hierarchy_full_collections'] is False: + scene_eval = blender_scene.evaluated_get(depsgraph=depsgraph) + for blender_object in [obj.original for obj in scene_eval.objects if obj.parent is None]: + self.recursive_node_traverse(blender_object, None, None, Matrix.Identity(4), False, blender_children) + else: + self.recursive_node_traverse( + blender_scene.collection, + None, + None, + Matrix.Identity(4), + False, + blender_children, + is_collection=True) + + def recursive_node_traverse( + self, + blender_object, + blender_bone, + parent_uuid, + parent_coll_matrix_world, + delta, + blender_children, + armature_uuid=None, + dupli_world_matrix=None, + data=None, + original_object=None, + is_collection=False, + is_children_in_collection=False): + node = VExportNode() + node.uuid = str(uuid.uuid4()) + node.parent_uuid = parent_uuid + node.set_blender_data(blender_object, blender_bone) + if blender_object is None: + node.data = data + node.original_object = original_object + + # add to parent if needed + if parent_uuid is not None: + self.add_children(parent_uuid, node.uuid) + if self.nodes[parent_uuid].blender_type == VExportNode.INST_COLLECTION or original_object is not None: + self.nodes[parent_uuid].children_type[node.uuid] = VExportNode.CHILDREN_IS_IN_COLLECTION if is_children_in_collection is True else VExportNode.CHILDREN_REAL + else: + self.roots.append(node.uuid) + + # Set blender type + if blender_object is None: # GN instance + node.blender_type = VExportNode.INSTANCE + elif blender_bone is not None: + node.blender_type = VExportNode.BONE + self.nodes[armature_uuid].bones[blender_bone.name] = node.uuid + node.use_deform = blender_bone.id_data.data.bones[blender_bone.name].use_deform + elif is_collection is True: + node.blender_type = VExportNode.COLLECTION + elif blender_object.type == "ARMATURE": + node.blender_type = VExportNode.ARMATURE + node.default_hide_viewport = blender_object.hide_viewport + elif blender_object.type == "CAMERA": + node.blender_type = 
VExportNode.CAMERA + elif blender_object.type == "LIGHT": + node.blender_type = VExportNode.LIGHT + elif blender_object.instance_type == "COLLECTION": + node.blender_type = VExportNode.INST_COLLECTION + node.default_hide_viewport = blender_object.hide_viewport + else: + node.blender_type = VExportNode.OBJECT + node.default_hide_viewport = blender_object.hide_viewport + + # For meshes with armature modifier (parent is armature), keep armature uuid + if node.blender_type == VExportNode.OBJECT: + modifiers = {m.type: m for m in blender_object.modifiers} + if "ARMATURE" in modifiers and modifiers["ARMATURE"].object is not None: + if parent_uuid is None or not self.nodes[parent_uuid].blender_type == VExportNode.ARMATURE: + # correct workflow is to parent skinned mesh to armature, but ... + # all users don't use correct workflow + self.export_settings['log'].warning( + "Armature must be the parent of skinned mesh" + "Armature is selected by its name, but may be false in case of instances" + ) + # Search an armature by name, and use the first found + # This will be done after all objects are setup + node.armature_needed = modifiers["ARMATURE"].object.name + else: + node.armature = parent_uuid + + # For bones, store uuid of armature + if blender_bone is not None: + node.armature = armature_uuid + + # for bone/bone parenting, store parent, this will help armature tree management + if parent_uuid is not None and self.nodes[parent_uuid].blender_type == VExportNode.BONE and node.blender_type == VExportNode.BONE: + node.parent_bone_uuid = parent_uuid + + # Objects parented to bone + if parent_uuid is not None and self.nodes[parent_uuid].blender_type == VExportNode.BONE and node.blender_type != VExportNode.BONE: + node.parent_bone_uuid = parent_uuid + + # World Matrix + + # Delta is used when rest transforms are used for armatures + # Any children of objects parented to bones must have this delta (for grandchildren, etc...) 
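+ # The delta is the child's world matrix expressed relative to the bone's pose matrix; re-applying + # it on top of the bone's rest matrix keeps the child at the same offset from the bone.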
+ new_delta = False + + # Store World Matrix for objects + if dupli_world_matrix is not None: + node.matrix_world = dupli_world_matrix + elif node.blender_type in [VExportNode.OBJECT, VExportNode.COLLECTION, VExportNode.INST_COLLECTION, VExportNode.ARMATURE, VExportNode.CAMERA, VExportNode.LIGHT]: + # Matrix World of object is expressed based on collection instance objects are + # So real world matrix is collection world_matrix @ "world_matrix" of object + if is_collection: + node.matrix_world = parent_coll_matrix_world.copy() + else: + node.matrix_world = parent_coll_matrix_world @ blender_object.matrix_world.copy() + + # If object is parented to bone, and Rest pose is used for Armature, we need to keep the world matrix transformed relative relative to rest pose, + # not the current world matrix (relation to pose) + if parent_uuid and self.nodes[parent_uuid].blender_type == VExportNode.BONE and self.export_settings['gltf_rest_position_armature'] is True: + _blender_bone = self.nodes[parent_uuid].blender_bone + _pose = self.nodes[self.nodes[parent_uuid].armature].matrix_world @ _blender_bone.matrix @ self.axis_basis_change + _rest = self.nodes[self.nodes[parent_uuid].armature].matrix_world @ _blender_bone.bone.matrix_local @ self.axis_basis_change + _delta = _pose.inverted_safe() @ node.matrix_world + node.original_matrix_world = node.matrix_world.copy() + node.matrix_world = _rest @ _delta + new_delta = True + + if node.blender_type == VExportNode.CAMERA and self.export_settings['gltf_cameras']: + if self.export_settings['gltf_yup']: + correction = Quaternion((2**0.5 / 2, -2**0.5 / 2, 0.0, 0.0)) + else: + correction = Matrix.Identity(4).to_quaternion() + node.matrix_world @= correction.to_matrix().to_4x4() + elif node.blender_type == VExportNode.LIGHT and self.export_settings['gltf_lights']: + if self.export_settings['gltf_yup']: + correction = Quaternion((2**0.5 / 2, -2**0.5 / 2, 0.0, 0.0)) + else: + correction = Matrix.Identity(4).to_quaternion() + node.matrix_world @= correction.to_matrix().to_4x4() + elif node.blender_type == VExportNode.BONE: + if self.export_settings['gltf_rest_position_armature'] is False: + # Use pose bone for TRS + node.matrix_world = self.nodes[node.armature].matrix_world @ blender_bone.matrix + if self.export_settings['gltf_leaf_bone'] is True: + node.matrix_world_tail = self.nodes[node.armature].matrix_world @ Matrix.Translation( + blender_bone.tail) + node.matrix_world_tail = node.matrix_world_tail @ self.axis_basis_change + else: + # Use edit bone for TRS --> REST pose will be used + node.matrix_world = self.nodes[node.armature].matrix_world @ blender_bone.bone.matrix_local + # Tail will be set after, as we need to be in edit mode + node.matrix_world = node.matrix_world @ self.axis_basis_change + + if delta is True: + _pose_parent = self.nodes[parent_uuid].original_matrix_world + _rest_parent = self.nodes[parent_uuid].matrix_world + _delta = _pose_parent.inverted_safe() @ node.matrix_world + node.original_matrix_world = node.matrix_world.copy() + node.matrix_world = _rest_parent @ _delta + + # Force empty ? 
+ # For duplis, if instancer is not display, we should create an empty + if blender_object and is_collection is False and blender_object.is_instancer is True and blender_object.show_instancer_for_render is False: + node.force_as_empty = True + + # Storing this node + self.add_node(node) + + ###### Manage children ###### + + # GN instance have no children + if blender_object is None: + return + + # standard children (of object, or of instance collection) + if blender_bone is None and is_collection is False and blender_object.is_instancer is False: + for child_object in blender_children[blender_object]: + if child_object.parent_bone and child_object.parent_type in ("BONE", "BONE_RELATIVE"): + # Object parented to bones + # Will be manage later + continue + else: + # Classic parenting + + # If we export full collection hierarchy, we need to ignore children that + # are not in the same collection + if self.export_settings['gltf_hierarchy_full_collections'] is True: + if child_object.users_collection[0].name != blender_object.users_collection[0].name: + continue + + self.recursive_node_traverse( + child_object, + None, + node.uuid, + parent_coll_matrix_world, + new_delta or delta, + blender_children) + + # Collections + if is_collection is False and (blender_object.instance_type == + 'COLLECTION' and blender_object.instance_collection): + if self.export_settings['gltf_hierarchy_full_collections'] is False: + for dupli_object in blender_object.instance_collection.all_objects: + if dupli_object.parent is not None: + continue + self.recursive_node_traverse( + dupli_object, + None, + node.uuid, + node.matrix_world, + new_delta or delta, + blender_children, + is_children_in_collection=True) + else: + # Manage children objects + for child in blender_object.instance_collection.objects: + if child.users_collection[0].name != blender_object.name: + continue + self.recursive_node_traverse(child, None, node.uuid, node.matrix_world, + new_delta or delta, blender_children) + # Manage children collections + for child in blender_object.instance_collection.children: + self.recursive_node_traverse( + child, + None, + node.uuid, + node.matrix_world, + new_delta or delta, + blender_children, + is_collection=True) + + if is_collection is True: # Only for gltf_hierarchy_full_collections == True + # Manage children objects + for child in blender_object.objects: + if child.users_collection[0].name != blender_object.name: + continue + + # Keep only object if it has no parent, or parent is not in the collection + if not (child.parent is None or child.parent.users_collection[0].name != blender_object.name): + continue + + self.recursive_node_traverse(child, None, node.uuid, node.matrix_world, + new_delta or delta, blender_children) + # Manage children collections + for child in blender_object.children: + self.recursive_node_traverse( + child, + None, + node.uuid, + node.matrix_world, + new_delta or delta, + blender_children, + is_collection=True) + + # Armature : children are bones with no parent + if is_collection is False and blender_object.type == "ARMATURE" and blender_bone is None: + for b in [b for b in blender_object.pose.bones if b.parent is None]: + self.recursive_node_traverse( + blender_object, + b, + node.uuid, + parent_coll_matrix_world, + new_delta or delta, + blender_children, + node.uuid) + + # Bones + if is_collection is False and blender_object.type == "ARMATURE" and blender_bone is not None: + for b in blender_bone.children: + self.recursive_node_traverse( + blender_object, + b, + node.uuid, + 
parent_coll_matrix_world, + new_delta or delta, + blender_children, + armature_uuid) + + # Object parented to bone + if is_collection is False and blender_bone is not None: + for child_object in [c for c in blender_children[blender_object] if c.parent_type == + "BONE" and c.parent_bone is not None and c.parent_bone == blender_bone.name]: + self.recursive_node_traverse( + child_object, + None, + node.uuid, + parent_coll_matrix_world, + new_delta or delta, + blender_children) + + # Duplis + if is_collection is False and blender_object.is_instancer is True and blender_object.instance_type != 'COLLECTION': + depsgraph = bpy.context.evaluated_depsgraph_get() + for ( + dupl, + mat) in [ + (dup.object.original, + dup.matrix_world.copy()) for dup in depsgraph.object_instances if dup.parent and id( + dup.parent.original) == id(blender_object)]: + self.recursive_node_traverse( + dupl, + None, + node.uuid, + parent_coll_matrix_world, + new_delta or delta, + blender_children, + dupli_world_matrix=mat) + + # Geometry Nodes instances + if self.export_settings['gltf_gn_mesh'] is True: + # Do not force export as empty + # Because GN graph can have both geometry and instances + depsgraph = bpy.context.evaluated_depsgraph_get() + eval = blender_object.evaluated_get(depsgraph) + for inst in depsgraph.object_instances: # use only as iterator + if inst.parent == eval: + if not inst.is_instance: + continue + if type(inst.object.data).__name__ == "Mesh" and len(inst.object.data.vertices) == 0: + continue # This is nested instances, and this mesh has no vertices, so is an instancier for other instances + node.is_instancier = VExportNode.INSTANCIER + self.recursive_node_traverse( + None, + None, + node.uuid, + parent_coll_matrix_world, + new_delta or delta, + blender_children, + dupli_world_matrix=inst.matrix_world.copy(), + data=inst.object.data, + original_object=blender_object, + is_children_in_collection=True) + + def get_all_objects(self): + return [n.uuid for n in self.nodes.values() if n.blender_type != VExportNode.BONE] + + def get_all_bones(self, uuid): # For armature only + if not hasattr(self.nodes[uuid], "all_bones"): + if self.nodes[uuid].blender_type == VExportNode.ARMATURE: + def recursive_get_all_bones(uuid): + total = [] + if self.nodes[uuid].blender_type == VExportNode.BONE: + total.append(uuid) + for child_uuid in self.nodes[uuid].children: + total.extend(recursive_get_all_bones(child_uuid)) + + return total + + tot = [] + for c_uuid in self.nodes[uuid].children: + tot.extend(recursive_get_all_bones(c_uuid)) + self.nodes[uuid].all_bones = tot + return tot # Not really needed to return, we are just baking it before export really starts + else: + self.nodes[uuid].all_bones = [] + return [] + else: + return self.nodes[uuid].all_bones + + def get_root_bones_uuid(self, uuid): # For armature only + if not hasattr(self.nodes[uuid], "root_bones_uuid"): + if self.nodes[uuid].blender_type == VExportNode.ARMATURE: + all_armature_children = self.nodes[uuid].children + self.nodes[uuid].root_bones_uuid = [ + c for c in all_armature_children if self.nodes[c].blender_type == VExportNode.BONE] + # Not really needed to return, we are just baking it before export really starts + return self.nodes[uuid].root_bones_uuid + else: + self.nodes[uuid].root_bones_uuid = [] + return [] + else: + return self.nodes[uuid].root_bones_uuid + + def get_all_node_of_type(self, node_type): + return [n.uuid for n in self.nodes.values() if n.blender_type == node_type] + + def display(self, mode): + if mode == "simple": + for n in 
self.roots: + print( + self.nodes[n].uuid, + "Root", + self.nodes[n].blender_object.name if self.nodes[n].blender_object else "GN instance", + "/", + self.nodes[n].blender_bone.name if self.nodes[n].blender_bone else "") + self.nodes[n].recursive_display(self, mode) + + def filter_tag(self): + roots = self.roots.copy() + for r in roots: + self.recursive_filter_tag(r, None) + + def filter_perform(self): + roots = self.roots.copy() + for r in roots: + self.recursive_filter(r, None) # Root, so no parent + + def filter(self): + self.filter_tag() + export_user_extensions('gather_tree_filter_tag_hook', self.export_settings, self) + self.filter_perform() + self.remove_empty_collections() # Used only when exporting full collection hierarchy + self.remove_filtered_nodes() + + def recursive_filter_tag(self, uuid, parent_keep_tag): + # parent_keep_tag is for collection instance + # some properties (selection, visibility, renderability) + # are defined at collection level, and we need to use these values + # for all objects of the collection instance. + # But some properties (camera, lamp ...) are not defined at collection level + if parent_keep_tag is None: + self.nodes[uuid].keep_tag = self.node_filter_not_inheritable_is_kept( + uuid) and self.node_filter_inheritable_is_kept(uuid) + elif parent_keep_tag is True: + self.nodes[uuid].keep_tag = self.node_filter_not_inheritable_is_kept(uuid) + elif parent_keep_tag is False: + self.nodes[uuid].keep_tag = False + else: + self.export_settings['log'].error("This should not happen") + + for child in self.nodes[uuid].children: + if self.nodes[uuid].blender_type == VExportNode.INST_COLLECTION or self.nodes[uuid].is_instancier == VExportNode.INSTANCIER: + # We need to split children into 2 categories: real children, and objects inside the collection + if self.nodes[uuid].children_type[child] == VExportNode.CHILDREN_IS_IN_COLLECTION: + self.recursive_filter_tag(child, self.nodes[uuid].keep_tag) + else: + self.recursive_filter_tag(child, parent_keep_tag) + else: + self.recursive_filter_tag(child, parent_keep_tag) + + def recursive_filter(self, uuid, parent_kept_uuid): + children = self.nodes[uuid].children.copy() + + new_parent_kept_uuid = None + if self.nodes[uuid].keep_tag is False: + new_parent_kept_uuid = parent_kept_uuid + # Need to modify tree + if self.nodes[uuid].parent_uuid is not None: + self.nodes[self.nodes[uuid].parent_uuid].children.remove(uuid) + else: + # Remove from root + self.roots.remove(uuid) + else: + new_parent_kept_uuid = uuid + + # If parent_uuid is not parent_kept_uuid, we need to modify children list of parent_kept_uuid + if parent_kept_uuid != self.nodes[uuid].parent_uuid and parent_kept_uuid is not None: + self.tree_troncated = True + self.nodes[parent_kept_uuid].children.append(uuid) + + # If parent_kept_uuid is None, and parent_uuid was not, add to root list + if self.nodes[uuid].parent_uuid is not None and parent_kept_uuid is None: + self.tree_troncated = True + self.roots.append(uuid) + + # Modify parent uuid + self.nodes[uuid].parent_uuid = parent_kept_uuid + + for child in children: + self.recursive_filter(child, new_parent_kept_uuid) + + def node_filter_not_inheritable_is_kept(self, uuid): + # Export Camera or not + if self.nodes[uuid].blender_type == VExportNode.CAMERA: + if self.export_settings['gltf_cameras'] is False: + return False + + # Export Lamp or not + if self.nodes[uuid].blender_type == VExportNode.LIGHT: + if self.export_settings['gltf_lights'] is False: + return False + + # Export deform bones only + if 
self.nodes[uuid].blender_type == VExportNode.BONE: + if self.export_settings['gltf_def_bones'] is True and self.nodes[uuid].use_deform is False: + # Check if bone has some objected parented to bone. We need to keep it in + # that case, even if this is not a def bone + if len([c for c in self.nodes[uuid].children if self.nodes[c].blender_type != VExportNode.BONE]) != 0: + return True + return False + + return True + + def node_filter_inheritable_is_kept(self, uuid): + + if self.nodes[uuid].blender_object is None: + # geometry node instances + return True + + if self.nodes[uuid].blender_type == VExportNode.COLLECTION: + # Collections, can't be filtered => we always keep them + return True + + if self.export_settings['gltf_selected'] and self.nodes[uuid].blender_object.select_get() is False: + return False + + if self.export_settings['gltf_visible']: + # The eye in outliner (object) + if self.nodes[uuid].blender_object.visible_get() is False: + return False + + # The screen in outliner (object) + if self.nodes[uuid].blender_object.hide_viewport is True: + return False + + # The screen in outliner (collections) + if all([c.hide_viewport for c in self.nodes[uuid].blender_object.users_collection]): + return False + + # The camera in outliner (object) + if self.export_settings['gltf_renderable']: + if self.nodes[uuid].blender_object.hide_render is True: + return False + + # The camera in outliner (collections) + if all([c.hide_render for c in self.nodes[uuid].blender_object.users_collection]): + return False + + # If we are given a collection, use all objects from it + if self.export_settings['gltf_collection']: + local_collection = bpy.data.collections.get((self.export_settings['gltf_collection'], None)) + if not local_collection: + return False + found = any(x == self.nodes[uuid].blender_object for x in local_collection.all_objects) + if not found: + return False + else: + if self.export_settings['gltf_active_collection'] and not self.export_settings['gltf_active_collection_with_nested']: + found = any(x == self.nodes[uuid].blender_object for x in bpy.context.collection.objects) + if not found: + return False + + if self.export_settings['gltf_active_collection'] and self.export_settings['gltf_active_collection_with_nested']: + found = any(x == self.nodes[uuid].blender_object for x in bpy.context.collection.all_objects) + if not found: + return False + + if BLENDER_GLTF_SPECIAL_COLLECTION in bpy.data.collections and self.nodes[uuid].blender_object.name in \ + bpy.data.collections[BLENDER_GLTF_SPECIAL_COLLECTION].objects: + return False + + if self.export_settings['gltf_armature_object_remove'] is True: + # If we remove the Armature object + if self.nodes[uuid].blender_type == VExportNode.ARMATURE: + self.nodes[uuid].arma_exported = True + return False + + return True + + def remove_filtered_nodes(self): + if self.export_settings['gltf_armature_object_remove'] is True: + # If we remove the Armature object + self.nodes = {k: n for (k, n) in self.nodes.items() if n.keep_tag is True or ( + n.keep_tag is False and n.blender_type == VExportNode.ARMATURE)} + else: + self.nodes = {k: n for (k, n) in self.nodes.items() if n.keep_tag is True} + + def remove_empty_collections(self): + def recursive_remove_empty_collections(uuid): + if self.nodes[uuid].blender_type == VExportNode.COLLECTION: + if len(self.nodes[uuid].children) == 0: + if self.nodes[uuid].parent_uuid is not None: + self.nodes[self.nodes[uuid].parent_uuid].children.remove(uuid) + else: + self.roots.remove(uuid) + self.nodes[uuid].keep_tag = 
False + else: + for c in self.nodes[uuid].children: + recursive_remove_empty_collections(c) + + roots = self.roots.copy() + for r in roots: + recursive_remove_empty_collections(r) + + def search_missing_armature(self): + for n in [n for n in self.nodes.values() if hasattr(n, "armature_needed") is True]: + candidates = [i for i in self.nodes.values() if i.blender_type == + VExportNode.ARMATURE and i.blender_object.name == n.armature_needed] + if len(candidates) > 0: + n.armature = candidates[0].uuid + del n.armature_needed + + def bake_armature_bone_list(self): + + if self.export_settings['gltf_leaf_bone'] is True: + self.add_leaf_bones() + + # Used to store data in armature vnode + # If armature is removed from export + # Data are still available, even if armature is not exported (so bones are re-parented) + for n in [n for n in self.nodes.values() if n.blender_type == VExportNode.ARMATURE]: + + self.get_all_bones(n.uuid) + self.get_root_bones_uuid(n.uuid) + + def add_leaf_bones(self): + + # If we are using rest pose, we need to get tail of editbone, going to edit mode for each armature + if self.export_settings['gltf_rest_position_armature'] is True: + for obj_uuid in [n for n in self.nodes if self.nodes[n].blender_type == VExportNode.ARMATURE]: + armature = self.nodes[obj_uuid].blender_object + bpy.context.view_layer.objects.active = armature + bpy.ops.object.mode_set(mode="EDIT") + + for bone in armature.data.edit_bones: + if len(bone.children) == 0: + self.nodes[self.nodes[obj_uuid].bones[bone.name] + ].matrix_world_tail = armature.matrix_world @ Matrix.Translation(bone.tail) @ self.axis_basis_change + + bpy.ops.object.mode_set(mode="OBJECT") + + for bone_uuid in [n for n in self.nodes if self.nodes[n].blender_type == VExportNode.BONE + and len(self.nodes[n].children) == 0]: + + bone_node = self.nodes[bone_uuid] + + # Add a new node + node = VExportNode() + node.uuid = str(uuid.uuid4()) + node.parent_uuid = bone_uuid + node.parent_bone_uuid = bone_uuid + node.blender_object = bone_node.blender_object + node.armature = bone_node.armature + node.blender_type = VExportNode.BONE + node.leaf_reference = bone_uuid + node.keep_tag = True + + node.matrix_world = bone_node.matrix_world_tail.copy() + + self.add_children(bone_uuid, node.uuid) + self.add_node(node) + + def add_neutral_bones(self): + added_armatures = [] + for n in [n for n in self.nodes.values() if + n.armature is not None and + n.armature in self.nodes and + n.blender_type == VExportNode.OBJECT and + n.blender_object.type == "MESH" and + hasattr(self.nodes[n.armature], "need_neutral_bone")]: # all skin meshes objects where neutral bone is needed + # Only for meshes, as curve can't have skin data (no weights pain available) + + # Be sure to add it to really exported meshes + if n.node.skin is None: + self.export_settings['log'].warning( + "{} has no skin, skipping adding neutral bone data on it.".format( + n.blender_object.name)) + continue + + if n.armature not in added_armatures: + + added_armatures.append(n.armature) # Make sure to not insert 2 times the neural bone + + # First add a new node + trans, rot, sca = self.axis_basis_change.decompose() + translation, rotation, scale = (None, None, None) + if trans[0] != 0.0 or trans[1] != 0.0 or trans[2] != 0.0: + translation = [trans[0], trans[1], trans[2]] + if rot[0] != 1.0 or rot[1] != 0.0 or rot[2] != 0.0 or rot[3] != 0.0: + rotation = [rot[1], rot[2], rot[3], rot[0]] + if sca[0] != 1.0 or sca[1] != 1.0 or sca[2] != 1.0: + scale = [sca[0], sca[1], sca[2]] + neutral_bone = 
gltf2_io.Node( + camera=None, + children=None, + extensions=None, + extras=None, + matrix=None, + mesh=None, + name='neutral_bone', + rotation=rotation, + scale=scale, + skin=None, + translation=translation, + weights=None + ) + # Add it to child list of armature + self.nodes[n.armature].node.children.append(neutral_bone) + + # Add it to joint list + n.node.skin.joints.append(neutral_bone) + + # Need to add an InverseBindMatrix + array = BinaryData.decode_accessor_internal(n.node.skin.inverse_bind_matrices) + + inverse_bind_matrix = ( + self.axis_basis_change @ self.nodes[n.armature].matrix_world_armature).inverted_safe() + + matrix = [] + for column in range(0, 4): + for row in range(0, 4): + matrix.append(inverse_bind_matrix[row][column]) + + array = np.append(array, np.array([matrix]), axis=0) + binary_data = gltf2_io_binary_data.BinaryData.from_list( + array.flatten(), gltf2_io_constants.ComponentType.Float) + n.node.skin.inverse_bind_matrices = gltf2_blender_gather_accessors.gather_accessor( + binary_data, + gltf2_io_constants.ComponentType.Float, + len(array.flatten()) // gltf2_io_constants.DataType.num_elements(gltf2_io_constants.DataType.Mat4), + None, + None, + gltf2_io_constants.DataType.Mat4, + self.export_settings + ) + + def get_unused_skins(self): + from .gltf2_blender_gather_skins import gather_skin + skins = [] + for n in [n for n in self.nodes.values() if n.blender_type == VExportNode.ARMATURE]: + if self.export_settings['gltf_armature_object_remove'] is True: + if hasattr(n, "arma_exported") is False: + continue + if len([m for m in self.nodes.values() if m.keep_tag is True and m.blender_type == + VExportNode.OBJECT and m.armature == n.uuid]) == 0: + skin = gather_skin(n.uuid, self.export_settings) + skins.append(skin) + return skins + + def variants_reset_to_original(self): + # Only if Variants are displayed and exported + if bpy.context.preferences.addons['io_scene_gltf2'].preferences.KHR_materials_variants_ui is False: + return + objects = [self.nodes[o].blender_object for o in self.get_all_node_of_type(VExportNode.OBJECT) if self.nodes[o].blender_object.type == "MESH" + and self.nodes[o].blender_object.data.get('gltf2_variant_default_materials') is not None] + for obj in objects: + # loop on material slots ( primitives ) + for mat_slot_idx, s in enumerate(obj.material_slots): + # Check if there is a default material for this slot + for i in obj.data.gltf2_variant_default_materials: + if i.material_slot_index == mat_slot_idx: + s.material = i.default_material + break + + # If not found, keep current material as default + + def break_bone_hierarchy(self): + # Can be usefull when matrix is not decomposable + for arma in self.get_all_node_of_type(VExportNode.ARMATURE): + bones = self.get_all_bones(arma) + for bone in bones: + if self.nodes[bone].parent_uuid is not None and self.nodes[bone].parent_uuid != arma: + self.nodes[self.nodes[bone].parent_uuid].children.remove(bone) + self.nodes[bone].parent_uuid = arma + self.nodes[arma].children.append(bone) + + def break_obj_hierarchy(self): + # Can be usefull when matrix is not decomposable + # TODO: if we get real collection one day, we probably need to adapt this code + for obj in self.get_all_objects(): + if self.nodes[obj].armature is not None and self.nodes[obj].parent_uuid == self.nodes[obj].armature: + continue # Keep skined meshs as children of armature + if self.nodes[obj].parent_uuid is not None: + self.nodes[self.nodes[obj].parent_uuid].children.remove(obj) + self.nodes[obj].parent_uuid = None + 
self.roots.append(obj) + + def check_if_we_can_remove_armature(self): + # If user requested to remove armature, we need to check if it is possible + # If is impossible to remove it if armature has multiple root bones. (glTF validator error) + # Currently, we manage it at export level, not at each armature level + for arma_uuid in [n for n in self.nodes.keys() if self.nodes[n].blender_type == VExportNode.ARMATURE]: + if len(self.get_root_bones_uuid(arma_uuid)) > 1: + # We can't remove armature + self.export_settings['gltf_armature_object_remove'] = False + self.export_settings['log'].warning( + "We can't remove armature object because some armatures have multiple root bones.") + break diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_get.py b/scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_get.py new file mode 100755 index 00000000000..ee20c33d10a --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_get.py @@ -0,0 +1,33 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy +from mathutils import Vector, Matrix +from ...blender.com.gltf2_blender_conversion import texture_transform_blender_to_gltf +from ...io.com import gltf2_io_debug +from ..com.gltf2_blender_material_helpers import get_gltf_node_name, get_gltf_node_old_name +from .material import gltf2_blender_search_node_tree + + +def get_animation_target(action_group: bpy.types.ActionGroup): + return action_group.channels[0].data_path.split('.')[-1] + + +def get_object_from_datapath(blender_object, data_path: str): + if "." in data_path: + # gives us: ('modifiers["Subsurf"]', 'levels') + path_prop, path_attr = data_path.rsplit(".", 1) + + # same as: prop = obj.modifiers["Subsurf"] + if path_attr in ["rotation", "scale", "location", + "rotation_axis_angle", "rotation_euler", "rotation_quaternion"]: + prop = blender_object.path_resolve(path_prop) + else: + prop = blender_object.path_resolve(data_path) + else: + prop = blender_object + # single attribute such as name, location... etc + # path_attr = data_path + + return prop diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gltf2_exporter.py b/scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gltf2_exporter.py new file mode 100755 index 00000000000..1efe7b9aa15 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gltf2_exporter.py @@ -0,0 +1,554 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy +import re +import os +from typing import List + +from ... import get_version_string +from ...io.com import gltf2_io, gltf2_io_extensions +from ...io.com.gltf2_io_path import path_to_uri, uri_to_path +from ...io.com.gltf2_io_constants import ComponentType, DataType +from ...io.exp import gltf2_io_binary_data, gltf2_io_buffer, gltf2_io_image_data +from ...io.exp.gltf2_io_user_extensions import export_user_extensions +from .gltf2_blender_gather_accessors import gather_accessor +from .material.gltf2_blender_gather_image import get_gltf_image_from_blender_image + + +class AdditionalData: + def __init__(self): + additional_textures = [] + + +class GlTF2Exporter: + """ + The glTF exporter flattens a scene graph to a glTF serializable format. 
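+ For example, a gltf2_io.Mesh attached to a Node is stored once in the root-level meshes list, + and the node keeps only the integer index into that list.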
+ + Any child properties are replaced with references where necessary + """ + + def __init__(self, export_settings): + self.export_settings = export_settings + self.__finalized = False + + copyright = export_settings['gltf_copyright'] or None + asset = gltf2_io.Asset( + copyright=copyright, + extensions=None, + extras=None, + generator='Khronos glTF Blender I/O v' + get_version_string(), + min_version=None, + version='2.0') + + export_user_extensions('gather_asset_hook', export_settings, asset) + + self.__gltf = gltf2_io.Gltf( + accessors=[], + animations=[], + asset=asset, + buffers=[], + buffer_views=[], + cameras=[], + extensions={}, + extensions_required=[], + extensions_used=[], + extras=None, + images=[], + materials=[], + meshes=[], + nodes=[], + samplers=[], + scene=-1, + scenes=[], + skins=[], + textures=[] + ) + + self.additional_data = AdditionalData() + + self.__buffer = gltf2_io_buffer.Buffer() + self.__images = {} + + # mapping of all glTFChildOfRootProperty types to their corresponding root level arrays + self.__childOfRootPropertyTypeLookup = { + gltf2_io.Accessor: self.__gltf.accessors, + gltf2_io.Animation: self.__gltf.animations, + gltf2_io.Buffer: self.__gltf.buffers, + gltf2_io.BufferView: self.__gltf.buffer_views, + gltf2_io.Camera: self.__gltf.cameras, + gltf2_io.Image: self.__gltf.images, + gltf2_io.Material: self.__gltf.materials, + gltf2_io.Mesh: self.__gltf.meshes, + gltf2_io.Node: self.__gltf.nodes, + gltf2_io.Sampler: self.__gltf.samplers, + gltf2_io.Scene: self.__gltf.scenes, + gltf2_io.Skin: self.__gltf.skins, + gltf2_io.Texture: self.__gltf.textures + } + + self.__propertyTypeLookup = [ + gltf2_io.AccessorSparseIndices, + gltf2_io.AccessorSparse, + gltf2_io.AccessorSparseValues, + gltf2_io.AnimationChannel, + gltf2_io.AnimationChannelTarget, + gltf2_io.AnimationSampler, + gltf2_io.Asset, + gltf2_io.CameraOrthographic, + gltf2_io.CameraPerspective, + gltf2_io.MeshPrimitive, + gltf2_io.TextureInfo, + gltf2_io.MaterialPBRMetallicRoughness, + gltf2_io.MaterialNormalTextureInfoClass, + gltf2_io.MaterialOcclusionTextureInfoClass + ] + + self.__traverse(asset) + + @property + def glTF(self): + if not self.__finalized: + raise RuntimeError("glTF requested, but buffers are not finalized yet") + return self.__gltf + + def finalize_buffer(self, output_path=None, buffer_name=None, is_glb=False): + """Finalize the glTF and write buffers.""" + if self.__finalized: + raise RuntimeError("Tried to finalize buffers for finalized glTF file") + + if self.__buffer.byte_length > 0: + if is_glb: + uri = None + elif output_path and buffer_name: + with open(output_path + uri_to_path(buffer_name), 'wb') as f: + f.write(self.__buffer.to_bytes()) + uri = buffer_name + else: + uri = self.__buffer.to_embed_string() + + buffer = gltf2_io.Buffer( + byte_length=self.__buffer.byte_length, + extensions=None, + extras=None, + name=None, + uri=uri + ) + self.__gltf.buffers.append(buffer) + + self.__finalized = True + + if is_glb: + return self.__buffer.to_bytes() + + def add_draco_extension(self): + """ + Register Draco extension as *used* and *required*. + + :return: + """ + self.__gltf.extensions_required.append('KHR_draco_mesh_compression') + self.__gltf.extensions_used.append('KHR_draco_mesh_compression') + + def finalize_images(self): + """ + Write all images. 
+ """ + output_path = self.export_settings['gltf_texturedirectory'] + + if self.__images: + os.makedirs(output_path, exist_ok=True) + + for name, image in self.__images.items(): + dst_path = output_path + "/" + name + with open(dst_path, 'wb') as f: + f.write(image.data) + + def manage_gpu_instancing(self, node, also_mesh=False): + instances = {} + for child_idx in node.children: + child = self.__gltf.nodes[child_idx] + if child.children: + continue + if child.mesh is not None and child.mesh not in instances.keys(): + instances[child.mesh] = [] + if child.mesh is not None: + instances[child.mesh].append(child_idx) + + # For now, manage instances only if there are all children of same object + # And this instances don't have any children + instances = {k: v for k, v in instances.items() if len(v) > 1} + + holders = [] + if len(instances.keys()) == 1 and also_mesh is False: + # There is only 1 set of instances. So using the parent as instance holder + holder = node + holders = [node] + elif len(instances.keys()) > 1 or (len(instances.keys()) == 1 and also_mesh is True): + for h in range(len(instances.keys())): + # Create a new node + n = gltf2_io.Node( + camera=None, + children=[], + extensions=None, + extras=None, + matrix=None, + mesh=None, + name=node.name + "." + str(h), + rotation=None, + scale=None, + skin=None, + translation=None, + weights=None, + ) + n = self.__traverse_property(n) + idx = self.__to_reference(n) + + # Add it to original empty + node.children.append(idx) + holders.append(self.__gltf.nodes[idx]) + + for idx, inst_key in enumerate(instances.keys()): + insts = instances[inst_key] + holder = holders[idx] + + # Let's retrieve TRS of instances + translation = [] + rotation = [] + scale = [] + for inst_node_idx in insts: + inst_node = self.__gltf.nodes[inst_node_idx] + t = inst_node.translation if inst_node.translation is not None else [0, 0, 0] + r = inst_node.rotation if inst_node.rotation is not None else [0, 0, 0, 1] + s = inst_node.scale if inst_node.scale is not None else [1, 1, 1] + for i in t: + translation.append(i) + for i in r: + rotation.append(i) + for i in s: + scale.append(i) + + # Create Accessors for the extension + ext = {} + ext['attributes'] = {} + ext['attributes']['TRANSLATION'] = gather_accessor( + gltf2_io_binary_data.BinaryData.from_list(translation, ComponentType.Float), + ComponentType.Float, + len(translation) // 3, + None, + None, + DataType.Vec3, + None + ) + ext['attributes']['ROTATION'] = gather_accessor( + gltf2_io_binary_data.BinaryData.from_list(rotation, ComponentType.Float), + ComponentType.Float, + len(rotation) // 4, + None, + None, + DataType.Vec4, + None + ) + ext['attributes']['SCALE'] = gather_accessor( + gltf2_io_binary_data.BinaryData.from_list(scale, ComponentType.Float), + ComponentType.Float, + len(scale) // 3, + None, + None, + DataType.Vec3, + None + ) + + # Add extension to the Node, and traverse it + if not holder.extensions: + holder.extensions = {} + holder.extensions["EXT_mesh_gpu_instancing"] = gltf2_io_extensions.Extension( + 'EXT_mesh_gpu_instancing', ext, False) + holder.mesh = inst_key + self.__traverse(holder.extensions) + + # Remove children from original Empty + new_children = [] + for child_idx in node.children: + if child_idx not in insts: + new_children.append(child_idx) + node.children = new_children + + self.nodes_idx_to_remove.extend(insts) + + def manage_gpu_instancing_nodes(self, export_settings): + if export_settings['gltf_gpu_instances'] is True: + for scene_num in range(len(self.__gltf.scenes)): + # 
Modify the scene data in case of EXT_mesh_gpu_instancing export + + self.nodes_idx_to_remove = [] + for node_idx in self.__gltf.scenes[scene_num].nodes: + node = self.__gltf.nodes[node_idx] + if node.mesh is None: + self.manage_gpu_instancing(node) + else: + self.manage_gpu_instancing(node, also_mesh=True) + for child_idx in node.children: + child = self.__gltf.nodes[child_idx] + self.manage_gpu_instancing(child, also_mesh=child.mesh is not None) + + # Slides other nodes index + + self.nodes_idx_to_remove.sort() + for node_idx in self.__gltf.scenes[scene_num].nodes: + self.recursive_slide_node_idx(node_idx) + + new_node_list = [] + for node_idx in self.__gltf.scenes[scene_num].nodes: + len_ = len([i for i in self.nodes_idx_to_remove if i < node_idx]) + new_node_list.append(node_idx - len_) + self.__gltf.scenes[scene_num].nodes = new_node_list + + for skin in self.__gltf.skins: + new_joint_list = [] + for node_idx in skin.joints: + len_ = len([i for i in self.nodes_idx_to_remove if i < node_idx]) + new_joint_list.append(node_idx - len_) + skin.joints = new_joint_list + if skin.skeleton is not None: + len_ = len([i for i in self.nodes_idx_to_remove if i < skin.skeleton]) + skin.skeleton = skin.skeleton - len_ + + # Remove animation channels that was targeting a node that will be removed + new_animation_list = [] + for animation in self.__gltf.animations: + new_channel_list = [] + for channel in animation.channels: + if channel.target.node not in self.nodes_idx_to_remove: + new_channel_list.append(channel) + animation.channels = new_channel_list + if len(animation.channels) > 0: + new_animation_list.append(animation) + self.__gltf.animations = new_animation_list + + # TODO: remove unused animation accessors? + + # And now really remove nodes + self.__gltf.nodes = [node for idx, node in enumerate( + self.__gltf.nodes) if idx not in self.nodes_idx_to_remove] + + def add_scene(self, scene: gltf2_io.Scene, active: bool = False, export_settings=None): + """ + Add a scene to the glTF. + + The scene should be built up with the generated glTF classes + :param scene: gltf2_io.Scene type. Root node of the scene graph + :param active: If true, sets the glTD.scene index to the added scene + :return: nothing + """ + if self.__finalized: + raise RuntimeError("Tried to add scene to finalized glTF file") + + scene_num = self.__traverse(scene) + if active: + self.__gltf.scene = scene_num + + def recursive_slide_node_idx(self, node_idx): + node = self.__gltf.nodes[node_idx] + + new_node_children = [] + for child_idx in node.children: + len_ = len([i for i in self.nodes_idx_to_remove if i < child_idx]) + new_node_children.append(child_idx - len_) + + for child_idx in node.children: + self.recursive_slide_node_idx(child_idx) + + node.children = new_node_children + + def traverse_unused_skins(self, skins): + for s in skins: + self.__traverse(s) + + def traverse_additional_textures(self): + if self.export_settings['gltf_unused_textures'] is True: + tab = [] + for tex in self.export_settings['additional_texture_export']: + res = self.__traverse(tex) + tab.append(res) + + self.additional_data.additional_textures = tab + + def traverse_additional_images(self): + if self.export_settings['gltf_unused_images']: + for img in [img for img in bpy.data.images if img.source != "VIEWER"]: + # TODO manage full / partial / custom via hook ... 
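+ # Skip images already written out during material export; only the remaining ones are gathered here.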
+ if img.name not in self.export_settings['exported_images'].keys(): + self.__traverse(get_gltf_image_from_blender_image(img.name, self.export_settings)) + + def add_animation(self, animation: gltf2_io.Animation): + """ + Add an animation to the glTF. + + :param animation: glTF animation, with python style references (names) + :return: nothing + """ + if self.__finalized: + raise RuntimeError("Tried to add animation to finalized glTF file") + + self.__traverse(animation) + + def __to_reference(self, property): + """ + Append a child of root property to its respective list and return a reference into said list. + + If the property is not child of root, the property itself is returned. + :param property: A property type object that should be converted to a reference + :return: a reference or the object itself if it is not child or root + """ + gltf_list = self.__childOfRootPropertyTypeLookup.get(type(property), None) + if gltf_list is None: + # The object is not of a child of root --> don't convert to reference + return property + + return self.__append_unique_and_get_index(gltf_list, property) + + @staticmethod + def __append_unique_and_get_index(target: list, obj): + if obj in target: + return target.index(obj) + else: + index = len(target) + target.append(obj) + return index + + def __add_image(self, image: gltf2_io_image_data.ImageData): + name = image.adjusted_name() + count = 1 + regex = re.compile(r"-\d+$") + while name + image.file_extension in self.__images.keys(): + regex_found = re.findall(regex, name) + if regex_found: + name = re.sub(regex, "-" + str(count), name) + else: + name += "-" + str(count) + + count += 1 + # TODO: allow embedding of images (base64) + + self.__images[name + image.file_extension] = image + + texture_dir = self.export_settings['gltf_texturedirectory'] + abs_path = os.path.join(texture_dir, name + image.file_extension) + rel_path = os.path.relpath( + abs_path, + start=self.export_settings['gltf_filedirectory'], + ) + return path_to_uri(rel_path) + + @classmethod + def __get_key_path(cls, d: dict, keypath: List[str], default): + """Create if necessary and get the element at key path from a dict""" + key = keypath.pop(0) + + if len(keypath) == 0: + v = d.get(key, default) + d[key] = v + return v + + d_key = d.get(key, {}) + d[key] = d_key + return cls.__get_key_path(d[key], keypath, default) + + def traverse_extensions(self): + self.__traverse(self.__gltf.extensions) + + def __traverse_property(self, node): + for member_name in [a for a in dir(node) if not a.startswith('__') and not callable(getattr(node, a))]: + new_value = self.__traverse(getattr(node, member_name)) + setattr(node, member_name, new_value) # usually this is the same as before + + # # TODO: maybe with extensions hooks we can find a more elegant solution + # if member_name == "extensions" and new_value is not None: + # for extension_name in new_value.keys(): + # self.__append_unique_and_get_index(self.__gltf.extensions_used, extension_name) + # self.__append_unique_and_get_index(self.__gltf.extensions_required, extension_name) + + if self.export_settings['gltf_trs_w_animation_pointer'] is True: + if type(node) == gltf2_io.AnimationChannelTarget: + if node.path in ["translation", "rotation", "scale", "weights"]: + if node.extensions is None: + node.extensions = {} + node.extensions["KHR_animation_pointer"] = {"pointer": "/nodes/" + str(node.node) + "/" + node.path} + node.node = None + node.path = "pointer" + self.__append_unique_and_get_index(self.__gltf.extensions_used, 
"KHR_animation_pointer") + + if type(node) == gltf2_io.AnimationChannelTarget: + if node.path not in ["translation", "rotation", "scale", "weights"] and node.path != "pointer": + if node.extensions is None: + node.extensions = {} + node.extensions["KHR_animation_pointer"] = {"pointer": node.path.replace("XXX", str(node.node))} + node.node = None + node.path = "pointer" + self.__append_unique_and_get_index(self.__gltf.extensions_used, "KHR_animation_pointer") + + return node + + def __traverse(self, node): + """ + Recursively traverse a scene graph consisting of gltf compatible elements. + + The tree is traversed downwards until a primitive is reached. Then any ChildOfRoot property + is stored in the according list in the glTF and replaced with a index reference in the upper level. + """ + # traverse nodes of a child of root property type and add them to the glTF root + if type(node) in self.__childOfRootPropertyTypeLookup: + node = self.__traverse_property(node) + idx = self.__to_reference(node) + # child of root properties are only present at root level --> replace with index in upper level + return idx + + # traverse lists, such as children and replace them with indices + if isinstance(node, list): + for i in range(len(node)): + node[i] = self.__traverse(node[i]) + return node + + if isinstance(node, dict): + for key in node.keys(): + node[key] = self.__traverse(node[key]) + return node + + # traverse into any other property + if type(node) in self.__propertyTypeLookup: + return self.__traverse_property(node) + + # binary data needs to be moved to a buffer and referenced with a buffer view + if isinstance(node, gltf2_io_binary_data.BinaryData): + buffer_view = self.__buffer.add_and_get_view(node) + return self.__to_reference(buffer_view) + + # image data needs to be saved to file + if isinstance(node, gltf2_io_image_data.ImageData): + image = self.__add_image(node) + return image + + # extensions + # I don't know why, but after reloading script, this condition failed + # So using name comparison, instead of isinstance + # if isinstance(node, gltf2_io_extensions.Extension): + if isinstance(node, gltf2_io_extensions.Extension) \ + or (node and hasattr(type(node), "extension")): + extension = self.__traverse(node.extension) + self.__append_unique_and_get_index(self.__gltf.extensions_used, node.name) + if node.required: + self.__append_unique_and_get_index(self.__gltf.extensions_required, node.name) + + # extensions that lie in the root of the glTF. 
+ # They need to be converted to a reference at place of occurrence + if isinstance(node, gltf2_io_extensions.ChildOfRootExtension): + root_extension_list = self.__get_key_path(self.__gltf.extensions, [node.name] + node.path, []) + idx = self.__append_unique_and_get_index(root_extension_list, extension) + return idx + + return extension + + # do nothing for any type that does not match a glTF schema (primitives) + return node diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/material/extensions/gltf2_blender_gather_materials_anisotropy.py b/scripts/addons_core/io_scene_gltf2/blender/exp/material/extensions/gltf2_blender_gather_materials_anisotropy.py new file mode 100644 index 00000000000..4ca9419e0a9 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/material/extensions/gltf2_blender_gather_materials_anisotropy.py @@ -0,0 +1,260 @@ +# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy +from .gltf2_blender_image import TmpImageGuard, make_temp_image_copy, StoreImage, StoreData +import numpy as np +from .....io.com.gltf2_io_extensions import Extension +from ....com.gltf2_blender_conversion import get_anisotropy_rotation_blender_to_gltf +from ...material import gltf2_blender_gather_texture_info +from ..gltf2_blender_search_node_tree import detect_anisotropy_nodes, get_socket, has_image_node_from_socket, get_factor_from_socket + + +def export_anisotropy(blender_material, export_settings): + + anisotropy_extension = {} + uvmap_infos = {} + udim_infos = {} + + anisotropy_socket = get_socket(blender_material.node_tree, blender_material.use_nodes, 'Anisotropic') + anisotropic_rotation_socket = get_socket( + blender_material.node_tree, + blender_material.use_nodes, + 'Anisotropic Rotation') + anisotropy_tangent_socket = get_socket(blender_material.node_tree, blender_material.use_nodes, 'Tangent') + + if anisotropy_socket.socket is None or anisotropic_rotation_socket.socket is None or anisotropy_tangent_socket.socket is None: + return None, {}, {} + + if anisotropy_socket.socket.is_linked is False and anisotropic_rotation_socket.socket.is_linked is False: + # We don't need the complex node setup, just export the value + anisotropyStrength = anisotropy_socket.socket.default_value + if anisotropyStrength != 0.0: + anisotropy_extension['anisotropyStrength'] = anisotropyStrength + + # Storing path for KHR_animation_pointer + path_ = {} + path_['length'] = 1 + path_['path'] = "/materials/XXX/extensions/KHR_materials_anisotropy/anisotropyStrength" + export_settings['current_paths']["node_tree." + anisotropy_socket.socket.path_from_id() + + ".default_value"] = path_ + + anisotropyRotation = get_anisotropy_rotation_blender_to_gltf(anisotropic_rotation_socket.socket.default_value) + if anisotropyRotation != 0.0: + anisotropy_extension['anisotropyRotation'] = anisotropyRotation + + # Storing path for KHR_animation_pointer + path_ = {} + path_['length'] = 1 + path_['path'] = "/materials/XXX/extensions/KHR_materials_anisotropy/anisotropyRotation" + export_settings['current_paths']["node_tree." 
+ + anisotropic_rotation_socket.socket.path_from_id() + + ".default_value"] = path_ + + # Always export extension, even if no value, in case of animation + # If data are not animated, it will be removed later + + return Extension('KHR_materials_anisotropy', anisotropy_extension, False), uvmap_infos, udim_infos + + # Get complex node setup + + is_anisotropy, anisotropy_data = detect_anisotropy_nodes( + anisotropy_socket, + anisotropic_rotation_socket, + anisotropy_tangent_socket, + export_settings + ) + + if not is_anisotropy: + # Trying to export from grayscale textures + anisotropy_texture, uvmap_info = export_anisotropy_from_grayscale_textures(blender_material, export_settings) + if anisotropy_texture is None: + return None, {}, {} + + fac, path = get_factor_from_socket(anisotropy_socket, kind='VALUE') + if fac is None and anisotropy_texture is not None: + anisotropy_extension['anisotropyStrength'] = 1.0 + elif fac != 0.0 and anisotropy_texture is not None: + anisotropy_extension['anisotropyStrength'] = fac + + # Storing path for KHR_animation_pointer + if path is not None: + path_ = {} + path_['length'] = 1 + path_['path'] = "/materials/XXX/extensions/KHR_materials_anisotropy/anisotropyStrength" + export_settings['current_paths'][path] = path_ + + fac, path = get_factor_from_socket(anisotropic_rotation_socket, kind='VALUE') + if fac is None and anisotropy_texture is not None: + pass # Rotation 0 is default + elif fac != 0.0 and anisotropy_texture is not None: + anisotropy_extension['anisotropyRotation'] = get_anisotropy_rotation_blender_to_gltf(fac) + + # Storing path for KHR_animation_pointer + if path is not None: + path_ = {} + path_['length'] = 1 + path_['path'] = "/materials/XXX/extensions/KHR_materials_anisotropy/anisotropyRotation" + export_settings['current_paths'][path] = path_ + + anisotropy_extension['anisotropyTexture'] = anisotropy_texture + uvmap_infos.update({'anisotropyTexture': uvmap_info}) + + return Extension('KHR_materials_anisotropy', anisotropy_extension, False), uvmap_infos, udim_infos + + # Export from complex node setup + + if anisotropy_data['anisotropyStrength'][0] != 0.0: + anisotropy_extension['anisotropyStrength'] = anisotropy_data['anisotropyStrength'][0] + if anisotropy_data['anisotropyRotation'][0] != 0.0: + anisotropy_extension['anisotropyRotation'] = anisotropy_data['anisotropyRotation'][0] + + # Storing path for KHR_animation_pointer + if anisotropy_data['anisotropyStrength'][1] is not None: + path_ = {} + path_['length'] = 1 + path_['path'] = "/materials/XXX/extensions/KHR_materials_anisotropy/anisotropyStrength" + export_settings['current_paths'][anisotropy_data['anisotropyStrength'][1]] = path_ + + # Storing path for KHR_animation_pointer + if anisotropy_data['anisotropyRotation'][1] is not None: + path_ = {} + path_['length'] = 1 + path_['path'] = "/materials/XXX/extensions/KHR_materials_anisotropy/anisotropyRotation" + export_settings['current_paths'][anisotropy_data['anisotropyRotation'][1]] = path_ + + # Get texture data + # No need to check here that we have a texture, this check is already done insode detect_anisotropy_nodes + anisotropy_texture, uvmap_info, udim_info, _ = gltf2_blender_gather_texture_info.gather_texture_info( + anisotropy_data['tex_socket'], + (anisotropy_data['tex_socket'],), + export_settings, + ) + anisotropy_extension['anisotropyTexture'] = anisotropy_texture + uvmap_infos.update({'anisotropyTexture': uvmap_info}) + udim_infos.update({'anisotropyTexture': udim_info} if len(udim_info.keys()) > 0 else {}) + + if 
len(export_settings['current_texture_transform']) != 0: + for k in export_settings['current_texture_transform'].keys(): + path_ = {} + path_['length'] = export_settings['current_texture_transform'][k]['length'] + path_['path'] = export_settings['current_texture_transform'][k]['path'].replace( + "YYY", "extensions/KHR_materials_anisotropy/anisotropyTexture/extensions") + path_['vector_type'] = export_settings['current_texture_transform'][k]['vector_type'] + export_settings['current_paths'][k] = path_ + + export_settings['current_texture_transform'] = {} + + return Extension('KHR_materials_anisotropy', anisotropy_extension, False), uvmap_infos, udim_infos + + +def export_anisotropy_from_grayscale_textures(blender_material, export_settings): + # There will be a texture, with a complex calculation (no direct channel mapping) + + anisotropy_socket = get_socket(blender_material.node_tree, blender_material.use_nodes, 'Anisotropic') + anisotropic_rotation_socket = get_socket( + blender_material.node_tree, + blender_material.use_nodes, + 'Anisotropic Rotation') + anisotropy_tangent_socket = get_socket(blender_material.node_tree, blender_material.use_nodes, 'Tangent') + + sockets = (anisotropy_socket, anisotropic_rotation_socket, anisotropy_tangent_socket) + + # Set primary socket having a texture + primary_socket = anisotropy_socket + if not has_image_node_from_socket(primary_socket, export_settings): + primary_socket = anisotropic_rotation_socket + + anisotropyTexture, uvmap_info, _, _ = gltf2_blender_gather_texture_info.gather_texture_info( + primary_socket, + sockets, + export_settings, + filter_type='ANY') + + if anisotropyTexture is None: + return None, {} + + if len(export_settings['current_texture_transform']) != 0: + for k in export_settings['current_texture_transform'].keys(): + path_ = {} + path_['length'] = export_settings['current_texture_transform'][k]['length'] + path_['path'] = export_settings['current_texture_transform'][k]['path'].replace( + "YYY", "extensions/KHR_materials_anisotropy/anisotropyTexture/extensions") + path_['vector_type'] = export_settings['current_texture_transform'][k]['vector_type'] + export_settings['current_paths'][k] = path_ + + export_settings['current_texture_transform'] = {} + + return anisotropyTexture, uvmap_info + + +def grayscale_anisotropy_calculation(stored, export_settings): + + # Find all Blender images used + images = [] + for fill in stored.values(): + if isinstance(fill, StoreImage): + if fill.image not in images: + images.append(fill.image) + + if not images: + # No ImageFills; use a 1x1 white pixel + pixels = np.array([1.0, 1.0, 1.0, 1.0], np.float32) + return pixels, 1, 1 + + width = max(image.size[0] for image in images) + height = max(image.size[1] for image in images) + + buffers = {} + + def rgb2gray(rgb): + r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2] + gray = 0.2989 * r + 0.5870 * g + 0.1140 * b + return gray + + for identifier, image in [ + (ident, store.image) for ( + ident, store) in stored.items() if isinstance( + store, StoreImage)]: + tmp_buf = np.empty(width * height * 4, np.float32) + + if image.size[0] == width and image.size[1] == height: + image.pixels.foreach_get(tmp_buf) + else: + # Image is the wrong size; make a temp copy and scale it. 
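+ # Note: TmpImageGuard presumably discards the temporary copy once this block exits.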
+ with TmpImageGuard() as guard: + make_temp_image_copy(guard, src_image=image) + tmp_image = guard.image + tmp_image.scale(width, height) + tmp_image.pixels.foreach_get(tmp_buf) + + buffers[identifier] = np.reshape(tmp_buf, [width, height, 4]) + buffers[identifier] = rgb2gray(buffers[identifier]) + + for identifier, data in [(ident, data) for (ident, data) in stored.items() if isinstance(data, StoreData)]: + buffers[identifier] = np.full((width, height), 1) # Set to white / 1.0, as value is set as factor + + # Combine the image + out_buf = np.zeros((width, height, 4), np.float32) + out_buf[:, :, 3] = 1.0 # A : Alpha + out_buf[:, :, 2] = buffers['anisotropy'] # B : Strength (Anisotropic socket) + + # Rotation needs to be converted from 0-1 to 0-2pi, and then vectorized it, normalized, and apply to R & G channels + # with mapping + + buffers['anisotropic_rotation'] = buffers['anisotropic_rotation'] * 2 * np.pi + buffers['anisotropic_rotation'] = np.stack( + (np.cos( + buffers['anisotropic_rotation']), np.sin( + buffers['anisotropic_rotation'])), axis=-1) + buffers['anisotropic_rotation'] = buffers['anisotropic_rotation'] / \ + np.linalg.norm(buffers['anisotropic_rotation'], axis=-1, keepdims=True) + buffers['anisotropic_rotation'] = (buffers['anisotropic_rotation'] + 1.0) / 2.0 + + out_buf[:, :, 0] = buffers['anisotropic_rotation'][:, :, 0] # R : Rotation X + out_buf[:, :, 1] = buffers['anisotropic_rotation'][:, :, 1] # G : Rotation Y + + out_buf = np.reshape(out_buf, (width * height * 4)) + + return np.float32(out_buf), width, height, None diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/material/extensions/gltf2_blender_gather_materials_clearcoat.py b/scripts/addons_core/io_scene_gltf2/blender/exp/material/extensions/gltf2_blender_gather_materials_clearcoat.py new file mode 100644 index 00000000000..d9c3f8bfda2 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/material/extensions/gltf2_blender_gather_materials_clearcoat.py @@ -0,0 +1,156 @@ +# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy +from .....io.com.gltf2_io_constants import BLENDER_COAT_ROUGHNESS +from .....io.com.gltf2_io_extensions import Extension +from ...material import gltf2_blender_gather_texture_info + +from ..gltf2_blender_search_node_tree import has_image_node_from_socket, get_socket, get_factor_from_socket + + +def export_clearcoat(blender_material, export_settings): + has_clearcoat_texture = False + has_clearcoat_roughness_texture = False + + clearcoat_extension = {} + clearcoat_roughness_slots = () + + clearcoat_socket = get_socket(blender_material.node_tree, blender_material.use_nodes, 'Coat Weight') + clearcoat_roughness_socket = get_socket(blender_material.node_tree, blender_material.use_nodes, 'Coat Roughness') + clearcoat_normal_socket = get_socket(blender_material.node_tree, blender_material.use_nodes, 'Coat Normal') + + if clearcoat_socket.socket is not None and isinstance( + clearcoat_socket.socket, + bpy.types.NodeSocket) and not clearcoat_socket.socket.is_linked: + if clearcoat_socket.socket.default_value != 0.0: + clearcoat_extension['clearcoatFactor'] = clearcoat_socket.socket.default_value + + # Storing path for KHR_animation_pointer + path_ = {} + path_['length'] = 1 + path_['path'] = "/materials/XXX/extensions/KHR_materials_clearcoat/clearcoatFactor" + export_settings['current_paths']["node_tree." 
+ clearcoat_socket.socket.path_from_id() + + ".default_value"] = path_ + + elif has_image_node_from_socket(clearcoat_socket, export_settings): + fac, path = get_factor_from_socket(clearcoat_socket, kind='VALUE') + # default value in glTF is 0.0, but if there is a texture without factor, use 1 + clearcoat_extension['clearcoatFactor'] = fac if fac is not None else 1.0 + has_clearcoat_texture = True + + # Storing path for KHR_animation_pointer + if path is not None: + path_ = {} + path_['length'] = 1 + path_['path'] = "/materials/XXX/extensions/KHR_materials_clearcoat/clearcoatFactor" + export_settings['current_paths'][path] = path_, {} + + if clearcoat_roughness_socket.socket is not None and isinstance( + clearcoat_roughness_socket.socket, + bpy.types.NodeSocket) and not clearcoat_roughness_socket.socket.is_linked: + if abs(clearcoat_roughness_socket.socket.default_value - + BLENDER_COAT_ROUGHNESS) > 1e-5 or (abs(clearcoat_roughness_socket.socket.default_value - + BLENDER_COAT_ROUGHNESS) > 1e-5 and 'clearcoatFactor' in clearcoat_extension): + clearcoat_extension['clearcoatRoughnessFactor'] = clearcoat_roughness_socket.socket.default_value + + # Storing path for KHR_animation_pointer + path_ = {} + path_['length'] = 1 + path_['path'] = "/materials/XXX/extensions/KHR_materials_clearcoat/clearcoatRoughnessFactor " + export_settings['current_paths']["node_tree." + + clearcoat_roughness_socket.socket.path_from_id() + + ".default_value"] = path_ + + elif has_image_node_from_socket(clearcoat_roughness_socket, export_settings): + fac, path = get_factor_from_socket(clearcoat_roughness_socket, kind='VALUE') + # default value in glTF is 0.0, but if there is a texture without factor, use 1 + clearcoat_extension['clearcoatRoughnessFactor'] = fac if fac is not None else 1.0 + has_clearcoat_roughness_texture = True + + # Storing path for KHR_animation_pointer + if path is not None: + path_ = {} + path_['length'] = 1 + path_['path'] = "/materials/XXX/extensions/KHR_materials_clearcoat/clearcoatRoughnessFactor" + export_settings['current_paths'][path] = path_ + + # Pack clearcoat (R) and clearcoatRoughness (G) channels. 
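+ # The slot tuple below lists which sockets are packed together into a single texture.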
+ if has_clearcoat_texture and has_clearcoat_roughness_texture: + clearcoat_roughness_slots = (clearcoat_socket, clearcoat_roughness_socket,) + elif has_clearcoat_texture: + clearcoat_roughness_slots = (clearcoat_socket,) + elif has_clearcoat_roughness_texture: + clearcoat_roughness_slots = (clearcoat_roughness_socket,) + + uvmap_infos = {} + udim_infos = {} + + if len(clearcoat_roughness_slots) > 0: + if has_clearcoat_texture: + clearcoat_texture, uvmap_info, udim_info, _ = gltf2_blender_gather_texture_info.gather_texture_info( + clearcoat_socket, + clearcoat_roughness_slots, + export_settings, + ) + clearcoat_extension['clearcoatTexture'] = clearcoat_texture + uvmap_infos.update({'clearcoatTexture': uvmap_info}) + udim_infos.update({'clearcoatTexture': udim_info} if len(udim_info.keys()) > 0 else {}) + + if len(export_settings['current_texture_transform']) != 0: + for k in export_settings['current_texture_transform'].keys(): + path_ = {} + path_['length'] = export_settings['current_texture_transform'][k]['length'] + path_['path'] = export_settings['current_texture_transform'][k]['path'].replace( + "YYY", "extensions/KHR_materials_clearcoat/clearcoatTexture/extensions") + path_['vector_type'] = export_settings['current_texture_transform'][k]['vector_type'] + export_settings['current_paths'][k] = path_ + + export_settings['current_texture_transform'] = {} + + if has_clearcoat_roughness_texture: + clearcoat_roughness_texture, uvmap_info, udim_info, _ = gltf2_blender_gather_texture_info.gather_texture_info( + clearcoat_roughness_socket, clearcoat_roughness_slots, export_settings, ) + clearcoat_extension['clearcoatRoughnessTexture'] = clearcoat_roughness_texture + uvmap_infos.update({'clearcoatRoughnessTexture': uvmap_info}) + udim_infos.update({'clearcoatRoughnessTexture': udim_info} if len(udim_info.keys()) > 0 else {}) + + if len(export_settings['current_texture_transform']) != 0: + for k in export_settings['current_texture_transform'].keys(): + path_ = {} + path_['length'] = export_settings['current_texture_transform'][k]['length'] + path_['path'] = export_settings['current_texture_transform'][k]['path'].replace( + "YYY", "extensions/KHR_materials_clearcoat/clearcoatRoughnessTexture/extensions") + path_['vector_type'] = export_settings['current_texture_transform'][k]['vector_type'] + export_settings['current_paths'][k] = path_ + + export_settings['current_texture_transform'] = {} + + if has_image_node_from_socket(clearcoat_normal_socket, export_settings): + clearcoat_normal_texture, uvmap_info, udim_info, _ = gltf2_blender_gather_texture_info.gather_material_normal_texture_info_class( + clearcoat_normal_socket, (clearcoat_normal_socket,), export_settings) + clearcoat_extension['clearcoatNormalTexture'] = clearcoat_normal_texture + uvmap_infos.update({'clearcoatNormalTexture': uvmap_info}) + udim_infos.update({'clearcoatNormalTexture': udim_info} if len(udim_info.keys()) > 0 else {}) + + if len(export_settings['current_texture_transform']) != 0: + for k in export_settings['current_texture_transform'].keys(): + path_ = {} + path_['length'] = export_settings['current_texture_transform'][k]['length'] + path_['path'] = export_settings['current_texture_transform'][k]['path'].replace( + "YYY", "extensions/KHR_materials_clearcoat/clearcoatRoughnessTexture/extensions") + path_['vector_type'] = export_settings['current_texture_transform'][k]['vector_type'] + export_settings['current_paths'][k] = path_ + + if len(export_settings['current_normal_scale']) != 0: + for k in 
export_settings['current_normal_scale'].keys(): + path_ = {} + path_['length'] = export_settings['current_normal_scale'][k]['length'] + path_['path'] = export_settings['current_normal_scale'][k]['path'].replace( + "YYY", "extensions/KHR_materials_clearcoat/clearcoatNormalTexture") + export_settings['current_paths'][k] = path_ + + export_settings['current_normal_scale'] = {} + + return Extension('KHR_materials_clearcoat', clearcoat_extension, False), uvmap_infos, udim_infos diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/material/extensions/gltf2_blender_gather_materials_emission.py b/scripts/addons_core/io_scene_gltf2/blender/exp/material/extensions/gltf2_blender_gather_materials_emission.py new file mode 100644 index 00000000000..973f7882605 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/material/extensions/gltf2_blender_gather_materials_emission.py @@ -0,0 +1,115 @@ +# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy +from .....io.com.gltf2_io_extensions import Extension +from ...material import gltf2_blender_gather_texture_info +from ..gltf2_blender_search_node_tree import \ + get_const_from_default_value_socket, \ + get_socket, \ + get_factor_from_socket, \ + get_const_from_socket, \ + NodeSocket, \ + get_socket_from_gltf_material_node + + +def export_emission_factor(blender_material, export_settings): + emissive_socket = get_socket(blender_material.node_tree, blender_material.use_nodes, "Emissive") + if emissive_socket.socket is None: + emissive_socket = get_socket_from_gltf_material_node( + blender_material.node_tree, blender_material.use_nodes, "EmissiveFactor") + if emissive_socket is not None and isinstance(emissive_socket.socket, bpy.types.NodeSocket): + if export_settings['gltf_image_format'] != "NONE": + factor, path = get_factor_from_socket(emissive_socket, kind='RGB') + else: + factor, path = get_const_from_default_value_socket(emissive_socket, kind='RGB') + + if factor is None and emissive_socket.socket.is_linked: + # In glTF, the default emissiveFactor is all zeros, so if an emission texture is connected, + # we have to manually set it to all ones. 
+ factor = [1.0, 1.0, 1.0] + + if factor is None: + factor = [0.0, 0.0, 0.0] + + # Handle Emission Strength + strength_socket = None + strength_path = None + if emissive_socket.socket.node.type == 'EMISSION': + strength_socket = emissive_socket.socket.node.inputs['Strength'] + elif 'Emission Strength' in emissive_socket.socket.node.inputs: + strength_socket = emissive_socket.socket.node.inputs['Emission Strength'] + if strength_socket is not None and isinstance(strength_socket, bpy.types.NodeSocket): + strength, strength_path = get_factor_from_socket(NodeSocket( + strength_socket, emissive_socket.group_path), kind='VALUE') + strength = ( + strength + if strength_socket is not None + else None + ) + if strength is not None: + factor = [f * strength for f in factor] + + # Clamp to range [0,1] + # Official glTF clamp to range [0,1] + # If we are outside, we need to use extension KHR_materials_emissive_strength + + if factor == [0, 0, 0]: + factor = None + + # Storing path for KHR_animation_pointer + if path is not None: + path_ = {} + path_['length'] = 3 + path_['path'] = "/materials/XXX/emissiveFactor" + path_['strength_channel'] = strength_path + export_settings['current_paths'][path] = path_ + + # Storing path for KHR_animation_pointer, for emissiveStrength (if needed) + if strength_path is not None: + path_ = {} + path_['length'] = 1 + path_['path'] = "/materials/XXX/extensions/KHR_materials_emissive_strength/emissiveStrength" + path_['factor_channel'] = path + export_settings['current_paths'][strength_path] = path_ + + return factor + + return None + + +def export_emission_texture(blender_material, export_settings): + emissive = get_socket(blender_material.node_tree, blender_material.use_nodes, "Emissive") + if emissive.socket is None: + emissive = get_socket_from_gltf_material_node( + blender_material.node_tree, blender_material.use_nodes, "Emissive") + emissive_texture, uvmap_info, udim_info, _ = gltf2_blender_gather_texture_info.gather_texture_info( + emissive, (emissive,), export_settings) + + if len(export_settings['current_texture_transform']) != 0: + for k in export_settings['current_texture_transform'].keys(): + path_ = {} + path_['length'] = export_settings['current_texture_transform'][k]['length'] + path_['path'] = export_settings['current_texture_transform'][k]['path'].replace( + "YYY", "emissiveTexture/extensions") + path_['vector_type'] = export_settings['current_texture_transform'][k]['vector_type'] + export_settings['current_paths'][k] = path_ + + export_settings['current_texture_transform'] = {} + + return emissive_texture, { + 'emissiveTexture': uvmap_info}, { + 'emissiveTexture': udim_info} if len( + udim_info.keys()) > 0 else {} + + +def export_emission_strength_extension(emissive_factor, export_settings): + # Always export the extension if the emissive factor + # If the emissive factor is animated, we need to export the extension, even if the initial value is < 1.0 + # We will check if the strength is animated and this extension is needed at end of the export + emissive_strength_extension = {} + if any([i > 1.0 for i in emissive_factor or []]): + emissive_strength_extension['emissiveStrength'] = max(emissive_factor) + + return Extension('KHR_materials_emissive_strength', emissive_strength_extension, False) diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/material/extensions/gltf2_blender_gather_materials_ior.py b/scripts/addons_core/io_scene_gltf2/blender/exp/material/extensions/gltf2_blender_gather_materials_ior.py new file mode 100644 index 
00000000000..d3817b9d9ff --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/material/extensions/gltf2_blender_gather_materials_ior.py @@ -0,0 +1,45 @@ +# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +from .....io.com.gltf2_io_extensions import Extension +from .....io.com.gltf2_io_constants import GLTF_IOR +from ..gltf2_blender_search_node_tree import get_socket + + +def export_ior(blender_material, extensions, export_settings): + ior_socket = get_socket(blender_material.node_tree, blender_material.use_nodes, 'IOR') + + if not ior_socket.socket: + return None + + # We don't manage case where socket is linked, always check default value + if ior_socket.socket.is_linked: + # TODOExt: add warning? + return None + + # Exporting IOR even if it is the default value + # It will be removed by the exporter if it is not animated + # (In case the first key is the default value, we need to keep the extension) + + # Export only if the following extensions are exported: + need_to_export_ior = [ + 'KHR_materials_transmission', + 'KHR_materials_volume', + 'KHR_materials_specular' + ] + + if not any([e in extensions.keys() for e in need_to_export_ior]): + return None + + ior_extension = {} + if ior_socket.socket.default_value != GLTF_IOR: + ior_extension['ior'] = ior_socket.socket.default_value + + # Storing path for KHR_animation_pointer + path_ = {} + path_['length'] = 1 + path_['path'] = "/materials/XXX/extensions/KHR_materials_ior/ior" + export_settings['current_paths']["node_tree." + ior_socket.socket.path_from_id() + ".default_value"] = path_ + + return Extension('KHR_materials_ior', ior_extension, False) diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/material/extensions/gltf2_blender_gather_materials_sheen.py b/scripts/addons_core/io_scene_gltf2/blender/exp/material/extensions/gltf2_blender_gather_materials_sheen.py new file mode 100644 index 00000000000..b9f696bbc35 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/material/extensions/gltf2_blender_gather_materials_sheen.py @@ -0,0 +1,129 @@ +# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy +from .....io.com.gltf2_io_extensions import Extension +from ...material import gltf2_blender_gather_texture_info +from ..gltf2_blender_search_node_tree import \ + has_image_node_from_socket, \ + get_socket, \ + get_factor_from_socket + + +def export_sheen(blender_material, export_settings): + sheen_extension = {} + + sheenTint_socket = get_socket(blender_material.node_tree, blender_material.use_nodes, "Sheen Tint") + sheenRoughness_socket = get_socket(blender_material.node_tree, blender_material.use_nodes, "Sheen Roughness") + sheen_socket = get_socket(blender_material.node_tree, blender_material.use_nodes, "Sheen Weight") + + if sheenTint_socket.socket is None or sheenRoughness_socket.socket is None or sheen_socket.socket is None: + return None, {}, {} + + if sheen_socket.socket.is_linked is False and sheen_socket.socket.default_value == 0.0: + return None, {}, {} + + uvmap_infos = {} + udim_infos = {} + + # TODOExt : What to do if sheen_socket is linked? or is not between 0 and 1? 
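+ # "Non linked" below means the socket carries a constant default value rather than a node connection.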
+ + sheenTint_non_linked = sheenTint_socket.socket is not None and isinstance( + sheenTint_socket.socket, bpy.types.NodeSocket) and not sheenTint_socket.socket.is_linked + sheenRoughness_non_linked = sheenRoughness_socket.socket is not None and isinstance( + sheenRoughness_socket.socket, bpy.types.NodeSocket) and not sheenRoughness_socket.socket.is_linked + + if sheenTint_non_linked is True: + color = sheenTint_socket.socket.default_value[:3] + if color != (0.0, 0.0, 0.0): + sheen_extension['sheenColorFactor'] = color + + # Storing path for KHR_animation_pointer + path_ = {} + path_['length'] = 1 + path_['path'] = "/materials/XXX/extensions/KHR_materials_sheen/sheenColorFactor" + export_settings['current_paths']["node_tree." + sheenTint_socket.socket.path_from_id() + + ".default_value"] = path_ + + else: + # Factor + fac, path = get_factor_from_socket(sheenTint_socket, kind='RGB') + if fac is None: + fac = [1.0, 1.0, 1.0] # Default is 0.0/0.0/0.0, so we need to set it to 1 if no factor + if fac is not None and fac != [0.0, 0.0, 0.0]: + sheen_extension['sheenColorFactor'] = fac + + # Storing path for KHR_animation_pointer + if path is not None: + path_ = {} + path_['length'] = 1 + path_['path'] = "/materials/XXX/extensions/KHR_materials_sheen/sheenColorFactor" + export_settings['current_paths'][path] = path_ + + # Texture + if has_image_node_from_socket(sheenTint_socket, export_settings): + original_sheenColor_texture, uvmap_info, udim_info, _ = gltf2_blender_gather_texture_info.gather_texture_info( + sheenTint_socket, (sheenTint_socket,), export_settings, ) + sheen_extension['sheenColorTexture'] = original_sheenColor_texture + uvmap_infos.update({'sheenColorTexture': uvmap_info}) + udim_infos.update({'sheenColorTexture': udim_info} if len(udim_info) > 0 else {}) + + if len(export_settings['current_texture_transform']) != 0: + for k in export_settings['current_texture_transform'].keys(): + path_ = {} + path_['length'] = export_settings['current_texture_transform'][k]['length'] + path_['path'] = export_settings['current_texture_transform'][k]['path'].replace( + "YYY", "extensions/KHR_materials_sheen/sheenColorTexture/extensions") + path_['vector_type'] = export_settings['current_texture_transform'][k]['vector_type'] + export_settings['current_paths'][k] = path_ + + export_settings['current_texture_transform'] = {} + + if sheenRoughness_non_linked is True: + fac = sheenRoughness_socket.socket.default_value + if fac != 0.0: + sheen_extension['sheenRoughnessFactor'] = fac + + # Storing path for KHR_animation_pointer + path_ = {} + path_['length'] = 1 + path_['path'] = "/materials/XXX/extensions/KHR_materials_sheen/sheenRoughnessFactor" + export_settings['current_paths']["node_tree." 
+ sheenRoughness_socket.socket.path_from_id() + + ".default_value"] = path_ + + else: + # Factor + fac, path = get_factor_from_socket(sheenRoughness_socket, kind='VALUE') + if fac is None: + fac = 1.0 # Default is 0.0 so we need to set it to 1.0 if no factor + if fac is not None and fac != 0.0: + sheen_extension['sheenRoughnessFactor'] = fac + + # Storing path for KHR_animation_pointer + if path is not None: + path_ = {} + path_['length'] = 1 + path_['path'] = "/materials/XXX/extensions/KHR_materials_sheen/sheenRoughnessFactor" + export_settings['current_paths'][path] = path_ + + # Texture + if has_image_node_from_socket(sheenRoughness_socket, export_settings): + original_sheenRoughness_texture, uvmap_info, udim_info, _ = gltf2_blender_gather_texture_info.gather_texture_info( + sheenRoughness_socket, (sheenRoughness_socket,), export_settings, ) + sheen_extension['sheenRoughnessTexture'] = original_sheenRoughness_texture + uvmap_infos.update({'sheenRoughnessTexture': uvmap_info}) + udim_infos.update({'sheenRoughnessTexture': udim_info} if len(udim_info) > 0 else {}) + + if len(export_settings['current_texture_transform']) != 0: + for k in export_settings['current_texture_transform'].keys(): + path_ = {} + path_['length'] = export_settings['current_texture_transform'][k]['length'] + path_['path'] = export_settings['current_texture_transform'][k]['path'].replace( + "YYY", "extensions/KHR_materials_sheen/sheenRoughnessTexture/extensions") + path_['vector_type'] = export_settings['current_texture_transform'][k]['vector_type'] + export_settings['current_paths'][k] = path_ + + export_settings['current_texture_transform'] = {} + + return Extension('KHR_materials_sheen', sheen_extension, False), uvmap_infos, udim_infos diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/material/extensions/gltf2_blender_gather_materials_specular.py b/scripts/addons_core/io_scene_gltf2/blender/exp/material/extensions/gltf2_blender_gather_materials_specular.py new file mode 100644 index 00000000000..ce17395a008 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/material/extensions/gltf2_blender_gather_materials_specular.py @@ -0,0 +1,140 @@ +# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy +from .....io.com.gltf2_io_extensions import Extension +from ...material.gltf2_blender_gather_texture_info import gather_texture_info +from ..gltf2_blender_search_node_tree import \ + has_image_node_from_socket, \ + get_socket, \ + get_factor_from_socket + + +def export_specular(blender_material, export_settings): + specular_extension = {} + + specular_socket = get_socket(blender_material.node_tree, blender_material.use_nodes, 'Specular IOR Level') + speculartint_socket = get_socket(blender_material.node_tree, blender_material.use_nodes, 'Specular Tint') + + if specular_socket.socket is None or speculartint_socket.socket is None: + return None, {}, {} + + uvmap_infos = {} + udim_infos = {} + + specular_non_linked = specular_socket.socket is not None and isinstance( + specular_socket.socket, bpy.types.NodeSocket) and not specular_socket.socket.is_linked + specularcolor_non_linked = speculartint_socket.socket is not None and isinstance( + speculartint_socket.socket, bpy.types.NodeSocket) and not speculartint_socket.socket.is_linked + + if specular_non_linked is True: + fac = specular_socket.socket.default_value + fac = fac * 2.0 + if fac < 1.0: + specular_extension['specularFactor'] = fac + elif fac > 1.0: + # glTF specularFactor should be 
<= 1.0, so we will multiply ColorFactory + # by specularFactor, and set SpecularFactor to 1.0 (default value) + pass + else: + pass # If fac == 1.0, no need to export specularFactor, the default value is 1.0 + + # Storing path for KHR_animation_pointer + path_ = {} + path_['length'] = 1 + path_['path'] = "/materials/XXX/extensions/KHR_materials_specular/specularFactor" + export_settings['current_paths']["node_tree." + + specular_socket.socket.path_from_id() + ".default_value"] = path_ + + else: + # Factor + fac, path = get_factor_from_socket(specular_socket, kind='VALUE') + if fac is not None and fac != 1.0: + fac = fac * 2.0 if fac is not None else None + if fac is not None and fac < 1.0: + specular_extension['specularFactor'] = fac + elif fac is not None and fac > 1.0: + # glTF specularFactor should be <= 1.0, so we will multiply ColorFactory + # by specularFactor, and set SpecularFactor to 1.0 (default value) + pass + + if path is not None: + path_ = {} + path_['length'] = 1 + path_['path'] = "/materials/XXX/extensions/KHR_materials_specular/specularFactor" + export_settings['current_paths'][path] = path_ + + # Texture + if has_image_node_from_socket(specular_socket, export_settings): + specular_texture, uvmap_info, udim_info, _ = gather_texture_info( + specular_socket, + (specular_socket,), + export_settings, + ) + specular_extension['specularTexture'] = specular_texture + uvmap_infos.update({'specularTexture': uvmap_info}) + udim_infos.update({'specularTexture': udim_info} if len(udim_info) > 0 else {}) + + if len(export_settings['current_texture_transform']) != 0: + for k in export_settings['current_texture_transform'].keys(): + path_ = {} + path_['length'] = export_settings['current_texture_transform'][k]['length'] + path_['path'] = export_settings['current_texture_transform'][k]['path'].replace( + "YYY", "extensions/KHR_materials_specular/specularTexture/extensions") + path_['vector_type'] = export_settings['current_texture_transform'][k]['vector_type'] + export_settings['current_paths'][k] = path_ + + export_settings['current_texture_transform'] = {} + + if specularcolor_non_linked is True: + color = speculartint_socket.socket.default_value[:3] + if fac is not None and fac > 1.0: + color = (color[0] * fac, color[1] * fac, color[2] * fac) + if color != (1.0, 1.0, 1.0): + specular_extension['specularColorFactor'] = color + + # Storing path for KHR_animation_pointer + path_ = {} + path_['length'] = 1 + path_['path'] = "/materials/XXX/extensions/KHR_materials_specular/specularColorFactor" + export_settings['current_paths']["node_tree." 
+ speculartint_socket.socket.path_from_id() + + ".default_value"] = path_ + else: + # Factor + fac_color, path = get_factor_from_socket(speculartint_socket, kind='RGB') + if fac_color is not None and fac is not None and fac > 1.0: + fac_color = (fac_color[0] * fac, fac_color[1] * fac, fac_color[2] * fac) + elif fac_color is None and fac is not None and fac > 1.0: + fac_color = (fac, fac, fac) + if fac_color != (1.0, 1.0, 1.0): + specular_extension['specularColorFactor'] = fac_color + + if path is not None: + path_ = {} + path_['length'] = 1 + path_['path'] = "/materials/XXX/extensions/KHR_materials_specular/specularColorFactor" + export_settings['current_paths'][path] = path_ + + # Texture + if has_image_node_from_socket(speculartint_socket, export_settings): + specularcolor_texture, uvmap_info, udim_info, _ = gather_texture_info( + speculartint_socket, + (speculartint_socket,), + export_settings, + ) + specular_extension['specularColorTexture'] = specularcolor_texture + uvmap_infos.update({'specularColorTexture': uvmap_info}) + + if len(export_settings['current_texture_transform']) != 0: + for k in export_settings['current_texture_transform'].keys(): + path_ = {} + path_['length'] = export_settings['current_texture_transform'][k]['length'] + path_['path'] = export_settings['current_texture_transform'][k]['path'].replace( + "YYY", "extensions/KHR_materials_specular/specularColorTexture/extensions") + path_['vector_type'] = export_settings['current_texture_transform'][k]['vector_type'] + export_settings['current_paths'][k] = path_ + + export_settings['current_texture_transform'] = {} + + return Extension('KHR_materials_specular', specular_extension, False), uvmap_infos, udim_infos diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/material/extensions/gltf2_blender_gather_materials_transmission.py b/scripts/addons_core/io_scene_gltf2/blender/exp/material/extensions/gltf2_blender_gather_materials_transmission.py new file mode 100644 index 00000000000..ad35ff5ea9c --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/material/extensions/gltf2_blender_gather_materials_transmission.py @@ -0,0 +1,76 @@ +# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy +from .....io.com.gltf2_io_extensions import Extension +from ...material import gltf2_blender_gather_texture_info +from ..gltf2_blender_search_node_tree import \ + has_image_node_from_socket, \ + get_socket, \ + get_factor_from_socket + + +def export_transmission(blender_material, export_settings): + has_transmission_texture = False + + transmission_extension = {} + transmission_slots = () + + transmission_socket = get_socket(blender_material.node_tree, blender_material.use_nodes, 'Transmission Weight') + + if transmission_socket.socket is not None and isinstance( + transmission_socket.socket, + bpy.types.NodeSocket) and not transmission_socket.socket.is_linked: + if transmission_socket.socket.default_value != 0.0: + transmission_extension['transmissionFactor'] = transmission_socket.socket.default_value + + path_ = {} + path_['length'] = 1 + path_['path'] = "/materials/XXX/extensions/KHR_materials_transmission/transmissionFactor" + export_settings['current_paths']["node_tree." 
+ transmission_socket.socket.path_from_id() + + ".default_value"] = path_ + + elif has_image_node_from_socket(transmission_socket, export_settings): + fac, path = get_factor_from_socket(transmission_socket, kind='VALUE') + transmission_extension['transmissionFactor'] = fac if fac is not None else 1.0 + has_transmission_texture = True + + # Storing path for KHR_animation_pointer + if path is not None: + path_ = {} + path_['length'] = 1 + path_['path'] = "/materials/XXX/extensions/KHR_materials_transmission/transmissionFactor" + export_settings['current_paths'][path] = path_ + + uvmap_info = {} + udim_info = {} + + # Pack transmission channel (R). + if has_transmission_texture: + transmission_slots = (transmission_socket,) + + if len(transmission_slots) > 0: + combined_texture, uvmap_info, udim_info, _ = gltf2_blender_gather_texture_info.gather_texture_info( + transmission_socket, + transmission_slots, + export_settings, + ) + if has_transmission_texture: + transmission_extension['transmissionTexture'] = combined_texture + + if len(export_settings['current_texture_transform']) != 0: + for k in export_settings['current_texture_transform'].keys(): + path_ = {} + path_['length'] = export_settings['current_texture_transform'][k]['length'] + path_['path'] = export_settings['current_texture_transform'][k]['path'].replace( + "YYY", "extensions/KHR_materials_transmission/transmissionTexture/extensions") + path_['vector_type'] = export_settings['current_texture_transform'][k]['vector_type'] + export_settings['current_paths'][k] = path_ + + export_settings['current_texture_transform'] = {} + + return Extension( + 'KHR_materials_transmission', transmission_extension, False), { + 'transmissionTexture': uvmap_info}, { + 'transmissionTexture': udim_info} if len(udim_info) > 0 else {} diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/material/extensions/gltf2_blender_gather_materials_variants.py b/scripts/addons_core/io_scene_gltf2/blender/exp/material/extensions/gltf2_blender_gather_materials_variants.py new file mode 100644 index 00000000000..b15453a15c7 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/material/extensions/gltf2_blender_gather_materials_variants.py @@ -0,0 +1,19 @@ +# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy +from typing import Dict, Any +from .....io.com import gltf2_io_variants +from ...gltf2_blender_gather_cache import cached + + +@cached +def gather_variant(variant_idx, export_settings) -> Dict[str, Any]: + + variant = gltf2_io_variants.Variant( + name=bpy.data.scenes[0].gltf2_KHR_materials_variants_variants[variant_idx].name, + extensions=None, + extras=None + ) + return variant.to_dict() diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/material/extensions/gltf2_blender_gather_materials_volume.py b/scripts/addons_core/io_scene_gltf2/blender/exp/material/extensions/gltf2_blender_gather_materials_volume.py new file mode 100644 index 00000000000..f568c1f884f --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/material/extensions/gltf2_blender_gather_materials_volume.py @@ -0,0 +1,118 @@ +# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy +from .....io.com.gltf2_io_extensions import Extension +from ...material import gltf2_blender_gather_texture_info +from ..gltf2_blender_search_node_tree import \ + has_image_node_from_socket, \ + get_const_from_default_value_socket, \ + 
get_socket_from_gltf_material_node, \ + get_socket, \ + get_factor_from_socket + + +def export_volume(blender_material, export_settings): + # Implementation based on https://github.com/KhronosGroup/glTF-Blender-IO/issues/1454#issuecomment-928319444 + + # If no transmission --> No volume + # But we need to keep it, in case it is animated + + volume_extension = {} + has_thickness_texture = False + thickness_slots = () + uvmap_info = {} + + thickness_socket = get_socket_from_gltf_material_node( + blender_material.node_tree, blender_material.use_nodes, 'Thickness') + if thickness_socket.socket is None: + # If no thickness (here because there is no glTF Material Output node), no volume extension export + return None, {}, {} + + density_socket = get_socket(blender_material.node_tree, blender_material.use_nodes, 'Density', volume=True) + attenuation_color_socket = get_socket(blender_material.node_tree, blender_material.use_nodes, 'Color', volume=True) + # Even if density or attenuation are not set, we export volume extension + + if attenuation_color_socket.socket is not None and isinstance( + attenuation_color_socket.socket, bpy.types.NodeSocket): + rgb, path = get_const_from_default_value_socket(attenuation_color_socket, kind='RGB') + volume_extension['attenuationColor'] = rgb + + # Storing path for KHR_animation_pointer + if path is not None: + path_ = {} + path_['length'] = 3 + path_['path'] = "/materials/XXX/extensions/KHR_materials_volume/attenuationColor" + export_settings['current_paths'][path] = path_ + + if density_socket.socket is not None and isinstance(density_socket.socket, bpy.types.NodeSocket): + density, path = get_const_from_default_value_socket(density_socket, kind='VALUE') + volume_extension['attenuationDistance'] = 1.0 / \ + density if density != 0 else None # infinity (Using None as glTF default) + + # Storing path for KHR_animation_pointer + if path is not None: + path_ = {} + path_['length'] = 1 + path_['path'] = "/materials/XXX/extensions/KHR_materials_volume/attenuationDistance" + export_settings['current_paths'][path] = path_ + + if isinstance(thickness_socket.socket, bpy.types.NodeSocket) and not thickness_socket.socket.is_linked: + val = thickness_socket.socket.default_value + if val == 0.0: + # If no thickness, no volume extension export + return None, {}, {} + volume_extension['thicknessFactor'] = val + + # Storing path for KHR_animation_pointer + path_ = {} + path_['length'] = 1 + path_['path'] = "/materials/XXX/extensions/KHR_materials_volume/thicknessFactor" + export_settings['current_paths']["node_tree." + thickness_socket.socket.path_from_id() + + ".default_value"] = path_ + + elif has_image_node_from_socket(thickness_socket, export_settings): + fac, path = get_factor_from_socket(thickness_socket, kind='VALUE') + # default value in glTF is 0.0, but if there is a texture without factor, use 1 + volume_extension['thicknessFactor'] = fac if fac is not None else 1.0 + + # Storing path for KHR_animation_pointer + if path is not None: + path_ = {} + path_['length'] = 1 + path_['path'] = "/materials/XXX/extensions/KHR_materials_volume/thicknessFactor" + export_settings['current_paths'][path] = path_ + + has_thickness_texture = True + + # Pack thickness channel (G). 
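+ # The slot tuple below holds the socket whose data is packed into the thickness texture (G channel).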
+ if has_thickness_texture: + thickness_slots = (thickness_socket,) + + udim_info = {} + if len(thickness_slots) > 0: + combined_texture, uvmap_info, udim_info, _ = gltf2_blender_gather_texture_info.gather_texture_info( + thickness_socket, + thickness_slots, + export_settings, + ) + if has_thickness_texture: + volume_extension['thicknessTexture'] = combined_texture + + if len(export_settings['current_texture_transform']) != 0: + for k in export_settings['current_texture_transform'].keys(): + path_ = {} + path_['length'] = export_settings['current_texture_transform'][k]['length'] + path_['path'] = export_settings['current_texture_transform'][k]['path'].replace( + "YYY", "extensions/KHR_materials_volume/thicknessTexture/extensions") + path_['vector_type'] = export_settings['current_texture_transform'][k]['vector_type'] + export_settings['current_paths'][k] = path_ + + export_settings['current_texture_transform'] = {} + + return Extension( + 'KHR_materials_volume', volume_extension, False), { + 'thicknessTexture': uvmap_info}, { + 'thicknessTexture': udim_info} if len( + udim_info.keys()) > 0 else {} diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/material/extensions/gltf2_blender_image.py b/scripts/addons_core/io_scene_gltf2/blender/exp/material/extensions/gltf2_blender_image.py new file mode 100644 index 00000000000..6af694b3d08 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/material/extensions/gltf2_blender_image.py @@ -0,0 +1,373 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy +import os +from typing import Optional, Tuple +import numpy as np +import tempfile +import enum + + +class Channel(enum.IntEnum): + R = 0 + G = 1 + B = 2 + A = 3 + +# These describe how an ExportImage's channels should be filled. + + +class FillImage: + """Fills a channel with the channel src_chan from a Blender image.""" + + def __init__(self, image: bpy.types.Image, src_chan: Channel): + self.image = image + self.src_chan = src_chan + + +class FillImageTile: + """Fills a channel with the channel src_chan from a Blender UDIM image.""" + + def __init__(self, image: bpy.types.Image, tile, src_chan: Channel): + self.image = image + self.tile = tile + self.src_chan = src_chan + + +class FillWhite: + """Fills a channel with all ones (1.0).""" + pass + + +class FillWith: + """Fills a channel with all same values""" + + def __init__(self, value): + self.value = value + + +class StoreData: + def __init__(self, data): + """Store numeric data (not an image channel""" + self.data = data + + +class StoreImage: + """ + Store a channel with the channel src_chan from a Blender image. + This channel will be used for numpy calculation (no direct channel mapping) + """ + + def __init__(self, image: bpy.types.Image): + self.image = image + + +class ExportImage: + """Custom image class. + + An image is represented by giving a description of how to fill its red, + green, blue, and alpha channels. For example: + + self.fills = { + Channel.R: FillImage(image=bpy.data.images['Im1'], src_chan=Channel.B), + Channel.G: FillWhite(), + } + + This says that the ExportImage's R channel should be filled with the B + channel of the Blender image 'Im1', and the ExportImage's G channel + should be filled with all 1.0s. Undefined channels mean we don't care + what values that channel has. + + This is flexible enough to handle the case where eg. 
the user used the R + channel of one image as the metallic value and the G channel of another + image as the roughness, and we need to synthesize an ExportImage that + packs those into the B and G channels for glTF. + + Storing this description (instead of raw pixels) lets us make more + intelligent decisions about how to encode the image. + """ + + def __init__(self, original=None): + self.fills = {} + self.stored = {} + + self.original = original # In case of keeping original texture images + self.numpy_calc = None + + def set_calc(self, numpy_calc): + self.numpy_calc = numpy_calc # In case of numpy calculation (no direct channel mapping) + + @staticmethod + def from_blender_image(image: bpy.types.Image): + export_image = ExportImage() + for chan in range(image.channels): + export_image.fill_image(image, dst_chan=chan, src_chan=chan) + return export_image + + @staticmethod + def from_blender_image_tile(export_settings): + export_image = ExportImage() + original_udim = export_settings['current_udim_info']['image'] + for chan in range(original_udim.channels): + export_image.fill_image_tile( + original_udim, + export_settings['current_udim_info']['tile'], + dst_chan=chan, + src_chan=chan) + + return export_image + + @staticmethod + def from_original(image: bpy.types.Image): + return ExportImage(image) + + def fill_image(self, image: bpy.types.Image, dst_chan: Channel, src_chan: Channel): + self.fills[dst_chan] = FillImage(image, src_chan) + + def fill_image_tile(self, image: bpy.types.Image, tile, dst_chan: Channel, src_chan: Channel): + self.fills[dst_chan] = FillImageTile(image, tile, src_chan) + + def store_data(self, identifier, data, type='Image'): + if type == "Image": # This is an image + self.stored[identifier] = StoreImage(data) + else: # This is a numeric value + self.stored[identifier] = StoreData(data) + + def fill_white(self, dst_chan: Channel): + self.fills[dst_chan] = FillWhite() + + def fill_with(self, dst_chan, value): + self.fills[dst_chan] = FillWith(value) + + def is_filled(self, chan: Channel) -> bool: + return chan in self.fills + + def empty(self) -> bool: + if self.original is None: + return not (self.fills or self.stored) + else: + return False + + def blender_image(self, export_settings) -> Optional[bpy.types.Image]: + """If there's an existing Blender image we can use, + returns it. Otherwise (if channels need packing), + returns None. 
+ """ + if self.__on_happy_path(): + # Store that this image is fully exported (used to export or not not used images) + for fill in self.fills.values(): + export_settings['exported_images'][fill.image.name] = 1 # Fully used + break + + for fill in self.fills.values(): + return fill.image + return None + + def __on_happy_path(self) -> bool: + # All src_chans match their dst_chan and come from the same image + return ( + all(isinstance(fill, FillImage) for fill in self.fills.values()) and + all(dst_chan == fill.src_chan for dst_chan, fill in self.fills.items()) and + len(set(fill.image.name for fill in self.fills.values())) == 1 + ) + + def __on_happy_path_udim(self) -> bool: + # All src_chans match their dst_chan and come from the same udim image + + return ( + all(isinstance(fill, FillImageTile) for fill in self.fills.values()) and + all(dst_chan == fill.src_chan for dst_chan, fill in self.fills.items()) and + len(set(fill.image.name for fill in self.fills.values())) == 1 and + all(fill.tile == self.fills[list(self.fills.keys())[0]].tile for fill in self.fills.values()) + ) + + def encode(self, mime_type: Optional[str], export_settings) -> Tuple[bytes, bool]: + self.file_format = { + "image/jpeg": "JPEG", + "image/png": "PNG", + "image/webp": "WEBP" + }.get(mime_type, "PNG") + + # Happy path = we can just use an existing Blender image + if self.__on_happy_path(): + # Store that this image is fully exported (used to export or not not used images) + for fill in self.fills.values(): + export_settings['exported_images'][fill.image.name] = 1 # Fully used + break + return self.__encode_happy(export_settings), None + + if self.__on_happy_path_udim(): + return self.__encode_happy_tile(export_settings), None + + # Unhappy path = we need to create the image self.fills describes or self.stores describes + if self.numpy_calc is None: + return self.__encode_unhappy(export_settings), None + else: + pixels, width, height, factor = self.numpy_calc(self.stored, export_settings) + return self.__encode_from_numpy_array(pixels, (width, height), export_settings), factor + + def __encode_happy(self, export_settings) -> bytes: + return self.__encode_from_image(self.blender_image(export_settings), export_settings) + + def __encode_happy_tile(self, export_settings) -> bytes: + return self.__encode_from_image_tile( + self.fills[list(self.fills.keys())[0]].image, export_settings['current_udim_info']['tile'], export_settings) + + def __encode_unhappy(self, export_settings) -> bytes: + # We need to assemble the image out of channels. + # Do it with numpy and image.pixels. + + # Find all Blender images used + images = [] + for fill in self.fills.values(): + if isinstance(fill, FillImage): + if fill.image not in images: + images.append(fill.image) + export_settings['exported_images'][fill.image.name] = 2 # 2 = partially used + + if not images: + # No ImageFills; use a 1x1 white pixel + pixels = np.array([1.0, 1.0, 1.0, 1.0], np.float32) + return self.__encode_from_numpy_array(pixels, (1, 1), export_settings) + + width = max(image.size[0] for image in images) + height = max(image.size[1] for image in images) + + out_buf = np.ones(width * height * 4, np.float32) + tmp_buf = np.empty(width * height * 4, np.float32) + + for image in images: + if image.size[0] == width and image.size[1] == height: + image.pixels.foreach_get(tmp_buf) + else: + # Image is the wrong size; make a temp copy and scale it. 
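[Editor's sketch, not part of the patch] The unhappy-path assembly above relies on numpy strided views of a flat RGBA pixel buffer: every fourth float belongs to one channel, so a destination channel can be filled from any source channel with a single slice assignment. A self-contained example with made-up buffers:

    import numpy as np

    # Flat RGBA pixels of a 1x2 image: [R, G, B, A, R, G, B, A]
    metal_px = np.array([0.2, 0.2, 0.2, 1.0, 0.8, 0.8, 0.8, 1.0], np.float32)
    rough_px = np.array([0.5, 0.5, 0.5, 1.0, 0.1, 0.1, 0.1, 1.0], np.float32)

    out = np.ones(8, np.float32)   # start all-white, like __encode_unhappy
    out[2::4] = metal_px[2::4]     # Channel.B <- metallic source channel
    out[1::4] = rough_px[1::4]     # Channel.G <- roughness source channel
    # out now holds the G=roughness / B=metallic packing that glTF expects.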
+ with TmpImageGuard() as guard: + make_temp_image_copy(guard, src_image=image) + tmp_image = guard.image + tmp_image.scale(width, height) + tmp_image.pixels.foreach_get(tmp_buf) + + # Copy any channels for this image to the output + for dst_chan, fill in self.fills.items(): + if isinstance(fill, FillImage) and fill.image == image: + out_buf[int(dst_chan)::4] = tmp_buf[int(fill.src_chan)::4] + elif isinstance(fill, FillWith): + out_buf[int(dst_chan)::4] = fill.value + + tmp_buf = None # GC this + + return self.__encode_from_numpy_array(out_buf, (width, height), export_settings) + + def __encode_from_numpy_array(self, pixels: np.ndarray, dim: Tuple[int, int], export_settings) -> bytes: + with TmpImageGuard() as guard: + guard.image = bpy.data.images.new( + "##gltf-export:tmp-image##", + width=dim[0], + height=dim[1], + alpha=Channel.A in self.fills, + ) + tmp_image = guard.image + + tmp_image.pixels.foreach_set(pixels) + + return _encode_temp_image(tmp_image, self.file_format, export_settings) + + def __encode_from_image(self, image: bpy.types.Image, export_settings) -> bytes: + # See if there is an existing file we can use. + data = None + # Sequence image can't be exported, but it avoid to crash to check that default image exists + # Else, it can crash when trying to access a non existing image + if image.source in ['FILE', 'SEQUENCE'] and not image.is_dirty: + if image.packed_file is not None: + data = image.packed_file.data + else: + src_path = bpy.path.abspath(image.filepath_raw) + if os.path.isfile(src_path): + with open(src_path, 'rb') as f: + data = f.read() + # Check magic number is right + if data: + if self.file_format == 'PNG': + if data.startswith(b'\x89PNG'): + return data + elif self.file_format == 'JPEG': + if data.startswith(b'\xff\xd8\xff'): + return data + elif self.file_format == 'WEBP': + if data[8:12] == b'WEBP': + return data + + # Copy to a temp image and save. 
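[Editor's sketch, not part of the patch] The reuse-existing-bytes shortcut above hinges on simple magic-number checks before trusting the on-disk data. The same tests, pulled into a standalone (hypothetical) helper:

    def matches_file_format(data: bytes, file_format: str) -> bool:
        # Reuse the existing bytes only when they already match the requested container.
        if file_format == 'PNG':
            return data.startswith(b'\x89PNG')
        if file_format == 'JPEG':
            return data.startswith(b'\xff\xd8\xff')
        if file_format == 'WEBP':
            return len(data) >= 12 and data[8:12] == b'WEBP'
        return False

    assert matches_file_format(b'\x89PNG\r\n\x1a\n' + b'\x00' * 8, 'PNG')
    assert not matches_file_format(b'\xff\xd8\xff\xe0' + b'\x00' * 8, 'PNG')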
+ with TmpImageGuard() as guard: + make_temp_image_copy(guard, src_image=image) + tmp_image = guard.image + return _encode_temp_image(tmp_image, self.file_format, export_settings) + + def __encode_from_image_tile(self, udim_image, tile, export_settings): + src_path = bpy.path.abspath(udim_image.filepath_raw).replace("", tile) + + if os.path.isfile(src_path): + with open(src_path, 'rb') as f: + data = f.read() + + if data: + if self.file_format == 'PNG': + if data.startswith(b'\x89PNG'): + return data + elif self.file_format == 'JPEG': + if data.startswith(b'\xff\xd8\xff'): + return data + elif self.file_format == 'WEBP': + if data[8:12] == b'WEBP': + return data + + # We don't manage UDIM packed image, so this could not happen to be here + + +def _encode_temp_image(tmp_image: bpy.types.Image, file_format: str, export_settings) -> bytes: + with tempfile.TemporaryDirectory() as tmpdirname: + tmpfilename = tmpdirname + '/img' + tmp_image.filepath_raw = tmpfilename + + tmp_image.file_format = file_format + + # if image is jpeg, use quality export settings + if file_format in ["JPEG", "WEBP"]: + tmp_image.save(quality=export_settings['gltf_image_quality']) + else: + tmp_image.save() + + with open(tmpfilename, "rb") as f: + return f.read() + + +class TmpImageGuard: + """Guard to automatically clean up temp images (use it with `with`).""" + + def __init__(self): + self.image = None + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + if self.image is not None: + bpy.data.images.remove(self.image, do_unlink=True) + + +def make_temp_image_copy(guard: TmpImageGuard, src_image: bpy.types.Image): + """Makes a temporary copy of src_image. Will be cleaned up with guard.""" + guard.image = src_image.copy() + tmp_image = guard.image + + tmp_image.update() + # See #1564 and T95616 + tmp_image.scale(*src_image.size) + + if src_image.is_dirty: # Warning, img size change doesn't make it dirty, see T95616 + # Unsaved changes aren't copied by .copy(), so do them ourselves + tmp_buf = np.empty(src_image.size[0] * src_image.size[1] * 4, np.float32) + src_image.pixels.foreach_get(tmp_buf) + tmp_image.pixels.foreach_set(tmp_buf) diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/material/gltf2_blender_gather_image.py b/scripts/addons_core/io_scene_gltf2/blender/exp/material/gltf2_blender_gather_image.py new file mode 100644 index 00000000000..0d89ee64d12 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/material/gltf2_blender_gather_image.py @@ -0,0 +1,439 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy +import typing +import os + +from ....io.com import gltf2_io +from ....io.com.gltf2_io_path import path_to_uri +from ....io.exp import gltf2_io_binary_data, gltf2_io_image_data +from ....io.com import gltf2_io_debug +from ....io.exp.gltf2_io_user_extensions import export_user_extensions +from ..gltf2_blender_gather_cache import cached +from .extensions.gltf2_blender_image import Channel, ExportImage, FillImage +from .gltf2_blender_search_node_tree import get_texture_node_from_socket, detect_anisotropy_nodes + + +@cached +def gather_image( + blender_shader_sockets: typing.Tuple[bpy.types.NodeSocket], + use_tile: bool, + export_settings): + if not __filter_image(blender_shader_sockets, export_settings): + return None, None, None, None + + image_data, udim_image = __get_image_data(blender_shader_sockets, use_tile, export_settings) + + if udim_image is not None: + # We are in 
a UDIM case, so we return no image data + # This will be used later to create multiple primitives/material/texture with UDIM information + return None, None, None, udim_image + + if image_data.empty(): + # The export image has no data + return None, None, None, None + + mime_type = __gather_mime_type(blender_shader_sockets, image_data, export_settings) + name = __gather_name(image_data, export_settings) + + factor = None + + if image_data.original is None: + uri, factor_uri = __gather_uri(image_data, mime_type, name, export_settings) + else: + # Retrieve URI relative to exported glTF files + uri = __gather_original_uri(image_data.original.filepath, export_settings) + # In case we can't retrieve image (for example packed images, with original moved) + # We don't create invalid image without uri + factor_uri = None + if uri is None: + return None, None, None, None + + buffer_view, factor_buffer_view = __gather_buffer_view(image_data, mime_type, name, export_settings) + + factor = factor_uri if uri is not None else factor_buffer_view + + image = __make_image( + buffer_view, + __gather_extensions(blender_shader_sockets, export_settings), + __gather_extras(blender_shader_sockets, export_settings), + mime_type, + name, + uri, + export_settings + ) + + export_user_extensions('gather_image_hook', export_settings, image, blender_shader_sockets) + + # We also return image_data, as it can be used to generate same file with another extension for WebP management + return image, image_data, factor, None + + +def __gather_original_uri(original_uri, export_settings): + + path_to_image = bpy.path.abspath(original_uri) + if not os.path.exists(path_to_image): + return None + try: + rel_path = os.path.relpath( + path_to_image, + start=export_settings['gltf_filedirectory'], + ) + except ValueError: + # eg. 
because no relative path between C:\ and D:\ on Windows + return None + return path_to_uri(rel_path) + + +@cached +def __make_image(buffer_view, extensions, extras, mime_type, name, uri, export_settings): + return gltf2_io.Image( + buffer_view=buffer_view, + extensions=extensions, + extras=extras, + mime_type=mime_type, + name=name, + uri=uri + ) + + +def __filter_image(sockets, export_settings): + if not sockets: + return False + return True + + +@cached +def __gather_buffer_view(image_data, mime_type, name, export_settings): + if export_settings['gltf_format'] != 'GLTF_SEPARATE': + data, factor = image_data.encode(mime_type, export_settings) + return gltf2_io_binary_data.BinaryData(data=data), factor + return None, None + + +def __gather_extensions(sockets, export_settings): + return None + + +def __gather_extras(sockets, export_settings): + return None + + +def __gather_mime_type(sockets, export_image, export_settings): + # force png or webp if Alpha contained so we can export alpha + for socket in sockets: + if socket.socket.name == "Alpha": + if export_settings["gltf_image_format"] == "WEBP": + return "image/webp" + else: + # If we keep image as is (no channel composition), we need to keep original format (for WebP) + image = export_image.blender_image(export_settings) + if image is not None and __is_blender_image_a_webp(image): + return "image/webp" + return "image/png" + + if export_settings["gltf_image_format"] == "AUTO": + if export_image.original is None: # We are going to create a new image + image = export_image.blender_image(export_settings) + else: + # Using original image + image = export_image.original + + if image is not None and __is_blender_image_a_jpeg(image): + return "image/jpeg" + elif image is not None and __is_blender_image_a_webp(image): + return "image/webp" + return "image/png" + + elif export_settings["gltf_image_format"] == "WEBP": + return "image/webp" + elif export_settings["gltf_image_format"] == "JPEG": + return "image/jpeg" + + +def __gather_name(export_image, export_settings): + if export_image.original is None: + # Find all Blender images used in the ExportImage + imgs = [] + for fill in export_image.fills.values(): + if isinstance(fill, FillImage): + img = fill.image + if img not in imgs: + imgs.append(img) + + # If all the images have the same path, use the common filename + filepaths = set(img.filepath for img in imgs) + if len(filepaths) == 1: + filename = os.path.basename(list(filepaths)[0]) + name, extension = os.path.splitext(filename) + if extension.lower() in ['.png', '.jpg', '.jpeg']: + if name: + return name + + # Combine the image names: img1-img2-img3 + names = [] + for img in imgs: + name, extension = os.path.splitext(img.name) + names.append(name) + name = '-'.join(names) + return name or 'Image' + else: + return export_image.original.name + + +@cached +def __gather_uri(image_data, mime_type, name, export_settings): + if export_settings['gltf_format'] == 'GLTF_SEPARATE': + # as usual we just store the data in place instead of already resolving the references + data, factor = image_data.encode(mime_type, export_settings) + return gltf2_io_image_data.ImageData( + data=data, + mime_type=mime_type, + name=name + ), factor + + return None, None + + +def __get_image_data(sockets, use_tile, export_settings) -> ExportImage: + # For shared resources, such as images, we just store the portion of data that is needed in the glTF property + # in a helper class. 
During generation of the glTF in the exporter these will then be combined to actual binary + # resources. + results = [get_texture_node_from_socket(socket, export_settings) for socket in sockets] + + if use_tile is None: + # First checking if texture used is UDIM + # In that case, we return no texture data for now, and only get that this texture is UDIM + # This will be used later + if any([r.shader_node.image.source == "TILED" for r in results if r is not None and r.shader_node.image is not None]): + return ExportImage(), [ + r.shader_node.image for r in results if r is not None and r.shader_node.image is not None and r.shader_node.image.source == "TILED"][0] + + # If we are here, we are in UDIM split process + # Check if we need a simple mapping or more complex calculation + + # Case of Anisotropy : It can be a complex node setup, or simple grayscale textures + # In case of complex node setup, this will be a direct mapping of channels + # But in case of grayscale textures, we need to combine them, we numpy calculations + # So we need to check if we have a complex node setup or not + + need_to_check_anisotropy = is_anisotropy = False + try: + anisotropy_socket = [s for s in sockets if s.socket.name == 'Anisotropic'][0] + anisotropy_rotation_socket = [s for s in sockets if s.socket.name == 'Anisotropic Rotation'][0] + anisotropy_tangent_socket = [s for s in sockets if s.socket.name == 'Tangent'][0] + need_to_check_anisotropy = True + except: + need_to_check_anisotropy = False + + if need_to_check_anisotropy is True: + is_anisotropy, anisotropy_data = detect_anisotropy_nodes( + anisotropy_socket, + anisotropy_rotation_socket, + anisotropy_tangent_socket, + export_settings + ) + + if need_to_check_anisotropy is True and is_anisotropy is False: + # We are not in complex node setup, so we can try to get the image data from grayscale textures + return __get_image_data_grayscale_anisotropy(sockets, results, export_settings), None + + return __get_image_data_mapping(sockets, results, use_tile, export_settings), None + + +def __get_image_data_mapping(sockets, results, use_tile, export_settings) -> ExportImage: + """ + Simple mapping + Will fit for most of exported textures : RoughnessMetallic, Basecolor, normal, ... + """ + composed_image = ExportImage() + + for result, socket in zip(results, sockets): + # Assume that user know what he does, and that channels/images are already combined correctly for pbr + # If not, we are going to keep only the first texture found + # Example : If user set up 2 or 3 different textures for Metallic / Roughness / Occlusion + # Only 1 will be used at export + # This Warning is displayed in UI of this option + if export_settings['gltf_keep_original_textures']: + composed_image = ExportImage.from_original(result.shader_node.image) + + else: + # rudimentarily try follow the node tree to find the correct image data. 
+ src_chan = None + for elem in result.path: + if isinstance(elem.from_node, bpy.types.ShaderNodeSeparateColor): + src_chan = { + 'Red': Channel.R, + 'Green': Channel.G, + 'Blue': Channel.B, + }[elem.from_socket.name] + if elem.from_socket.name == 'Alpha': + src_chan = Channel.A + + if src_chan is None: + # No SeparateColor node found, so take the specification channel that is needed + # So export is correct if user plug the texture directly to the socket + if socket.socket.name == 'Metallic': + src_chan = Channel.B + elif socket.socket.name == 'Roughness': + src_chan = Channel.G + elif socket.socket.name == 'Occlusion': + src_chan = Channel.R + elif socket.socket.name == 'Alpha': + src_chan = Channel.A + elif socket.socket.name == 'Coat Weight': + src_chan = Channel.R + elif socket.socket.name == 'Coat Roughness': + src_chan = Channel.G + elif socket.socket.name == 'Thickness': # For KHR_materials_volume + src_chan = Channel.G + + if src_chan is None: + # Seems we can't find the channel + # We are in a case where user plugged a texture in a Color socket, but we may have used the alpha one + if socket.socket.name in ["Alpha", "Specular IOR Level", "Sheen Roughness"]: + src_chan = Channel.A + + if src_chan is None: + # We definitely can't find the channel, so keep the first channel even if this is wrong + src_chan = Channel.R + + dst_chan = None + + # some sockets need channel rewriting (gltf pbr defines fixed channels for some attributes) + if socket.socket.name == 'Metallic': + dst_chan = Channel.B + elif socket.socket.name == 'Roughness': + dst_chan = Channel.G + elif socket.socket.name == 'Occlusion': + dst_chan = Channel.R + elif socket.socket.name == 'Alpha': + dst_chan = Channel.A + elif socket.socket.name == 'Coat Weight': + dst_chan = Channel.R + elif socket.socket.name == 'Coat Roughness': + dst_chan = Channel.G + elif socket.socket.name == 'Thickness': # For KHR_materials_volume + dst_chan = Channel.G + elif socket.socket.name == "Specular IOR Level": # For KHR_materials_specular + dst_chan = Channel.A + elif socket.socket.name == "Sheen Roughness": # For KHR_materials_sheen + dst_chan = Channel.A + + if dst_chan is not None: + if use_tile is None: + composed_image.fill_image(result.shader_node.image, dst_chan, src_chan) + else: + composed_image.fill_image_tile( + result.shader_node.image, + export_settings['current_udim_info']['tile'], + dst_chan, + src_chan) + + # Since metal/roughness are always used together, make sure + # the other channel is filled. 
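[Editor's summary, not a structure defined by the add-on] When no Separate Color node pins the source channel, the socket name above implies both the channel read from the image and the channel written into the packed glTF texture. Condensed into one table:

    # socket name             -> packed glTF channel
    GLTF_DST_CHANNEL = {
        'Metallic': 'B',             # pbrMetallicRoughness metallic/roughness texture
        'Roughness': 'G',
        'Occlusion': 'R',
        'Alpha': 'A',
        'Coat Weight': 'R',          # KHR_materials_clearcoat
        'Coat Roughness': 'G',
        'Thickness': 'G',            # KHR_materials_volume
        'Specular IOR Level': 'A',   # KHR_materials_specular
        'Sheen Roughness': 'A',      # KHR_materials_sheen
    }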
+ if socket.socket.name == 'Metallic' and not composed_image.is_filled(Channel.G): + composed_image.fill_white(Channel.G) + elif socket.socket.name == 'Roughness' and not composed_image.is_filled(Channel.B): + composed_image.fill_white(Channel.B) + else: + # copy full image...eventually following sockets might overwrite things + if use_tile is None: + composed_image = ExportImage.from_blender_image(result.shader_node.image) + else: + composed_image = ExportImage.from_blender_image_tile(export_settings) + + # Check that we don't have some empty channels (based on weird images without any size for example) + keys = list(composed_image.fills.keys()) # do not loop on dict, we may have to delete an element + for k in [k for k in keys if isinstance(composed_image.fills[k], FillImage)]: + if composed_image.fills[k].image.size[0] == 0 or composed_image.fills[k].image.size[1] == 0: + export_settings['log'].warning("Image '{}' has no size and cannot be exported.".format( + composed_image.fills[k].image)) + del composed_image.fills[k] + + return composed_image + + +def __get_image_data_grayscale_anisotropy(sockets, results, export_settings) -> ExportImage: + """ + calculating Anisotropy Texture from grayscale textures, settings needed data + """ + from .extensions.gltf2_blender_gather_materials_anisotropy import grayscale_anisotropy_calculation + composed_image = ExportImage() + composed_image.set_calc(grayscale_anisotropy_calculation) + + results = [get_texture_node_from_socket(socket, export_settings) + for socket in sockets[:-1]] # No texture from tangent + + mapping = { + 0: "anisotropy", + 1: "anisotropic_rotation", + } + + for idx, result in enumerate(results): + if get_texture_node_from_socket(sockets[idx], export_settings): + composed_image.store_data(mapping[idx], result.shader_node.image, type="Image") + else: + composed_image.store_data(mapping[idx], sockets[idx].socket.default_value, type="Data") + + return composed_image + + +def __is_blender_image_a_jpeg(image: bpy.types.Image) -> bool: + if image.source != 'FILE': + return False + if image.filepath_raw == '' and image.packed_file: + return image.packed_file.data[:3] == b'\xff\xd8\xff' + else: + path = image.filepath_raw.lower() + return path.endswith('.jpg') or path.endswith('.jpeg') or path.endswith('.jpe') + + +def __is_blender_image_a_webp(image: bpy.types.Image) -> bool: + if image.source != 'FILE': + return False + if image.filepath_raw == '' and image.packed_file: + return image.packed_file.data[8:12] == b'WEBP' + else: + path = image.filepath_raw.lower() + return path.endswith('.webp') + + +def get_gltf_image_from_blender_image(blender_image_name, export_settings): + image_data = ExportImage.from_blender_image(bpy.data.images[blender_image_name]) + + name = __gather_name(image_data, export_settings) + mime_type = __get_mime_type_of_image(blender_image_name, export_settings) + + uri, _ = __gather_uri(image_data, mime_type, name, export_settings) + buffer_view, _ = __gather_buffer_view(image_data, mime_type, name, export_settings) + + return gltf2_io.Image( + buffer_view=buffer_view, + extensions=None, + extras=None, + mime_type=mime_type, + name=name, + uri=uri + ) + + +def __get_mime_type_of_image(blender_image_name, export_settings): + + image = bpy.data.images[blender_image_name] + if image.channels == 4: + if __is_blender_image_a_webp(image): + return "image/webp" + return "image/png" + + if export_settings["gltf_image_format"] == "AUTO": + if __is_blender_image_a_jpeg(image): + return "image/jpeg" + elif 
__is_blender_image_a_webp(image): + return "image/webp" + return "image/png" + + elif export_settings["gltf_image_format"] == "JPEG": + return "image/jpeg" diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/material/gltf2_blender_gather_materials.py b/scripts/addons_core/io_scene_gltf2/blender/exp/material/gltf2_blender_gather_materials.py new file mode 100644 index 00000000000..9df89640f1e --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/material/gltf2_blender_gather_materials.py @@ -0,0 +1,717 @@ +# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +from copy import deepcopy +import bpy + +from ....io.com import gltf2_io +from ....io.com.gltf2_io_extensions import Extension +from ....io.exp.gltf2_io_user_extensions import export_user_extensions +from ...com.gltf2_blender_extras import generate_extras +from ..gltf2_blender_gather_cache import cached, cached_by_key +from . import gltf2_blender_gather_materials_unlit +from . import gltf2_blender_gather_texture_info +from . import gltf2_blender_gather_materials_pbr_metallic_roughness +from .extensions.gltf2_blender_gather_materials_volume import export_volume +from .extensions.gltf2_blender_gather_materials_emission import export_emission_factor, \ + export_emission_texture, export_emission_strength_extension +from .extensions.gltf2_blender_gather_materials_sheen import export_sheen +from .extensions.gltf2_blender_gather_materials_specular import export_specular +from .extensions.gltf2_blender_gather_materials_transmission import export_transmission +from .extensions.gltf2_blender_gather_materials_clearcoat import export_clearcoat +from .extensions.gltf2_blender_gather_materials_anisotropy import export_anisotropy +from .extensions.gltf2_blender_gather_materials_ior import export_ior +from .gltf2_blender_search_node_tree import \ + has_image_node_from_socket, \ + get_socket_from_gltf_material_node, \ + get_socket, \ + get_node_socket, \ + get_material_nodes, \ + NodeSocket, \ + get_vertex_color_info + + +@cached +def get_material_cache_key(blender_material, export_settings): + # Use id of material + # Do not use bpy.types that can be unhashable + # Do not use material name, that can be not unique (when linked) + # We use here the id of original material as for apply modifier, the material has a new id + # So, when no modifier applied => original is the same id + # And when modifier applied => new one is different id, but original is still the same + return ( + (id(blender_material.original),), + ) + + +@cached_by_key(key=get_material_cache_key) +def gather_material(blender_material, export_settings): + """ + Gather the material used by the blender primitive. 
+ + :param blender_material: the blender material used in the glTF primitive + :param export_settings: + :return: a glTF material + """ + if not __filter_material(blender_material, export_settings): + return None, {"uv_info": {}, "vc_info": {'color': None, 'alpha': None, + 'color_type': None, 'alpha_type': None}, "udim_info": {}} + + # Reset exported images / textures nodes + export_settings['exported_texture_nodes'] = [] + if blender_material.node_tree and blender_material.use_nodes: + nodes = get_material_nodes( + blender_material.node_tree, [ + blender_material.node_tree], bpy.types.ShaderNodeTexImage) + else: + nodes = [] + for node in nodes: + if node[0].get("used", None) is not None: + del(node[0]['used']) + + mat_unlit, uvmap_info, vc_info, udim_info = __export_unlit(blender_material, export_settings) + if mat_unlit is not None: + export_user_extensions('gather_material_hook', export_settings, mat_unlit, blender_material) + return mat_unlit, {"uv_info": uvmap_info, "vc_info": vc_info, "udim_info": udim_info} + + orm_texture = __gather_orm_texture(blender_material, export_settings) + + emissive_factor = __gather_emissive_factor(blender_material, export_settings) + emissive_texture, uvmap_info_emissive, udim_info_emissive = __gather_emissive_texture( + blender_material, export_settings) + extensions, uvmap_info_extensions, udim_info_extensions = __gather_extensions( + blender_material, emissive_factor, export_settings) + normal_texture, uvmap_info_normal, udim_info_normal = __gather_normal_texture(blender_material, export_settings) + occlusion_texture, uvmap_info_occlusion, udim_occlusion = __gather_occlusion_texture( + blender_material, orm_texture, export_settings) + pbr_metallic_roughness, uvmap_info_pbr_metallic_roughness, vc_info, udim_info_prb_mr = __gather_pbr_metallic_roughness( + blender_material, orm_texture, export_settings) + + if any([i > 1.0 for i in emissive_factor or []]) is True: + # Strength is set on extension + emission_strength = max(emissive_factor) + emissive_factor = [f / emission_strength for f in emissive_factor] + + material = gltf2_io.Material( + alpha_cutoff=__gather_alpha_cutoff(blender_material, export_settings), + alpha_mode=__gather_alpha_mode(blender_material, export_settings), + double_sided=__gather_double_sided(blender_material, extensions, export_settings), + emissive_factor=emissive_factor, + emissive_texture=emissive_texture, + extensions=extensions, + extras=__gather_extras(blender_material, export_settings), + name=__gather_name(blender_material, export_settings), + normal_texture=normal_texture, + occlusion_texture=occlusion_texture, + pbr_metallic_roughness=pbr_metallic_roughness + ) + + uvmap_infos = {} + udim_infos = {} + + # Get all textures nodes that are not used in the material + if export_settings['gltf_unused_textures'] is True: + if blender_material.node_tree and blender_material.use_nodes: + nodes = get_material_nodes( + blender_material.node_tree, [ + blender_material.node_tree], bpy.types.ShaderNodeTexImage) + else: + nodes = [] + cpt_additional = 0 + for node in nodes: + if node[0].get("used", None) is not None: + del(node[0]['used']) + continue + + s = NodeSocket(node[0].outputs[0], node[1]) + tex, uv_info_additional, udim_info, _ = gltf2_blender_gather_texture_info.gather_texture_info( + s, (s,), export_settings) + if tex is not None: + export_settings['exported_images'][node[0].image.name] = 1 # Fully used + uvmap_infos.update({'additional' + str(cpt_additional): uv_info_additional}) + udim_infos.update({'additional' + 
str(cpt_additional): udim_info}) + cpt_additional += 1 + export_settings['additional_texture_export'].append(tex) + + # Reset + if blender_material.node_tree and blender_material.use_nodes: + nodes = get_material_nodes( + blender_material.node_tree, [ + blender_material.node_tree], bpy.types.ShaderNodeTexImage) + else: + nodes = [] + for node in nodes: + if node[0].get("used", None) is not None: + del(node[0]['used']) + + uvmap_infos.update(uvmap_info_emissive) + uvmap_infos.update(uvmap_info_extensions) + uvmap_infos.update(uvmap_info_normal) + uvmap_infos.update(uvmap_info_occlusion) + uvmap_infos.update(uvmap_info_pbr_metallic_roughness) + + udim_infos = {} + udim_infos.update(udim_info_prb_mr) + udim_infos.update(udim_info_normal) + udim_infos.update(udim_info_emissive) + udim_infos.update(udim_occlusion) + udim_infos.update(udim_info_extensions) + + # If emissive is set, from an emissive node (not PBR) + # We need to set manually default values for + # pbr_metallic_roughness.baseColor + if material.emissive_factor is not None and get_node_socket( + blender_material.node_tree, + bpy.types.ShaderNodeBsdfPrincipled, + "Base Color").socket is None: + material.pbr_metallic_roughness = gltf2_blender_gather_materials_pbr_metallic_roughness.get_default_pbr_for_emissive_node() + + export_user_extensions('gather_material_hook', export_settings, material, blender_material) + + # Now we have exported the material itself, we need to store some additional data + # This will be used when trying to export some KHR_animation_pointer + + if len(export_settings['current_paths']) > 0: + export_settings['KHR_animation_pointer']['materials'][id(blender_material)] = {} + export_settings['KHR_animation_pointer']['materials'][id( + blender_material)]['paths'] = export_settings['current_paths'].copy() + export_settings['KHR_animation_pointer']['materials'][id(blender_material)]['glTF_material'] = material + + export_settings['current_paths'] = {} + + return material, {"uv_info": uvmap_infos, "vc_info": vc_info, "udim_info": udim_infos} + + +def get_new_material_texture_shared(base, node): + if node is None: + return + if callable(node) is True: + return + if node.__str__().startswith('__'): + return + if type(node) in [gltf2_io.TextureInfo, + gltf2_io.MaterialOcclusionTextureInfoClass, + gltf2_io.MaterialNormalTextureInfoClass]: + node.index = base.index + else: + if hasattr(node, '__dict__'): + for attr, value in node.__dict__.items(): + get_new_material_texture_shared(getattr(base, attr), value) + else: + # For extensions (on a dict) + if type(node).__name__ == 'dict': + for i in node.keys(): + get_new_material_texture_shared(base[i], node[i]) + + +def __filter_material(blender_material, export_settings): + return export_settings['gltf_materials'] + + +def __gather_alpha_cutoff(blender_material, export_settings): + if blender_material.blend_method == 'CLIP': + + path_ = {} + path_['length'] = 1 + path_['path'] = "/materials/XXX/alphaCutoff" + export_settings['current_paths']['alpha_threshold'] = path_ + + return blender_material.alpha_threshold + return None + + +def __gather_alpha_mode(blender_material, export_settings): + if blender_material.blend_method == 'CLIP': + return 'MASK' + elif blender_material.blend_method in ['BLEND', 'HASHED']: + return 'BLEND' + return None + + +def __gather_double_sided(blender_material, extensions, export_settings): + + # If user create a volume extension, we force double sided to False + if 'KHR_materials_volume' in extensions: + return False + + if not 
blender_material.use_backface_culling: + return True + return None + + +def __gather_emissive_factor(blender_material, export_settings): + return export_emission_factor(blender_material, export_settings) + + +def __gather_emissive_texture(blender_material, export_settings): + return export_emission_texture(blender_material, export_settings) + + +def __gather_extensions(blender_material, emissive_factor, export_settings): + extensions = {} + + uvmap_infos = {} + udim_infos = {} + + # KHR_materials_clearcoat + clearcoat_extension, uvmap_info, udim_info_clearcoat = export_clearcoat(blender_material, export_settings) + if clearcoat_extension: + extensions["KHR_materials_clearcoat"] = clearcoat_extension + uvmap_infos.update(uvmap_info) + udim_infos.update(udim_info_clearcoat) + + # KHR_materials_transmission + + transmission_extension, uvmap_info, udim_info_transmission = export_transmission(blender_material, export_settings) + if transmission_extension: + extensions["KHR_materials_transmission"] = transmission_extension + uvmap_infos.update(uvmap_info) + udim_infos.update(udim_info_transmission) + + # KHR_materials_emissive_strength + emissive_strength_extension = export_emission_strength_extension(emissive_factor, export_settings) + if emissive_strength_extension: + extensions["KHR_materials_emissive_strength"] = emissive_strength_extension + + # KHR_materials_volume + + volume_extension, uvmap_info, udim_info = export_volume(blender_material, export_settings) + if volume_extension: + extensions["KHR_materials_volume"] = volume_extension + uvmap_infos.update(uvmap_info) + udim_infos.update(udim_info) + + # KHR_materials_specular + specular_extension, uvmap_info, udim_info = export_specular(blender_material, export_settings) + if specular_extension: + extensions["KHR_materials_specular"] = specular_extension + uvmap_infos.update(uvmap_info) + udim_infos.update(udim_info) + + # KHR_materials_sheen + sheen_extension, uvmap_info, udim_info = export_sheen(blender_material, export_settings) + if sheen_extension: + extensions["KHR_materials_sheen"] = sheen_extension + uvmap_infos.update(uvmap_info) + udim_infos.update(udim_info) + + # KHR_materials_anisotropy + anisotropy_extension, uvmap_info, udim_info = export_anisotropy(blender_material, export_settings) + if anisotropy_extension: + extensions["KHR_materials_anisotropy"] = anisotropy_extension + uvmap_infos.update(uvmap_info) + udim_infos.update(udim_info) + + # KHR_materials_ior + # Keep this extension at the end, because we export it only if some others are exported + ior_extension = export_ior(blender_material, extensions, export_settings) + if ior_extension: + extensions["KHR_materials_ior"] = ior_extension + + return extensions, uvmap_infos, udim_infos + + +def __gather_extras(blender_material, export_settings): + if export_settings['gltf_extras']: + return generate_extras(blender_material) + return None + + +def __gather_name(blender_material, export_settings): + return blender_material.name + + +def __gather_normal_texture(blender_material, export_settings): + normal = get_socket(blender_material.node_tree, blender_material.use_nodes, "Normal") + normal_texture, uvmap_info, udim_info, _ = gltf2_blender_gather_texture_info.gather_material_normal_texture_info_class( + normal, (normal,), export_settings) + + if len(export_settings['current_texture_transform']) != 0: + for k in export_settings['current_texture_transform'].keys(): + path_ = {} + path_['length'] = export_settings['current_texture_transform'][k]['length'] + path_['path'] = 
export_settings['current_texture_transform'][k]['path'].replace( + "YYY", "normalTexture/extensions") + path_['vector_type'] = export_settings['current_texture_transform'][k]['vector_type'] + export_settings['current_paths'][k] = path_ + + export_settings['current_texture_transform'] = {} + + if len(export_settings['current_normal_scale']) != 0: + for k in export_settings['current_normal_scale'].keys(): + path_ = {} + path_['length'] = export_settings['current_normal_scale'][k]['length'] + path_['path'] = export_settings['current_normal_scale'][k]['path'].replace("YYY", "normalTexture") + export_settings['current_paths'][k] = path_ + + export_settings['current_normal_scale'] = {} + + return normal_texture, { + "normalTexture": uvmap_info}, { + 'normalTexture': udim_info} if len( + udim_info.keys()) > 0 else {} + + +def __gather_orm_texture(blender_material, export_settings): + # Check for the presence of Occlusion, Roughness, Metallic sharing a single image. + # If not fully shared, return None, so the images will be cached and processed separately. + + occlusion = get_socket(blender_material.node_tree, blender_material.use_nodes, "Occlusion") + if occlusion.socket is None or not has_image_node_from_socket(occlusion, export_settings): + occlusion = get_socket_from_gltf_material_node( + blender_material.node_tree, blender_material.use_nodes, "Occlusion") + if occlusion.socket is None or not has_image_node_from_socket(occlusion, export_settings): + return None + + metallic_socket = get_socket(blender_material.node_tree, blender_material.use_nodes, "Metallic") + roughness_socket = get_socket(blender_material.node_tree, blender_material.use_nodes, "Roughness") + + hasMetal = metallic_socket.socket is not None and has_image_node_from_socket(metallic_socket, export_settings) + hasRough = roughness_socket.socket is not None and has_image_node_from_socket(roughness_socket, export_settings) + + # Warning: for default socket, do not use NodeSocket object, because it will break cache + # Using directlty the Blender socket object + if not hasMetal and not hasRough: + metallic_roughness = get_socket_from_gltf_material_node( + blender_material.node_tree, blender_material.use_nodes, "MetallicRoughness") + if metallic_roughness.socket is None or not has_image_node_from_socket(metallic_roughness, export_settings): + return None + result = (occlusion, metallic_roughness) + elif not hasMetal: + result = (occlusion, roughness_socket) + elif not hasRough: + result = (occlusion, metallic_socket) + else: + result = (occlusion, roughness_socket, metallic_socket) + + if not gltf2_blender_gather_texture_info.check_same_size_images(result, export_settings): + export_settings['log'].info( + "Occlusion and metal-roughness texture will be exported separately " + "(use same-sized images if you want them combined)" + ) + return None + + # Double-check this will past the filter in texture_info + info, _, _, _ = gltf2_blender_gather_texture_info.gather_texture_info(result[0], result, export_settings) + if info is None: + return None + + if len(export_settings['current_texture_transform']) != 0: + for k in export_settings['current_texture_transform'].keys(): + path_ = {} + path_['length'] = export_settings['current_texture_transform'][k]['length'] + path_['path'] = export_settings['current_texture_transform'][k]['path'].replace( + "YYY", "occlusionTexture/extensions") + path_['vector_type'] = export_settings['current_texture_transform'][k]['vector_type'] + export_settings['current_paths'][k] = path_ + + # This case can't 
happen because we are going to keep only 1 UVMap + export_settings['log'].warning("This case should not happen, please report a bug") + for k in export_settings['current_texture_transform'].keys(): + path_ = {} + path_['length'] = export_settings['current_texture_transform'][k]['length'] + path_['path'] = export_settings['current_texture_transform'][k]['path'].replace( + "YYY", "pbrMetallicRoughness/metallicRoughnessTexture/extensions") + path_['vector_type'] = export_settings['current_texture_transform'][k]['vector_type'] + export_settings['current_paths'][k] = path_ + + export_settings['current_texture_transform'] = {} + + return result + + +def __gather_occlusion_texture(blender_material, orm_texture, export_settings): + occlusion = get_socket(blender_material.node_tree, blender_material.use_nodes, "Occlusion") + if occlusion.socket is None: + occlusion = get_socket_from_gltf_material_node( + blender_material.node_tree, blender_material.use_nodes, "Occlusion") + if occlusion.socket is None: + return None, {}, {} + occlusion_texture, uvmap_info, udim_info, _ = gltf2_blender_gather_texture_info.gather_material_occlusion_texture_info_class( + occlusion, orm_texture or (occlusion,), export_settings) + + if len(export_settings['current_occlusion_strength']) != 0: + for k in export_settings['current_occlusion_strength'].keys(): + path_ = {} + path_['length'] = export_settings['current_occlusion_strength'][k]['length'] + path_['path'] = export_settings['current_occlusion_strength'][k]['path'] + path_['reverse'] = export_settings['current_occlusion_strength'][k]['reverse'] + export_settings['current_paths'][k] = path_ + + export_settings['current_occlusion_strength'] = {} + + if len(export_settings['current_texture_transform']) != 0: + for k in export_settings['current_texture_transform'].keys(): + path_ = {} + path_['length'] = export_settings['current_texture_transform'][k]['length'] + path_['path'] = export_settings['current_texture_transform'][k]['path'].replace( + "YYY", "occlusionTexture/extensions") + path_['vector_type'] = export_settings['current_texture_transform'][k]['vector_type'] + export_settings['current_paths'][k] = path_ + + export_settings['current_texture_transform'] = {} + + return occlusion_texture, \ + {"occlusionTexture": uvmap_info}, {'occlusionTexture': udim_info} if len(udim_info.keys()) > 0 else {} + + +def __gather_pbr_metallic_roughness(blender_material, orm_texture, export_settings): + return gltf2_blender_gather_materials_pbr_metallic_roughness.gather_material_pbr_metallic_roughness( + blender_material, + orm_texture, + export_settings) + + +def __export_unlit(blender_material, export_settings): + gltf2_unlit = gltf2_blender_gather_materials_unlit + + info = gltf2_unlit.detect_shadeless_material( + blender_material.node_tree, + blender_material.use_nodes, + export_settings) + if info is None: + return None, {}, {"color": None, "alpha": None, "color_type": None, "alpha_type": None}, {} + + base_color_texture, uvmap_info, udim_info = gltf2_unlit.gather_base_color_texture(info, export_settings) + + vc_info = get_vertex_color_info(info.get('rgb_socket'), info.get('alpha_socket'), export_settings) + + material = gltf2_io.Material( + alpha_cutoff=__gather_alpha_cutoff(blender_material, export_settings), + alpha_mode=__gather_alpha_mode(blender_material, export_settings), + double_sided=__gather_double_sided(blender_material, {}, export_settings), + extensions={"KHR_materials_unlit": Extension("KHR_materials_unlit", {}, required=False)}, + 
extras=__gather_extras(blender_material, export_settings), + name=__gather_name(blender_material, export_settings), + emissive_factor=None, + emissive_texture=None, + normal_texture=None, + occlusion_texture=None, + + pbr_metallic_roughness=gltf2_io.MaterialPBRMetallicRoughness( + base_color_factor=gltf2_unlit.gather_base_color_factor(info, export_settings), + base_color_texture=base_color_texture, + metallic_factor=0.0, + roughness_factor=0.9, + metallic_roughness_texture=None, + extensions=None, + extras=None, + ) + ) + + export_user_extensions('gather_material_unlit_hook', export_settings, material, blender_material) + + # Now we have exported the material itself, we need to store some additional data + # This will be used when trying to export some KHR_animation_pointer + + if len(export_settings['current_paths']) > 0: + export_settings['KHR_animation_pointer']['materials'][id(blender_material)] = {} + export_settings['KHR_animation_pointer']['materials'][id( + blender_material)]['paths'] = export_settings['current_paths'].copy() + export_settings['KHR_animation_pointer']['materials'][id(blender_material)]['glTF_material'] = material + + export_settings['current_paths'] = {} + + return material, uvmap_info, vc_info, udim_info + + +def get_active_uvmap_index(blender_mesh): + # retrieve active render UVMap + active_uvmap_idx = 0 + for i in range(len(blender_mesh.uv_layers)): + if blender_mesh.uv_layers[i].active_render is True: + active_uvmap_idx = i + break + return active_uvmap_idx + + +def get_final_material(mesh, blender_material, attr_indices, base_material, uvmap_info, export_settings): + + # First, we need to calculate all index of UVMap + + indices = {} + additional_indices = 0 + + for m, v in uvmap_info.items(): + + if m.startswith("additional") and additional_indices <= int(m[10:]): + additional_indices = +1 + + if 'type' not in v.keys(): + continue + + if v['type'] == 'Fixed': + i = mesh.uv_layers.find(v['value']) + if i >= 0: + indices[m] = i + else: + # Using active index + indices[m] = get_active_uvmap_index(mesh) + elif v['type'] == 'Active': + indices[m] = get_active_uvmap_index(mesh) + elif v['type'] == "Attribute": + indices[m] = attr_indices[v['value']] + + # Now we have all needed indices, let's create a set that can be used for + # caching, so containing all possible textures + all_textures = get_all_textures(additional_indices) + + caching_indices = [] + for tex in all_textures: + caching_indices.append(indices.get(tex, None)) + + caching_indices = [i if i != 0 else None for i in caching_indices] + caching_indices = tuple(caching_indices) + + material = __get_final_material_with_indices(blender_material, base_material, caching_indices, export_settings) + + return material + + +@cached +def caching_material_tex_indices(blender_material, material, caching_indices, export_settings): + return ( + (id(blender_material),), + (caching_indices,) + ) + + +@cached_by_key(key=caching_material_tex_indices) +def __get_final_material_with_indices(blender_material, base_material, caching_indices, export_settings): + + if base_material is None: + return None + + if all([i is None for i in caching_indices]): + return base_material + + material = deepcopy(base_material) + get_new_material_texture_shared(base_material, material) + + for tex, ind in zip(get_all_textures(len(caching_indices) - len(get_all_textures())), caching_indices): + + if ind is None: + continue + + # Need to check if texture is not None, because it can be the case for UDIM on non managed UDIM textures + if tex == 
"emissiveTexture": + if material.emissive_texture: + material.emissive_texture.tex_coord = ind + elif tex == "normalTexture": + if material.normal_texture: + material.normal_texture.tex_coord = ind + elif tex == "occlusionTexture": + if material.occlusion_texture: + material.occlusion_texture.tex_coord = ind + elif tex == "baseColorTexture": + if material.pbr_metallic_roughness.base_color_texture: + material.pbr_metallic_roughness.base_color_texture.tex_coord = ind + elif tex == "metallicRoughnessTexture": + if material.pbr_metallic_roughness.metallic_roughness_texture: + material.pbr_metallic_roughness.metallic_roughness_texture.tex_coord = ind + elif tex == "clearcoatTexture": + if material.extensions["KHR_materials_clearcoat"].extension['clearcoatTexture']: + material.extensions["KHR_materials_clearcoat"].extension['clearcoatTexture'].tex_coord = ind + elif tex == "clearcoatRoughnessTexture": + if material.extensions["KHR_materials_clearcoat"].extension['clearcoatRoughnessTexture']: + material.extensions["KHR_materials_clearcoat"].extension['clearcoatRoughnessTexture'].tex_coord = ind + elif tex == "clearcoatNormalTexture": + if material.extensions["KHR_materials_clearcoat"].extension['clearcoatNormalTexture']: + material.extensions["KHR_materials_clearcoat"].extension['clearcoatNormalTexture'].tex_coord = ind + elif tex == "transmissionTexture": + if material.extensions["KHR_materials_transmission"].extension['transmissionTexture']: + material.extensions["KHR_materials_transmission"].extension['transmissionTexture'].tex_coord = ind + elif tex == "specularTexture": + if material.extensions["KHR_materials_specular"].extension['specularTexture']: + material.extensions["KHR_materials_specular"].extension['specularTexture'].tex_coord = ind + elif tex == "specularColorTexture": + if material.extensions["KHR_materials_specular"].extension['specularColorTexture']: + material.extensions["KHR_materials_specular"].extension['specularColorTexture'].tex_coord = ind + elif tex == "sheenColorTexture": + if material.extensions["KHR_materials_sheen"].extension['sheenColorTexture']: + material.extensions["KHR_materials_sheen"].extension['sheenColorTexture'].tex_coord = ind + elif tex == "sheenRoughnessTexture": + if material.extensions["KHR_materials_sheen"].extension['sheenRoughnessTexture']: + material.extensions["KHR_materials_sheen"].extension['sheenRoughnessTexture'].tex_coord = ind + elif tex == "thicknessTexture": + if material.extensions["KHR_materials_volume"].extension['thicknessTexture']: + material.extensions["KHR_materials_volume"].extension['thicknessTexture'].tex_ccord = ind + elif tex == "anisotropyTexture": + if material.extensions["KHR_materials_anisotropy"].extension['anisotropyTexture']: + material.extensions["KHR_materials_anisotropy"].extension['anisotropyTexture'].tex_coord = ind + elif tex.startswith("additional"): + export_settings['additional_texture_export'][export_settings['additional_texture_export_current_idx'] + + int(tex[10:])].tex_coord = ind + else: + export_settings['log'].error("some Textures tex coord are not managed") + + export_settings['additional_texture_export_current_idx'] = len(export_settings['additional_texture_export']) + + return material + + +def get_material_from_idx(material_idx, materials, export_settings): + mat = None + if export_settings['gltf_materials'] == "EXPORT" and material_idx is not None: + if materials: + i = material_idx if material_idx < len(materials) else -1 + mat = materials[i] + return mat + + +def get_base_material(material_idx, 
materials, export_settings): + + export_settings['current_paths'] = {} + + material = None + material_info = { + "uv_info": {}, + "vc_info": { + "color": None, + "alpha": None, + "color_type": None, + "alpha_type": None + }, + "udim_info": {} + } + + mat = get_material_from_idx(material_idx, materials, export_settings) + if mat is not None: + material, material_info = gather_material( + mat, + export_settings + ) + + if material is None: + # If no material, the mesh can still have vertex color + # So, retrieving it + material_info["vc_info"] = {"color_type": "active", "alpha_type": "active"} + + return material, material_info + + +def get_all_textures(idx=0): + # Make sure to have all texture here, always in same order + tab = [] + + tab.append("emissiveTexture") + tab.append("normalTexture") + tab.append("occlusionTexture") + tab.append("baseColorTexture") + tab.append("metallicRoughnessTexture") + tab.append("clearcoatTexture") + tab.append("clearcoatRoughnessTexture") + tab.append("clearcoatNormalTexture") + tab.append("transmissionTexture") + tab.append("specularTexture") + tab.append("specularColorTexture") + tab.append("sheenColorTexture") + tab.append("sheenRoughnessTexture") + tab.append("thicknessTexture") + tab.append("anisotropyTexture") + + for i in range(idx): + tab.append("additional" + str(i)) + + return tab diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/material/gltf2_blender_gather_materials_pbr_metallic_roughness.py b/scripts/addons_core/io_scene_gltf2/blender/exp/material/gltf2_blender_gather_materials_pbr_metallic_roughness.py new file mode 100644 index 00000000000..4a59da5130c --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/material/gltf2_blender_gather_materials_pbr_metallic_roughness.py @@ -0,0 +1,260 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + + +import bpy +from ....io.com import gltf2_io +from ....io.exp.gltf2_io_user_extensions import export_user_extensions +from ..gltf2_blender_gather_cache import cached +from .gltf2_blender_search_node_tree import get_vertex_color_info +from .gltf2_blender_gather_texture_info import gather_texture_info +from .gltf2_blender_search_node_tree import \ + get_socket_from_gltf_material_node, \ + has_image_node_from_socket, \ + get_const_from_default_value_socket, \ + get_socket, \ + get_factor_from_socket + + +@cached +def gather_material_pbr_metallic_roughness(blender_material, orm_texture, export_settings): + if not __filter_pbr_material(blender_material, export_settings): + return None, {}, {'color': None, 'alpha': None, 'color_type': None, 'alpha_type': None}, {} + + uvmap_infos = {} + udim_infos = {} + + base_color_texture, uvmap_info, udim_info_bc, _ = __gather_base_color_texture(blender_material, export_settings) + uvmap_infos.update(uvmap_info) + udim_infos.update(udim_info_bc) + metallic_roughness_texture, uvmap_info, udim_info_mr, _ = __gather_metallic_roughness_texture( + blender_material, orm_texture, export_settings) + uvmap_infos.update(uvmap_info) + udim_infos.update(udim_info_mr) + + base_color_factor, vc_info = __gather_base_color_factor(blender_material, export_settings) + + material = gltf2_io.MaterialPBRMetallicRoughness( + base_color_factor=base_color_factor, + base_color_texture=base_color_texture, + extensions=__gather_extensions(blender_material, export_settings), + extras=__gather_extras(blender_material, export_settings), + metallic_factor=__gather_metallic_factor(blender_material, export_settings), + 
metallic_roughness_texture=metallic_roughness_texture, + roughness_factor=__gather_roughness_factor(blender_material, export_settings) + ) + + export_user_extensions( + 'gather_material_pbr_metallic_roughness_hook', + export_settings, + material, + blender_material, + orm_texture) + + return material, uvmap_infos, vc_info, udim_infos + + +def __filter_pbr_material(blender_material, export_settings): + return True + + +def __gather_base_color_factor(blender_material, export_settings): + if not blender_material.use_nodes: + return [*blender_material.diffuse_color[:3], 1.0], {"color": None, + "alpha": None, "color_type": None, "alpha_type": None} + + rgb, alpha = None, None + + path_alpha = None + path = None + alpha_socket = get_socket(blender_material.node_tree, blender_material.use_nodes, "Alpha") + if alpha_socket.socket is not None and isinstance(alpha_socket.socket, bpy.types.NodeSocket): + if export_settings['gltf_image_format'] != "NONE": + alpha, path_alpha = get_factor_from_socket(alpha_socket, kind='VALUE') + else: + alpha, path_alpha = get_const_from_default_value_socket(alpha_socket, kind='VALUE') + + base_color_socket = get_socket(blender_material.node_tree, blender_material.use_nodes, "Base Color") + if base_color_socket.socket is None: + base_color_socket = get_socket(blender_material.node_tree, blender_material.use_nodes, "BaseColor") + if base_color_socket.socket is None: + base_color_socket = get_socket(blender_material.node_tree, blender_material.use_nodes, "BaseColor") + if base_color_socket.socket is None: + base_color_socket = get_socket_from_gltf_material_node( + blender_material.node_tree, blender_material.use_nodes, "BaseColorFactor") + if base_color_socket.socket is not None and isinstance(base_color_socket.socket, bpy.types.NodeSocket): + if export_settings['gltf_image_format'] != "NONE": + rgb, path = get_factor_from_socket(base_color_socket, kind='RGB') + else: + rgb, path = get_const_from_default_value_socket(base_color_socket, kind='RGB') + + # Storing path for KHR_animation_pointer + if path is not None: + path_ = {} + path_['length'] = 3 + path_['path'] = "/materials/XXX/pbrMetallicRoughness/baseColorFactor" + path_['additional_path'] = path_alpha + export_settings['current_paths'][path] = path_ + + # Storing path for KHR_animation_pointer + if path_alpha is not None: + path_ = {} + path_['length'] = 1 + path_['path'] = "/materials/XXX/pbrMetallicRoughness/baseColorFactor" + path_['additional_path'] = path + export_settings['current_paths'][path_alpha] = path_ + + if rgb is None: + rgb = [1.0, 1.0, 1.0] + if alpha is None: + alpha = 1.0 + + # Need to clamp between 0.0 and 1.0: Blender color can be outside this range + rgb = [max(min(c, 1.0), 0.0) for c in rgb] + + rgba = [*rgb, alpha] + + vc_info = get_vertex_color_info(base_color_socket, alpha_socket, export_settings) + + if rgba == [1, 1, 1, 1]: + return None, vc_info + return rgba, vc_info + + +def __gather_base_color_texture(blender_material, export_settings): + base_color_socket = get_socket(blender_material.node_tree, blender_material.use_nodes, "Base Color") + if base_color_socket.socket is None: + base_color_socket = get_socket(blender_material.node_tree, blender_material.use_nodes, "BaseColor") + if base_color_socket.socket is None: + base_color_socket = get_socket_from_gltf_material_node( + blender_material.node_tree, blender_material.use_nodes, "BaseColor") + + alpha_socket = get_socket(blender_material.node_tree, blender_material.use_nodes, "Alpha") + + # keep sockets that have some texture : 
color and/or alpha + inputs = tuple( + socket for socket in [base_color_socket, alpha_socket] + if socket.socket is not None and has_image_node_from_socket(socket, export_settings) + ) + if not inputs: + return None, {}, {}, None + + tex, uvmap_info, udim_info, factor = gather_texture_info(inputs[0], inputs, export_settings) + + if len(export_settings['current_texture_transform']) != 0: + for k in export_settings['current_texture_transform'].keys(): + path_ = {} + path_['length'] = export_settings['current_texture_transform'][k]['length'] + path_['path'] = export_settings['current_texture_transform'][k]['path'].replace( + "YYY", "pbrMetallicRoughness/baseColorTexture/extensions") + path_['vector_type'] = export_settings['current_texture_transform'][k]['vector_type'] + export_settings['current_paths'][k] = path_ + + export_settings['current_texture_transform'] = {} + + return tex, { + 'baseColorTexture': uvmap_info}, { + 'baseColorTexture': udim_info} if len( + udim_info.keys()) > 0 else {}, factor + + +def __gather_extensions(blender_material, export_settings): + return None + + +def __gather_extras(blender_material, export_settings): + return None + + +def __gather_metallic_factor(blender_material, export_settings): + if not blender_material.use_nodes: + return blender_material.metallic + + metallic_socket = get_socket(blender_material.node_tree, blender_material.use_nodes, "Metallic") + if metallic_socket.socket is None: + metallic_socket = get_socket_from_gltf_material_node( + blender_material.node_tree, blender_material.use_nodes, "MetallicFactor") + if metallic_socket.socket is not None and isinstance(metallic_socket.socket, bpy.types.NodeSocket): + fac, path = get_factor_from_socket(metallic_socket, kind='VALUE') + + # Storing path for KHR_animation_pointer + if path is not None: + path_ = {} + path_['length'] = 1 + path_['path'] = "/materials/XXX/pbrMetallicRoughness/metallicFactor" + export_settings['current_paths'][path] = path_ + + return fac if fac != 1 else None + + return None + + +def __gather_metallic_roughness_texture(blender_material, orm_texture, export_settings): + metallic_socket = get_socket(blender_material.node_tree, blender_material.use_nodes, "Metallic") + roughness_socket = get_socket(blender_material.node_tree, blender_material.use_nodes, "Roughness") + + hasMetal = metallic_socket.socket is not None and has_image_node_from_socket(metallic_socket, export_settings) + hasRough = roughness_socket.socket is not None and has_image_node_from_socket(roughness_socket, export_settings) + + # Warning: for default socket, do not use NodeSocket object, because it will break cache + # Using directlty the Blender socket object + if not hasMetal and not hasRough: + metallic_roughness = get_socket_from_gltf_material_node( + blender_material.node_tree, blender_material.use_nodes, "MetallicRoughness") + if metallic_roughness.socket is None or not has_image_node_from_socket(metallic_roughness, export_settings): + return None, {}, {}, None + else: + texture_input = (metallic_roughness, metallic_roughness) + elif not hasMetal: + texture_input = (roughness_socket,) + elif not hasRough: + texture_input = (metallic_socket,) + else: + texture_input = (metallic_socket, roughness_socket) + + tex, uvmap_info, udim_info, factor = gather_texture_info( + + texture_input[0], + orm_texture or texture_input, + export_settings, + ) + + return tex, { + 'metallicRoughnessTexture': uvmap_info}, { + 'metallicRoughnessTexture': udim_info} if len( + udim_info.keys()) > 0 else {}, factor + + +def 
__gather_roughness_factor(blender_material, export_settings): + if not blender_material.use_nodes: + return blender_material.roughness + + roughness_socket = get_socket(blender_material.node_tree, blender_material.use_nodes, "Roughness") + if roughness_socket is None: + roughness_socket = get_socket_from_gltf_material_node( + blender_material.node_tree, blender_material.use_nodes, "RoughnessFactor") + if roughness_socket.socket is not None and isinstance(roughness_socket.socket, bpy.types.NodeSocket): + fac, path = get_factor_from_socket(roughness_socket, kind='VALUE') + + # Storing path for KHR_animation_pointer + if path is not None: + path_ = {} + path_['length'] = 1 + path_['path'] = "/materials/XXX/pbrMetallicRoughness/roughnessFactor" + export_settings['current_paths'][path] = path_ + + return fac if fac != 1 else None + return None + + +def get_default_pbr_for_emissive_node(): + return gltf2_io.MaterialPBRMetallicRoughness( + base_color_factor=[0.0, 0.0, 0.0, 1.0], + base_color_texture=None, + extensions=None, + extras=None, + metallic_factor=None, + metallic_roughness_texture=None, + roughness_factor=None + ) diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/material/gltf2_blender_gather_materials_unlit.py b/scripts/addons_core/io_scene_gltf2/blender/exp/material/gltf2_blender_gather_materials_unlit.py new file mode 100644 index 00000000000..2a986b9de07 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/material/gltf2_blender_gather_materials_unlit.py @@ -0,0 +1,180 @@ +# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +from . import gltf2_blender_gather_texture_info +from .gltf2_blender_search_node_tree import \ + get_socket, \ + NodeSocket, \ + previous_socket, \ + previous_node, \ + get_factor_from_socket + + +def detect_shadeless_material(blender_material_node_tree, use_nodes, export_settings): + """Detect if this material is "shadeless" ie. should be exported + with KHR_materials_unlit. Returns None if not. Otherwise, returns + a dict with info from parsing the node tree. + """ + if not use_nodes: + return None + + # Old Background node detection (unlikely to happen) + bg_socket = get_socket(blender_material_node_tree, use_nodes, "Background") + if bg_socket.socket is not None: + return {'rgb_socket': bg_socket} + + # Look for + # * any color socket, connected to... + # * optionally, the lightpath trick, connected to... + # * optionally, a mix-with-transparent (for alpha), connected to... + # * the output node + + info = {} + + # TODOSNode this can be a function call + for node in blender_material_node_tree.nodes: + if node.type == 'OUTPUT_MATERIAL' and node.is_active_output: + socket = node.inputs[0] + break + else: + return None + + socket = NodeSocket(socket, [blender_material_node_tree]) + + # Be careful not to misidentify a lightpath trick as mix-alpha. 
+ result = __detect_lightpath_trick(socket) + if result is not None: + socket = result['next_socket'] + else: + result = __detect_mix_alpha(socket) + if result is not None: + socket = result['next_socket'] + info['alpha_socket'] = result['alpha_socket'] + + result = __detect_lightpath_trick(socket) + if result is not None: + socket = result['next_socket'] + + # Check if a color socket, or connected to a color socket + if socket.socket.type != 'RGBA': + from_socket = previous_socket(socket) + if from_socket.socket is None: + return None + if from_socket.socket.type != 'RGBA': + return None + + info['rgb_socket'] = socket + return info + + +def __detect_mix_alpha(socket): + # Detects this (used for an alpha hookup) + # + # [ Mix ] + # alpha_socket => [Factor ] => socket + # [Transparent] => [Shader ] + # next_socket => [Shader ] + # + # Returns None if not detected. Otherwise, a dict containing alpha_socket + # and next_socket. + prev = previous_node(socket) + if prev.node is None or prev.node.type != 'MIX_SHADER': + return None + in1 = previous_node(NodeSocket(prev.node.inputs[1], prev.group_path)) + if in1.node is None or in1.node.type != 'BSDF_TRANSPARENT': + return None + return { + 'alpha_socket': NodeSocket(prev.node.inputs[0], prev.group_path), + 'next_socket': NodeSocket(prev.node.inputs[2], prev.group_path), + } + + +def __detect_lightpath_trick(socket): + # Detects this (used to prevent casting light on other objects) See ex. + # https://blender.stackexchange.com/a/21535/88681 + # + # [ Lightpath ] [ Mix ] + # [ Is Camera Ray] => [Factor ] => socket + # (don't care) => [Shader ] + # next_socket => [ Emission ] => [Shader ] + # + # The Emission node can be omitted. + # Returns None if not detected. Otherwise, a dict containing + # next_socket. 
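The diagram above is the only MIX_SHADER arrangement this detector accepts: the factor comes from a Light Path node's "Is Camera Ray" output, and the camera-visible branch may pass through an optional Emission node before reaching the color. A minimal standalone sketch of that shape, using plain stand-in classes instead of bpy node types (FakeNode and the function name are illustrative, not the addon's API):

from dataclasses import dataclass, field

@dataclass
class FakeNode:
    type: str
    inputs: list = field(default_factory=list)  # upstream FakeNode (or None) per input slot

def detect_lightpath_trick_sketch(mix):
    """Return the node feeding the camera-visible branch, skipping an optional Emission."""
    if mix.type != 'MIX_SHADER':
        return None
    factor_src, _shader_1, shader_2 = mix.inputs
    if factor_src is None or factor_src.type != 'LIGHT_PATH':
        return None
    nxt = shader_2
    if nxt is not None and nxt.type == 'EMISSION':
        nxt = nxt.inputs[0]  # the color feeding the Emission node
    return nxt

color = FakeNode('TEX_IMAGE')
emission = FakeNode('EMISSION', inputs=[color])
mix = FakeNode('MIX_SHADER', inputs=[FakeNode('LIGHT_PATH'), None, emission])
assert detect_lightpath_trick_sketch(mix) is color

The real function below does the same walk over actual node links with previous_node() and previous_socket(), and additionally checks that the factor is driven specifically by the "Is Camera Ray" output.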
+ prev = previous_node(socket) + if prev.node is None or prev.node.type != 'MIX_SHADER': + return None + in0 = previous_socket(NodeSocket(prev.node.inputs[0], prev.group_path)) + if in0.socket is None or in0.socket.node.type != 'LIGHT_PATH': + return None + if in0.socket.name != 'Is Camera Ray': + return None + next_socket = NodeSocket(prev.node.inputs[2], prev.group_path) + + # Detect emission + prev = previous_node(next_socket) + if prev.node is not None and prev.node.type == 'EMISSION': + next_socket = NodeSocket(prev.node.inputs[0], prev.group_path) + + return {'next_socket': next_socket} + + +def gather_base_color_factor(info, export_settings): + rgb, alpha = None, None + path, path_alpha = None, None + + if 'rgb_socket' in info: + rgb, path = get_factor_from_socket(info['rgb_socket'], kind='RGB') + if 'alpha_socket' in info: + alpha, path_alpha = get_factor_from_socket(info['alpha_socket'], kind='VALUE') + + # Storing path for KHR_animation_pointer + if path is not None: + path_ = {} + path_['length'] = 3 + path_['path'] = "/materials/XXX/pbrMetallicRoughness/baseColorFactor" + path_['additional_path'] = path_alpha + export_settings['current_paths'][path] = path_ + + if rgb is None: + rgb = [1.0, 1.0, 1.0] + if alpha is None: + alpha = 1.0 + + rgba = [*rgb, alpha] + if rgba == [1, 1, 1, 1]: + return None + return rgba + + +def gather_base_color_texture(info, export_settings): + sockets = (info.get('rgb_socket', NodeSocket(None, None)), info.get('alpha_socket', NodeSocket(None, None))) + sockets = tuple(s for s in sockets if s.socket is not None) + if sockets: + # NOTE: separate RGB and Alpha textures will not get combined + # because gather_image determines how to pack images based on the + # names of sockets, and the names are hard-coded to a Principled + # style graph. 
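Right below, the texture-transform paths recorded for KHR_animation_pointer still carry the generic "YYY" placeholder; once the owning texture slot is known it is replaced with the concrete slot path. A tiny standalone check of that substitution, with the path strings taken from this file (nothing else assumed):

template = "/materials/XXX/YYY/KHR_texture_transform/offset"
specialised = template.replace("YYY", "pbrMetallicRoughness/baseColorTexture/extensions")
assert specialised == ("/materials/XXX/pbrMetallicRoughness/baseColorTexture/"
                       "extensions/KHR_texture_transform/offset")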
+ unlit_texture, uvmap_info, udim_info, _ = gltf2_blender_gather_texture_info.gather_texture_info( + sockets[0], + sockets, + export_settings, + ) + + if len(export_settings['current_texture_transform']) != 0: + for k in export_settings['current_texture_transform'].keys(): + path_ = {} + path_['length'] = export_settings['current_texture_transform'][k]['length'] + path_['path'] = export_settings['current_texture_transform'][k]['path'].replace( + "YYY", "pbrMetallicRoughness/baseColorTexture/extensions") + path_['vector_type'] = export_settings['current_texture_transform'][k]['vector_type'] + export_settings['current_paths'][k] = path_ + + export_settings['current_texture_transform'] = {} + + return unlit_texture, { + 'baseColorTexture': uvmap_info}, { + 'baseColorTexture': udim_info} if len( + udim_info.keys()) > 0 else {} + return None, {}, {} diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/material/gltf2_blender_gather_texture.py b/scripts/addons_core/io_scene_gltf2/blender/exp/material/gltf2_blender_gather_texture.py new file mode 100644 index 00000000000..d8f278335f4 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/material/gltf2_blender_gather_texture.py @@ -0,0 +1,244 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import typing +import bpy + +from ....io.exp.gltf2_io_user_extensions import export_user_extensions +from ....io.com.gltf2_io_extensions import Extension +from ....io.exp.gltf2_io_image_data import ImageData +from ....io.exp.gltf2_io_binary_data import BinaryData +from ....io.com import gltf2_io_debug +from ....io.com import gltf2_io +from ..gltf2_blender_gather_sampler import gather_sampler +from ..gltf2_blender_gather_cache import cached +from .gltf2_blender_search_node_tree import get_texture_node_from_socket, NodeSocket +from . import gltf2_blender_gather_image + + +@cached +def gather_texture( + blender_shader_sockets: typing.Tuple[bpy.types.NodeSocket], + use_tile: bool, + export_settings): + """ + Gather texture sampling information and image channels from a blender shader texture attached to a shader socket. 
+ + :param blender_shader_sockets: The sockets of the material which should contribute to the texture + :param export_settings: configuration of the export + :return: a glTF 2.0 texture with sampler and source embedded (will be converted to references by the exporter) + """ + + if not __filter_texture(blender_shader_sockets, export_settings): + return None, None, None + + source, webp_image, image_data, factor, udim_image = __gather_source( + blender_shader_sockets, use_tile, export_settings) + + exts, remove_source = __gather_extensions(blender_shader_sockets, source, webp_image, image_data, export_settings) + + texture = gltf2_io.Texture( + extensions=exts, + extras=__gather_extras(blender_shader_sockets, export_settings), + name=__gather_name(blender_shader_sockets, export_settings), + sampler=__gather_sampler(blender_shader_sockets, export_settings), + source=source if remove_source is False else None + ) + + # although valid, most viewers can't handle missing source properties + # This can have None source for "keep original", when original can't be found + if texture.source is None and remove_source is False: + return None, None, udim_image + + export_user_extensions('gather_texture_hook', export_settings, texture, blender_shader_sockets) + + return texture, factor, udim_image + + +def __filter_texture(blender_shader_sockets, export_settings): + # User doesn't want to export textures + if export_settings['gltf_image_format'] == "NONE": + return None + return True + + +def __gather_extensions(blender_shader_sockets, source, webp_image, image_data, export_settings): + + extensions = {} + + remove_source = False + required = False + + ext_webp = {} + + # If user want to keep original textures, and these textures are WebP, we need to remove source from + # gltf2_io.Texture, and populate extension + if export_settings['gltf_keep_original_textures'] is True \ + and source is not None \ + and source.mime_type == "image/webp": + ext_webp["source"] = source + remove_source = True + required = True + +# If user want to export in WebP format (so without fallback in png/jpg) + if export_settings['gltf_image_format'] == "WEBP": + # We create all image without fallback + ext_webp["source"] = source + remove_source = True + required = True + +# If user doesn't want to export in WebP format, but want WebP too. Texture is not WebP + if export_settings['gltf_image_format'] != "WEBP" \ + and export_settings['gltf_add_webp'] \ + and source is not None \ + and source.mime_type != "image/webp": + # We need here to create some WebP textures + + new_mime_type = "image/webp" + new_data, _ = image_data.encode(new_mime_type, export_settings) + + if export_settings['gltf_format'] == 'GLTF_SEPARATE': + + uri = ImageData( + data=new_data, + mime_type=new_mime_type, + name=source.uri.name + ) + buffer_view = None + name = source.uri.name + + else: + buffer_view = BinaryData(data=new_data) + uri = None + name = source.name + + webp_image = __make_webp_image(buffer_view, None, None, new_mime_type, name, uri, export_settings) + + ext_webp["source"] = webp_image + + +# If user doesn't want to export in WebP format, but want WebP too. Texture is WebP + if export_settings['gltf_image_format'] != "WEBP" \ + and source is not None \ + and source.mime_type == "image/webp": + + # User does not want fallback + if export_settings['gltf_webp_fallback'] is False: + ext_webp["source"] = source + remove_source = True + required = True + +# If user doesn't want to export in webp format, but want WebP too as fallback. 
Texture is WebP + if export_settings['gltf_image_format'] != "WEBP" \ + and webp_image is not None \ + and export_settings['gltf_webp_fallback'] is True: + # Already managed in __gather_source, we only have to assign + ext_webp["source"] = webp_image + + # Not needed in code, for for documentation: + # remove_source = False + # required = False + + if len(ext_webp) > 0: + extensions["EXT_texture_webp"] = Extension('EXT_texture_webp', ext_webp, required) + return extensions, remove_source + else: + return None, False + + +@cached +def __make_webp_image(buffer_view, extensions, extras, mime_type, name, uri, export_settings): + return gltf2_io.Image( + buffer_view=buffer_view, + extensions=extensions, + extras=extras, + mime_type=mime_type, + name=name, + uri=uri + ) + + +def __gather_extras(blender_shader_sockets, export_settings): + return None + + +def __gather_name(blender_shader_sockets, export_settings): + return None + + +def __gather_sampler(blender_shader_sockets, export_settings): + shader_nodes = [get_texture_node_from_socket(socket, export_settings) for socket in blender_shader_sockets] + if len(shader_nodes) > 1: + export_settings['log'].warning( + "More than one shader node tex image used for a texture. " + "The resulting glTF sampler will behave like the first shader node tex image." + ) + first_valid_shader_node = next(filter(lambda x: x is not None, shader_nodes)) + + # group_path can't be a list, so transform it to str + + sep_item = "##~~gltf-sep~~##" + sep_inside_item = "##~~gltf-inside-sep~~##" + group_path_str = "" + if len(first_valid_shader_node.group_path) > 0: + # Retrieving the blender material using this shader tree + for mat in bpy.data.materials: + if mat.use_nodes is True and id(mat.node_tree) == id(first_valid_shader_node.group_path[0]): + group_path_str += mat.name # TODO if linked, we can have multiple materials with same name... 
+ break + if len(first_valid_shader_node.group_path) > 1: + for idx, i in enumerate(first_valid_shader_node.group_path[1:]): + group_path_str += sep_item + if idx == 0: + group_path_str += first_valid_shader_node.group_path[0].name + else: + group_path_str += i.id_data.name + group_path_str += sep_inside_item + group_path_str += i.name + + return gather_sampler( + first_valid_shader_node.shader_node, + group_path_str, + export_settings) + + +def __gather_source(blender_shader_sockets, use_tile, export_settings): + source, image_data, factor, udim_image = gltf2_blender_gather_image.gather_image( + blender_shader_sockets, use_tile, export_settings) + + if export_settings['gltf_keep_original_textures'] is False \ + and export_settings['gltf_image_format'] != "WEBP" \ + and source is not None \ + and source.mime_type == "image/webp": + + if export_settings['gltf_webp_fallback'] is False: + # Already managed in __gather_extensions + return source, None, image_data, factor, udim_image + else: + # Need to create a PNG texture + + new_mime_type = "image/png" + new_data, _ = image_data.encode(new_mime_type, export_settings) + + if export_settings['gltf_format'] == 'GLTF_SEPARATE': + buffer_view = None + uri = ImageData( + data=new_data, + mime_type=new_mime_type, + name=source.uri.name + ) + name = source.uri.name + + else: + uri = None + buffer_view = BinaryData(data=new_data) + name = source.name + + png_image = __make_webp_image(buffer_view, None, None, new_mime_type, name, uri, export_settings) + + # We inverted the png & WebP image, to have the png as main source + return png_image, source, image_data, factor, udim_image + return source, None, image_data, factor, udim_image + +# Helpers diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/material/gltf2_blender_gather_texture_info.py b/scripts/addons_core/io_scene_gltf2/blender/exp/material/gltf2_blender_gather_texture_info.py new file mode 100644 index 00000000000..c8b32ca9487 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/material/gltf2_blender_gather_texture_info.py @@ -0,0 +1,287 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy +import typing +from ....io.com import gltf2_io +from ....io.com.gltf2_io_extensions import Extension +from ....io.exp.gltf2_io_user_extensions import export_user_extensions +from ..gltf2_blender_gather_sampler import detect_manual_uv_wrapping +from ..gltf2_blender_gather_cache import cached +from . import gltf2_blender_gather_texture +from .gltf2_blender_search_node_tree import \ + get_texture_node_from_socket, \ + from_socket, \ + FilterByType, \ + previous_node, \ + get_const_from_socket, \ + NodeSocket, \ + get_texture_transform_from_mapping_node + +# blender_shader_sockets determine the texture and primary_socket determines +# the textransform and UVMap. Ex: when combining an ORM texture, for +# occlusion the primary_socket would be the occlusion socket, and +# blender_shader_sockets would be the (O,R,M) sockets. 
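The comment above separates two roles: the primary socket fixes the UV-related data (UV map and texture transform), while the full socket tuple decides which channels end up in the exported image. A minimal standalone sketch of that split with plain stand-ins (StubSocket and gather_texture_info_sketch are illustrative names, not the addon's API):

from dataclasses import dataclass

@dataclass(frozen=True)
class StubSocket:
    name: str
    uv_map: str

def gather_texture_info_sketch(primary, sockets):
    uvmap_info = {"type": "Fixed", "value": primary.uv_map}  # UV data from the primary socket only
    channels = tuple(s.name for s in sockets)                # image content from all sockets
    return uvmap_info, channels

occlusion = StubSocket("Occlusion", uv_map="UVMap")
roughness = StubSocket("Roughness", uv_map="UVMap")
metallic = StubSocket("Metallic", uv_map="UVMap")

uvmap_info, channels = gather_texture_info_sketch(occlusion, (occlusion, roughness, metallic))
assert uvmap_info["value"] == "UVMap"
assert channels == ("Occlusion", "Roughness", "Metallic")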
+ + +def gather_texture_info(primary_socket, blender_shader_sockets, export_settings, filter_type='ALL'): + export_settings['current_texture_transform'] = {} # For KHR_animation_pointer + return __gather_texture_info_helper(primary_socket, blender_shader_sockets, 'DEFAULT', filter_type, export_settings) + + +def gather_material_normal_texture_info_class( + primary_socket, + blender_shader_sockets, + export_settings, + filter_type='ALL'): + export_settings['current_texture_transform'] = {} # For KHR_animation_pointer + export_settings['current_normal_scale'] = {} # For KHR_animation_pointer + return __gather_texture_info_helper(primary_socket, blender_shader_sockets, 'NORMAL', filter_type, export_settings) + + +def gather_material_occlusion_texture_info_class( + primary_socket, + blender_shader_sockets, + export_settings, + filter_type='ALL'): + export_settings['current_texture_transform'] = {} # For KHR_animation_pointer + export_settings['current_occlusion_strength'] = {} # For KHR_animation_pointer + return __gather_texture_info_helper( + primary_socket, + blender_shader_sockets, + 'OCCLUSION', + filter_type, + export_settings) + + +@cached +def __gather_texture_info_helper( + primary_socket: bpy.types.NodeSocket, + blender_shader_sockets: typing.Tuple[bpy.types.NodeSocket], + kind: str, + filter_type: str, + export_settings): + if not __filter_texture_info(primary_socket, blender_shader_sockets, filter_type, export_settings): + return None, {}, {}, None + + tex_transform, uvmap_info = __gather_texture_transform_and_tex_coord(primary_socket, export_settings) + + index, factor, udim_image = __gather_index(blender_shader_sockets, None, export_settings) + if udim_image is not None: + udim_info = {'udim': udim_image is not None, 'image': udim_image, 'sockets': blender_shader_sockets} + else: + udim_info = {} + + fields = { + 'extensions': __gather_extensions(tex_transform, export_settings), + 'extras': __gather_extras(blender_shader_sockets, export_settings), + 'index': index, + 'tex_coord': None # This will be set later, as some data are dependant of mesh or object + } + + if kind == 'DEFAULT': + texture_info = gltf2_io.TextureInfo(**fields) + + elif kind == 'NORMAL': + fields['scale'] = __gather_normal_scale(primary_socket, export_settings) + texture_info = gltf2_io.MaterialNormalTextureInfoClass(**fields) + + elif kind == 'OCCLUSION': + fields['strength'] = __gather_occlusion_strength(primary_socket, export_settings) + texture_info = gltf2_io.MaterialOcclusionTextureInfoClass(**fields) + + if texture_info.index is None: + return None, {} if udim_image is None else uvmap_info, udim_info, None + + export_user_extensions('gather_texture_info_hook', export_settings, texture_info, blender_shader_sockets) + + return texture_info, uvmap_info, udim_info, factor + + +def gather_udim_texture_info( + primary_socket: bpy.types.NodeSocket, + blender_shader_sockets: typing.Tuple[bpy.types.NodeSocket], + udim_info, + tex, + export_settings): + + tex_transform, _ = __gather_texture_transform_and_tex_coord(primary_socket, export_settings) + export_settings['current_udim_info'] = udim_info + index, _, _ = __gather_index(blender_shader_sockets, + udim_info['image'].name + str(udim_info['tile']), export_settings) + export_settings['current_udim_info'] = {} + + fields = { + 'extensions': __gather_extensions(tex_transform, export_settings), + 'extras': __gather_extras(blender_shader_sockets, export_settings), + 'index': index, + 'tex_coord': None # This will be set later, as some data are dependant of mesh or 
object + } + + if tex in ["normalTexture", "clearcoatNormalTexture"]: + fields['scale'] = __gather_normal_scale(primary_socket, export_settings) + texture_info = gltf2_io.MaterialNormalTextureInfoClass(**fields) + elif tex in "occlusionTexture": + fields['strength'] = __gather_occlusion_strength(primary_socket, export_settings) + texture_info = gltf2_io.MaterialOcclusionTextureInfoClass(**fields) + else: + texture_info = gltf2_io.TextureInfo(**fields) + + export_user_extensions('gather_udim_texture_info_hook', export_settings, texture_info, blender_shader_sockets) + + return texture_info + + +def __filter_texture_info(primary_socket, blender_shader_sockets, filter_type, export_settings): + if primary_socket is None: + return False + if get_texture_node_from_socket(primary_socket, export_settings) is None: + return False + if not blender_shader_sockets: + return False + if not all([elem is not None for elem in blender_shader_sockets]): + return False + if filter_type == "ALL": + # Check that all sockets link to texture + if any([get_texture_node_from_socket(socket, export_settings) is None for socket in blender_shader_sockets]): + # sockets do not lead to a texture --> discard + return False + elif filter_type == "ANY": + # Check that at least one socket link to texture + if all([get_texture_node_from_socket(socket, export_settings) is None for socket in blender_shader_sockets]): + return False + elif filter_type == "NONE": + # No check + pass + + return True + + +def __gather_extensions(texture_transform, export_settings): + if texture_transform is None: + return None + extension = Extension("KHR_texture_transform", texture_transform) + return {"KHR_texture_transform": extension} + + +def __gather_extras(blender_shader_sockets, export_settings): + return None + + +# MaterialNormalTextureInfo only +def __gather_normal_scale(primary_socket, export_settings): + result = from_socket( + primary_socket, + FilterByType(bpy.types.ShaderNodeNormalMap)) + if not result: + return None + strengthInput = result[0].shader_node.inputs['Strength'] + normal_scale = None + if not strengthInput.is_linked and strengthInput.default_value != 1: + normal_scale = strengthInput.default_value + + # Storing path for KHR_animation_pointer + path_ = {} + path_['length'] = 1 + path_['path'] = "/materials/XXX/YYY/scale" + export_settings['current_normal_scale']["node_tree." + strengthInput.path_from_id() + ".default_value"] = path_ + + return normal_scale + + +# MaterialOcclusionTextureInfo only +def __gather_occlusion_strength(primary_socket, export_settings): + # Look for a MixRGB node that mixes with pure white in front of + # primary_socket. The mix factor gives the occlusion strength. 
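The comment above rests on a small identity: a Mix node that blends the AO image toward pure white by factor fac yields 1 + fac * (ao - 1), which is exactly the glTF occlusion-strength formula, so fac can be exported directly as the strength (and 1 - fac when the white input and the texture are swapped, as handled below). A quick standalone numeric check, assuming Blender's Mix computes A * (1 - fac) + B * fac:

def blender_mix_with_white(ao, fac):   # Mix node, blend type 'MIX', input A = pure white
    return (1.0 - fac) * 1.0 + fac * ao

def gltf_occlusion(ao, strength):      # glTF 2.0: 1.0 + strength * (occlusion - 1.0)
    return 1.0 + strength * (ao - 1.0)

for ao in (0.0, 0.25, 1.0):
    for fac in (0.0, 0.5, 1.0):
        assert abs(blender_mix_with_white(ao, fac) - gltf_occlusion(ao, fac)) < 1e-9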
+ nav = primary_socket.to_node_nav() + nav.move_back() + + reverse = False + strength = None + + if nav.moved and nav.node.type == 'MIX' and nav.node.blend_type == 'MIX': + fac, path = nav.get_constant('Factor') + if fac is not None: + col1, _ = nav.get_constant('#A_Color') + col2, _ = nav.get_constant('#B_Color') + if col1 == [1.0, 1.0, 1.0] and col2 is None: + strength = fac + if col1 is None and col2 == [1.0, 1.0, 1.0]: + strength = 1.0 - fac # reversed for reversed inputs + reverse = True + + # Storing path for KHR_animation_pointer + path_ = {} + path_['length'] = 1 + path_['path'] = "/materials/XXX/occlusionTexture/strength" + path_['reverse'] = reverse + export_settings['current_occlusion_strength'][path] = path_ + + return strength + + +def __gather_index(blender_shader_sockets, use_tile, export_settings): + # We just put the actual shader into the 'index' member + return gltf2_blender_gather_texture.gather_texture(blender_shader_sockets, use_tile, export_settings) + + +def __gather_texture_transform_and_tex_coord(primary_socket, export_settings): + # We're expecting + # + # [UV Map] => [Mapping] => [UV Wrapping] => [Texture Node] => ... => primary_socket + # + # The [UV Wrapping] is for wrap modes like MIRROR that use nodes, + # [Mapping] is for KHR_texture_transform, and [UV Map] is for texCoord. + result_tex = get_texture_node_from_socket(primary_socket, export_settings) + blender_shader_node = result_tex.shader_node + + blender_shader_node['used'] = True + + # Skip over UV wrapping stuff (it goes in the sampler) + result = detect_manual_uv_wrapping(blender_shader_node, result_tex.group_path) + if result: + node = previous_node(result['next_socket']) + else: + node = previous_node(NodeSocket(blender_shader_node.inputs['Vector'], result_tex.group_path)) + + texture_transform = None + if node.node and node.node.type == 'MAPPING': + texture_transform = get_texture_transform_from_mapping_node(node, export_settings) + node = previous_node(NodeSocket(node.node.inputs['Vector'], node.group_path)) + + uvmap_info = {} + + if node.node and node.node.type == 'UVMAP' and node.node.uv_map: + uvmap_info['type'] = "Fixed" + uvmap_info['value'] = node.node.uv_map + + elif node and node.node and node.node.type == 'ATTRIBUTE' \ + and node.node.attribute_type == "GEOMETRY" \ + and node.node.attribute_name: + uvmap_info['type'] = 'Attribute' + uvmap_info['value'] = node.node.attribute_name + + else: + uvmap_info['type'] = 'Active' + + return texture_transform, uvmap_info + + +def check_same_size_images( + blender_shader_sockets: typing.Tuple[bpy.types.NodeSocket], + export_settings +) -> bool: + """Check that all sockets leads to images of the same size.""" + if not blender_shader_sockets or not all(blender_shader_sockets): + return False + + sizes = set() + for socket in blender_shader_sockets: + tex = get_texture_node_from_socket(socket, export_settings) + if tex is None: + return False + size = tex.shader_node.image.size + sizes.add((size[0], size[1])) + + return len(sizes) == 1 diff --git a/scripts/addons_core/io_scene_gltf2/blender/exp/material/gltf2_blender_search_node_tree.py b/scripts/addons_core/io_scene_gltf2/blender/exp/material/gltf2_blender_search_node_tree.py new file mode 100644 index 00000000000..2a733ad0ef5 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/exp/material/gltf2_blender_search_node_tree.py @@ -0,0 +1,845 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +# +# Imports +# + +import bpy +from 
mathutils import Vector, Matrix +from io_scene_gltf2.blender.exp.gltf2_blender_gather_cache import cached +from ...com.gltf2_blender_material_helpers import get_gltf_node_name, get_gltf_node_old_name, get_gltf_old_group_node_name +from ....blender.com.gltf2_blender_conversion import texture_transform_blender_to_gltf, inverted_trs_mapping_node +import typing + + +class Filter: + """Base class for all node tree filter operations.""" + + def __init__(self): + pass + + def __call__(self, shader_node): + return True + + +class FilterByName(Filter): + """ + Filter the material node tree by name. + + example usage: + find_from_socket(start_socket, ShaderNodeFilterByName("Normal")) + """ + + def __init__(self, name): + self.name = name + super(FilterByName, self).__init__() + + def __call__(self, shader_node): + return shader_node.name == self.name + + +class FilterByType(Filter): + """Filter the material node tree by type.""" + + def __init__(self, type): + self.type = type + super(FilterByType, self).__init__() + + def __call__(self, shader_node): + return isinstance(shader_node, self.type) + + +class NodeTreeSearchResult: + def __init__(self, + shader_node: bpy.types.Node, + path: typing.List[bpy.types.NodeLink], + group_path: typing.List[bpy.types.Node]): + self.shader_node = shader_node + self.path = path + self.group_path = group_path + + +# TODO: cache these searches +def from_socket(start_socket: NodeTreeSearchResult, + shader_node_filter: typing.Union[Filter, typing.Callable]) -> typing.List[NodeTreeSearchResult]: + """ + Find shader nodes where the filter expression is true. + + :param start_socket: the beginning of the traversal + :param shader_node_filter: should be a function(x: shader_node) -> bool + :return: a list of shader nodes for which filter is true + """ + # hide implementation (especially the search path) + def __search_from_socket(start_socket: bpy.types.NodeSocket, + shader_node_filter: typing.Union[Filter, typing.Callable], + search_path: typing.List[bpy.types.NodeLink], + group_path: typing.List[bpy.types.Node]) -> typing.List[NodeTreeSearchResult]: + results = [] + for link in start_socket.links: + # follow the link to a shader node + linked_node = link.from_node + + if linked_node.type == "GROUP": + group_output_node = [node for node in linked_node.node_tree.nodes if node.type == "GROUP_OUTPUT"][0] + socket = [sock for sock in group_output_node.inputs if sock.name == link.from_socket.name][0] + group_path.append(linked_node) + linked_results = __search_from_socket( + socket, shader_node_filter, search_path + [link], group_path.copy()) + if linked_results: + # add the link to the current path + search_path.append(link) + results += linked_results + continue + + if linked_node.type == "GROUP_INPUT": + socket = [sock for sock in group_path[-1].inputs if sock.name == link.from_socket.name][0] + linked_results = __search_from_socket(socket, shader_node_filter, search_path + [link], group_path[:-1]) + if linked_results: + # add the link to the current path + search_path.append(link) + results += linked_results + continue + + # check if the node matches the filter + if shader_node_filter(linked_node): + results.append(NodeTreeSearchResult(linked_node, search_path + [link], group_path)) + # traverse into inputs of the node + for input_socket in linked_node.inputs: + linked_results = __search_from_socket( + input_socket, shader_node_filter, search_path + [link], group_path.copy()) + if linked_results: + # add the link to the current path + search_path.append(link) + results += 
linked_results + + return results + + if start_socket.socket is None: + return [] + + # Search if direct node of the socket matches the filter + if shader_node_filter(start_socket.socket.node): + return [NodeTreeSearchResult(start_socket.socket.node, [], start_socket.group_path.copy())] + + return __search_from_socket(start_socket.socket, shader_node_filter, [], start_socket.group_path) + + +@cached +def get_texture_node_from_socket(socket, export_settings): + result = from_socket( + socket, + FilterByType(bpy.types.ShaderNodeTexImage)) + if not result: + return None + if result[0].shader_node.image is None: + return None + return result[0] + + +def has_image_node_from_socket(socket, export_settings): + result = get_texture_node_from_socket(socket, export_settings) + return result is not None + +# return the default value of a socket, even if this socket is linked + + +def get_const_from_default_value_socket(socket, kind): + if kind == 'RGB': + if socket.socket.type != 'RGBA': + return None + return list(socket.socket.default_value)[:3], "node_tree." + socket.socket.path_from_id() + ".default_value" + if kind == 'VALUE': + if socket.socket.type != 'VALUE': + return None + return socket.socket.default_value, "node_tree." + socket.socket.path_from_id() + ".default_value" + return None, None + +# TODOSNode : @cached? If yes, need to use id of node tree, has this is probably not fully hashable +# For now, not caching it. If we encounter performance issue, we will see later + + +def get_material_nodes(node_tree: bpy.types.NodeTree, group_path, type): + """ + For a given tree, recursively return all nodes including node groups. + """ + + nodes = [] + for node in [n for n in node_tree.nodes if isinstance(n, type) and not n.mute]: + nodes.append((node, group_path.copy())) + + # Some weird node groups with missing datablock can have no node_tree, so checking n.node_tree (See #1797) + for node in [n for n in node_tree.nodes if n.type == "GROUP" and n.node_tree is not None and not n.mute and n.node_tree.name != + get_gltf_old_group_node_name()]: # Do not enter the olf glTF node group + new_group_path = group_path.copy() + new_group_path.append(node) + nodes.extend(get_material_nodes(node.node_tree, new_group_path, type)) + + return nodes + + +def get_socket_from_gltf_material_node(blender_material_nodetree, use_nodes: bool, name: str): + """ + For a given material input name, retrieve the corresponding node tree socket in the special glTF node group. 
+ + :param blender_material: a blender material for which to get the socket + :param name: the name of the socket + :return: a blender NodeSocket + """ + gltf_node_group_names = [get_gltf_node_name().lower(), get_gltf_node_old_name().lower()] + if blender_material_nodetree and use_nodes: + nodes = get_material_nodes(blender_material_nodetree, [blender_material_nodetree], bpy.types.ShaderNodeGroup) + # Some weird node groups with missing datablock can have no node_tree, so checking n.node_tree (See #1797) + nodes = [n for n in nodes if n[0].node_tree is not None and any( + [[n[0].node_tree.name.lower().startswith(g) for g in gltf_node_group_names]])] + inputs = sum([[(input, node[1]) for input in node[0].inputs if input.name == name] for node in nodes], []) + if inputs: + return NodeSocket(inputs[0][0], inputs[0][1]) + + return NodeSocket(None, None) + + +class NodeNav: + """Helper for navigating through node trees.""" + + def __init__(self, node, in_socket=None, out_socket=None): + self.node = node # Current node + self.out_socket = out_socket # Socket through which we arrived at this node + self.in_socket = in_socket # Socket through which we will leave this node + self.stack = [] # Stack of (group node, socket) pairs descended through to get here + self.moved = False # Whether the last move_back call moved back or not + + def copy(self): + new = NodeNav(self.node) + new.assign(self) + return new + + def assign(self, other): + self.node = other.node + self.in_socket = other.in_socket + self.out_socket = other.out_socket + self.stack = other.stack.copy() + self.moved = other.moved + + def select_input_socket(self, in_soc): + """Selects an input socket. + + Most operations that operate on the input socket can be passed an in_soc + parameter to select an input socket before running. + """ + if in_soc is None: + # Keep current selected input socket + return + elif isinstance(in_soc, bpy.types.NodeSocket): + assert in_soc.node == self.node + self.in_socket = in_soc + elif isinstance(in_soc, int): + self.in_socket = self.node.inputs[in_soc] + else: + assert isinstance(in_soc, str) + # An identifier like "#A_Color" selects a socket by + # identifier. This is useful for sockets that cannot be + # selected because of non-unique names. 
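A concrete case for the '#identifier' convention described above is Blender's Mix node, where several inputs share a display name and differ only by socket identifier (for example 'A_Float' and 'A_Color'); this is also why get_factor further down asks for '#A_Color' and '#B_Color'. A standalone sketch of the selection rule with stand-in sockets (StubInput is illustrative, not a bpy type):

from dataclasses import dataclass

@dataclass(frozen=True)
class StubInput:
    name: str
    identifier: str

def select_input_sketch(inputs, key):
    if key.startswith('#'):
        ident = key.removeprefix('#')
        return next(s for s in inputs if s.identifier == ident)
    return next(s for s in inputs if s.name == key)  # by name: first match wins

mix_inputs = [StubInput("A", "A_Float"), StubInput("B", "B_Float"),
              StubInput("A", "A_Color"), StubInput("B", "B_Color")]
assert select_input_sketch(mix_inputs, "#A_Color").identifier == "A_Color"
assert select_input_sketch(mix_inputs, "A").identifier == "A_Float"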
+ if in_soc.startswith('#'): + ident = in_soc.removeprefix('#') + for socket in self.node.inputs: + if socket.identifier == ident: + self.in_socket = socket + return + # Select by regular name + self.in_socket = self.node.inputs[in_soc] + + def get_out_socket_index(self): + assert self.out_socket + for i, soc in enumerate(self.node.outputs): + if soc == self.out_socket: + return i + assert False + + def descend(self): + """Descend into a group node.""" + if self.node and self.node.type == 'GROUP' and self.node.node_tree and self.out_socket: + i = self.get_out_socket_index() + self.stack.append((self.node, self.out_socket)) + self.node = next(node for node in self.node.node_tree.nodes if node.type == 'GROUP_OUTPUT') + self.in_socket = self.node.inputs[i] + self.out_socket = None + + def ascend(self): + """Ascend from a group input node back to the group node.""" + if self.stack and self.node and self.node.type == 'GROUP_INPUT' and self.out_socket: + i = self.get_out_socket_index() + self.node, self.out_socket = self.stack.pop() + self.in_socket = self.node.inputs[i] + + def move_back(self, in_soc=None): + """Move backwards through an input socket to the next node.""" + self.moved = False + + self.select_input_socket(in_soc) + + if not self.in_socket or not self.in_socket.is_linked: + return + + # Warning, slow! socket.links is O(total number of links)! + link = self.in_socket.links[0] + + self.node = link.from_node + self.out_socket = link.from_socket + self.in_socket = None + self.moved = True + + # Continue moving + if self.node.type == 'REROUTE': + self.move_back(0) + elif self.node.type == 'GROUP': + self.descend() + self.move_back() + elif self.node.type == 'GROUP_INPUT': + self.ascend() + self.move_back() + + def peek_back(self, in_soc=None): + """Peeks backwards through an input socket without modifying self.""" + s = self.copy() + s.select_input_socket(in_soc) + s.move_back() + return s + + def get_constant(self, in_soc=None): + """Gets a constant from an input socket. Returns None if non-constant.""" + self.select_input_socket(in_soc) + + if not self.in_socket: + return None, None + + # Get constant from unlinked socket's default value + if not self.in_socket.is_linked: + if self.in_socket.type == 'RGBA': + color = list(self.in_socket.default_value) + color = color[:3] # drop unused alpha component (assumes shader tree) + return color, "node_tree." + self.in_socket.path_from_id() + ".default_value" + + elif self.in_socket.type == 'SHADER': + # Treat unlinked shader sockets as black + return [0.0, 0.0, 0.0], None + + elif self.in_socket.type == 'VECTOR': + return list(self.in_socket.default_value), None + + elif self.in_socket.type == 'VALUE': + return self.in_socket.default_value, "node_tree." + self.in_socket.path_from_id() + ".default_value" + + else: + return None, None + + # Check for a constant in the next node + nav = self.peek_back() + if nav.moved: + if self.in_socket.type == 'RGBA': + if nav.node.type == 'RGB': + color = list(nav.out_socket.default_value) + color = color[:3] # drop unused alpha component (assumes shader tree) + return color, "node_tree." + nav.out_socket.path_from_id() + ".default_value" + + elif self.in_socket.type == 'VALUE': + if nav.node.type == 'VALUE': + return nav.out_socket.default_value, "node_tree." + nav.out_socket.path_from_id() + ".default_value" + + return None, None + + def get_factor(self, in_soc=None): + """Gets a factor, eg. metallicFactor. 
Either a constant or constant multiplier.""" + self.select_input_socket(in_soc) + + if not self.in_socket: + return None, None + + # Constant + fac, path = self.get_constant() + if fac is not None: + return fac, path + + # Multiplied by constant + nav = self.peek_back() + if nav.moved: + x1, x2 = None, None + + if self.in_socket.type == 'RGBA': + is_mul = ( + nav.node.type == 'MIX' and + nav.node.data_type == 'RGBA' and + nav.node.blend_type == 'MULTIPLY' + ) + if is_mul: + # TODO: check factor is 1? + x1, path_1 = nav.get_constant('#A_Color') + x2, path_2 = nav.get_constant('#B_Color') + + elif self.in_socket.type == 'VALUE': + if nav.node.type == 'MATH' and nav.node.operation == 'MULTIPLY': + x1, path_1 = nav.get_constant(0) + x2, path_2 = nav.get_constant(1) + + if x1 is not None and x2 is None: + return x1, path_1 + if x2 is not None and x1 is None: + return x2, path_2 + + return None, None + + +class NodeSocket: + def __init__(self, socket, group_path): + self.socket = socket + self.group_path = group_path + + def to_node_nav(self): + assert self.socket + nav = NodeNav( + self.socket.node, + out_socket=self.socket if self.socket.is_output else None, + in_socket=self.socket if not self.socket.is_output else None, + ) + # No output socket information + nav.stack = [(node, None) for node in self.group_path] + return nav + + +class ShNode: + def __init__(self, node, group_path): + self.node = node + self.group_path = group_path + + +def get_node_socket(blender_material_node_tree, type, name): + """ + For a given material input name, retrieve the corresponding node tree socket for a given node type. + + :param blender_material: a blender material for which to get the socket + :return: a blender NodeSocket for a given type + """ + nodes = get_material_nodes(blender_material_node_tree, [blender_material_node_tree], type) + # TODOSNode : Why checking outputs[0] ? What about alpha for texture node, that is outputs[1] ???? + nodes = [node for node in nodes if check_if_is_linked_to_active_output(node[0].outputs[0], node[1])] + inputs = sum([[(input, node[1]) for input in node[0].inputs if input.name == name] for node in nodes], []) + if inputs: + return NodeSocket(inputs[0][0], inputs[0][1]) + return NodeSocket(None, None) + + +def get_socket(blender_material_nodetree, use_nodes: bool, name: str, volume=False): + """ + For a given material input name, retrieve the corresponding node tree socket. + + :param blender_material: a blender material for which to get the socket + :param name: the name of the socket + :return: a blender NodeSocket + """ + if blender_material_nodetree and use_nodes: + #i = [input for input in blender_material.node_tree.inputs] + #o = [output for output in blender_material.node_tree.outputs] + if name == "Emissive": + # Check for a dedicated Emission node first, it must supersede the newer built-in one + # because the newer one is always present in all Principled BSDF materials. + emissive_socket = get_node_socket(blender_material_nodetree, bpy.types.ShaderNodeEmission, "Color") + if emissive_socket.socket is not None: + return emissive_socket + # If a dedicated Emission node was not found, fall back to the Principled BSDF Emission Color socket. 
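The branching above amounts to a small priority table: which node type and socket name to query for a given logical input, with a dedicated Emission node taking precedence over the Principled BSDF for "Emissive". A standalone sketch of that order, with node and socket names copied from the surrounding code (the helper name is illustrative):

def resolve_socket_query_sketch(name, volume=False):
    if name == "Emissive":
        return [("ShaderNodeEmission", "Color"),
                ("ShaderNodeBsdfPrincipled", "Emission Color")]
    if name == "Background":
        return [("ShaderNodeBackground", "Color")]
    node_type = "ShaderNodeVolumeAbsorption" if volume else "ShaderNodeBsdfPrincipled"
    return [(node_type, name)]

assert resolve_socket_query_sketch("Emissive")[0] == ("ShaderNodeEmission", "Color")
assert resolve_socket_query_sketch("Metallic") == [("ShaderNodeBsdfPrincipled", "Metallic")]
assert resolve_socket_query_sketch("Color", volume=True) == [("ShaderNodeVolumeAbsorption", "Color")]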
+ name = "Emission Color" + type = bpy.types.ShaderNodeBsdfPrincipled + elif name == "Background": + type = bpy.types.ShaderNodeBackground + name = "Color" + else: + if volume is False: + type = bpy.types.ShaderNodeBsdfPrincipled + else: + type = bpy.types.ShaderNodeVolumeAbsorption + + return get_node_socket(blender_material_nodetree, type, name) + + return NodeSocket(None, None) + + +# Old, prefer NodeNav.get_factor in new code +def get_factor_from_socket(socket, kind): + return socket.to_node_nav().get_factor() + + +# Old, prefer NodeNav.get_constant in new code +def get_const_from_socket(socket, kind): + return socket.to_node_nav().get_constant() + + +def previous_socket(socket: NodeSocket): + soc = socket.socket + group_path = socket.group_path.copy() + while True: + if not soc.is_linked: + return NodeSocket(None, None) + + from_socket = soc.links[0].from_socket + + # If we are entering a node group (from outputs) + if from_socket.node.type == "GROUP": + socket_name = from_socket.name + sockets = [n for n in from_socket.node.node_tree.nodes if n.type == "GROUP_OUTPUT"][0].inputs + socket = [s for s in sockets if s.name == socket_name][0] + group_path.append(from_socket.node) + soc = socket + continue + + # If we are exiting a node group (from inputs) + if from_socket.node.type == "GROUP_INPUT": + socket_name = from_socket.name + sockets = group_path[-1].inputs + socket = [s for s in sockets if s.name == socket_name][0] + group_path = group_path[:-1] + soc = socket + continue + + # Skip over reroute nodes + if from_socket.node.type == 'REROUTE': + soc = from_socket.node.inputs[0] + continue + + return NodeSocket(from_socket, group_path) + + +def previous_node(socket: NodeSocket): + prev_socket = previous_socket(socket) + if prev_socket.socket is not None: + return ShNode(prev_socket.socket.node, prev_socket.group_path) + return ShNode(None, None) + + +def get_texture_transform_from_mapping_node(mapping_node, export_settings): + if mapping_node.node.vector_type not in ["TEXTURE", "POINT", "VECTOR"]: + export_settings['log'].warning( + "Skipping exporting texture transform because it had type " + + mapping_node.node.vector_type + "; recommend using POINT instead" + ) + return None + + rotation_0, rotation_1 = mapping_node.node.inputs['Rotation'].default_value[0], mapping_node.node.inputs['Rotation'].default_value[1] + if rotation_0 or rotation_1: + # TODO: can we handle this? + export_settings['log'].warning( + "Skipping exporting texture transform because it had non-zero " + "rotations in the X/Y direction; only a Z rotation can be exported!" 
+ ) + return None + + mapping_transform = {} + if mapping_node.node.vector_type != "VECTOR": + mapping_transform["offset"] = [ + mapping_node.node.inputs['Location'].default_value[0], + mapping_node.node.inputs['Location'].default_value[1]] + mapping_transform["rotation"] = mapping_node.node.inputs['Rotation'].default_value[2] + mapping_transform["scale"] = [ + mapping_node.node.inputs['Scale'].default_value[0], + mapping_node.node.inputs['Scale'].default_value[1]] + + if mapping_node.node.vector_type == "TEXTURE": + mapping_transform = inverted_trs_mapping_node(mapping_transform) + if mapping_transform is None: + export_settings['log'].warning( + "Skipping exporting texture transform with type TEXTURE because " + "we couldn't convert it to TRS; recommend using POINT instead" + ) + return None + + elif mapping_node.node.vector_type == "VECTOR": + # Vectors don't get translated + mapping_transform["offset"] = [0, 0] + + texture_transform = texture_transform_blender_to_gltf(mapping_transform) + + if all([component == 0 for component in texture_transform["offset"]]): + del(texture_transform["offset"]) + if all([component == 1 for component in texture_transform["scale"]]): + del(texture_transform["scale"]) + if texture_transform["rotation"] == 0: + del(texture_transform["rotation"]) + + # glTF Offset needs: offset, rotation, scale (note that Offset is not used for Vector mapping) + # glTF Rotation needs: rotation + # glTF Scale needs: scale + + if mapping_node.node.vector_type != "VECTOR": + path_ = {} + path_['length'] = 2 + path_['path'] = "/materials/XXX/YYY/KHR_texture_transform/offset" + path_['vector_type'] = mapping_node.node.vector_type + export_settings['current_texture_transform']["node_tree." + \ + mapping_node.node.inputs['Location'].path_from_id() + ".default_value"] = path_ + + path_ = {} + path_['length'] = 2 + path_['path'] = "/materials/XXX/YYY/KHR_texture_transform/scale" + path_['vector_type'] = mapping_node.node.vector_type + export_settings['current_texture_transform']["node_tree." + \ + mapping_node.node.inputs['Scale'].path_from_id() + ".default_value"] = path_ + + path_ = {} + path_['length'] = 1 + path_['path'] = "/materials/XXX/YYY/KHR_texture_transform/rotation" + path_['vector_type'] = mapping_node.node.vector_type + export_settings['current_texture_transform']["node_tree." + \ + mapping_node.node.inputs['Rotation'].path_from_id() + ".default_value[2]"] = path_ + + return texture_transform + + +def check_if_is_linked_to_active_output(shader_socket, group_path): + for link in shader_socket.links: + + # If we are entering a node group + if link.to_node.type == "GROUP": + socket_name = link.to_socket.name + sockets = [n for n in link.to_node.node_tree.nodes if n.type == "GROUP_INPUT"][0].outputs + socket = [s for s in sockets if s.name == socket_name][0] + group_path.append(link.to_node) + # TODOSNode : Why checking outputs[0] ? What about alpha for texture node, that is outputs[1] ???? + # recursive until find an output material node + ret = check_if_is_linked_to_active_output(socket, group_path) + if ret is True: + return True + continue + + # If we are exiting a node group + if link.to_node.type == "GROUP_OUTPUT": + socket_name = link.to_socket.name + sockets = group_path[-1].outputs + socket = [s for s in sockets if s.name == socket_name][0] + group_path = group_path[:-1] + # TODOSNode : Why checking outputs[0] ? What about alpha for texture node, that is outputs[1] ???? 
+ # recursive until find an output material node + ret = check_if_is_linked_to_active_output(socket, group_path) + if ret is True: + return True + continue + + if isinstance(link.to_node, bpy.types.ShaderNodeOutputMaterial) and link.to_node.is_active_output is True: + return True + + if len(link.to_node.outputs) > 0: # ignore non active output, not having output sockets + # TODOSNode : Why checking outputs[0] ? What about alpha for texture node, that is outputs[1] ???? + ret = check_if_is_linked_to_active_output( + link.to_node.outputs[0], + group_path) # recursive until find an output material node + if ret is True: + return True + + return False + + +def get_vertex_color_info(color_socket, alpha_socket, export_settings): + + attribute_color = None + attribute_alpha = None + attribute_color_type = None + attribute_alpha_type = None + + # Retrieve Attribute used as vertex color for Color + if color_socket is not None and color_socket.socket is not None: + node = previous_node(color_socket) + if node.node is not None: + if node.node.type == 'MIX' and node.node.data_type == "RGBA" and node.node.blend_type == 'MULTIPLY': + use_vc, attribute_color, use_active = get_attribute_name( + NodeSocket(node.node.inputs[6], node.group_path), export_settings) + if use_vc is False: + use_vc, attribute_color, use_active = get_attribute_name( + NodeSocket(node.node.inputs[7], node.group_path), export_settings) + if use_vc is True and use_active is True: + attribute_color_type = "active" + elif use_vc is True and use_active is None and attribute_color is not None: + attribute_color_type = "name" + elif node.node.type in ["ATTRIBUTE", "VERTEX_COLOR"]: + use_vc, attribute_color, use_active = get_attribute_name( + NodeSocket(node.node.outputs[0], node.group_path), export_settings) + if use_vc is True and use_active is True: + attribute_color_type = "active" + elif use_vc is True and use_active is None and attribute_color is not None: + attribute_color_type = "name" + + if alpha_socket is not None and alpha_socket.socket is not None: + node = previous_node(alpha_socket) + if node.node is not None: + if node.node.type == 'MATH' and node.node.operation == 'MULTIPLY': + use_vc, attribute_alpha, use_active = get_attribute_name( + NodeSocket(node.node.inputs[0], node.group_path), export_settings) + if use_vc is False: + use_vc, attribute_alpha, use_active = get_attribute_name( + NodeSocket(node.node.inputs[1], node.group_path), export_settings) + if use_vc is True and use_active is True: + attribute_alpha_type = "active" + elif use_vc is True and use_active is None and attribute_alpha is not None: + attribute_alpha_type = "name" + elif node.node.type in ["ATTRIBUTE", "VERTEX_COLOR"]: + use_vc, attribute_color, use_active = get_attribute_name( + NodeSocket(node.node.outputs[0], node.group_path), export_settings) + if use_vc is True and use_active is True: + attribute_color_type = "active" + elif use_vc is True and use_active is None and attribute_color is not None: + attribute_color_type = "name" + + return { + "color": attribute_color, + "alpha": attribute_alpha, + "color_type": attribute_color_type, + "alpha_type": attribute_alpha_type} + + +def get_attribute_name(socket, export_settings): + node = previous_node(socket) + if node.node is not None and node.node.type == "ATTRIBUTE" \ + and node.node.attribute_type == "GEOMETRY" \ + and node.node.attribute_name is not None \ + and node.node.attribute_name != "": + return True, node.node.attribute_name, None + elif node.node is not None and node.node.type == "ATTRIBUTE" 
\ + and node.node.attribute_type == "GEOMETRY" \ + and node.node.attribute_name == "": + return True, None, True + + if node.node is not None and node.node.type == "VERTEX_COLOR" \ + and node.node.layer_name is not None \ + and node.node.layer_name != "": + return True, node.node.layer_name, None + elif node.node is not None and node.node.type == "VERTEX_COLOR" \ + and node.node.layer_name == "": + return True, None, True + + return False, None, None + + +def detect_anisotropy_nodes( + anisotropy_socket, + anisotropy_rotation_socket, + anisotropy_tangent_socket, + export_settings): + """ + Detects if the material uses anisotropy and returns the corresponding data. + + :param anisotropy_socket: the anisotropy socket + :param anisotropy_rotation_socket: the anisotropy rotation socket + :param anisotropy_tangent_socket: the anisotropy tangent socket + :param export_settings: the export settings + :return: a tuple (is_anisotropy, anisotropy_data) + """ + + if anisotropy_socket.socket is None: + return False, None + if anisotropy_rotation_socket.socket is None: + return False, None + if anisotropy_tangent_socket.socket is None: + return False, None + + # Check that tangent is linked to a tangent node, with UVMap as input + tangent_node = previous_node(anisotropy_tangent_socket) + if tangent_node.node is None or tangent_node.node.type != "TANGENT": + return False, None + if tangent_node.node.direction_type != "UV_MAP": + return False, None + + # Check that anisotropy is linked to a multiply node + if not anisotropy_socket.socket.is_linked: + return False, None + if not anisotropy_rotation_socket.socket.is_linked: + return False, None + if not anisotropy_tangent_socket.socket.is_linked: + return False, None + anisotropy_multiply_node = anisotropy_socket.socket.links[0].from_node + if anisotropy_multiply_node is None or anisotropy_multiply_node.type != "MATH": + return False, None + if anisotropy_multiply_node.operation != "MULTIPLY": + return False, None + # this multiply node should have the first input linked to separate XYZ, on Z + if not anisotropy_multiply_node.inputs[0].is_linked: + return False, None + separate_xyz_node = anisotropy_multiply_node.inputs[0].links[0].from_node + if separate_xyz_node is None or separate_xyz_node.type != "SEPXYZ": + return False, None + separate_xyz_z_socket = anisotropy_multiply_node.inputs[0].links[0].from_socket + if separate_xyz_z_socket.name != "Z": + return False, None + # This separate XYZ node output should be linked to ArcTan2 node (X on inputs[1], Y on inputs[0]) + if not separate_xyz_node.outputs[0].is_linked: + return False, None + arctan2_node = separate_xyz_node.outputs[0].links[0].to_node + if arctan2_node.type != "MATH": + return False, None + if arctan2_node.operation != "ARCTAN2": + return False, None + if arctan2_node.inputs[0].links[0].from_socket.name != "Y": + return False, None + if arctan2_node.inputs[1].links[0].from_socket.name != "X": + return False, None + # This arctan2 node output should be linked to anisotropy rotation (Math add node) + if not arctan2_node.outputs[0].is_linked: + return False, None + anisotropy_rotation_node = arctan2_node.outputs[0].links[0].to_node + if anisotropy_rotation_node.type != "MATH": + return False, None + if anisotropy_rotation_node.operation != "ADD": + return False, None + # This anisotropy rotation node should have the output linked to rotation conversion node + if not anisotropy_rotation_node.outputs[0].is_linked: + return False, None + rotation_conversion_node = 
anisotropy_rotation_node.outputs[0].links[0].to_node + if rotation_conversion_node.type != "MATH": + return False, None + if rotation_conversion_node.operation != "DIVIDE": + return False, None + # This rotation conversion node should have the second input value PI + if abs(rotation_conversion_node.inputs[1].default_value - 6.283185) > 0.0001: + return False, None + # This rotation conversion node should have the output linked to anisotropy rotation socket of Principled BSDF + if not rotation_conversion_node.outputs[0].is_linked: + return False, None + if rotation_conversion_node.outputs[0].links[0].to_socket.name != "Anisotropic Rotation": + return False, None + if rotation_conversion_node.outputs[0].links[0].to_node.type != "BSDF_PRINCIPLED": + return False, None + + # Separate XYZ node should have the input linked to anisotropy multiply Add node (for normalization) + if not separate_xyz_node.inputs[0].is_linked: + return False, None + anisotropy_multiply_add_node = separate_xyz_node.inputs[0].links[0].from_node + if anisotropy_multiply_add_node.type != "VECT_MATH": + return False, None + if anisotropy_multiply_add_node.operation != "MULTIPLY_ADD": + return False, None + if list(anisotropy_multiply_add_node.inputs[1].default_value) != [2.0, 2.0, 1.0]: + return False, None + if list(anisotropy_multiply_add_node.inputs[2].default_value) != [-1.0, -1.0, 0.0]: + return False, None + if not anisotropy_multiply_add_node.inputs[0].is_linked: + return False, None + # This anisotropy multiply Add node should have the first input linked to a texture node + anisotropy_texture_node = anisotropy_multiply_add_node.inputs[0].links[0].from_node + if anisotropy_texture_node.type != "TEX_IMAGE": + return False, None + + tex_ok = has_image_node_from_socket( + NodeSocket( + anisotropy_multiply_add_node.inputs[0], + anisotropy_socket.group_path), + export_settings) + if tex_ok is False: + return False, None + + strength, path_strength = get_const_from_socket(NodeSocket( + anisotropy_multiply_node.inputs[1], anisotropy_socket.group_path), 'VALUE') + rotation, path_rotation = get_const_from_socket(NodeSocket( + anisotropy_rotation_node.inputs[1], anisotropy_socket.group_path), 'VALUE') + + return True, { + 'anisotropyStrength': (strength, path_strength), + 'anisotropyRotation': (rotation, path_rotation), + 'tangent': tangent_node.node.uv_map, + 'tex_socket': NodeSocket(anisotropy_multiply_add_node.inputs[0], anisotropy_socket.group_path), + } diff --git a/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_KHR_materials_anisotropy.py b/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_KHR_materials_anisotropy.py new file mode 100644 index 00000000000..cfe7de7b4db --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_KHR_materials_anisotropy.py @@ -0,0 +1,116 @@ +# SPDX-FileCopyrightText: 2018-2023 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +from ...io.com.gltf2_io import TextureInfo +from .gltf2_blender_texture import texture +from ..com.gltf2_blender_conversion import get_anisotropy_rotation_gltf_to_blender +from math import pi +from mathutils import Vector + + +def anisotropy( + mh, + location, + anisotropy_socket, + anisotropy_rotation_socket, + anisotropy_tangent_socket +): + + if anisotropy_socket is None or anisotropy_rotation_socket is None or anisotropy_tangent_socket is None: + return + + x, y = location + try: + ext = mh.pymat.extensions['KHR_materials_anisotropy'] + # Needed for KHR_animation_pointer + 
mh.pymat.extensions['KHR_materials_anisotropy']['blender_nodetree'] = mh.node_tree + mh.pymat.extensions['KHR_materials_anisotropy']['blender_mat'] = mh.mat # Needed for KHR_animation_pointer + except Exception: + return + + anisotropy_strength = ext.get('anisotropyStrength', 0) + anisotropy_rotation = ext.get('anisotropyRotation', 0) + tex_info = ext.get('anisotropyTexture') + if tex_info is not None: + tex_info = TextureInfo.from_dict(tex_info) + + # We are going to use UVMap of Normal map if it exists, as input for the anisotropy tangent + + if tex_info is None: + anisotropy_socket.default_value = anisotropy_strength + anisotropy_rotation_socket.default_value = get_anisotropy_rotation_gltf_to_blender(anisotropy_rotation) + return + + # Tangent node + node = mh.node_tree.nodes.new('ShaderNodeTangent') + node.direction_type = "UV_MAP" + node.location = x - 180, y - 200 + uv_idx = tex_info.tex_coord or 0 + + # Get the UVMap of the normal map if available (if not, keeping the first UVMap available, uv_idx = 0) + tex_info_normal = mh.pymat.normal_texture + if tex_info_normal is not None: + try: + uv_idx = tex_info.extensions['KHR_texture_transform']['texCoord'] + except Exception: + pass + + node.uv_map = 'UVMap' if uv_idx == 0 else 'UVMap.%03d' % uv_idx + mh.node_tree.links.new(anisotropy_tangent_socket, node.outputs['Tangent']) + + # Multiply node + multiply_node = mh.node_tree.nodes.new('ShaderNodeMath') + multiply_node.label = 'Anisotropy strength' + multiply_node.operation = 'MULTIPLY' + multiply_node.location = x - 180, y + 200 + mh.node_tree.links.new(anisotropy_socket, multiply_node.outputs[0]) + multiply_node.inputs[1].default_value = anisotropy_strength + + # Divide node + divide_node = mh.node_tree.nodes.new('ShaderNodeMath') + divide_node.label = 'Rotation conversion' + divide_node.operation = 'DIVIDE' + divide_node.location = x - 180, y + mh.node_tree.links.new(anisotropy_rotation_socket, divide_node.outputs[0]) + divide_node.inputs[1].default_value = 2 * pi + + # Rotation node + rotation_node = mh.node_tree.nodes.new('ShaderNodeMath') + rotation_node.label = 'Anisotropy rotation' + rotation_node.operation = 'ADD' + rotation_node.location = x - 180 * 2, y + mh.node_tree.links.new(divide_node.inputs[0], rotation_node.outputs[0]) + rotation_node.inputs[1].default_value = anisotropy_rotation + + # ArcTan node + arctan_node = mh.node_tree.nodes.new('ShaderNodeMath') + arctan_node.label = 'ArcTan2' + arctan_node.operation = 'ARCTAN2' + arctan_node.location = x - 180 * 3, y + mh.node_tree.links.new(rotation_node.inputs[0], arctan_node.outputs[0]) + + # Separate XYZ + sep_node = mh.node_tree.nodes.new('ShaderNodeSeparateXYZ') + sep_node.location = x - 180 * 4, y + mh.node_tree.links.new(arctan_node.inputs[0], sep_node.outputs[1]) + mh.node_tree.links.new(arctan_node.inputs[1], sep_node.outputs[0]) + mh.node_tree.links.new(multiply_node.inputs[0], sep_node.outputs[2]) + + # Multiply add node + multiply_add_node = mh.node_tree.nodes.new('ShaderNodeVectorMath') + multiply_add_node.location = x - 180 * 5, y + multiply_add_node.operation = 'MULTIPLY_ADD' + multiply_add_node.inputs[1].default_value = Vector((2, 2, 1)) + multiply_add_node.inputs[2].default_value = Vector((-1, -1, 0)) + mh.node_tree.links.new(sep_node.inputs[0], multiply_add_node.outputs[0]) + + # Texture + texture( + mh, + tex_info=tex_info, + label='ANISOTROPY', + location=(x - 180 * 6, y), + is_data=True, + color_socket=multiply_add_node.inputs[0] + ) diff --git 
a/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_KHR_materials_pbrSpecularGlossiness.py b/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_KHR_materials_pbrSpecularGlossiness.py new file mode 100755 index 00000000000..6391b93fb0e --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_KHR_materials_pbrSpecularGlossiness.py @@ -0,0 +1,203 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy +from ...io.com.gltf2_io import TextureInfo +from .gltf2_blender_pbrMetallicRoughness import \ + base_color, emission, normal, occlusion, make_settings_node +from .gltf2_blender_material_utils import color_factor_and_texture +from .gltf2_blender_texture import texture, get_source +from .gltf2_blender_image import BlenderImage +import numpy as np + + +def pbr_specular_glossiness(mh): + """Creates node tree for pbrSpecularGlossiness materials.""" + ext = mh.get_ext('KHR_materials_pbrSpecularGlossiness', {}) + + pbr_node = mh.nodes.new('ShaderNodeBsdfPrincipled') + out_node = mh.nodes.new('ShaderNodeOutputMaterial') + pbr_node.location = 10, 300 + out_node.location = 300, 300 + mh.links.new(pbr_node.outputs[0], out_node.inputs[0]) + + locs = calc_locations(mh, ext) + + base_color( + mh, + is_diffuse=True, + location=locs['diffuse'], + color_socket=pbr_node.inputs['Base Color'], + alpha_socket=pbr_node.inputs['Alpha'] if not mh.is_opaque() else None, + ) + + emission( + mh, + location=locs['emission'], + color_socket=pbr_node.inputs['Emission Color'], + strength_socket=pbr_node.inputs['Emission Strength'], + ) + + normal( + mh, + location=locs['normal'], + normal_socket=pbr_node.inputs['Normal'], + ) + + if mh.pymat.occlusion_texture is not None: + if mh.settings_node is None: + mh.settings_node = make_settings_node(mh) + mh.settings_node.location = 10, 425 + mh.settings_node.width = 240 + occlusion( + mh, + location=locs['occlusion'], + occlusion_socket=mh.settings_node.inputs['Occlusion'], + ) + + # The F0 color is the specular tint modulated by + # ((1-IOR)/(1+IOR))^2. Setting IOR=1000 makes this factor + # approximately 1. + pbr_node.inputs['IOR'].default_value = 1000 + + # Specular + color_factor_and_texture( + mh, + location=locs['specular'], + label='Specular Color', + socket=pbr_node.inputs['Specular Tint'], + factor=ext.get('specularFactor', [1, 1, 1]), + tex_info=ext.get('specularGlossinessTexture'), + ) + + # Glossiness + glossiness( + mh, + ext, + location=locs['glossiness'], + roughness_socket=pbr_node.inputs['Roughness'], + ) + + +def glossiness(mh, ext, location, roughness_socket): + # Glossiness = glossinessFactor * specularGlossinessTexture.alpha + # Roughness = 1 - Glossiness + + factor = ext.get('glossinessFactor', 1) + tex_info = ext.get('specularGlossinessTexture') + if tex_info is not None: + tex_info = TextureInfo.from_dict(tex_info) + + # Simple case: no texture + if tex_info is None or factor == 0: + roughness_socket.default_value = 1 - factor + return + + # Bake an image with the roughness. The reason we don't do + # 1-X with a node is that won't export. + roughness_img = make_roughness_image(mh, factor, tex_info) + if roughness_img is None: + return + + texture( + mh, + tex_info, + location=location, + label='ROUGHNESS', + color_socket=None, + alpha_socket=roughness_socket, + is_data=False, + forced_image=roughness_img, + ) + + +def make_roughness_image(mh, glossiness_factor, tex_info): + """ + Bakes the roughness (1-glossiness) into an image. 
The + roughness is in the alpha channel. + """ + pytexture = mh.gltf.data.textures[tex_info.index] + source = get_source(mh, pytexture) + + if source is None: + return None + + pyimg = mh.gltf.data.images[source] + BlenderImage.create(mh.gltf, source) + + # See if cached roughness texture already exists + if hasattr(pyimg, 'blender_roughness_image_name'): + return bpy.data.images[pyimg.blender_roughness_image_name] + + orig_image = bpy.data.images[pyimg.blender_image_name] + # TODO: check for placeholder image and bail + + # Make a copy of the specularGlossiness texture + # Avoids interfering if it's used elsewhere + image = orig_image.copy() + + w, h = image.size + pixels = np.empty(w * h * 4, dtype=np.float32) + image.pixels.foreach_get(pixels) + pixels = pixels.reshape((w, h, 4)) + + # Glossiness = GlossinessFactor * Texture.alpha + # Roughness = 1 - Glossiness + if glossiness_factor != 1: + pixels[:, :, 3] *= glossiness_factor + pixels[:, :, 3] *= -1 + pixels[:, :, 3] += 1 + + pixels = pixels.reshape(w * h * 4) + image.pixels.foreach_set(pixels) + + image.pack() + + # Cache for reuse + pyimg.blender_roughness_image_name = image.name + + return image + + +def calc_locations(mh, ext): + """Calculate locations to place each bit of the node graph at.""" + # Lay the blocks out top-to-bottom, aligned on the right + x = -200 + y = 0 + height = 460 # height of each block + locs = {} + + locs['occlusion'] = (x, y) + if mh.pymat.occlusion_texture is not None: + y -= height + + locs['diffuse'] = (x, y) + if 'diffuseTexture' in ext or mh.vertex_color: + y -= height + + locs['glossiness'] = (x, y) + gloss_factor = ext.get('glossinessFactor', 1) + if 'specularGlossinessTexture' in ext and gloss_factor != 0: + y -= height + + locs['normal'] = (x, y) + if mh.pymat.normal_texture is not None: + y -= height + + locs['specular'] = (x, y) + if 'specularGlossinessTexture' in ext: + y -= height + + locs['emission'] = (x, y) + if mh.pymat.emissive_texture is not None: + y -= height + + # Center things + total_height = -y + y_offset = total_height / 2 - 20 + for key in locs: + x, y = locs[key] + locs[key] = (x, y + y_offset) + + return locs diff --git a/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_KHR_materials_unlit.py b/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_KHR_materials_unlit.py new file mode 100644 index 00000000000..baa6cd0902d --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_KHR_materials_unlit.py @@ -0,0 +1,56 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +from .gltf2_blender_pbrMetallicRoughness import base_color + + +def unlit(mh): + """Creates node tree for unlit materials.""" + # Emission node for the base color + emission_node = mh.nodes.new('ShaderNodeEmission') + emission_node.location = 10, 126 + + # Create a "Lightpath trick": makes Emission visible only to + # camera rays, so it won't "glow" in Cycles. 
+ # + # [Is Camera Ray] => [Mix] => + # [Transparent] => [ ] + # [Emission] => [ ] + lightpath_node = mh.nodes.new('ShaderNodeLightPath') + transparent_node = mh.nodes.new('ShaderNodeBsdfTransparent') + mix_node = mh.nodes.new('ShaderNodeMixShader') + lightpath_node.location = 10, 600 + transparent_node.location = 10, 240 + mix_node.location = 260, 320 + mh.links.new(mix_node.inputs['Fac'], lightpath_node.outputs['Is Camera Ray']) + mh.links.new(mix_node.inputs[1], transparent_node.outputs[0]) + mh.links.new(mix_node.inputs[2], emission_node.outputs[0]) + + # Material output + alpha_socket = None + out_node = mh.nodes.new('ShaderNodeOutputMaterial') + if mh.is_opaque(): + out_node.location = 490, 290 + mh.links.new(out_node.inputs[0], mix_node.outputs[0]) + else: + # Create a "Mix with Transparent" setup so there's a + # place to put Alpha. + # + # Alpha => [Mix] => [Output] + # [Transparent] => [ ] + # Color => [ ] + mix2_node = mh.nodes.new('ShaderNodeMixShader') + alpha_socket = mix2_node.inputs['Fac'] + mix2_node.location = 490, -50 + out_node.location = 700, -70 + mh.links.new(mix2_node.inputs[1], transparent_node.outputs[0]) + mh.links.new(mix2_node.inputs[2], mix_node.outputs[0]) + mh.links.new(out_node.inputs[0], mix2_node.outputs[0]) + + base_color( + mh, + location=(-200, 380), + color_socket=emission_node.inputs['Color'], + alpha_socket=alpha_socket, + ) diff --git a/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_animation.py b/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_animation.py new file mode 100755 index 00000000000..7d2437c6c3a --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_animation.py @@ -0,0 +1,161 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy +from ...io.imp.gltf2_io_user_extensions import import_user_extensions +from .gltf2_blender_animation_node import BlenderNodeAnim +from .gltf2_blender_animation_weight import BlenderWeightAnim +from .gltf2_blender_animation_pointer import BlenderPointerAnim +from .gltf2_blender_animation_utils import simulate_stash, restore_animation_on_object +from .gltf2_blender_vnode import VNode + + +class BlenderAnimation(): + """Dispatch Animation to node or morph weights animation, or via KHR_animation_pointer""" + def __new__(cls, *args, **kwargs): + raise RuntimeError("%s should not be instantiated" % cls) + + @staticmethod + def anim(gltf, anim_idx): + """Create actions/tracks for one animation.""" + # Caches the action for each object (keyed by object name) + gltf.action_cache = {} + # Things we need to stash when we're done. 
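+        # "Stashing" here mirrors Blender's own Stash button: once the whole
+        # animation is imported, every (id, action) pair collected below is
+        # pushed onto its own locked and muted NLA track, roughly:
+        #
+        #     track = obj.animation_data.nla_tracks.new()
+        #     track.name = track_name
+        #     track.strips.new(action.name, start_frame, action)
+        #     track.lock = track.mute = True
+        #
+        # (see simulate_stash() in gltf2_blender_animation_utils.py)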
+ gltf.needs_stash = [] + + import_user_extensions('gather_import_animation_before_hook', gltf, anim_idx) + + for vnode_id in gltf.vnodes: + if isinstance(vnode_id, int): + BlenderNodeAnim.anim(gltf, anim_idx, vnode_id) + BlenderWeightAnim.anim(gltf, anim_idx, vnode_id) + + if gltf.data.extensions_used is not None and "KHR_animation_pointer" in gltf.data.extensions_used: + for cam_idx, cam in enumerate(gltf.data.cameras if gltf.data.cameras else []): + if len(cam.animations) == 0: + continue + BlenderPointerAnim.anim(gltf, anim_idx, cam, cam_idx, 'CAMERA') + + if gltf.data.extensions is not None and "KHR_lights_punctual" in gltf.data.extensions: + for light_idx, light in enumerate(gltf.data.extensions["KHR_lights_punctual"]["lights"]): + if len(light["animations"]) == 0: + continue + BlenderPointerAnim.anim(gltf, anim_idx, light, light_idx, 'LIGHT') + + for mat_idx, mat in enumerate(gltf.data.materials if gltf.data.materials else []): + if len(mat.animations) != 0: + BlenderPointerAnim.anim(gltf, anim_idx, mat, mat_idx, 'MATERIAL') + if mat.normal_texture is not None and len(mat.normal_texture.animations) != 0: + BlenderPointerAnim.anim(gltf, anim_idx, mat.normal_texture, mat_idx, 'MATERIAL_PBR', name=mat.name) + if mat.occlusion_texture is not None and len(mat.occlusion_texture.animations) != 0: + BlenderPointerAnim.anim(gltf, anim_idx, mat.occlusion_texture, + mat_idx, 'MATERIAL_PBR', name=mat.name) + if mat.pbr_metallic_roughness is not None and len(mat.pbr_metallic_roughness.animations) != 0: + # This can be a regulat PBR or unlit material + is_unlit = mat.extensions is not None and "KHR_materials_unlit" in mat.extensions + BlenderPointerAnim.anim(gltf, anim_idx, mat.pbr_metallic_roughness, mat_idx, + 'MATERIAL_PBR', name=mat.name, is_unlit=is_unlit) + + texs = [ + mat.emissive_texture, + mat.normal_texture, + mat.occlusion_texture, + mat.pbr_metallic_roughness.base_color_texture if mat.pbr_metallic_roughness is not None else None, + mat.pbr_metallic_roughness.metallic_roughness_texture if mat.pbr_metallic_roughness is not None else None, + ] + + for tex in [t for t in texs if t is not None]: + if tex.extensions is not None and "KHR_texture_transform" in tex.extensions: + # This can be a regulat PBR or unlit material + is_unlit = mat.extensions is not None and "KHR_materials_unlit" in mat.extensions + BlenderPointerAnim.anim( + gltf, + anim_idx, + tex.extensions["KHR_texture_transform"], + mat_idx, + 'TEX_TRANSFORM', + name=mat.name, + is_unlit=is_unlit) + + if mat.extensions is not None: + texs = [ + mat.extensions["KHR_materials_volume"].get("thicknessTexture") if "KHR_materials_volume" in mat.extensions else None, + mat.extensions["KHR_materials_transmission"].get("transmissionTexture") if "KHR_materials_transmission" in mat.extensions else None, + mat.extensions["KHR_materials_specular"].get("specularTexture") if "KHR_materials_specular" in mat.extensions else None, + mat.extensions["KHR_materials_specular"].get("specularColorTexture") if "KHR_materials_specular" in mat.extensions else None, + mat.extensions["KHR_materials_sheen"].get("sheenColorTexture") if "KHR_materials_sheen" in mat.extensions else None, + mat.extensions["KHR_materials_sheen"].get("sheenRoughnessTexture") if "KHR_materials_sheen" in mat.extensions else None, + mat.extensions["KHR_materials_clearcoat"].get("clearcoatTexture") if "KHR_materials_clearcoat" in mat.extensions else None, + mat.extensions["KHR_materials_clearcoat"].get("clearcoatRoughnessTexture") if "KHR_materials_clearcoat" in mat.extensions else 
None, + mat.extensions["KHR_materials_clearcoat"].get("clearcoatNormalTexture") if "KHR_materials_clearcoat" in mat.extensions else None, + mat.extensions["KHR_materials_anisotropy"].get("anisotropyTexture") if "KHR_materials_anisotropy" in mat.extensions else None, + ] + + for tex in [t for t in texs if t is not None]: + if 'extensions' in tex and "KHR_texture_transform" in tex['extensions']: + BlenderPointerAnim.anim( + gltf, + anim_idx, + tex['extensions']["KHR_texture_transform"], + mat_idx, + 'TEX_TRANSFORM', + name=mat.name) + + for ext in [ + "KHR_materials_emissive_strength", + # "KHR_materials_iridescence", + "KHR_materials_volume", + "KHR_materials_ior", + "KHR_materials_transmission", + "KHR_materials_clearcoat", + "KHR_materials_sheen", + "KHR_materials_specular", + "KHR_materials_anisotropy" + ]: + if mat.extensions is not None and ext in mat.extensions: + BlenderPointerAnim.anim(gltf, anim_idx, mat.extensions[ext], mat_idx, 'EXT', name=mat.name) + + # Push all actions onto NLA tracks with this animation's name + track_name = gltf.data.animations[anim_idx].track_name + for (obj, action) in gltf.needs_stash: + simulate_stash(obj, track_name, action) + + import_user_extensions('gather_import_animation_after_hook', gltf, anim_idx, track_name) + + if hasattr(bpy.data.scenes[0], 'gltf2_animation_tracks') is False: + return + + if track_name not in [track.name for track in bpy.data.scenes[0].gltf2_animation_tracks]: + new_ = bpy.data.scenes[0].gltf2_animation_tracks.add() + new_.name = track_name + # reverse order, as animation are created in reverse order (because of NLA adding tracks are reverted) + bpy.data.scenes[0].gltf2_animation_tracks.move(len(bpy.data.scenes[0].gltf2_animation_tracks) - 1, 0) + + @staticmethod + def restore_animation(gltf, animation_name): + """Restores the actions for an animation by its track name.""" + for vnode_id in gltf.vnodes: + vnode = gltf.vnodes[vnode_id] + if vnode.type == VNode.Bone: + obj = gltf.vnodes[vnode.bone_arma].blender_object + elif vnode.type == VNode.Object: + obj = vnode.blender_object + else: + continue + + restore_animation_on_object(obj, animation_name) + if obj.data and hasattr(obj.data, 'shape_keys'): + restore_animation_on_object(obj.data.shape_keys, animation_name) + + if gltf.data.extensions_used is not None and "KHR_animation_pointer" in gltf.data.extensions_used: + for cam in gltf.data.cameras if gltf.data.cameras else []: + restore_animation_on_object(cam.blender_object_data, animation_name) + + if gltf.data.extensions and "KHR_lights_punctual" in gltf.data.extensions: + for light in gltf.data.extensions['KHR_lights_punctual']['lights']: + restore_animation_on_object(light['blender_object_data'], animation_name) + + for mat in gltf.data.materials if gltf.data.materials else []: + restore_animation_on_object(mat.blender_nodetree, animation_name) + restore_animation_on_object(mat.blender_mat, animation_name) diff --git a/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_animation_node.py b/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_animation_node.py new file mode 100755 index 00000000000..2623fb8dfce --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_animation_node.py @@ -0,0 +1,189 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy +from mathutils import Vector + +from ...io.imp.gltf2_io_user_extensions import import_user_extensions +from ...io.imp.gltf2_io_binary import BinaryData +from 
.gltf2_blender_animation_utils import make_fcurve +from .gltf2_blender_vnode import VNode + + +class BlenderNodeAnim(): + """Blender Object Animation.""" + def __new__(cls, *args, **kwargs): + raise RuntimeError("%s should not be instantiated" % cls) + + @staticmethod + def anim(gltf, anim_idx, node_idx): + """Manage animation targeting a node's TRS.""" + animation = gltf.data.animations[anim_idx] + node = gltf.data.nodes[node_idx] + + if anim_idx not in node.animations.keys(): + return + + for channel_idx in node.animations[anim_idx]: + channel = animation.channels[channel_idx] + if channel.target.path not in ['translation', 'rotation', 'scale', 'pointer']: + continue + + if channel.target.path == "pointer" and channel.target.extensions is None: + continue + + if channel.target.path == "pointer" and ( + "KHR_animation_pointer" not in channel.target.extensions or "pointer" not in channel.target.extensions["KHR_animation_pointer"]): + continue + + if channel.target.path == "pointer": + pointer_tab = channel.target.extensions["KHR_animation_pointer"]["pointer"].split("/") + if not( + len(pointer_tab) >= 4 and pointer_tab[1] == "nodes" and pointer_tab[3] in [ + "translation", + "rotation", + "scale"]): + continue + + BlenderNodeAnim.do_channel(gltf, anim_idx, node_idx, channel) + + @staticmethod + def do_channel(gltf, anim_idx, node_idx, channel): + animation = gltf.data.animations[anim_idx] + vnode = gltf.vnodes[node_idx] + path = channel.target.path + + if path == "pointer": + path = channel.target.extensions["KHR_animation_pointer"]["pointer"].split("/")[3] + + import_user_extensions('gather_import_animation_channel_before_hook', gltf, animation, vnode, path, channel) + + action = BlenderNodeAnim.get_or_create_action(gltf, node_idx, animation.track_name) + + keys = BinaryData.get_data_from_accessor(gltf, animation.samplers[channel.sampler].input) + values = BinaryData.get_data_from_accessor(gltf, animation.samplers[channel.sampler].output) + + if animation.samplers[channel.sampler].interpolation == "CUBICSPLINE": + # TODO manage tangent? + values = values[1::3] + + # Convert the curve from glTF to Blender. + + if path == "translation": + blender_path = "location" + group_name = "Location" + num_components = 3 + values = [gltf.loc_gltf_to_blender(vals) for vals in values] + values = vnode.base_locs_to_final_locs(values) + + elif path == "rotation": + blender_path = "rotation_quaternion" + group_name = "Rotation" + num_components = 4 + values = [gltf.quaternion_gltf_to_blender(vals) for vals in values] + values = vnode.base_rots_to_final_rots(values) + + elif path == "scale": + blender_path = "scale" + group_name = "Scale" + num_components = 3 + values = [gltf.scale_gltf_to_blender(vals) for vals in values] + values = vnode.base_scales_to_final_scales(values) + + # Objects parented to a bone are translated to the bone tip by default. + # Correct for this by translating backwards from the tip to the root. + if vnode.type == VNode.Object and path == "translation": + if vnode.parent is not None and gltf.vnodes[vnode.parent].type == VNode.Bone: + bone_length = gltf.vnodes[vnode.parent].bone_length + off = Vector((0, -bone_length, 0)) + values = [vals + off for vals in values] + + if vnode.type == VNode.Bone: + # Need to animate the pose bone when the node is a bone. + group_name = vnode.blender_bone_name + blender_path = 'pose.bones["%s"].%s' % ( + bpy.utils.escape_identifier(vnode.blender_bone_name), + blender_path + ) + + # We have the final TRS of the bone in values. 
We need to give + # the TRS of the pose bone though, which is relative to the edit + # bone. + # + # Final = EditBone * PoseBone + # where + # Final = Trans[ft] Rot[fr] Scale[fs] + # EditBone = Trans[et] Rot[er] + # PoseBone = Trans[pt] Rot[pr] Scale[ps] + # + # Solving for PoseBone gives + # + # pt = Rot[er^{-1}] (ft - et) + # pr = er^{-1} fr + # ps = fs + + if path == 'translation': + edit_trans, edit_rot = vnode.editbone_trans, vnode.editbone_rot + edit_rot_inv = edit_rot.conjugated() + values = [ + edit_rot_inv @ (trans - edit_trans) + for trans in values + ] + + elif path == 'rotation': + edit_rot = vnode.editbone_rot + edit_rot_inv = edit_rot.conjugated() + values = [ + edit_rot_inv @ rot + for rot in values + ] + + elif path == 'scale': + pass # no change needed + + # To ensure rotations always take the shortest path, we flip + # adjacent antipodal quaternions. + if path == 'rotation': + for i in range(1, len(values)): + if values[i].dot(values[i - 1]) < 0: + values[i] = -values[i] + + fps = (bpy.context.scene.render.fps * bpy.context.scene.render.fps_base) + + coords = [0] * (2 * len(keys)) + coords[::2] = (key[0] * fps for key in keys) + + for i in range(0, num_components): + coords[1::2] = (vals[i] for vals in values) + make_fcurve( + action, + coords, + data_path=blender_path, + index=i, + group_name=group_name, + interpolation=animation.samplers[channel.sampler].interpolation, + ) + + import_user_extensions('gather_import_animation_channel_after_hook', + gltf, animation, vnode, path, channel, action) + + @staticmethod + def get_or_create_action(gltf, node_idx, anim_name): + vnode = gltf.vnodes[node_idx] + + if vnode.type == VNode.Bone: + # For bones, the action goes on the armature. + vnode = gltf.vnodes[vnode.bone_arma] + + obj = vnode.blender_object + + action = gltf.action_cache.get(obj.name) + if not action: + name = anim_name + "_" + obj.name + action = bpy.data.actions.new(name) + action.id_root = 'OBJECT' + gltf.needs_stash.append((obj, action)) + gltf.action_cache[obj.name] = action + + return action diff --git a/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_animation_pointer.py b/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_animation_pointer.py new file mode 100644 index 00000000000..5d6efb8c1ec --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_animation_pointer.py @@ -0,0 +1,714 @@ +# SPDX-FileCopyrightText: 2018-2023 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy +from ...io.imp.gltf2_io_user_extensions import import_user_extensions +from ...io.imp.gltf2_io_binary import BinaryData +from ..exp.material.gltf2_blender_search_node_tree import NodeSocket, previous_node, from_socket, get_socket, FilterByType, get_socket_from_gltf_material_node, get_texture_node_from_socket # TODO move to COM +from ..exp.gltf2_blender_gather_sampler import detect_manual_uv_wrapping # TODO move to COM +from ..exp.material.gltf2_blender_gather_materials_unlit import detect_shadeless_material # TODO move to COM +from ..com.gltf2_blender_conversion import texture_transform_gltf_to_blender +from .gltf2_blender_animation_utils import make_fcurve +from .gltf2_blender_light import BlenderLight +from .gltf2_blender_camera import BlenderCamera + + +class BlenderPointerAnim(): + """Blender Pointer Animation.""" + def __new__(cls, *args, **kwargs): + raise RuntimeError("%s should not be instantiated" % cls) + + @staticmethod + def anim(gltf, anim_idx, asset, asset_idx, asset_type, name=None, 
is_unlit=False): + animation = gltf.data.animations[anim_idx] + + if asset_type in ["LIGHT", "TEX_TRANSFORM", "EXT"]: + if anim_idx not in asset['animations'].keys(): + return + tab = asset['animations'] + else: + if anim_idx not in asset.animations.keys(): + return + tab = asset.animations + + for channel_idx in tab[anim_idx]: + channel = animation.channels[channel_idx] + BlenderPointerAnim.do_channel(gltf, anim_idx, channel, asset, asset_idx, + asset_type, name=name, is_unlit=is_unlit) + + @staticmethod + def do_channel(gltf, anim_idx, channel, asset, asset_idx, asset_type, name=None, is_unlit=False): + animation = gltf.data.animations[anim_idx] + pointer_tab = channel.target.extensions["KHR_animation_pointer"]["pointer"].split("/") + + import_user_extensions('gather_import_animation_pointer_channel_before_hook', gltf, animation, channel) + + # For some asset_type, we need to check what is the real id_root + if asset_type == "MATERIAL": + if len(pointer_tab) == 4 and pointer_tab[1] == "materials" and \ + pointer_tab[3] == "alphaCutoff": + id_root = "MATERIAL" + else: + id_root = "NODETREE" + elif asset_type == "MATERIAL_PBR": + id_root = "NODETREE" + else: + id_root = asset_type + + action = BlenderPointerAnim.get_or_create_action( + gltf, asset, asset_idx, animation.track_name, id_root, name=name) + + keys = BinaryData.get_data_from_accessor(gltf, animation.samplers[channel.sampler].input) + values = BinaryData.get_data_from_accessor(gltf, animation.samplers[channel.sampler].output) + + if animation.samplers[channel.sampler].interpolation == "CUBICSPLINE": + # TODO manage tangent? + values = values[1::3] + + # Convert the curve from glTF to Blender. + blender_path = None + num_components = None + group_name = '' + # Camera + if len(pointer_tab) == 5 and pointer_tab[1] == "cameras" and \ + pointer_tab[3] in ["perspective"] and \ + pointer_tab[4] in ["znear", "zfar"]: # Aspect Ratio is not something we can animate in Blender + blender_path = { + "znear": "clip_start", + "zfar": "clip_end" + }.get(pointer_tab[4]) + num_components = 1 + + if len(pointer_tab) == 5 and pointer_tab[1] == "cameras" and \ + pointer_tab[3] in ["perspective"] and \ + pointer_tab[4] == "yfov": + + blender_path = "lens" + num_components = 1 + + old_values = values.copy() + sensor = asset.blender_object_data.sensor_height + for idx, i in enumerate(old_values): + values[idx] = [BlenderCamera.calc_lens_from_fov(gltf, i[0], sensor)] + + if len(pointer_tab) == 5 and pointer_tab[1] == "cameras" and \ + pointer_tab[3] in ["orthographic"] and \ + pointer_tab[4] in ["ymag", "xmag"]: + + if len(asset.multiple_channels_mag) != 0: + + # We need to calculate the value, based on ymag and xmag + if "xmag" in asset.multiple_channels_mag.keys(): + xmag_animation = gltf.data.animations[asset.multiple_channels_mag['xmag'][0]] + xmag_channel = xmag_animation.channels[asset.multiple_channels_mag['xmag'][1]] + xmag_keys = BinaryData.get_data_from_accessor( + gltf, xmag_animation.samplers[xmag_channel.sampler].input) + xmag_values = BinaryData.get_data_from_accessor( + gltf, xmag_animation.samplers[xmag_channel.sampler].output) + else: + xmag_keys == keys.copy() + xmag_values = [asset.orthographic.xmag] * len(keys) + + if "ymag" in asset.multiple_channels_mag.keys(): + ymag_animation = gltf.data.animations[asset.multiple_channels_mag['ymag'][0]] + ymag_channel = ymag_animation.channels[asset.multiple_channels_mag['ymag'][1]] + ymag_keys = BinaryData.get_data_from_accessor( + gltf, ymag_animation.samplers[ymag_channel.sampler].input) + 
ymag_values = BinaryData.get_data_from_accessor( + gltf, ymag_animation.samplers[ymag_channel.sampler].output) + else: + ymag_keys == keys.copy() + ymag_values = [asset.orthographic.ymag] * len(keys) + + # We will manage it only if keys are the same... TODO ? + if xmag_keys == ymag_keys: + + blender_path = "ortho_scale" + num_components = 1 + + old_values = values.copy() + for idx, i in enumerate(old_values): + values[idx] = max(xmag_values[idx], ymag_values[idx]) * 2 + + # Delete values, as we don't need to add keyframes again for ortho_scale + # (xmag + ymag channels => only 1 ortho_scale channel in blender) + asset.multiple_channels_mag = {} + + # Light + if len(pointer_tab) == 6 and pointer_tab[1] == "extensions" and \ + pointer_tab[2] == "KHR_lights_punctual" and \ + pointer_tab[3] == "lights" and \ + pointer_tab[5] in ["intensity", "color", "range"]: + + blender_path = { + "color": "color", + "intensity": "energy" + }.get(pointer_tab[5]) + group_name = 'Color' + num_components = 3 if blender_path == "color" else 1 + + # TODO perf, using numpy + if blender_path == "energy": + old_values = values.copy() + for idx, i in enumerate(old_values): + if asset['type'] in ["SPOT", "POINT"]: + values[idx] = [BlenderLight.calc_energy_pointlike(gltf, i[0])] + else: + values[idx] = [BlenderLight.calc_energy_directional(gltf, i[0])] + + # TODO range, not implemented (even not in static import) + + if len(pointer_tab) == 6 and pointer_tab[1] == "extensions" and \ + pointer_tab[2] == "KHR_lights_punctual" and \ + pointer_tab[3] == "lights" and \ + pointer_tab[5] in ["spot.outerConeAngle", "spot.innerConeAngle"]: + + if pointer_tab[5] == "spot.outerConeAngle": + blender_path = "spot_size" + num_components = 1 + + old_values = values.copy() + for idx, i in enumerate(old_values): + values[idx] = [values[idx][0] * 2] + + if pointer_tab[5] == "spot.innerConeAngle": + if "spot.outerConeAngle" in asset["multiple_channels"].keys(): + outer_animation = gltf.data.animations[asset['multiple_channels']['spot.outerConeAngle'][0]] + outer_channel = outer_animation.channels[asset['multiple_channels']['spot.outerConeAngle'][1]] + outer_keys = BinaryData.get_data_from_accessor( + gltf, outer_animation.samplers[outer_channel.sampler].input) + outer_values = BinaryData.get_data_from_accessor( + gltf, outer_animation.samplers[outer_channel.sampler].output) + else: + outer_keys = keys.copy() + outer_values = [[asset['spot']['outerConeAngle']]] * len(keys) + + # We will manage it only if keys are the same... TODO ? 
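+                    # Blender expresses the inner cone through spot_blend, the
+                    # size of the inner cone relative to the outer one (roughly
+                    # 1 - innerConeAngle / outerConeAngle, see
+                    # BlenderLight.calc_spot_cone_inner used just below).
+                    # Converting a keyframed inner angle therefore needs the
+                    # outer angle sampled at the same time, which is why both
+                    # channels are gathered here.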
+ if keys == outer_keys: + old_values = values.copy() + for idx, i in enumerate(old_values): + values[idx] = [BlenderLight.calc_spot_cone_inner(gltf, outer_values[idx][0], values[idx][0])] + blender_path = "spot_blend" + num_components = 1 + + # Materials + if len(pointer_tab) == 4 and pointer_tab[1] == "materials" and \ + pointer_tab[3] in ["emissiveFactor", "alphaCutoff"]: + + if pointer_tab[3] == "emissiveFactor": + emissive_socket = get_socket(asset.blender_nodetree, True, "Emissive") + if emissive_socket.socket.is_linked: + # We need to find the correct node value to animate (An Emissive Factor node) + mix_node = emissive_socket.socket.links[0].from_node + if mix_node.type == "MIX": + blender_path = mix_node.inputs[7].path_from_id() + ".default_value" + num_components = 3 + else: + print("Error, something is wrong, we didn't detect adding a Mix Node because of Pointers") + else: + blender_path = emissive_socket.socket.path_from_id() + ".default_value" + num_components = 3 + elif pointer_tab[3] == "alphaCutoff": + blender_path = "alpha_threshold" + num_components = 1 + + if len(pointer_tab) == 5 and pointer_tab[1] == "materials" and \ + pointer_tab[3] == "normalTexture" and \ + pointer_tab[4] == "scale": + + normal_socket = get_socket(asset.blender_nodetree, True, "Normal") + if normal_socket.socket.is_linked: + normal_node = normal_socket.socket.links[0].from_node + if normal_node.type == "NORMAL_MAP": + blender_path = normal_node.inputs[0].path_from_id() + ".default_value" + num_components = 1 + + if len(pointer_tab) == 5 and pointer_tab[1] == "materials" and \ + pointer_tab[3] == "occlusionTexture" and \ + pointer_tab[4] == "strength": + + occlusion_socket = get_socket(asset.blender_nodetree, True, "Occlusion") + if occlusion_socket.socket is None: + occlusion_socket = get_socket_from_gltf_material_node(asset.blender_mat.node_tree, True, "Occlusion") + if occlusion_socket.socket.is_linked: + mix_node = occlusion_socket.socket.links[0].from_node + if mix_node.type == "MIX": + blender_path = mix_node.inputs[0].path_from_id() + ".default_value" + num_components = 1 + else: + print("Error, something is wrong, we didn't detect adding a Mix Node because of Pointers") + else: + blender_path = occlusion_socket.socket.path_from_id() + ".default_value" + num_components = 1 + + if len(pointer_tab) == 5 and pointer_tab[1] == "materials" and \ + pointer_tab[3] == "pbrMetallicRoughness" and \ + pointer_tab[4] in ["baseColorFactor", "roughnessFactor", "metallicFactor"]: + + if pointer_tab[4] == "baseColorFactor": + + # This can be regular PBR, or unlit + if is_unlit is False: + + base_color_socket = get_socket(asset.blender_nodetree, True, "Base Color") + if base_color_socket.socket.is_linked: + # We need to find the correct node value to animate (An Mix Factor node) + mix_node = base_color_socket.links[0].from_node + if mix_node.type == "MIX": + blender_path = mix_node.inputs[7].path_from_id() + ".default_value" + num_components = 3 # Do not use alpha here, will be managed later + else: + print("Error, something is wrong, we didn't detect adding a Mix Node because of Pointers") + else: + blender_path = base_color_socket.socket.path_from_id() + ".default_value" + num_components = 3 # Do not use alpha here, will be managed later + + else: + unlit_info = detect_shadeless_material(asset.blender_nodetree, True, {}) + if 'rgb_socket' in unlit_info: + socket = unlit_info['rgb_socket'] + blender_path = socket.socket.path_from_id() + ".default_value" + num_components = 3 + else: + socket = 
NodeSocket(None, None) + + if pointer_tab[4] == "roughnessFactor": + roughness_socket = get_socket(asset.blender_nodetree, True, "Roughness") + if roughness_socket.socket.is_linked: + # We need to find the correct node value to animate (An Mix Factor node) + mix_node = roughness_socket.links[0].from_node + if mix_node.type == "MATH": + blender_path = mix_node.inputs[1].path_from_id() + ".default_value" + num_components = 1 + else: + print("Error, something is wrong, we didn't detect adding a Mix Node because of Pointers") + else: + blender_path = roughness_socket.socket.path_from_id() + ".default_value" + num_components = 1 + + if pointer_tab[4] == "metallicFactor": + metallic_socket = get_socket(asset.blender_nodetree, True, "Metallic") + if metallic_socket.socket.is_linked: + # We need to find the correct node value to animate (An Mix Factor node) + mix_node = metallic_socket.links[0].from_node + if mix_node.type == "MATH": + blender_path = mix_node.inputs[1].path_from_id() + ".default_value" + num_components = 1 + else: + print("Error, something is wrong, we didn't detect adding a Mix Node because of Pointers") + else: + blender_path = metallic_socket.socket.path_from_id() + ".default_value" + num_components = 1 + + if len(pointer_tab) >= 7 and pointer_tab[1] == "materials" and \ + pointer_tab[-3] == "extensions" and \ + pointer_tab[-2] == "KHR_texture_transform" and \ + pointer_tab[-1] in ["scale", "offset", "rotation"]: + + socket = None + if pointer_tab[-4] == "baseColorTexture": + # This can be regular PBR, or unlit + if is_unlit is False: + socket = get_socket(asset['blender_nodetree'], True, "Base Color") + else: + unlit_info = detect_shadeless_material(asset['blender_nodetree'], True, {}) + if 'rgb_socket' in unlit_info: + socket = unlit_info['rgb_socket'] + else: + socket = NodeSocket(None, None) + elif pointer_tab[-4] == "emissiveTexture": + socket = get_socket(asset.blender_nodetree, True, "Emission Color") + elif pointer_tab[-4] == "normalTexture": + socket = get_socket(asset.blender_nodetree, True, "Normal") + elif pointer_tab[-4] == "occlusionTexture": + socket = get_socket(asset.blender_nodetree, True, "Occlusion") + if socket is None: + socket = get_socket_from_gltf_material_node(asset.blender_nodetree, True, "Occlusion") + elif pointer_tab[-4] == "metallicRoughnessTexture": + socket = get_socket(asset.blender_nodetree, True, "Roughness") + elif pointer_tab[-4] == "specularTexture": + socket = get_socket(asset['blender_nodetree'], True, "Specular IOR Level") + elif pointer_tab[-4] == "specularColorTexture": + socket = get_socket(asset['blender_nodetree'], True, "Specular Tint") + elif pointer_tab[-4] == "sheenColorTexture": + socket = get_socket(asset['blender_nodetree'], True, "Sheen Tint") + elif pointer_tab[-4] == "sheenRoughnessTexture": + socket = get_socket(asset['blender_nodetree'], True, "Sheen Roughness") + elif pointer_tab[-4] == "clearcoatTexture": + socket = get_socket(asset['blender_nodetree'], True, "Coat Weight") + elif pointer_tab[-4] == "clearcoatRoughnessTexture": + socket = get_socket(asset['blender_nodetree'], True, "Coat Roughness") + elif pointer_tab[-4] == "clearcoatNormalTexture": + socket = get_socket(asset['blender_nodetree'], True, "Coat Normal") + elif pointer_tab[-4] == "thicknessTexture": + socket = get_socket_from_gltf_material_node(asset['blender_nodetree'], True, "Thickness") + elif pointer_tab[-4] == "transmissionTexture": + socket = get_socket(asset['blender_nodetree'], True, "Transmission Weight") + else: + print("Some Texture are not 
managed for KHR_animation_pointer / KHR_texture_transform") + + tex = get_texture_node_from_socket(socket, {}) if socket.socket is not None else None + tex_node = tex.shader_node if tex is not None else None + if tex_node is not None: + result = detect_manual_uv_wrapping(tex_node, tex.group_path) + if result: + mapping_node = previous_node(result['next_socket']) + else: + mapping_node = previous_node(NodeSocket(tex_node.inputs['Vector'], tex.group_path)) + else: + mapping_node = None + + if mapping_node is not None: + if pointer_tab[-1] == "offset": + blender_path = mapping_node.node.inputs[1].path_from_id() + ".default_value" + num_components = 2 + elif pointer_tab[-1] == "rotation": + blender_path = mapping_node.node.inputs[2].path_from_id() + ".default_value" + num_components = 2 + elif pointer_tab[-1] == "scale": + blender_path = mapping_node.node.inputs[3].path_from_id() + ".default_value" + num_components = 2 + + if pointer_tab[-1] == "rotation": + pass # No conversion needed + elif pointer_tab[-1] == "scale": + pass # No conversion needed + elif pointer_tab[-1] == "offset": + # This need scale and rotation + if 'rotation' in asset['multiple_channels'].keys(): + animation_rotation = gltf.data.animations[asset['multiple_channels']['rotation'][0]] + channel_rotation = animation_rotation.channels[asset['multiple_channels']['rotation'][1]] + keys_rotation = BinaryData.get_data_from_accessor( + gltf, animation_rotation.samplers[channel_rotation.sampler].input) + values_rotation = BinaryData.get_data_from_accessor( + gltf, animation_rotation.samplers[channel_rotation.sampler].output) + else: + keys_rotation = keys.copy() + values_rotation = [asset.get('rotation', 0.0)] * len(keys) + + if 'scale' in asset['multiple_channels'].keys(): + animation_scale = gltf.data.animations[asset['multiple_channels']['scale'][0]] + channel_scale = animation_scale.channels[asset['multiple_channels']['scale'][1]] + keys_scale = BinaryData.get_data_from_accessor( + gltf, animation_scale.samplers[channel_scale.sampler].input) + values_scale = BinaryData.get_data_from_accessor( + gltf, animation_scale.samplers[channel_scale.sampler].output) + else: + keys_scale = keys.copy() + values_scale = [asset.get('scale', [1.0, 1.0])] * len(keys) + + # We will manage it only if keys are the same... TODO ? 
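+                        # glTF texture transforms use a top-left UV origin while
+                        # Blender's Mapping node works from the bottom-left, so
+                        # converting a keyframed offset is not a simple sign flip:
+                        # texture_transform_gltf_to_blender() needs the rotation
+                        # and scale that apply at the same keyframe, which is why
+                        # those channels are gathered here.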
+ if keys == keys_rotation == keys_scale: + old_values = values.copy() + for idx, i in enumerate(old_values): + values[idx] = texture_transform_gltf_to_blender( + {'rotation': values_rotation[idx], 'scale': values_scale[idx], 'offset': i}).get('offset') + + if len(pointer_tab) == 6 and pointer_tab[1] == "materials" and \ + pointer_tab[3] == "extensions" and \ + pointer_tab[4] == "KHR_materials_emissive_strength" and \ + pointer_tab[5] == "emissiveStrength": + + socket = get_socket(asset['blender_nodetree'], True, "Emission Strength") + blender_path = socket.socket.path_from_id() + ".default_value" + num_components = 1 + + if len(pointer_tab) == 6 and pointer_tab[1] == "materials" and \ + pointer_tab[3] == "extensions" and \ + pointer_tab[4] == "KHR_materials_volume" and \ + pointer_tab[5] in ["thicknessFactor", "attenuationDistance", "attenuationColor"]: + + if pointer_tab[5] == "thicknessFactor": + thicknesss_socket = get_socket_from_gltf_material_node(asset['blender_nodetree'], True, 'Thickness') + if thicknesss_socket.socket.is_linked: + mix_node = thicknesss_socket.socket.links[0].from_node + if mix_node.type == "MATH": + blender_path = mix_node.inputs[1].path_from_id() + ".default_value" + num_components = 1 + else: + print("Error, something is wrong, we didn't detect adding a Mix Node because of Pointers") + else: + blender_path = thicknesss_socket.socket.path_from_id() + ".default_value" + num_components = 1 + + if pointer_tab[5] == "attenuationDistance": + density_socket = get_socket(asset['blender_nodetree'], True, 'Density', volume=True) + blender_path = density_socket.socket.path_from_id() + ".default_value" + num_components = 1 + + old_values = values.copy() + for idx, i in enumerate(old_values): + values[idx] = [1.0 / old_values[idx][0]] + + if pointer_tab[5] == "attenuationColor": + attenuation_color_socket = get_socket(asset['blender_nodetree'], True, 'Color', volume=True) + blender_path = attenuation_color_socket.socket.path_from_id() + ".default_value" + num_components = 3 + + if len(pointer_tab) == 6 and pointer_tab[1] == "materials" and \ + pointer_tab[3] == "extensions" and \ + pointer_tab[4] == "KHR_materials_ior" and \ + pointer_tab[5] == "ior": + + ior_socket = get_socket(asset['blender_nodetree'], True, 'IOR') + blender_path = ior_socket.socket.path_from_id() + ".default_value" + num_components = 1 + + if len(pointer_tab) == 6 and pointer_tab[1] == "materials" and \ + pointer_tab[3] == "extensions" and \ + pointer_tab[4] == "KHR_materials_transmission" and \ + pointer_tab[5] == "transmissionFactor": + + transmission_socket = get_socket(asset['blender_nodetree'], True, 'Transmission Weight') + if transmission_socket.socket.is_linked: + mix_node = transmission_socket.links[0].from_node + if mix_node.type == "MATH": + blender_path = mix_node.inputs[1].path_from_id() + ".default_value" + num_components = 1 + else: + print("Error, something is wrong, we didn't detect adding a Mix Node because of Pointers") + else: + blender_path = transmission_socket.socket.path_from_id() + ".default_value" + num_components = 1 + + if len(pointer_tab) == 7 and pointer_tab[1] == "materials" and \ + pointer_tab[3] == "extensions" and \ + pointer_tab[4] == "KHR_materials_clearcoat" and \ + pointer_tab[5] == "clearcoatNormalTexture" and \ + pointer_tab[6] == "scale": + result = from_socket( + get_socket(asset['blender_nodetree'], True, 'Coat Normal'), + FilterByType(bpy.types.ShaderNodeNormalMap)) + if result: + blender_path = result[0].shader_node.inputs['Strength'].path_from_id() + 
".default_value" + num_components = 1 + + if len(pointer_tab) == 6 and pointer_tab[1] == "materials" and \ + pointer_tab[3] == "extensions" and \ + pointer_tab[4] == "KHR_materials_clearcoat" and \ + pointer_tab[5] == "clearcoatFactor": + clearcoat_socket = get_socket(asset['blender_nodetree'], True, 'Coat Weight') + if clearcoat_socket.socket.is_linked: + mix_node = clearcoat_socket.socket.links[0].from_node + if mix_node.type == "MATH": + blender_path = mix_node.inputs[1].path_from_id() + ".default_value" + num_components = 1 + else: + print("Error, something is wrong, we didn't detect adding a Mix Node because of Pointers") + else: + blender_path = clearcoat_socket.path_from_id() + ".default_value" + num_components = 1 + + if len(pointer_tab) == 6 and pointer_tab[1] == "materials" and \ + pointer_tab[3] == "extensions" and \ + pointer_tab[4] == "KHR_materials_clearcoat" and \ + pointer_tab[5] == "clearcoatRoughnessFactor": + clearcoat_roughness_socket = get_socket(asset['blender_nodetree'], True, 'Coat Roughness') + if clearcoat_roughness_socket.socket.is_linked: + mix_node = clearcoat_roughness_socket.socket.links[0].from_node + if mix_node.type == "MATH": + blender_path = mix_node.inputs[1].path_from_id() + ".default_value" + num_components = 1 + else: + print("Error, something is wrong, we didn't detect adding a Mix Node because of Pointers") + else: + blender_path = clearcoat_roughness_socket.path_from_id() + ".default_value" + num_components = 1 + + if len(pointer_tab) == 6 and pointer_tab[1] == "materials" and \ + pointer_tab[3] == "extensions" and \ + pointer_tab[4] == "KHR_materials_sheen" and \ + pointer_tab[5] == "sheenColorFactor": + sheen_color_socket = get_socket(asset['blender_nodetree'], True, 'Sheen Tint') + if sheen_color_socket.socket.is_linked: + mix_node = sheen_color_socket.socket.links[0].from_node + if mix_node.type == "MIX": + blender_path = mix_node.inputs[7].path_from_id() + ".default_value" + num_components = 3 + else: + print("Error, something is wrong, we didn't detect adding a Mix Node because of Pointers") + else: + blender_path = sheen_color_socket.path_from_id() + ".default_value" + num_components = 3 + + if len(pointer_tab) == 6 and pointer_tab[1] == "materials" and \ + pointer_tab[3] == "extensions" and \ + pointer_tab[4] == "KHR_materials_sheen" and \ + pointer_tab[5] == "sheenRoughnessFactor": + sheen_roughness_socket = get_socket(asset['blender_nodetree'], True, 'Sheen Roughness') + if sheen_roughness_socket.socket.is_linked: + mix_node = sheen_roughness_socket.socket.links[0].from_node + if mix_node.type == "MATH": + blender_path = mix_node.inputs[1].path_from_id() + ".default_value" + num_components = 1 + else: + print("Error, something is wrong, we didn't detect adding a Mix Node because of Pointers") + else: + blender_path = sheen_roughness_socket.socket.path_from_id() + ".default_value" + num_components = 1 + + if len(pointer_tab) == 6 and pointer_tab[1] == "materials" and \ + pointer_tab[3] == "extensions" and \ + pointer_tab[4] == "KHR_materials_specular" and \ + pointer_tab[5] == "specularFactor": + specular_socket = get_socket(asset['blender_nodetree'], True, 'Specular IOR Level') + if specular_socket.socket.is_linked: + mix_node = specular_socket.socket.links[0].from_node + if mix_node.type == "MATH": + blender_path = mix_node.inputs[1].path_from_id() + ".default_value" + num_components = 1 + else: + print("Error, something is wrong, we didn't detect adding a Mix Node because of Pointers") + else: + blender_path = 
specular_socket.socket.path_from_id() + ".default_value" + num_components = 1 + + old_values = values.copy() + for idx, i in enumerate(old_values): + values[idx] = [i[0] / 2.0] + + if len(pointer_tab) == 6 and pointer_tab[1] == "materials" and \ + pointer_tab[3] == "extensions" and \ + pointer_tab[4] == "KHR_materials_specular" and \ + pointer_tab[5] == "specularColorFactor": + specular_color_socket = get_socket(asset['blender_nodetree'], True, 'Specular Tint') + if specular_color_socket.socket.is_linked: + mix_node = specular_color_socket.socket.links[0].from_node + if mix_node.type == "MIX": + blender_path = mix_node.inputs[7].path_from_id() + ".default_value" + num_components = 3 + else: + print("Error, something is wrong, we didn't detect adding a Mix Node because of Pointers") + else: + blender_path = specular_color_socket.socket.path_from_id() + ".default_value" + num_components = 3 + + if len(pointer_tab) == 6 and pointer_tab[1] == "materials" and \ + pointer_tab[3] == "extensions" and \ + pointer_tab[4] == "KHR_materials_anisotropy" and \ + pointer_tab[5] == "anisotropyStrength": + anisotropy_socket = get_socket(asset['blender_nodetree'], True, 'Anisotropic') + if anisotropy_socket.socket.is_linked: + mix_node = anisotropy_socket.socket.links[0].from_node + if mix_node.type == "MATH": + blender_path = mix_node.inputs[1].path_from_id() + ".default_value" + num_components = 1 + else: + print("Error, something is wrong, we didn't detect adding a Mix Node because of Pointers") + else: + blender_path = anisotropy_socket.socket.path_from_id() + ".default_value" + num_components = 1 + + if len(pointer_tab) == 6 and pointer_tab[1] == "materials" and \ + pointer_tab[3] == "extensions" and \ + pointer_tab[4] == "KHR_materials_anisotropy" and \ + pointer_tab[5] == "anisotropyRotation": + anisotropy_rotation_socket = get_socket(asset['blender_nodetree'], True, 'Anisotropic Rotation') + if anisotropy_rotation_socket.socket.is_linked: + mix_node = anisotropy_rotation_socket.socket.links[0].from_node + if mix_node.type == "MATH": + blender_path = mix_node.inputs[1].path_from_id() + ".default_value" + num_components = 1 + else: + print("Error, something is wrong, we didn't detect adding a Mix Node because of Pointers") + else: + blender_path = anisotropy_rotation_socket.socket.path_from_id() + ".default_value" + num_components = 1 + + if blender_path is None: + return # Should not happen if all specification is managed + + fps = bpy.context.scene.render.fps + + coords = [0] * (2 * len(keys)) + coords[::2] = (key[0] * fps for key in keys) + + for i in range(0, num_components): + coords[1::2] = (vals[i] for vals in values) + make_fcurve( + action, + coords, + data_path=blender_path, + index=i, + group_name=group_name, + interpolation=animation.samplers[channel.sampler].interpolation, + ) + + # For baseColorFactor, we also need to add keyframes to alpha socket + if len(pointer_tab) == 5 and pointer_tab[1] == "materials" and \ + pointer_tab[3] == "pbrMetallicRoughness" and \ + pointer_tab[4] == "baseColorFactor": + + if is_unlit is False: + alpha_socket = get_socket(asset.blender_nodetree, True, "Alpha") + else: + unlit_info = detect_shadeless_material(asset.blender_nodetree, True, {}) + if 'alpha_socket' in unlit_info: + alpha_socket = unlit_info['alpha_socket'] + if alpha_socket.socket.is_linked: + # We need to find the correct node value to animate (An Mix Factor node) + mix_node = alpha_socket.socket.links[0].from_node + if mix_node.type == "MATH": + blender_path = 
mix_node.inputs[1].path_from_id() + ".default_value" + else: + print("Error, something is wrong, we didn't detect adding a Mix Node because of Pointers") + else: + blender_path = alpha_socket.socket.path_from_id() + ".default_value" + + coords[1::2] = (vals[3] for vals in values) + make_fcurve( + action, + coords, + data_path=blender_path, + index=0, + group_name=group_name, + interpolation=animation.samplers[channel.sampler].interpolation, + ) + + @staticmethod + def get_or_create_action(gltf, asset, asset_idx, anim_name, asset_type, name=None): + + action = None + if asset_type == "CAMERA": + data_name = "camera_" + asset.name or "Camera%d" % asset_idx + action = gltf.action_cache.get(data_name) + id_root = "CAMERA" + stash = asset.blender_object_data + elif asset_type == "LIGHT": + data_name = "light_" + asset['name'] or "Light%d" % asset_idx + action = gltf.action_cache.get(data_name) + id_root = "LIGHT" + stash = asset['blender_object_data'] + elif asset_type == "MATERIAL": + data_name = "material_" + asset.name or "Material%d" % asset_idx + action = gltf.action_cache.get(data_name) + id_root = "MATERIAL" + stash = asset.blender_mat + elif asset_type == "NODETREE": + name_ = name if name is not None else asset.name + data_name = "nodetree_" + name_ or "Nodetree%d" % asset_idx + action = gltf.action_cache.get(data_name) + id_root = "NODETREE" + stash = asset.blender_nodetree + elif asset_type == "TEX_TRANSFORM": + name_ = name if name is not None else asset.name + data_name = "nodetree_" + name_ or "Nodetree%d" % asset_idx + action = gltf.action_cache.get(data_name) + id_root = "NODETREE" + stash = asset['blender_nodetree'] + elif asset_type == "EXT": + name_ = name if name is not None else asset.name + data_name = "nodetree_" + name_ or "Nodetree%d" % asset_idx + action = gltf.action_cache.get(data_name) + id_root = "NODETREE" + stash = asset['blender_nodetree'] + + if not action: + name = anim_name + "_" + data_name + action = bpy.data.actions.new(name) + action.id_root = id_root + gltf.needs_stash.append((stash, action)) + gltf.action_cache[data_name] = action + + return action diff --git a/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_animation_utils.py b/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_animation_utils.py new file mode 100644 index 00000000000..aaf2d2bcf5c --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_animation_utils.py @@ -0,0 +1,69 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy + + +def simulate_stash(obj, track_name, action, start_frame=None): + # Simulate stash : + # * add a track + # * add an action on track + # * lock & mute the track + if not obj.animation_data: + obj.animation_data_create() + tracks = obj.animation_data.nla_tracks + new_track = tracks.new(prev=None) + new_track.name = track_name + if start_frame is None: + start_frame = bpy.context.scene.frame_start + _strip = new_track.strips.new(action.name, start_frame, action) + new_track.lock = True + new_track.mute = True + + +def restore_animation_on_object(obj, anim_name): + """ here, obj can be an object, shapekeys, camera or light data """ + if not getattr(obj, 'animation_data', None): + return + + for track in obj.animation_data.nla_tracks: + if track.name != anim_name: + continue + if not track.strips: + continue + + obj.animation_data.action = track.strips[0].action + return + + obj.animation_data.action = None + + +def make_fcurve(action, co, data_path, 
index=0, group_name='', interpolation=None): + try: + fcurve = action.fcurves.new(data_path=data_path, index=index, action_group=group_name) + except: + # Some non valid files can have multiple target path + return None + + fcurve.keyframe_points.add(len(co) // 2) + fcurve.keyframe_points.foreach_set('co', co) + + # Setting interpolation + ipo = { + 'CUBICSPLINE': 'BEZIER', + 'LINEAR': 'LINEAR', + 'STEP': 'CONSTANT', + }[interpolation or 'LINEAR'] + ipo = bpy.types.Keyframe.bl_rna.properties['interpolation'].enum_items[ipo].value + fcurve.keyframe_points.foreach_set('interpolation', [ipo] * len(fcurve.keyframe_points)) + + # For CUBICSPLINE, also set the handle types to AUTO + if interpolation == 'CUBICSPLINE': + ty = bpy.types.Keyframe.bl_rna.properties['handle_left_type'].enum_items['AUTO'].value + fcurve.keyframe_points.foreach_set('handle_left_type', [ty] * len(fcurve.keyframe_points)) + fcurve.keyframe_points.foreach_set('handle_right_type', [ty] * len(fcurve.keyframe_points)) + + fcurve.update() # force updating tangents (this may change when tangent will be managed) + + return fcurve diff --git a/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_animation_weight.py b/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_animation_weight.py new file mode 100644 index 00000000000..d631bc403eb --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_animation_weight.py @@ -0,0 +1,95 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy + +from ...io.imp.gltf2_io_user_extensions import import_user_extensions +from ...io.imp.gltf2_io_binary import BinaryData +from .gltf2_blender_animation_utils import make_fcurve + + +class BlenderWeightAnim(): + """Blender ShapeKey Animation.""" + def __new__(cls, *args, **kwargs): + raise RuntimeError("%s should not be instantiated" % cls) + + @staticmethod + def anim(gltf, anim_idx, vnode_id): + """Manage animation.""" + vnode = gltf.vnodes[vnode_id] + + node_idx = vnode.mesh_node_idx + + import_user_extensions('gather_import_animation_weight_before_hook', + gltf, vnode, gltf.data.animations[anim_idx]) + + if node_idx is None: + return + + node = gltf.data.nodes[node_idx] + obj = vnode.blender_object + fps = (bpy.context.scene.render.fps * bpy.context.scene.render.fps_base) + + animation = gltf.data.animations[anim_idx] + + if anim_idx not in node.animations.keys(): + return + + for channel_idx in node.animations[anim_idx]: + channel = animation.channels[channel_idx] + if channel.target.path == "weights": + break + if channel.target.path == "pointer": + pointer_tab = channel.target.extensions["KHR_animation_pointer"]["pointer"].split("/") + if len(pointer_tab) >= 4 and pointer_tab[1] in ["nodes", "meshes"] and pointer_tab[3] == "weights": + break + else: + return + + name = animation.track_name + "_" + obj.name + action = bpy.data.actions.new(name) + action.id_root = "KEY" + gltf.needs_stash.append((obj.data.shape_keys, action)) + + keys = BinaryData.get_data_from_accessor(gltf, animation.samplers[channel.sampler].input) + values = BinaryData.get_data_from_accessor(gltf, animation.samplers[channel.sampler].output) + + # retrieve number of targets + pymesh = gltf.data.meshes[gltf.data.nodes[node_idx].mesh] + nb_targets = len(pymesh.shapekey_names) + + if animation.samplers[channel.sampler].interpolation == "CUBICSPLINE": + offset = nb_targets + stride = 3 * nb_targets + else: + offset = 0 + stride = nb_targets + + coords = [0] * (2 * 
len(keys)) + coords[::2] = (key[0] * fps for key in keys) + + for sk in range(nb_targets): + if pymesh.shapekey_names[sk] is not None: # Do not animate shapekeys not created + coords[1::2] = (values[offset + stride * i + sk][0] for i in range(len(keys))) + kb_name = pymesh.shapekey_names[sk] + data_path = 'key_blocks["%s"].value' % bpy.utils.escape_identifier(kb_name) + + make_fcurve( + action, + coords, + data_path=data_path, + group_name="ShapeKeys", + interpolation=animation.samplers[channel.sampler].interpolation, + ) + + # Expand weight range if needed + kb = obj.data.shape_keys.key_blocks[kb_name] + min_weight = min(coords[1:2]) + max_weight = max(coords[1:2]) + if min_weight < kb.slider_min: + kb.slider_min = min_weight + if max_weight > kb.slider_max: + kb.slider_max = max_weight + + import_user_extensions('gather_import_animation_weight_after_hook', gltf, vnode, animation) diff --git a/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_camera.py b/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_camera.py new file mode 100755 index 00000000000..803fb5e1f9c --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_camera.py @@ -0,0 +1,75 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy +from math import tan +from ..com.gltf2_blender_extras import set_extras +from ...io.imp.gltf2_io_user_extensions import import_user_extensions + + +class BlenderCamera(): + """Blender Camera.""" + def __new__(cls, *args, **kwargs): + raise RuntimeError("%s should not be instantiated" % cls) + + @staticmethod + def create(gltf, vnode, camera_id): + """Camera creation.""" + pycamera = gltf.data.cameras[camera_id] + + import_user_extensions('gather_import_camera_before_hook', gltf, vnode, pycamera) + + if not pycamera.name: + pycamera.name = "Camera" + + cam = bpy.data.cameras.new(pycamera.name) + set_extras(cam, pycamera.extras) + + # Blender create a perspective camera by default + if pycamera.type == "orthographic": + cam.type = "ORTHO" + + cam.ortho_scale = max(pycamera.orthographic.xmag, pycamera.orthographic.ymag) * 2 + + cam.clip_start = pycamera.orthographic.znear + cam.clip_end = pycamera.orthographic.zfar + + # Store multiple channel data, as we will need all channels to convert to + # blender data when animated by KHR_animation_pointer + if gltf.data.extensions_used is not None and "KHR_animation_pointer" in gltf.data.extensions_used: + if len(pycamera.animations) > 0: + for anim_idx in pycamera.animations.keys(): + for channel_idx in pycamera.animations[anim_idx]: + channel = gltf.data.animations[anim_idx].channels[channel_idx] + pointer_tab = channel.target.extensions["KHR_animation_pointer"]["pointer"].split("/") + if len(pointer_tab) == 5 and pointer_tab[1] == "cameras" and \ + pointer_tab[3] == "orthographic" and \ + pointer_tab[4] in ["xmag", "ymag"]: + # Store multiple channel data, as we will need all channels to convert to + # blender data when animated + if not hasattr(pycamera, "multiple_channels_mag"): + pycamera.multiple_channels_mag = {} + pycamera.multiple_channels_mag[pointer_tab[4]] = (anim_idx, channel_idx) + + else: + cam.angle_y = pycamera.perspective.yfov + cam.lens_unit = "FOV" + cam.sensor_fit = "VERTICAL" + + # TODO: fov/aspect ratio + + cam.clip_start = pycamera.perspective.znear + if pycamera.perspective.zfar is not None: + cam.clip_end = pycamera.perspective.zfar + else: + # Infinite projection + cam.clip_end = 1e12 # some big number + + 
pycamera.blender_object_data = cam # Needed in case of KHR_animation_pointer + + return cam + + @staticmethod + def calc_lens_from_fov(gltf, input_value, sensor): + return (sensor / 2.0) / tan(input_value * 0.5) diff --git a/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_gltf.py b/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_gltf.py new file mode 100755 index 00000000000..2df111203e2 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_gltf.py @@ -0,0 +1,624 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy +from mathutils import Vector, Quaternion, Matrix +from ...io.imp.gltf2_io_user_extensions import import_user_extensions +from .gltf2_blender_scene import BlenderScene + + +class BlenderGlTF(): + """Main glTF import class.""" + def __new__(cls, *args, **kwargs): + raise RuntimeError("%s should not be instantiated" % cls) + + @staticmethod + def create(gltf): + """Create glTF main method, with optional profiling""" + + import_user_extensions('gather_import_gltf_before_hook', gltf) + + profile = bpy.app.debug_value == 102 + if profile: + import cProfile + import pstats + import io + from pstats import SortKey + pr = cProfile.Profile() + pr.enable() + BlenderGlTF._create(gltf) + pr.disable() + s = io.StringIO() + sortby = SortKey.TIME + ps = pstats.Stats(pr, stream=s).sort_stats(sortby) + ps.print_stats() + print(s.getvalue()) + else: + BlenderGlTF._create(gltf) + + @staticmethod + def _create(gltf): + """Create glTF main worker method.""" + BlenderGlTF.set_convert_functions(gltf) + BlenderGlTF.pre_compute(gltf) + BlenderScene.create(gltf) + + @staticmethod + def set_convert_functions(gltf): + if bpy.app.debug_value != 100: + # Unit conversion factor in (Blender units) per meter + u = 1.0 / bpy.context.scene.unit_settings.scale_length + + # glTF Y-Up space --> Blender Z-up space + # X,Y,Z --> X,-Z,Y + def convert_loc(x): return u * Vector([x[0], -x[2], x[1]]) + def convert_quat(q): return Quaternion([q[3], q[0], -q[2], q[1]]) + def convert_scale(s): return Vector([s[0], s[2], s[1]]) + + def convert_matrix(m): + return Matrix([ + [m[0], -m[8], m[4], m[12] * u], + [-m[2], m[10], -m[6], -m[14] * u], + [m[1], -m[9], m[5], m[13] * u], + [m[3] / u, -m[11] / u, m[7] / u, m[15]], + ]) + + # Batch versions operate in place on a numpy array + def convert_locs_batch(locs): + # x,y,z -> x,-z,y + locs[:, [1, 2]] = locs[:, [2, 1]] + locs[:, 1] *= -1 + # Unit conversion + if u != 1: + locs *= u + + def convert_normals_batch(ns): + ns[:, [1, 2]] = ns[:, [2, 1]] + ns[:, 1] *= -1 + + # Correction for cameras and lights. + # glTF: right = +X, forward = -Z, up = +Y + # glTF after Yup2Zup: right = +X, forward = +Y, up = +Z + # Blender: right = +X, forward = -Z, up = +Y + # Need to carry Blender --> glTF after Yup2Zup + gltf.camera_correction = Quaternion((2**0.5 / 2, 2**0.5 / 2, 0.0, 0.0)) + + else: + def convert_loc(x): return Vector(x) + def convert_quat(q): return Quaternion([q[3], q[0], q[1], q[2]]) + def convert_scale(s): return Vector(s) + + def convert_matrix(m): + return Matrix([m[0::4], m[1::4], m[2::4], m[3::4]]) + + def convert_locs_batch(_locs): return + def convert_normals_batch(_ns): return + + # Same convention, no correction needed. 
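+            # (A sketch of the difference, assuming the default unit scale: in the
+            #  Z-up branch above a glTF translation (1, 2, 3) becomes (1, -3, 2),
+            #  while this debug branch imports it unchanged.)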
+ gltf.camera_correction = None + + gltf.loc_gltf_to_blender = convert_loc + gltf.locs_batch_gltf_to_blender = convert_locs_batch + gltf.quaternion_gltf_to_blender = convert_quat + gltf.normals_batch_gltf_to_blender = convert_normals_batch + gltf.scale_gltf_to_blender = convert_scale + gltf.matrix_gltf_to_blender = convert_matrix + + @staticmethod + def pre_compute(gltf): + """Pre compute, just before creation.""" + # default scene used + gltf.blender_scene = None + + # Check if there is animation on object + # Init is to False, and will be set to True during creation + gltf.animation_object = False + + # Blender material + if gltf.data.materials: + for material in gltf.data.materials: + material.blender_material = {} + + # images + if gltf.data.images is not None: + for img in gltf.data.images: + img.blender_image_name = None + + if gltf.data.nodes is None: + # Something is wrong in file, there is no nodes + return + + for node in gltf.data.nodes: + # Weight animation management + node.weight_animation = False + + # Meshes initialization + if gltf.data.meshes: + for mesh in gltf.data.meshes: + mesh.blender_name = {} # caches Blender mesh name + + if gltf.data.extensions_used is not None and "KHR_animation_pointer" in gltf.data.extensions_used: + # Meshes initialization + if gltf.data.meshes: + for mesh in gltf.data.meshes: + mesh.blender_name = {} # caches Blender mesh name + mesh.weight_animation_on_mesh = None # For KHR_animation_pointer, weights on mesh + + for cam in gltf.data.cameras if gltf.data.cameras is not None else []: + cam.animations = {} + + for mat in gltf.data.materials if gltf.data.materials is not None else []: + mat.animations = {} + if mat.normal_texture is not None: + mat.normal_texture.animations = {} + if mat.occlusion_texture is not None: + mat.occlusion_texture.animations = {} + if mat.emissive_texture is not None: + mat.emissive_texture.animations = {} + if mat.pbr_metallic_roughness is not None: + mat.pbr_metallic_roughness.animations = {} + + for ext in [ + "KHR_materials_emissive_strength", + # "KHR_materials_iridescence", + "KHR_materials_volume", + "KHR_materials_ior", + "KHR_materials_transmission", + "KHR_materials_clearcoat", + "KHR_materials_sheen", + "KHR_materials_specular", + "KHR_materials_anisotropy" + ]: + if mat.extensions is not None and ext in mat.extensions: + mat.extensions[ext]["animations"] = {} + + texs = [ + mat.emissive_texture, + mat.normal_texture, + mat.occlusion_texture, + mat.pbr_metallic_roughness.base_color_texture if mat.pbr_metallic_roughness is not None else None, + mat.pbr_metallic_roughness.metallic_roughness_texture if mat.pbr_metallic_roughness is not None else None, + ] + + for tex in [t for t in texs if t is not None]: + if tex.extensions is not None and "KHR_texture_transform" in tex.extensions: + tex.extensions["KHR_texture_transform"]["animations"] = {} + + texs_ext = [ + mat.extensions["KHR_materials_volume"].get("thicknessTexture") if mat.extensions and "KHR_materials_volume" in mat.extensions else None, + mat.extensions["KHR_materials_transmission"].get("transmissionTexture") if mat.extensions and "KHR_materials_transmission" in mat.extensions else None, + mat.extensions["KHR_materials_specular"].get("specularTexture") if mat.extensions and "KHR_materials_specular" in mat.extensions else None, + mat.extensions["KHR_materials_specular"].get("specularColorTexture") if mat.extensions and "KHR_materials_specular" in mat.extensions else None, + mat.extensions["KHR_materials_sheen"].get("sheenColorTexture") if 
mat.extensions and "KHR_materials_sheen" in mat.extensions else None, + mat.extensions["KHR_materials_sheen"].get("sheenRoughnessTexture") if mat.extensions and "KHR_materials_sheen" in mat.extensions else None, + mat.extensions["KHR_materials_clearcoat"].get("clearcoatTexture") if mat.extensions and "KHR_materials_clearcoat" in mat.extensions else None, + mat.extensions["KHR_materials_clearcoat"].get("clearcoatRoughnessTexture") if mat.extensions and "KHR_materials_clearcoat" in mat.extensions else None, + mat.extensions["KHR_materials_clearcoat"].get("clearcoatNormalTexture") if mat.extensions and "KHR_materials_clearcoat" in mat.extensions else None, + mat.extensions["KHR_materials_anisotropy"].get("anisotropyTexture") if mat.extensions and "KHR_materials_anisotropy" in mat.extensions else None, + ] + + for tex in [t for t in texs_ext if t is not None]: + if 'extensions' in tex and "KHR_texture_transform" in tex['extensions']: + tex['extensions']["KHR_texture_transform"]["animations"] = {} + + for light in gltf.data.extensions["KHR_lights_punctual"]["lights"] \ + if gltf.data.extensions is not None and "KHR_lights_punctual" in gltf.data.extensions \ + and "lights" in gltf.data.extensions["KHR_lights_punctual"] else []: + light["animations"] = {} + if "spot" in light: + light["spot"]["animations"] = {} + + # Dispatch animation + if gltf.data.animations: + for node in gltf.data.nodes: + node.animations = {} + + track_names = set() + for anim_idx, anim in enumerate(gltf.data.animations): + # Pick pair-wise unique name for each animation to use as a name + # for its NLA tracks. + desired_name = anim.name or "Anim_%d" % anim_idx + # TRS animations & Pointer will be created as separate tracks + anim.track_name = BlenderGlTF.find_unused_name(track_names, desired_name) + track_names.add(anim.track_name) + + for channel_idx, channel in enumerate(anim.channels): + if channel.target.node is None: + # Manage KHR_animation_pointer for node TRS and weights + BlenderGlTF.dispatch_animation_pointer(gltf, anim, anim_idx, channel, channel_idx) + + # Core glTF animations + else: + if anim_idx not in gltf.data.nodes[channel.target.node].animations.keys(): + gltf.data.nodes[channel.target.node].animations[anim_idx] = [] + gltf.data.nodes[channel.target.node].animations[anim_idx].append(channel_idx) + # Manage node with animation on weights, that are animated in meshes in Blender (ShapeKeys) + if channel.target.path == "weights": + gltf.data.nodes[channel.target.node].weight_animation = True + + # For KHR_animation_pointer, weight on meshes + # We broadcast mesh weight animations to corresponding nodes + if gltf.data.extensions_used is not None and "KHR_animation_pointer" in gltf.data.extensions_used: + for node in gltf.data.nodes: + if node.mesh is not None and gltf.data.meshes[node.mesh].weight_animation_on_mesh is not None: + anim_idx, channel_idx = gltf.data.meshes[node.mesh].weight_animation_on_mesh + if anim_idx not in node.animations.keys(): + node.animations[anim_idx] = [] + node.animations[anim_idx].append(channel_idx) + node.weight_animation = True + + # Calculate names for each mesh's shapekeys + for mesh in gltf.data.meshes or []: + mesh.shapekey_names = [] + used_names = set(['Basis']) # Be sure to not use 'Basis' name at import, this is a reserved name + + # Look for primitive with morph targets + for prim in (mesh.primitives or []): + if not prim.targets: + continue + + for sk, _ in enumerate(prim.targets): + # Skip shape key for target that doesn't morph POSITION + morphs_position = any( + 
(prim.targets and 'POSITION' in prim.targets[sk]) + for prim in mesh.primitives + ) + if not morphs_position: + mesh.shapekey_names.append(None) + continue + + shapekey_name = None + + # Try to use name from extras.targetNames + try: + shapekey_name = str(mesh.extras['targetNames'][sk]) + if shapekey_name == "": # Issue when shapekey name is empty + shapekey_name = None + except Exception: + pass + + # Try to get name from first primitive's POSITION accessor + if shapekey_name is None: + try: + shapekey_name = gltf.data.accessors[mesh.primitives[0].targets[sk]['POSITION']].name + except Exception: + pass + + if shapekey_name is None: + shapekey_name = "target_" + str(sk) + + shapekey_name = BlenderGlTF.find_unused_name(used_names, shapekey_name) + used_names.add(shapekey_name) + + mesh.shapekey_names.append(shapekey_name) + + break + + # Manage KHR_materials_variants + BlenderGlTF.manage_material_variants(gltf) + + @staticmethod + def dispatch_animation_pointer(gltf, anim, anim_idx, channel, channel_idx): + if channel.target.path != "pointer": + return + + if channel.target.extensions is None: + return + + if "KHR_animation_pointer" not in channel.target.extensions and "pointer" not in channel.target.extensions[ + "KHR_animation_pointer"]: + return + + pointer_tab = channel.target.extensions["KHR_animation_pointer"]["pointer"].split("/") + + ### Nodes and Meshes + if len(pointer_tab) >= 4 and pointer_tab[1] == "nodes" and pointer_tab[3] in [ + "translation", "rotation", "scale", "weights"]: + if anim_idx not in gltf.data.nodes[int(pointer_tab[2])].animations.keys(): + gltf.data.nodes[int(pointer_tab[2])].animations[anim_idx] = [] + gltf.data.nodes[int(pointer_tab[2])].animations[anim_idx].append(channel_idx) + if pointer_tab[3] == "weights": + gltf.data.nodes[int(pointer_tab[2])].weight_animation = True + elif len(pointer_tab) >= 4 and pointer_tab[1] == "meshes" and pointer_tab[3] == "weights": + gltf.data.meshes[int(pointer_tab[2])].weight_animation_on_mesh = (anim_idx, channel_idx) + + # Camera + if len(pointer_tab) == 5 and pointer_tab[1] == "cameras" and \ + pointer_tab[3] in ["perspective"] and \ + pointer_tab[4] in ["yfov", "znear", "zfar"]: + + if anim_idx not in gltf.data.cameras[int(pointer_tab[2])].animations.keys(): + gltf.data.cameras[int(pointer_tab[2])].animations[anim_idx] = [] + gltf.data.cameras[int(pointer_tab[2])].animations[anim_idx].append(channel_idx) + + if len(pointer_tab) == 5 and pointer_tab[1] == "cameras" and \ + pointer_tab[3] in ["orthographic"] and \ + pointer_tab[4] in ["ymag", "xmag"]: + + if anim_idx not in gltf.data.cameras[int(pointer_tab[2])].animations.keys(): + gltf.data.cameras[int(pointer_tab[2])].animations[anim_idx] = [] + gltf.data.cameras[int(pointer_tab[2])].animations[anim_idx].append(channel_idx) + + # Light + if len(pointer_tab) == 6 and pointer_tab[1] == "extensions" and \ + pointer_tab[2] == "KHR_lights_punctual" and \ + pointer_tab[3] == "lights" and \ + pointer_tab[5] in ["intensity", "color", "range"]: + + if anim_idx not in gltf.data.extensions["KHR_lights_punctual"]["lights"][int( + pointer_tab[4])]["animations"].keys(): + gltf.data.extensions["KHR_lights_punctual"]["lights"][int(pointer_tab[4])]["animations"][anim_idx] = [] + gltf.data.extensions["KHR_lights_punctual"]["lights"][int( + pointer_tab[4])]["animations"][anim_idx].append(channel_idx) + + if len(pointer_tab) == 6 and pointer_tab[1] == "extensions" and \ + pointer_tab[2] == "KHR_lights_punctual" and \ + pointer_tab[3] == "lights" and \ + pointer_tab[5] in 
["spot.outerConeAngle", "spot.innerConeAngle"]: + + if anim_idx not in gltf.data.extensions["KHR_lights_punctual"]["lights"][int( + pointer_tab[4])]["animations"].keys(): + gltf.data.extensions["KHR_lights_punctual"]["lights"][int(pointer_tab[4])]["animations"][anim_idx] = [] + gltf.data.extensions["KHR_lights_punctual"]["lights"][int( + pointer_tab[4])]["animations"][anim_idx].append(channel_idx) + + # Materials + if len(pointer_tab) == 4 and pointer_tab[1] == "materials" and \ + pointer_tab[3] in ["emissiveFactor", "alphaCutoff"]: + + if anim_idx not in gltf.data.materials[int(pointer_tab[2])].animations.keys(): + gltf.data.materials[int(pointer_tab[2])].animations[anim_idx] = [] + gltf.data.materials[int(pointer_tab[2])].animations[anim_idx].append(channel_idx) + + if len(pointer_tab) == 7 and pointer_tab[1] == "materials" and \ + pointer_tab[3] == "emissiveTexture" and \ + pointer_tab[4] == "extensions" and \ + pointer_tab[5] == "KHR_texture_transform" and \ + pointer_tab[6] in ["scale", "offset"]: + + if anim_idx not in gltf.data.materials[int( + pointer_tab[2])].emissive_texture.extensions["KHR_texture_transform"]["animations"].keys(): + gltf.data.materials[int( + pointer_tab[2])].emissive_texture.extensions["KHR_texture_transform"]["animations"][anim_idx] = [] + gltf.data.materials[int( + pointer_tab[2])].emissive_texture.extensions["KHR_texture_transform"]["animations"][anim_idx].append(channel_idx) + + if len(pointer_tab) == 5 and pointer_tab[1] == "materials" and \ + pointer_tab[3] == "normalTexture" and \ + pointer_tab[4] == "scale": + + if anim_idx not in gltf.data.materials[int(pointer_tab[2])].normal_texture.animations.keys(): + gltf.data.materials[int(pointer_tab[2])].normal_texture.animations[anim_idx] = [] + gltf.data.materials[int(pointer_tab[2])].normal_texture.animations[anim_idx].append(channel_idx) + + if len(pointer_tab) == 7 and pointer_tab[1] == "materials" and \ + pointer_tab[3] == "normalTexture" and \ + pointer_tab[4] == "extensions" and \ + pointer_tab[5] == "KHR_texture_transform" and \ + pointer_tab[6] in ["scale", "offset"]: + + if anim_idx not in gltf.data.materials[int( + pointer_tab[2])].normal_texture.extensions["KHR_texture_transform"]["animations"].keys(): + gltf.data.materials[int(pointer_tab[2]) + ].normal_texture.extensions["KHR_texture_transform"]["animations"][anim_idx] = [] + gltf.data.materials[int( + pointer_tab[2])].normal_texture.extensions["KHR_texture_transform"]["animations"][anim_idx].append(channel_idx) + + if len(pointer_tab) == 5 and pointer_tab[1] == "materials" and \ + pointer_tab[3] == "occlusionTexture" and \ + pointer_tab[4] == "strength": + + if anim_idx not in gltf.data.materials[int(pointer_tab[2])].occlusion_texture.animations.keys(): + gltf.data.materials[int(pointer_tab[2])].occlusion_texture.animations[anim_idx] = [] + gltf.data.materials[int(pointer_tab[2])].occlusion_texture.animations[anim_idx].append(channel_idx) + + if len(pointer_tab) == 7 and pointer_tab[1] == "materials" and \ + pointer_tab[3] == "occlusionTexture" and \ + pointer_tab[4] == "extensions" and \ + pointer_tab[5] == "KHR_texture_transform" and \ + pointer_tab[6] in ["scale", "offset"]: + + if anim_idx not in gltf.data.materials[int( + pointer_tab[2])].occlusion_texture.extensions["KHR_texture_transform"]["animations"].keys(): + gltf.data.materials[int( + pointer_tab[2])].occlusion_texture.extensions["KHR_texture_transform"]["animations"][anim_idx] = [] + gltf.data.materials[int( + 
pointer_tab[2])].occlusion_texture.extensions["KHR_texture_transform"]["animations"][anim_idx].append(channel_idx) + + if len(pointer_tab) == 9 and pointer_tab[1] == "materials" and \ + pointer_tab[-1] in ["scale", "offset"] and \ + pointer_tab[-2] == "KHR_texture_transform" and \ + pointer_tab[-3] == "extensions" and \ + pointer_tab[3] == "extensions": + + if anim_idx not in gltf.data.materials[int( + pointer_tab[2])].extensions[pointer_tab[4]][pointer_tab[5]]['extensions']["KHR_texture_transform"]["animations"].keys(): + gltf.data.materials[int(pointer_tab[2])].extensions[pointer_tab[4]][pointer_tab[5] + ]['extensions']["KHR_texture_transform"]["animations"][anim_idx] = [] + gltf.data.materials[int(pointer_tab[2])].extensions[pointer_tab[4]][pointer_tab[5] + ]['extensions']["KHR_texture_transform"]["animations"][anim_idx].append(channel_idx) + + if len(pointer_tab) == 5 and pointer_tab[1] == "materials" and \ + pointer_tab[3] == "pbrMetallicRoughness" and \ + pointer_tab[4] in ["baseColorFactor", "roughnessFactor", "metallicFactor"]: + + # This can be unlit (baseColorFactor) or pbr + + if anim_idx not in gltf.data.materials[int(pointer_tab[2])].pbr_metallic_roughness.animations.keys(): + gltf.data.materials[int(pointer_tab[2])].pbr_metallic_roughness.animations[anim_idx] = [] + gltf.data.materials[int(pointer_tab[2])].pbr_metallic_roughness.animations[anim_idx].append(channel_idx) + + if len(pointer_tab) == 8 and pointer_tab[1] == "materials" and \ + pointer_tab[3] == "pbrMetallicRoughness" and \ + pointer_tab[4] == "baseColorTexture" and \ + pointer_tab[5] == "extensions" and \ + pointer_tab[6] == "KHR_texture_transform" and \ + pointer_tab[7] in ["scale", "offset"]: + + # This can be unlit or pbr + + if anim_idx not in gltf.data.materials[int( + pointer_tab[2])].pbr_metallic_roughness.base_color_texture.extensions["KHR_texture_transform"]["animations"].keys(): + gltf.data.materials[int( + pointer_tab[2])].pbr_metallic_roughness.base_color_texture.extensions["KHR_texture_transform"]["animations"][anim_idx] = [] + gltf.data.materials[int( + pointer_tab[2])].pbr_metallic_roughness.base_color_texture.extensions["KHR_texture_transform"]["animations"][anim_idx].append(channel_idx) + + if len(pointer_tab) == 8 and pointer_tab[1] == "materials" and \ + pointer_tab[3] == "pbrMetallicRoughness" and \ + pointer_tab[4] == "metallicRoughnessTexture" and \ + pointer_tab[5] == "extensions" and \ + pointer_tab[6] == "KHR_texture_transform" and \ + pointer_tab[7] in ["scale", "offset"]: + + if anim_idx not in gltf.data.materials[int( + pointer_tab[2])].pbr_metallic_roughness.metallic_roughness_texture.extensions["KHR_texture_transform"]["animations"].keys(): + gltf.data.materials[int( + pointer_tab[2])].pbr_metallic_roughness.metallic_roughness_texture.extensions["KHR_texture_transform"]["animations"][anim_idx] = [] + gltf.data.materials[int( + pointer_tab[2])].pbr_metallic_roughness.metallic_roughness_texture.extensions["KHR_texture_transform"]["animations"][anim_idx].append(channel_idx) + + if len(pointer_tab) == 6 and pointer_tab[1] == "materials" and \ + pointer_tab[3] == "extensions" and \ + pointer_tab[4] == "KHR_materials_emissive_strength" and \ + pointer_tab[5] == "emissiveStrength": + + if anim_idx not in gltf.data.materials[int( + pointer_tab[2])].extensions["KHR_materials_emissive_strength"]["animations"].keys(): + gltf.data.materials[int(pointer_tab[2]) + ].extensions["KHR_materials_emissive_strength"]["animations"][anim_idx] = [] + 
gltf.data.materials[int(pointer_tab[2])].extensions["KHR_materials_emissive_strength"]["animations"][anim_idx].append( + channel_idx) + + if len(pointer_tab) == 6 and pointer_tab[1] == "materials" and \ + pointer_tab[3] == "extensions" and \ + pointer_tab[4] == "KHR_materials_volume" and \ + pointer_tab[5] in ["thicknessFactor", "attenuationDistance", "attenuationColor"]: + + if anim_idx not in gltf.data.materials[int( + pointer_tab[2])].extensions["KHR_materials_volume"]["animations"].keys(): + gltf.data.materials[int(pointer_tab[2])].extensions["KHR_materials_volume"]["animations"][anim_idx] = [] + gltf.data.materials[int(pointer_tab[2]) + ].extensions["KHR_materials_volume"]["animations"][anim_idx].append(channel_idx) + + if len(pointer_tab) == 6 and pointer_tab[1] == "materials" and \ + pointer_tab[3] == "extensions" and \ + pointer_tab[4] == "KHR_materials_ior" and \ + pointer_tab[5] == "ior": + + if anim_idx not in gltf.data.materials[int( + pointer_tab[2])].extensions["KHR_materials_ior"]["animations"].keys(): + gltf.data.materials[int(pointer_tab[2])].extensions["KHR_materials_ior"]["animations"][anim_idx] = [] + gltf.data.materials[int(pointer_tab[2]) + ].extensions["KHR_materials_ior"]["animations"][anim_idx].append(channel_idx) + + if len(pointer_tab) == 6 and pointer_tab[1] == "materials" and \ + pointer_tab[3] == "extensions" and \ + pointer_tab[4] == "KHR_materials_transmission" and \ + pointer_tab[5] == "transmissionFactor": + + if anim_idx not in gltf.data.materials[int( + pointer_tab[2])].extensions["KHR_materials_transmission"]["animations"].keys(): + gltf.data.materials[int(pointer_tab[2]) + ].extensions["KHR_materials_transmission"]["animations"][anim_idx] = [] + gltf.data.materials[int(pointer_tab[2])].extensions["KHR_materials_transmission"]["animations"][anim_idx].append( + channel_idx) + + if len(pointer_tab) == 7 and pointer_tab[1] == "materials" and \ + pointer_tab[3] == "extensions" and \ + pointer_tab[4] == "KHR_materials_clearcoat" and \ + pointer_tab[5] == "clearcoatNormalTexture" and \ + pointer_tab[6] == "scale": + if anim_idx not in gltf.data.materials[int( + pointer_tab[2])].extensions["KHR_materials_clearcoat"]["animations"].keys(): + gltf.data.materials[int(pointer_tab[2]) + ].extensions["KHR_materials_clearcoat"]["animations"][anim_idx] = [] + gltf.data.materials[int(pointer_tab[2]) + ].extensions["KHR_materials_clearcoat"]["animations"][anim_idx].append(channel_idx) + + if len(pointer_tab) == 6 and pointer_tab[1] == "materials" and \ + pointer_tab[3] == "extensions" and \ + pointer_tab[4] == "KHR_materials_clearcoat" and \ + pointer_tab[5] in ["clearcoatFactor", "clearcoatRoughnessFactor"]: + if anim_idx not in gltf.data.materials[int( + pointer_tab[2])].extensions["KHR_materials_clearcoat"]["animations"].keys(): + gltf.data.materials[int(pointer_tab[2]) + ].extensions["KHR_materials_clearcoat"]["animations"][anim_idx] = [] + gltf.data.materials[int(pointer_tab[2]) + ].extensions["KHR_materials_clearcoat"]["animations"][anim_idx].append(channel_idx) + + if len(pointer_tab) == 6 and pointer_tab[1] == "materials" and \ + pointer_tab[3] == "extensions" and \ + pointer_tab[4] == "KHR_materials_sheen" and \ + pointer_tab[5] in ["sheenColorFactor", "sheenRoughnessFactor"]: + if anim_idx not in gltf.data.materials[int( + pointer_tab[2])].extensions["KHR_materials_sheen"]["animations"].keys(): + gltf.data.materials[int(pointer_tab[2])].extensions["KHR_materials_sheen"]["animations"][anim_idx] = [] + gltf.data.materials[int(pointer_tab[2]) + 
].extensions["KHR_materials_sheen"]["animations"][anim_idx].append(channel_idx) + + if len(pointer_tab) == 6 and pointer_tab[1] == "materials" and \ + pointer_tab[3] == "extensions" and \ + pointer_tab[4] == "KHR_materials_specular" and \ + pointer_tab[5] in ["specularFactor", "specularColorFactor"]: + if anim_idx not in gltf.data.materials[int( + pointer_tab[2])].extensions["KHR_materials_specular"]["animations"].keys(): + gltf.data.materials[int(pointer_tab[2]) + ].extensions["KHR_materials_specular"]["animations"][anim_idx] = [] + gltf.data.materials[int(pointer_tab[2]) + ].extensions["KHR_materials_specular"]["animations"][anim_idx].append(channel_idx) + + if len(pointer_tab) == 6 and pointer_tab[1] == "materials" and \ + pointer_tab[3] == "extensions" and \ + pointer_tab[4] == "KHR_materials_anisotropy" and \ + pointer_tab[5] in ["anisotropyStrength", "anisotropyRotation"]: + if anim_idx not in gltf.data.materials[int( + pointer_tab[2])].extensions["KHR_materials_anisotropy"]["animations"].keys(): + gltf.data.materials[int(pointer_tab[2]) + ].extensions["KHR_materials_anisotropy"]["animations"][anim_idx] = [] + gltf.data.materials[int(pointer_tab[2]) + ].extensions["KHR_materials_anisotropy"]["animations"][anim_idx].append(channel_idx) + + @staticmethod + def find_unused_name(haystack, desired_name): + """Finds a name not in haystack and <= 63 UTF-8 bytes. + (the limit on the size of a Blender name.) + If a is taken, tries a.001, then a.002, etc. + """ + stem = desired_name[:63] + suffix = '' + cntr = 1 + while True: + name = stem + suffix + + if len(name.encode('utf-8')) > 63: + stem = stem[:-1] + continue + + if name not in haystack: + return name + + suffix = '.%03d' % cntr + cntr += 1 + + @staticmethod + def manage_material_variants(gltf): + if not (gltf.data.extensions is not None and 'KHR_materials_variants' in gltf.data.extensions.keys()): + gltf.KHR_materials_variants = False + return + + gltf.KHR_materials_variants = True + # If there is no KHR_materials_variants data in scene, create it + if bpy.context.preferences.addons['io_scene_gltf2'].preferences.KHR_materials_variants_ui is False: + bpy.context.preferences.addons['io_scene_gltf2'].preferences.KHR_materials_variants_ui = True + # Setting preferences as dirty, to be sure that option is saved + bpy.context.preferences.is_dirty = True + + if len(bpy.data.scenes[0].gltf2_KHR_materials_variants_variants) > 0: + bpy.data.scenes[0].gltf2_KHR_materials_variants_variants.clear() + + for idx_variant, variant in enumerate(gltf.data.extensions['KHR_materials_variants']['variants']): + var = bpy.data.scenes[0].gltf2_KHR_materials_variants_variants.add() + var.name = variant['name'] + var.variant_idx = idx_variant diff --git a/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_image.py b/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_image.py new file mode 100755 index 00000000000..185d6785d7c --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_image.py @@ -0,0 +1,102 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy +import os +from os.path import dirname, join, basename + +from ...io.com.gltf2_io_path import uri_to_path +from ...io.imp.gltf2_io_binary import BinaryData +from ...io.imp.gltf2_io_user_extensions import import_user_extensions + + +# Note that Image is not a glTF2.0 object +class BlenderImage(): + """Manage Image.""" + def __new__(cls, *args, **kwargs): + raise RuntimeError("%s should not be 
instantiated" % cls) + + @staticmethod + def create(gltf, img_idx): + """Image creation.""" + img = gltf.data.images[img_idx] + + if img.blender_image_name is not None: + # Image is already used somewhere + return + + import_user_extensions('gather_import_image_before_hook', gltf, img) + + if img.uri is not None and not img.uri.startswith('data:'): + blender_image = create_from_file(gltf, img_idx) + else: + blender_image = create_from_data(gltf, img_idx) + + if blender_image: + blender_image.alpha_mode = 'CHANNEL_PACKED' + img.blender_image_name = blender_image.name + + import_user_extensions('gather_import_image_after_hook', gltf, img, blender_image) + + +def create_from_file(gltf, img_idx): + # Image stored in a file + + num_images = len(bpy.data.images) + + img = gltf.data.images[img_idx] + + path = join(dirname(gltf.filename), uri_to_path(img.uri)) + path = os.path.abspath(path) + if bpy.data.is_saved and bpy.context.preferences.filepaths.use_relative_paths: + try: + path = bpy.path.relpath(path) + except: + # May happen on Windows if on different drives, eg. C:\ and D:\ + pass + + img_name = img.name or basename(path) + + try: + blender_image = bpy.data.images.load( + path, + check_existing=True, + ) + + needs_pack = gltf.import_settings['import_pack_images'] + if needs_pack: + blender_image.pack() + + except RuntimeError: + gltf.log.error("Missing image file (index %d): %s" % (img_idx, path)) + blender_image = _placeholder_image(img_name, os.path.abspath(path)) + + if len(bpy.data.images) != num_images: # If created a new image + blender_image.name = img_name + + return blender_image + + +def create_from_data(gltf, img_idx): + # Image stored as data => pack + img_data = BinaryData.get_image_data(gltf, img_idx) + if img_data is None: + return + img_name = gltf.data.images[img_idx].name or 'Image_%d' % img_idx + + # Create image, width and height are dummy values + blender_image = bpy.data.images.new(img_name, 8, 8) + # Set packed file data + blender_image.pack(data=img_data.tobytes(), data_len=len(img_data)) + blender_image.source = 'FILE' + + return blender_image + + +def _placeholder_image(name, path): + image = bpy.data.images.new(name, 128, 128) + # allow the path to be resolved later + image.filepath = path + image.source = 'FILE' + return image diff --git a/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_light.py b/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_light.py new file mode 100644 index 00000000000..92dfd3aa08c --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_light.py @@ -0,0 +1,143 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy +from math import pi + +from ...io.imp.gltf2_io_user_extensions import import_user_extensions +from ..com.gltf2_blender_conversion import PBR_WATTS_TO_LUMENS +from ..com.gltf2_blender_extras import set_extras + + +class BlenderLight(): + """Blender Light.""" + def __new__(cls, *args, **kwargs): + raise RuntimeError("%s should not be instantiated" % cls) + + @staticmethod + def create(gltf, vnode, light_id): + """Light creation.""" + pylight = gltf.data.extensions['KHR_lights_punctual']['lights'][light_id] + + import_user_extensions('gather_import_light_before_hook', gltf, vnode, pylight) + + if pylight['type'] == "directional": + light = BlenderLight.create_directional(gltf, light_id) # ...Why not pass the pylight? 
+ elif pylight['type'] == "point": + light = BlenderLight.create_point(gltf, light_id) + elif pylight['type'] == "spot": + light = BlenderLight.create_spot(gltf, light_id) + + if 'color' in pylight.keys(): + light.color = pylight['color'] + + # TODO range + + set_extras(light, pylight.get('extras')) + + pylight['blender_object_data'] = light # Needed in case of KHR_animation_pointer + + return light + + @staticmethod + def create_directional(gltf, light_id): + pylight = gltf.data.extensions['KHR_lights_punctual']['lights'][light_id] + + if 'name' not in pylight.keys(): + pylight['name'] = "Sun" # Uh... Is it okay to mutate the import data? + + sun = bpy.data.lights.new(name=pylight['name'], type="SUN") + + if 'intensity' in pylight.keys(): + sun.energy = BlenderLight.calc_energy_directional(gltf, pylight['intensity']) + + return sun + + @staticmethod + def calc_energy_directional(gltf, pylight_data): + if gltf.import_settings['export_import_convert_lighting_mode'] == 'SPEC': + return pylight_data / PBR_WATTS_TO_LUMENS + elif gltf.import_settings['export_import_convert_lighting_mode'] == 'COMPAT': + return pylight_data + elif gltf.import_settings['export_import_convert_lighting_mode'] == 'RAW': + return pylight_data + else: + raise ValueError(gltf.import_settings['export_import_convert_lighting_mode']) + + @staticmethod + def calc_energy_pointlike(gltf, pylight_data): + if gltf.import_settings['export_import_convert_lighting_mode'] == 'SPEC': + return pylight_data / PBR_WATTS_TO_LUMENS * 4 * pi + elif gltf.import_settings['export_import_convert_lighting_mode'] == 'COMPAT': + return pylight_data * 4 * pi + elif gltf.import_settings['export_import_convert_lighting_mode'] == 'RAW': + return pylight_data + else: + raise ValueError(gltf.import_settings['export_import_convert_lighting_mode']) + + @staticmethod + def create_point(gltf, light_id): + pylight = gltf.data.extensions['KHR_lights_punctual']['lights'][light_id] + + if 'name' not in pylight.keys(): + pylight['name'] = "Point" + + point = bpy.data.lights.new(name=pylight['name'], type="POINT") + + if 'intensity' in pylight.keys(): + point.energy = BlenderLight.calc_energy_pointlike(gltf, pylight['intensity']) + + return point + + @staticmethod + def create_spot(gltf, light_id): + pylight = gltf.data.extensions['KHR_lights_punctual']['lights'][light_id] + + if 'name' not in pylight.keys(): + pylight['name'] = "Spot" + + spot = bpy.data.lights.new(name=pylight['name'], type="SPOT") + + # Angles + if 'spot' in pylight.keys() and 'outerConeAngle' in pylight['spot']: + spot.spot_size = BlenderLight.calc_spot_cone_outer(gltf, pylight['spot']['outerConeAngle']) + else: + spot.spot_size = pi / 2 + + if 'spot' in pylight.keys() and 'innerConeAngle' in pylight['spot']: + spot.spot_blend = BlenderLight.calc_spot_cone_inner( + gltf, pylight['spot']['outerConeAngle'], pylight['spot']['innerConeAngle']) + else: + spot.spot_blend = 1.0 + + if 'intensity' in pylight.keys(): + spot.energy = BlenderLight.calc_energy_pointlike(gltf, pylight['intensity']) + + # Store multiple channel data, as we will need all channels to convert to + # blender data when animated by KHR_animation_pointer + if gltf.data.extensions_used is not None and "KHR_animation_pointer" in gltf.data.extensions_used: + if len(pylight['animations']) > 0: + for anim_idx in pylight['animations'].keys(): + for channel_idx in pylight['animations'][anim_idx]: + channel = gltf.data.animations[anim_idx].channels[channel_idx] + pointer_tab = 
channel.target.extensions["KHR_animation_pointer"]["pointer"].split("/") + if len(pointer_tab) == 6 and pointer_tab[1] == "extensions" and \ + pointer_tab[2] == "KHR_lights_punctual" and \ + pointer_tab[3] == "lights" and \ + pointer_tab[5] in ["spot.innerConeAngle", "spot.outerConeAngle"]: + # Store multiple channel data, as we will need all channels to convert to + # blender data when animated + if "multiple_channels" not in pylight.keys(): + pylight['multiple_channels'] = {} + pylight['multiple_channels'][pointer_tab[5]] = (anim_idx, channel_idx) + + return spot + + @staticmethod + def calc_spot_cone_outer(gltf, outercone): + return outercone * 2 + + @staticmethod + def calc_spot_cone_inner(gltf, outercone, innercone): + return 1 - (innercone / outercone) diff --git a/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_material.py b/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_material.py new file mode 100755 index 00000000000..17388e58637 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_material.py @@ -0,0 +1,99 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy + +from ...io.imp.gltf2_io_user_extensions import import_user_extensions +from ..com.gltf2_blender_extras import set_extras +from .gltf2_blender_pbrMetallicRoughness import MaterialHelper, pbr_metallic_roughness +from .gltf2_blender_KHR_materials_pbrSpecularGlossiness import pbr_specular_glossiness +from .gltf2_blender_KHR_materials_unlit import unlit + + +class BlenderMaterial(): + """Blender Material.""" + def __new__(cls, *args, **kwargs): + raise RuntimeError("%s should not be instantiated" % cls) + + @staticmethod + def create(gltf, material_idx, vertex_color): + """Material creation.""" + pymaterial = gltf.data.materials[material_idx] + + import_user_extensions('gather_import_material_before_hook', gltf, pymaterial, vertex_color) + + name = pymaterial.name + if name is None: + name = "Material_" + str(material_idx) + + mat = bpy.data.materials.new(name) + pymaterial.blender_material[vertex_color] = mat.name + + set_extras(mat, pymaterial.extras) + BlenderMaterial.set_double_sided(pymaterial, mat) + BlenderMaterial.set_alpha_mode(pymaterial, mat) + BlenderMaterial.set_viewport_color(pymaterial, mat, vertex_color) + + mat.use_nodes = True + while mat.node_tree.nodes: # clear all nodes + mat.node_tree.nodes.remove(mat.node_tree.nodes[0]) + + mh = MaterialHelper(gltf, pymaterial, mat, vertex_color) + + exts = pymaterial.extensions or {} + if 'KHR_materials_unlit' in exts: + unlit(mh) + pymaterial.pbr_metallic_roughness.blender_nodetree = mat.node_tree # Used in case of for KHR_animation_pointer + # Used in case of for KHR_animation_pointer #TODOPointer Vertex Color... + pymaterial.pbr_metallic_roughness.blender_mat = mat + elif 'KHR_materials_pbrSpecularGlossiness' in exts: + pbr_specular_glossiness(mh) + else: + pbr_metallic_roughness(mh) + pymaterial.pbr_metallic_roughness.blender_nodetree = mat.node_tree # Used in case of for KHR_animation_pointer + # Used in case of for KHR_animation_pointer #TODOPointer Vertex Color... 
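+            # (Both references are kept so that KHR_animation_pointer channels can
+            #  later resolve sockets on the node tree and stash actions on the material.)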
+ pymaterial.pbr_metallic_roughness.blender_mat = mat + + # Manage KHR_materials_variants + # We need to store link between material idx in glTF and Blender Material id + if gltf.KHR_materials_variants is True: + gltf.variant_mapping[str(material_idx) + str(vertex_color)] = mat + + pymaterial.blender_nodetree = mat.node_tree # Used in case of for KHR_animation_pointer + pymaterial.blender_mat = mat # Used in case of for KHR_animation_pointer #TODOPointer Vertex Color... + + import_user_extensions('gather_import_material_after_hook', gltf, pymaterial, vertex_color, mat) + + @staticmethod + def set_double_sided(pymaterial, mat): + mat.use_backface_culling = (pymaterial.double_sided != True) + + @staticmethod + def set_alpha_mode(pymaterial, mat): + alpha_mode = pymaterial.alpha_mode + if alpha_mode == 'BLEND': + mat.blend_method = 'BLEND' + elif alpha_mode == 'MASK': + mat.blend_method = 'CLIP' + alpha_cutoff = pymaterial.alpha_cutoff if pymaterial.alpha_cutoff is not None else 0.5 + mat.alpha_threshold = alpha_cutoff + + @staticmethod + def set_viewport_color(pymaterial, mat, vertex_color): + # If there is no texture and no vertex color, use the base color as + # the color for the Solid view. + if vertex_color: + return + + exts = pymaterial.extensions or {} + if 'KHR_materials_pbrSpecularGlossiness' in exts: + # TODO + return + else: + pbr = pymaterial.pbr_metallic_roughness + if pbr is None or pbr.base_color_texture is not None: + return + color = pbr.base_color_factor or [1, 1, 1, 1] + + mat.diffuse_color = color diff --git a/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_material_utils.py b/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_material_utils.py new file mode 100644 index 00000000000..10f910584ef --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_material_utils.py @@ -0,0 +1,196 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +from ...io.com.gltf2_io import ( + TextureInfo, + MaterialNormalTextureInfoClass, + MaterialPBRMetallicRoughness, +) +from .gltf2_blender_texture import texture + + +class MaterialHelper: + """Helper class. Stores material stuff to be passed around everywhere.""" + + def __init__(self, gltf, pymat, mat, vertex_color): + self.gltf = gltf + self.pymat = pymat + self.mat = mat + self.node_tree = mat.node_tree + self.nodes = mat.node_tree.nodes + self.links = mat.node_tree.links + self.vertex_color = vertex_color + if pymat.pbr_metallic_roughness is None: + pymat.pbr_metallic_roughness = \ + MaterialPBRMetallicRoughness.from_dict({}) + # We need to initialize the animations array, for KHR_animation_pointer + pymat.pbr_metallic_roughness.animations = [] + self.settings_node = None + + def is_opaque(self): + alpha_mode = self.pymat.alpha_mode + return alpha_mode is None or alpha_mode == 'OPAQUE' + + def needs_emissive(self): + return ( + self.pymat.emissive_texture is not None or + (self.pymat.emissive_factor or [0, 0, 0]) != [0, 0, 0] + ) + + def get_ext(self, ext_name, default=None): + if not self.pymat.extensions: + return default + return self.pymat.extensions.get(ext_name, default) + + +# Creates nodes for multiplying a texture channel and scalar factor. 
+# [Texture] => [Sep RGB] => [Mul Factor] => socket +def scalar_factor_and_texture( + mh: MaterialHelper, + location, + label, + socket, # socket to connect to + factor, # scalar factor + tex_info, # texture + channel, # texture channel to use (0-4) + force_mix_node=False, # Needed for KHR_animation_pointer +): + if isinstance(tex_info, dict): + tex_info = TextureInfo.from_dict(tex_info) + + x, y = location + + if socket is None: + return + + if tex_info is None: + socket.default_value = factor + return + + if factor != 1.0 or force_mix_node: + node = mh.nodes.new('ShaderNodeMath') + node.label = f'{label} Factor' + node.location = x - 140, y + node.operation = 'MULTIPLY' + # Outputs + mh.links.new(socket, node.outputs[0]) + # Inputs + socket = node.inputs[0] + node.inputs[1].default_value = factor + + x -= 200 + + if channel != 4: + # Separate RGB + node = mh.nodes.new('ShaderNodeSeparateColor') + node.location = x - 150, y - 75 + # Outputs + mh.links.new(socket, node.outputs[channel]) + # Inputs + socket = node.inputs[0] + + x -= 200 + + texture( + mh, + tex_info=tex_info, + label=label.upper(), + location=(x, y), + is_data=channel < 4, + color_socket=socket if channel != 4 else None, + alpha_socket=socket if channel == 4 else None, + ) + + +# Creates nodes for multiplying a texture color and color factor. +# [Texture] => [Mix Factor] => socket +def color_factor_and_texture( + mh: MaterialHelper, + location, + label, + socket, # socket to connect to + factor, # color factor + tex_info, # texture + force_mix_node=False, # Needed for KHR_animation_pointer +): + if isinstance(tex_info, dict): + tex_info = TextureInfo.from_dict(tex_info) + + x, y = location + + if socket is None: + return + + if tex_info is None: + socket.default_value = [*factor, 1] + return + + if factor != [1, 1, 1] or force_mix_node: + node = mh.nodes.new('ShaderNodeMix') + node.data_type = 'RGBA' + node.label = f'{label} Factor' + node.location = x - 140, y + node.blend_type = 'MULTIPLY' + # Outputs + mh.links.new(socket, node.outputs[2]) + # Inputs + node.inputs['Factor'].default_value = 1 + socket = node.inputs[6] + node.inputs[7].default_value = [*factor, 1] + + x -= 200 + + texture( + mh, + tex_info=tex_info, + label=label.upper(), + location=(x, y), + is_data=False, + color_socket=socket, + ) + + +# [Texture] => [Normal Map] => socket +def normal_map( + mh: MaterialHelper, + location, + label, + socket, + tex_info, +): + if isinstance(tex_info, dict): + tex_info = MaterialNormalTextureInfoClass.from_dict(tex_info) + + if not tex_info: + return + + x, y = location + + # Normal map + node = mh.nodes.new('ShaderNodeNormalMap') + node.location = x - 150, y - 40 + # Set UVMap + uv_idx = tex_info.tex_coord or 0 + try: + uv_idx = tex_info.extensions['KHR_texture_transform']['texCoord'] + except Exception: + pass + node.uv_map = 'UVMap' if uv_idx == 0 else 'UVMap.%03d' % uv_idx + # Set strength + scale = tex_info.scale + scale = scale if scale is not None else 1 + node.inputs['Strength'].default_value = scale + # Outputs + mh.links.new(socket, node.outputs['Normal']) + + x -= 200 + + texture( + mh, + tex_info=tex_info, + label=label.upper(), + location=(x, y), + is_data=True, + color_socket=node.inputs['Color'], + ) diff --git a/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_mesh.py b/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_mesh.py new file mode 100755 index 00000000000..914e1ecad61 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_mesh.py @@ -0,0 +1,863 @@ 
+# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy +from mathutils import Matrix +import numpy as np +from ...io.imp.gltf2_io_user_extensions import import_user_extensions +from ...io.imp.gltf2_io_binary import BinaryData +from ...io.com.gltf2_io_constants import DataType, ComponentType +from ...blender.com.gltf2_blender_conversion import get_attribute_type +from ..com.gltf2_blender_extras import set_extras +from ..com.gltf2_blender_utils import fast_structured_np_unique +from .gltf2_blender_material import BlenderMaterial +from .gltf2_io_draco_compression_extension import decode_primitive + + +class BlenderMesh(): + """Blender Mesh.""" + def __new__(cls, *args, **kwargs): + raise RuntimeError("%s should not be instantiated" % cls) + + @staticmethod + def create(gltf, mesh_idx, skin_idx): + """Mesh creation.""" + return create_mesh(gltf, mesh_idx, skin_idx) + + +# Maximum number of TEXCOORD_n/COLOR_n sets to import +UV_MAX = 8 +COLOR_MAX = 8 + + +def create_mesh(gltf, mesh_idx, skin_idx): + pymesh = gltf.data.meshes[mesh_idx] + + import_user_extensions('gather_import_mesh_before_hook', gltf, pymesh) + + name = pymesh.name or 'Mesh_%d' % mesh_idx + mesh = bpy.data.meshes.new(name) + + # Temporarily parent the mesh to an object. + # This is used to set skin weights and shapekeys. + tmp_ob = None + try: + tmp_ob = bpy.data.objects.new('##gltf-import:tmp-object##', mesh) + do_primitives(gltf, mesh_idx, skin_idx, mesh, tmp_ob) + set_extras(mesh, gltf.data.meshes[mesh_idx].extras, exclude=['targetNames']) + + finally: + if tmp_ob: + bpy.data.objects.remove(tmp_ob) + + import_user_extensions('gather_import_mesh_after_hook', gltf, pymesh, mesh) + + return mesh + + +def do_primitives(gltf, mesh_idx, skin_idx, mesh, ob): + """Put all primitive data into the mesh.""" + pymesh = gltf.data.meshes[mesh_idx] + + # Use a class here, to be able to pass data by reference to hook (to be able to change them inside hook) + class IMPORT_mesh_options: + def __init__(self, skinning: bool = True, skin_into_bind_pose: bool = True): + self.skinning = skinning + self.skin_into_bind_pose = skin_into_bind_pose + + mesh_options = IMPORT_mesh_options() + import_user_extensions('gather_import_mesh_options', gltf, mesh_options, pymesh, skin_idx) + + # Scan the primitives to find out what we need to create + + has_normals = False + num_uvs = 0 + num_cols = 0 + num_joint_sets = 0 + attributes = set({}) + attribute_data = [] + attribute_type = {} + attribute_component_type = {} + + for prim in pymesh.primitives: + if 'POSITION' not in prim.attributes: + continue + + if gltf.import_settings['import_shading'] == "NORMALS": + if 'NORMAL' in prim.attributes: + has_normals = True + + if skin_idx is not None: + i = 0 + while ('JOINTS_%d' % i) in prim.attributes and \ + ('WEIGHTS_%d' % i) in prim.attributes: + i += 1 + num_joint_sets = max(i, num_joint_sets) + + i = 0 + while i < UV_MAX and ('TEXCOORD_%d' % i) in prim.attributes: + i += 1 + num_uvs = max(i, num_uvs) + + i = 0 + while i < COLOR_MAX and ('COLOR_%d' % i) in prim.attributes: + i += 1 + num_cols = max(i, num_cols) + + custom_attrs = [k for k in prim.attributes if k.startswith('_')] + for attr in custom_attrs: + if attr not in attributes: + attribute_type[attr] = gltf.data.accessors[prim.attributes[attr]].type + attribute_component_type[attr] = gltf.data.accessors[prim.attributes[attr]].component_type + attribute_data.append( + np.empty( + 
dtype=ComponentType.to_numpy_dtype(attribute_component_type[attr]), + shape=(0, DataType.num_elements(attribute_type[attr]))) + ) + attributes.update(set(custom_attrs)) + + num_shapekeys = sum(sk_name is not None for sk_name in pymesh.shapekey_names) + + # ------------- + # We'll process all the primitives gathering arrays to feed into the + # various foreach_set function that create the mesh data. + + num_faces = 0 # total number of faces + vert_locs = np.empty(dtype=np.float32, shape=(0, 3)) # coordinate for each vert + vert_normals = np.empty(dtype=np.float32, shape=(0, 3)) # normal for each vert + edge_vidxs = np.array([], dtype=np.uint32) # vertex_index for each loose edge + loop_vidxs = np.array([], dtype=np.uint32) # vertex_index for each loop + loop_uvs = [ + np.empty(dtype=np.float32, shape=(0, 2)) # UV for each loop for each layer + for _ in range(num_uvs) + ] + loop_cols = [ + np.empty(dtype=np.float32, shape=(0, 4)) # color for each loop for each layer + for _ in range(num_cols) + ] + vert_joints = [ + np.empty(dtype=np.uint32, shape=(0, 4)) # 4 joints for each vert for each set + for _ in range(num_joint_sets) + ] + vert_weights = [ + np.empty(dtype=np.float32, shape=(0, 4)) # 4 weights for each vert for each set + for _ in range(num_joint_sets) + ] + sk_vert_locs = [ + np.empty(dtype=np.float32, shape=(0, 3)) # coordinate for each vert for each shapekey + for _ in range(num_shapekeys) + ] + + for prim in pymesh.primitives: + prim.num_faces = 0 + + if 'POSITION' not in prim.attributes: + continue + + vert_index_base = len(vert_locs) + + if prim.extensions is not None and 'KHR_draco_mesh_compression' in prim.extensions: + + gltf.log.info('Draco Decoder: Decode primitive {}'.format(pymesh.name or '[unnamed]')) + decode_primitive(gltf, prim) + + import_user_extensions('gather_import_decode_primitive', gltf, pymesh, prim, skin_idx) + + if prim.indices is not None: + indices = BinaryData.decode_accessor(gltf, prim.indices) + indices = indices.reshape(len(indices)) + else: + num_verts = gltf.data.accessors[prim.attributes['POSITION']].count + indices = np.arange(0, num_verts, dtype=np.uint32) + + mode = 4 if prim.mode is None else prim.mode + points, edges, tris = points_edges_tris(mode, indices) + if points is not None: + indices = points + elif edges is not None: + indices = edges + else: + indices = tris + + # We'll add one vert to the arrays for each index used in indices + unique_indices, inv_indices = np.unique(indices, return_inverse=True) + + vs = BinaryData.decode_accessor(gltf, prim.attributes['POSITION'], cache=True) + vert_locs = np.concatenate((vert_locs, vs[unique_indices])) + + if has_normals: + if 'NORMAL' in prim.attributes: + ns = BinaryData.decode_accessor(gltf, prim.attributes['NORMAL'], cache=True) + ns = ns[unique_indices] + else: + ns = np.zeros((len(unique_indices), 3), dtype=np.float32) + vert_normals = np.concatenate((vert_normals, ns)) + + for i in range(num_joint_sets): + if ('JOINTS_%d' % i) in prim.attributes and ('WEIGHTS_%d' % i) in prim.attributes: + js = BinaryData.decode_accessor(gltf, prim.attributes['JOINTS_%d' % i], cache=True) + ws = BinaryData.decode_accessor(gltf, prim.attributes['WEIGHTS_%d' % i], cache=True) + js = js[unique_indices] + ws = ws[unique_indices] + else: + js = np.zeros((len(unique_indices), 4), dtype=np.uint32) + ws = np.zeros((len(unique_indices), 4), dtype=np.float32) + vert_joints[i] = np.concatenate((vert_joints[i], js)) + vert_weights[i] = np.concatenate((vert_weights[i], ws)) + + sk_i = 0 + for sk, sk_name in 
enumerate(pymesh.shapekey_names): + if sk_name is None: + continue + if prim.targets and 'POSITION' in prim.targets[sk]: + morph_vs = BinaryData.decode_accessor(gltf, prim.targets[sk]['POSITION'], cache=True) + morph_vs = morph_vs[unique_indices] + else: + morph_vs = np.zeros((len(unique_indices), 3), dtype=np.float32) + sk_vert_locs[sk_i] = np.concatenate((sk_vert_locs[sk_i], morph_vs)) + sk_i += 1 + + # inv_indices are the indices into the verts just for this prim; + # calculate indices into the overall verts array + prim_vidxs = inv_indices.astype(np.uint32, copy=False) + prim_vidxs += vert_index_base # offset for verts from previous prims + + if edges is not None: + edge_vidxs = np.concatenate((edge_vidxs, prim_vidxs)) + + if tris is not None: + prim.num_faces = len(indices) // 3 + num_faces += prim.num_faces + + loop_vidxs = np.concatenate((loop_vidxs, prim_vidxs)) + + for uv_i in range(num_uvs): + if ('TEXCOORD_%d' % uv_i) in prim.attributes: + uvs = BinaryData.decode_accessor(gltf, prim.attributes['TEXCOORD_%d' % uv_i], cache=True) + uvs = uvs[indices] + else: + uvs = np.zeros((len(indices), 2), dtype=np.float32) + loop_uvs[uv_i] = np.concatenate((loop_uvs[uv_i], uvs)) + + for col_i in range(num_cols): + if ('COLOR_%d' % col_i) in prim.attributes: + cols = BinaryData.decode_accessor(gltf, prim.attributes['COLOR_%d' % col_i], cache=True) + cols = cols[indices] + if cols.shape[1] == 3: + cols = colors_rgb_to_rgba(cols) + else: + cols = np.ones((len(indices), 4), dtype=np.float32) + loop_cols[col_i] = np.concatenate((loop_cols[col_i], cols)) + + for idx, attr in enumerate(attributes): + if attr in prim.attributes: + attr_data = BinaryData.decode_accessor(gltf, prim.attributes[attr], cache=True) + attribute_data[idx] = np.concatenate((attribute_data[idx], attr_data[unique_indices])) + else: + attr_data = np.zeros( + (len(unique_indices), DataType.num_elements(attribute_type[attr])), + dtype=ComponentType.to_numpy_dtype(attribute_component_type[attr]) + ) + attribute_data[idx] = np.concatenate((attribute_data[idx], attr_data)) + + # Accessors are cached in case they are shared between primitives; clear + # the cache now that all prims are done. 
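+    # (For reference, the np.unique(indices, return_inverse=True) calls above
+    # return the sorted unique vertex indices a primitive uses plus, for every
+    # original index, its position in that unique array -- e.g. [5, 0, 5, 2]
+    # gives unique [0, 2, 5] and inverse [2, 0, 2, 1]; the inverse, offset by
+    # vert_index_base, becomes the per-corner vertex indices.)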
+ gltf.decode_accessor_cache = {} + + if gltf.import_settings['merge_vertices']: + vert_locs, vert_normals, vert_joints, vert_weights, \ + sk_vert_locs, loop_vidxs, edge_vidxs, attribute_data = \ + merge_duplicate_verts( + vert_locs, vert_normals, vert_joints, vert_weights, + sk_vert_locs, loop_vidxs, edge_vidxs, attribute_data + ) + + # --------------- + # Convert all the arrays glTF -> Blender + + # Change from relative to absolute positions for morph locs + for sk_locs in sk_vert_locs: + sk_locs += vert_locs + + gltf.locs_batch_gltf_to_blender(vert_locs) + gltf.normals_batch_gltf_to_blender(vert_normals) + for sk_locs in sk_vert_locs: + gltf.locs_batch_gltf_to_blender(sk_locs) + + if num_joint_sets and mesh_options.skin_into_bind_pose: + skin_into_bind_pose( + gltf, skin_idx, vert_joints, vert_weights, + locs=[vert_locs] + sk_vert_locs, + vert_normals=vert_normals, + ) + + for uvs in loop_uvs: + uvs_gltf_to_blender(uvs) + + # --------------- + # Start creating things + + mesh.vertices.add(len(vert_locs)) + position_attribute = attribute_ensure(mesh.attributes, 'position', 'FLOAT_VECTOR', 'POINT') + position_attribute.data.foreach_set('vector', squish(vert_locs, np.float32)) + + mesh.loops.add(len(loop_vidxs)) + corner_vert_attribute = attribute_ensure(mesh.attributes, '.corner_vert', 'INT', 'CORNER') + corner_vert_attribute.data.foreach_set('value', squish(loop_vidxs, np.intc)) + + mesh.edges.add(len(edge_vidxs) // 2) + edge_verts_attribute = attribute_ensure(mesh.attributes, '.edge_verts', 'INT32_2D', 'EDGE') + edge_verts_attribute.data.foreach_set('value', squish(edge_vidxs, np.intc)) + + mesh.polygons.add(num_faces) + + # All polys are tris + loop_starts = np.arange(0, 3 * num_faces, step=3) + mesh.polygons.foreach_set('loop_start', loop_starts) + + for uv_i in range(num_uvs): + name = 'UVMap' if uv_i == 0 else 'UVMap.%03d' % uv_i + layer = mesh.uv_layers.new(name=name) + + if layer is None: + gltf.log.warning("WARNING: UV map is ignored because the maximum number of UV layers has been reached.") + break + + layer.uv.foreach_set('vector', squish(loop_uvs[uv_i], np.float32)) + + for col_i in range(num_cols): + name = 'Color' if col_i == 0 else 'Color.%03d' % col_i + layer = mesh.color_attributes.new(name, 'BYTE_COLOR', 'CORNER') + + layer.data.foreach_set('color', squish(loop_cols[col_i], np.float32)) + + # Make sure the first Vertex Color Attribute is the rendered one + if num_cols > 0: + mesh.color_attributes.render_color_index = 0 + + # Skinning + # TODO: this is slow :/ + if num_joint_sets and mesh_options.skinning: + pyskin = gltf.data.skins[skin_idx] + for i, node_idx in enumerate(pyskin.joints): + bone = gltf.vnodes[node_idx] + ob.vertex_groups.new(name=bone.blender_bone_name) + + vgs = list(ob.vertex_groups) + + for i in range(num_joint_sets): + js = vert_joints[i].tolist() # tolist() is faster + ws = vert_weights[i].tolist() + for vi in range(len(vert_locs)): + w0, w1, w2, w3 = ws[vi] + j0, j1, j2, j3 = js[vi] + if w0 != 0: + vgs[j0].add((vi,), w0, 'REPLACE') + if w1 != 0: + vgs[j1].add((vi,), w1, 'REPLACE') + if w2 != 0: + vgs[j2].add((vi,), w2, 'REPLACE') + if w3 != 0: + vgs[j3].add((vi,), w3, 'REPLACE') + + # Shapekeys + if num_shapekeys: + ob.shape_key_add(name='Basis') + mesh.shape_keys.name = mesh.name + + sk_i = 0 + for sk_name in pymesh.shapekey_names: + if sk_name is None: + continue + + ob.shape_key_add(name=sk_name) + key_block = mesh.shape_keys.key_blocks[sk_name] + key_block.points.foreach_set('co', squish(sk_vert_locs[sk_i], np.float32)) + + sk_i += 1 + + # ---- 
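The block above feeds flat, C-contiguous NumPy buffers into Blender's foreach_set API (see squish() further down). As a rough illustration of that pattern outside the importer, a minimal sketch for a triangles-only mesh; the function name and arguments here are illustrative assumptions, not part of this patch:

import bpy
import numpy as np

def mesh_from_arrays(name, vert_locs, loop_vidxs):
    # vert_locs: (N, 3) float positions; loop_vidxs: flat corner->vertex
    # indices of length 3 * num_faces (every face is a triangle).
    mesh = bpy.data.meshes.new(name)
    mesh.vertices.add(len(vert_locs))
    mesh.vertices.foreach_set(
        'co', np.ascontiguousarray(vert_locs, dtype=np.float32).reshape(-1))
    mesh.loops.add(len(loop_vidxs))
    mesh.loops.foreach_set(
        'vertex_index', np.ascontiguousarray(loop_vidxs, dtype=np.intc))
    num_faces = len(loop_vidxs) // 3
    mesh.polygons.add(num_faces)
    # As in the importer, only loop_start is set; loop sizes are derived.
    mesh.polygons.foreach_set(
        'loop_start', np.arange(0, 3 * num_faces, step=3, dtype=np.intc))
    mesh.validate()
    mesh.update()
    return mesh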
+
+    # Assign materials to faces
+    has_materials = any(prim.material is not None for prim in pymesh.primitives)
+    # Even if no primitive has a material, we still need to create slots if some primitives have variants
+    if has_materials is False:
+        has_materials = any(prim.extensions is not None and 'KHR_materials_variants' in prim.extensions.keys()
+                            for prim in pymesh.primitives)
+
+    if has_materials:
+        bl_material_index_dtype = np.intc
+        material_indices = np.empty(num_faces, dtype=bl_material_index_dtype)
+        empty_material_slot_index = None
+        f = 0
+
+        for idx_prim, prim in enumerate(pymesh.primitives):
+
+            has_variant = prim.extensions is not None and 'KHR_materials_variants' in prim.extensions.keys() \
+                and 'mappings' in prim.extensions['KHR_materials_variants'].keys()
+
+            if prim.material is not None:
+                # Get the material
+                pymaterial = gltf.data.materials[prim.material]
+                vertex_color = 'COLOR_0' if ('COLOR_0' in prim.attributes) else None
+                if vertex_color not in pymaterial.blender_material:
+                    BlenderMaterial.create(gltf, prim.material, vertex_color)
+                material_name = pymaterial.blender_material[vertex_color]
+
+                # Put material in slot (if not there)
+                if not has_variant:
+                    if material_name not in mesh.materials:
+                        mesh.materials.append(bpy.data.materials[material_name])
+                    material_index = mesh.materials.find(material_name)
+                else:
+                    # In case of variant, do not merge slots
+                    mesh.materials.append(bpy.data.materials[material_name])
+                    material_index = len(mesh.materials) - 1
+            else:
+                if not has_variant:
+                    if empty_material_slot_index is None:
+                        mesh.materials.append(None)
+                        empty_material_slot_index = len(mesh.materials) - 1
+                    material_index = empty_material_slot_index
+                else:
+                    # In case of variant, do not merge slots
+                    mesh.materials.append(None)
+                    material_index = len(mesh.materials) - 1
+
+            material_indices[f:f + prim.num_faces].fill(material_index)
+
+            f += prim.num_faces
+
+            # Manage variants
+            if has_variant:
+
+                # Store default material
+                default_mat = mesh.gltf2_variant_default_materials.add()
+                default_mat.material_slot_index = material_index
+                default_mat.default_material = bpy.data.materials[material_name] if prim.material is not None else None
+
+                for mapping in prim.extensions['KHR_materials_variants']['mappings']:
+                    # Store, for each variant, the material linked to this primitive
+
+                    variant_primitive = mesh.gltf2_variant_mesh_data.add()
+                    variant_primitive.material_slot_index = material_index
+                    if 'material' not in mapping.keys():
+                        # Default material
+                        variant_primitive.material = None
+                    else:
+                        vertex_color = 'COLOR_0' if 'COLOR_0' in prim.attributes else None
+                        if str(mapping['material']) + str(vertex_color) not in gltf.variant_mapping.keys():
+                            BlenderMaterial.create(gltf, mapping['material'], vertex_color)
+                        variant_primitive.material = gltf.variant_mapping[str(mapping['material']) + str(vertex_color)]
+
+                    for variant in mapping['variants']:
+                        vari = variant_primitive.variants.add()
+                        vari.variant.variant_idx = variant
+
+        material_index_attribute = attribute_ensure(mesh.attributes, 'material_index', 'INT', 'FACE')
+        material_index_attribute.data.foreach_set('value', material_indices)
+
+    # Custom Attributes
+    for idx, attr in enumerate(attributes):
+
+        blender_attribute_data_type = get_attribute_type(
+            attribute_component_type[attr],
+            attribute_type[attr]
+        )
+
+        if blender_attribute_data_type is None:
+            continue
+
+        blender_attribute = mesh.attributes.new(attr, blender_attribute_data_type, 'POINT')
+        if DataType.num_elements(attribute_type[attr]) == 1:
+            
blender_attribute.data.foreach_set('value', attribute_data[idx].flatten()) + elif DataType.num_elements(attribute_type[attr]) > 1: + if blender_attribute_data_type in ["BYTE_COLOR", "FLOAT_COLOR"]: + blender_attribute.data.foreach_set('color', attribute_data[idx].flatten()) + else: + blender_attribute.data.foreach_set('vector', attribute_data[idx].flatten()) + + # ---- + # Normals + + # Set polys smooth/flat + set_poly_smoothing(gltf, pymesh, mesh, vert_normals, loop_vidxs) + + mesh.validate() + has_loose_edges = len(edge_vidxs) != 0 # need to calc_loose_edges for them to show up + mesh.update(calc_edges_loose=has_loose_edges) + + if has_normals: + mesh.normals_split_custom_set_from_vertices(vert_normals) + + +def points_edges_tris(mode, indices): + points = None + edges = None + tris = None + + if mode == 0: + # POINTS + points = indices + + elif mode == 1: + # LINES + # 1 3 + # / / + # 0 2 + edges = indices + + elif mode == 2: + # LINE LOOP + # 1---2 + # / \ + # 0-------3 + # in: 0123 + # out: 01122330 + edges = np.empty(2 * len(indices), dtype=np.uint32) + edges[[0, -1]] = indices[[0, 0]] # 0______0 + edges[1:-1] = np.repeat(indices[1:], 2) # 01122330 + + elif mode == 3: + # LINE STRIP + # 1---2 + # / \ + # 0 3 + # in: 0123 + # out: 011223 + edges = np.empty(2 * len(indices) - 2, dtype=np.uint32) + edges[[0, -1]] = indices[[0, -1]] # 0____3 + edges[1:-1] = np.repeat(indices[1:-1], 2) # 011223 + + elif mode == 4: + # TRIANGLES + # 2 3 + # / \ / \ + # 0---1 4---5 + tris = indices + + elif mode == 5: + # TRIANGLE STRIP + # 0---2---4 + # \ / \ / + # 1---3 + # in: 01234 + # out: 012132234 + # out (viewed as triplets): 012, 132, 234 + tris = np.empty((len(indices) - 2) * 3, dtype=np.uint32) + # 012__ + first_indices = indices[:-2] + # _123_ + second_indices = indices[1:-1] + # __234 + third_indices = indices[2:] + + # Each triplet starts with the first index + # 0__, 1__, 2__ <- 012__ + tris[0::3] = first_indices + + # Even triplets end with the next two indices in order + # _1_, ___, _3_ <- _1_3_ <- _123_ + # 01_, 1__, 23_ + tris[1::6] = second_indices[0::2] + # __2, ___, __4 <- __2_4 <- __234 + # 012, 1__, 234 + tris[2::6] = third_indices[0::2] + + # Odd triplets end with the next two indices in reverse order + # ___, _3_, ___ <- ___3_ <- __234 + # 012, 13_, 234 + tris[4::6] = third_indices[1::2] + # ___, __2, ___ <- __2__ <- _123_ + # 012, 132, 234 + tris[5::6] = second_indices[1::2] + + elif mode == 6: + # TRIANGLE FAN + # 3---2 + # / \ / \ + # 4---0---1 + # in: 01234 + # out: 012023034 + # out (viewed as triplets): 012, 023, 034 + # Start filled with the first index + # 000, 000, 000 + tris = np.full((len(indices) - 2) * 3, indices[0], dtype=np.uint32) + # _1_, _2_, _3_ <- _123_ + # 010, 020, 030 + tris[1::3] = indices[1:-1] + # __2, __3, __4 <- __234 + # 012, 023, 034 + tris[2::3] = indices[2:] + + else: + raise Exception('primitive mode unimplemented: %d' % mode) + + return points, edges, tris + + +def squish(array, dtype=None): + """Squish nD array into a C-contiguous (required for faster access with the buffer protocol in foreach_set) 1D array + (required by foreach_set). 
Optionally converting the array to a different dtype.""" + return np.ascontiguousarray(array, dtype=dtype).reshape(array.size) + + +def colors_rgb_to_rgba(rgb): + rgba = np.ones((len(rgb), 4), dtype=np.float32) + rgba[:, :3] = rgb + return rgba + + +def uvs_gltf_to_blender(uvs): + # u,v -> u,1-v + uvs[:, 1] *= -1 + uvs[:, 1] += 1 + + +def skin_into_bind_pose(gltf, skin_idx, vert_joints, vert_weights, locs, vert_normals): + # Skin each position/normal using the bind pose. + # Skinning equation: vert' = sum_(j,w) w * joint_mat[j] * vert + # where the sum is over all (joint,weight) pairs. + + # Calculate joint matrices + joint_mats = [] + pyskin = gltf.data.skins[skin_idx] + if pyskin.inverse_bind_matrices is not None: + inv_binds = BinaryData.get_data_from_accessor(gltf, pyskin.inverse_bind_matrices) + inv_binds = [gltf.matrix_gltf_to_blender(m) for m in inv_binds] + else: + inv_binds = [Matrix.Identity(4) for i in range(len(pyskin.joints))] + bind_mats = [gltf.vnodes[joint].bind_arma_mat for joint in pyskin.joints] + joint_mats = [bind_mat @ inv_bind for bind_mat, inv_bind in zip(bind_mats, inv_binds)] + + # TODO: check if joint_mats are all (approximately) 1, and skip skinning + + joint_mats = np.array(joint_mats, dtype=np.float32) + + # Compute the skinning matrices for every vert + num_verts = len(locs[0]) + skinning_mats = np.zeros((num_verts, 4, 4), dtype=np.float32) + weight_sums = np.zeros(num_verts, dtype=np.float32) + for js, ws in zip(vert_joints, vert_weights): + for i in range(4): + skinning_mats += ws[:, i].reshape(len(ws), 1, 1) * joint_mats[js[:, i]] + weight_sums += ws[:, i] + + # Some invalid files have 0 weight sum. + # To avoid to have this vertices at 0.0 / 0.0 / 0.0 + # We set all weight ( aka 1.0 ) to the first bone + zeros_indices = np.where(weight_sums == 0)[0] + if zeros_indices.shape[0] > 0: + gltf.log.error('File is invalid: Some vertices are not assigned to bone(s) ') + vert_weights[0][:, 0][zeros_indices] = 1.0 # Assign to first bone with all weight + + # Reprocess IBM for these vertices + skinning_mats[zeros_indices] = np.zeros((4, 4), dtype=np.float32) + for js, ws in zip(vert_joints, vert_weights): + for i in range(4): + skinning_mats[zeros_indices] += ws[:, + i][zeros_indices].reshape(len(ws[zeros_indices]), 1, 1) * joint_mats[js[:, i][zeros_indices]] + weight_sums[zeros_indices] += ws[:, i][zeros_indices] + + skinning_mats /= weight_sums.reshape(num_verts, 1, 1) + + skinning_mats_3x3 = skinning_mats[:, :3, :3] + skinning_trans = skinning_mats[:, :3, 3] + + for vs in locs: + vs[:] = mul_mats_vecs(skinning_mats_3x3, vs) + vs[:] += skinning_trans + + if len(vert_normals) != 0: + vert_normals[:] = mul_mats_vecs(skinning_mats_3x3, vert_normals) + # Don't translate normals! + normalize_vecs(vert_normals) + + +def mul_mats_vecs(mats, vecs): + """Given [m1,m2,...] and [v1,v2,...], returns [m1@v1,m2@v2,...]. 3D only.""" + return np.matmul(mats, vecs.reshape(len(vecs), 3, 1)).reshape(len(vecs), 3) + + +def normalize_vecs(vectors): + norms = np.linalg.norm(vectors, axis=1, keepdims=True) + np.divide(vectors, norms, out=vectors, where=norms != 0) + + +def attribute_ensure(attributes, name, data_type, domain): + attribute = attributes.get(name) + if attribute is None: + return attributes.new(name, data_type, domain) + if attribute.domain == domain and attribute.data_type == data_type: + return attribute + # There is an existing attribute, but it has the wrong domain or data_type. 
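+    # Remove it and re-create it with the requested domain and data type.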
+ attributes.remove(attribute) + return attributes.new(name, data_type, domain) + + +def set_poly_smoothing(gltf, pymesh, mesh, vert_normals, loop_vidxs): + num_polys = len(mesh.polygons) + + if gltf.import_settings['import_shading'] == "FLAT": + # Polys are smooth by default, setting to flat + mesh.shade_flat() + return + + if gltf.import_settings['import_shading'] == "SMOOTH": + poly_sharps = np.full(num_polys, False) + f = 0 + for prim in pymesh.primitives: + if 'NORMAL' not in prim.attributes: + # Primitives with no NORMALs should use flat shading + poly_sharps[f:f + prim.num_faces].fill(True) + f += prim.num_faces + sharp_face_attribute = attribute_ensure(mesh.attributes, 'sharp_face', 'BOOLEAN', 'FACE') + sharp_face_attribute.data.foreach_set('value', poly_sharps) + return + + assert gltf.import_settings['import_shading'] == "NORMALS" + + # Try to guess which polys should be flat based on the fact that all the + # loop normals for a flat poly are = the poly's normal. + + poly_sharps = np.empty(num_polys, dtype=bool) + + poly_normals = np.empty(num_polys * 3, dtype=np.float32) + mesh.polygon_normals.foreach_get('vector', poly_normals) + poly_normals = poly_normals.reshape(num_polys, 3) + + f = 0 + for prim in pymesh.primitives: + if 'NORMAL' not in prim.attributes: + # Primitives with no NORMALs should use flat shading + poly_sharps[f:f + prim.num_faces].fill(True) + f += prim.num_faces + continue + + # Check the normals at the three corners against the poly normal. + # Two normals are equal iff their dot product is 1. + + poly_ns = poly_normals[f:f + prim.num_faces] + + # Dot product against the first vertex normal in the tri + vert_ns = vert_normals[loop_vidxs[3 * f:3 * (f + prim.num_faces):3]] + dot_prods = np.sum(vert_ns * poly_ns, axis=1) # dot product + smooth = (dot_prods <= 0.9999999) + + # Same for the second vertex, etc. + vert_ns = vert_normals[loop_vidxs[3 * f + 1:3 * (f + prim.num_faces):3]] + dot_prods = np.sum(vert_ns * poly_ns, axis=1) + np.logical_or(smooth, dot_prods <= 0.9999999, out=smooth) + + vert_ns = vert_normals[loop_vidxs[3 * f + 2:3 * (f + prim.num_faces):3]] + dot_prods = np.sum(vert_ns * poly_ns, axis=1) + np.logical_or(smooth, dot_prods <= 0.9999999, out=smooth) + + np.logical_not(smooth, out=poly_sharps[f:f + prim.num_faces]) + + f += prim.num_faces + + sharp_face_attribute = attribute_ensure(mesh.attributes, 'sharp_face', 'BOOLEAN', 'FACE') + sharp_face_attribute.data.foreach_set('value', poly_sharps) + + +def merge_duplicate_verts( + vert_locs, + vert_normals, + vert_joints, + vert_weights, + sk_vert_locs, + loop_vidxs, + edge_vidxs, + attribute_data): + # This function attempts to invert the splitting done when exporting to + # glTF. Welds together verts with the same per-vert data (but possibly + # different per-loop data). + # + # Ideally normals would be treated as per-loop data, but that has problems, + # so we currently treat the normal as per-vert. + # + # Strategy is simple: put all the per-vert data into an array of structs + # ("dots"), dedupe with np.unique, then take all the data back out. + + # Very often two verts that "morally" should be merged will have normals + # with very small differences. Round off the normals to smooth this over. 
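+    # (Scaling by 50000, truncating, and scaling back quantizes each normal
+    # component to steps of 1/50000, so nearly identical normals compare equal
+    # in the structured-array dedupe below.)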
+ if len(vert_normals) != 0: + vert_normals *= 50000 + vert_normals[:] = np.trunc(vert_normals) + vert_normals *= (1 / 50000) + + dot_fields = [('x', np.float32), ('y', np.float32), ('z', np.float32)] + if len(vert_normals) != 0: + dot_fields += [('nx', np.float32), ('ny', np.float32), ('nz', np.float32)] + for i, _ in enumerate(vert_joints): + dot_fields += [ + ('joint%dx' % i, np.uint32), ('joint%dy' % i, np.uint32), + ('joint%dz' % i, np.uint32), ('joint%dw' % i, np.uint32), + ('weight%dx' % i, np.float32), ('weight%dy' % i, np.float32), + ('weight%dz' % i, np.float32), ('weight%dw' % i, np.float32), + ] + for i, _ in enumerate(sk_vert_locs): + dot_fields += [ + ('sk%dx' % i, np.float32), ('sk%dy' % i, np.float32), ('sk%dz' % i, np.float32), + ] + dots = np.empty(len(vert_locs), dtype=np.dtype(dot_fields)) + + dots['x'] = vert_locs[:, 0] + dots['y'] = vert_locs[:, 1] + dots['z'] = vert_locs[:, 2] + if len(vert_normals) != 0: + dots['nx'] = vert_normals[:, 0] + dots['ny'] = vert_normals[:, 1] + dots['nz'] = vert_normals[:, 2] + for i, (joints, weights) in enumerate(zip(vert_joints, vert_weights)): + dots['joint%dx' % i] = joints[:, 0] + dots['joint%dy' % i] = joints[:, 1] + dots['joint%dz' % i] = joints[:, 2] + dots['joint%dw' % i] = joints[:, 3] + dots['weight%dx' % i] = weights[:, 0] + dots['weight%dy' % i] = weights[:, 1] + dots['weight%dz' % i] = weights[:, 2] + dots['weight%dw' % i] = weights[:, 3] + for i, locs in enumerate(sk_vert_locs): + dots['sk%dx' % i] = locs[:, 0] + dots['sk%dy' % i] = locs[:, 1] + dots['sk%dz' % i] = locs[:, 2] + + unique_dots, unique_ind, inv_indices = fast_structured_np_unique(dots, return_index=True, return_inverse=True) + + loop_vidxs = inv_indices[loop_vidxs] + edge_vidxs = inv_indices[edge_vidxs] + + # We don't split vertices only because of custom attribute + # If 2 vertices have same data (pos, normals, etc...) 
except custom attribute, we + # keep 1 custom attribute, arbitrary + for idx, i in enumerate(attribute_data): + attribute_data[idx] = attribute_data[idx][unique_ind] + + vert_locs = np.empty((len(unique_dots), 3), dtype=np.float32) + vert_locs[:, 0] = unique_dots['x'] + vert_locs[:, 1] = unique_dots['y'] + vert_locs[:, 2] = unique_dots['z'] + if len(vert_normals) != 0: + vert_normals = np.empty((len(unique_dots), 3), dtype=np.float32) + vert_normals[:, 0] = unique_dots['nx'] + vert_normals[:, 1] = unique_dots['ny'] + vert_normals[:, 2] = unique_dots['nz'] + for i in range(len(vert_joints)): + vert_joints[i] = np.empty((len(unique_dots), 4), dtype=np.uint32) + vert_joints[i][:, 0] = unique_dots['joint%dx' % i] + vert_joints[i][:, 1] = unique_dots['joint%dy' % i] + vert_joints[i][:, 2] = unique_dots['joint%dz' % i] + vert_joints[i][:, 3] = unique_dots['joint%dw' % i] + vert_weights[i] = np.empty((len(unique_dots), 4), dtype=np.float32) + vert_weights[i][:, 0] = unique_dots['weight%dx' % i] + vert_weights[i][:, 1] = unique_dots['weight%dy' % i] + vert_weights[i][:, 2] = unique_dots['weight%dz' % i] + vert_weights[i][:, 3] = unique_dots['weight%dw' % i] + for i in range(len(sk_vert_locs)): + sk_vert_locs[i] = np.empty((len(unique_dots), 3), dtype=np.float32) + sk_vert_locs[i][:, 0] = unique_dots['sk%dx' % i] + sk_vert_locs[i][:, 1] = unique_dots['sk%dy' % i] + sk_vert_locs[i][:, 2] = unique_dots['sk%dz' % i] + + return vert_locs, vert_normals, vert_joints, vert_weights, sk_vert_locs, loop_vidxs, edge_vidxs, attribute_data diff --git a/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_node.py b/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_node.py new file mode 100755 index 00000000000..180316b3e41 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_node.py @@ -0,0 +1,326 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy +from mathutils import Vector, Matrix +from ...io.imp.gltf2_io_user_extensions import import_user_extensions +from ..com.gltf2_blender_extras import set_extras +from ..com.gltf2_blender_default import BLENDER_GLTF_SPECIAL_COLLECTION +from .gltf2_blender_mesh import BlenderMesh +from .gltf2_blender_camera import BlenderCamera +from .gltf2_blender_light import BlenderLight +from .gltf2_blender_vnode import VNode + + +class BlenderNode(): + """Blender Node.""" + def __new__(cls, *args, **kwargs): + raise RuntimeError("%s should not be instantiated" % cls) + + @staticmethod + def create_vnode(gltf, vnode_id): + """Create VNode and all its descendants.""" + vnode = gltf.vnodes[vnode_id] + + gltf.display_current_node += 1 + if bpy.app.debug_value == 101: + gltf.log.critical("Node %d of %d (id %s)", gltf.display_current_node, len(gltf.vnodes), vnode_id) + + if vnode.type in [VNode.Object, VNode.Inst]: + gltf_node = gltf.data.nodes[vnode_id] if isinstance(vnode_id, int) else None + import_user_extensions('gather_import_node_before_hook', gltf, vnode, gltf_node) + obj = BlenderNode.create_object(gltf, vnode_id) + import_user_extensions('gather_import_node_after_hook', gltf, vnode, gltf_node, obj) + if vnode.is_arma: + BlenderNode.create_bones(gltf, vnode_id) + + elif vnode.type == VNode.Bone: + # These are created with their armature + pass + + elif vnode.type == VNode.DummyRoot: + # Don't actually create this + vnode.blender_object = None + + for child in vnode.children: + BlenderNode.create_vnode(gltf, child) + + @staticmethod + def create_object(gltf, 
vnode_id): + vnode = gltf.vnodes[vnode_id] + + if vnode.mesh_node_idx is not None: + obj = BlenderNode.create_mesh_object(gltf, vnode) + + elif vnode.type == VNode.Inst and vnode.mesh_idx is not None: + obj = BlenderNode.create_mesh_object(gltf, vnode) + + elif vnode.camera_node_idx is not None: + pynode = gltf.data.nodes[vnode.camera_node_idx] + cam = BlenderCamera.create(gltf, vnode, pynode.camera) + name = vnode.name or cam.name + obj = bpy.data.objects.new(name, cam) + + # Since we create the actual Blender object after the create call, we call the hook here + import_user_extensions('gather_import_camera_after_hook', gltf, vnode, obj, cam) + + elif vnode.light_node_idx is not None: + pynode = gltf.data.nodes[vnode.light_node_idx] + light = BlenderLight.create(gltf, vnode, pynode.extensions['KHR_lights_punctual']['light']) + name = vnode.name or light.name + obj = bpy.data.objects.new(name, light) + + # Since we create the actual Blender object after the create call, we call the hook here + import_user_extensions('gather_import_light_after_hook', gltf, vnode, obj, light) + + elif vnode.is_arma: + armature = bpy.data.armatures.new(vnode.arma_name) + name = vnode.name or armature.name + obj = bpy.data.objects.new(name, armature) + if gltf.import_settings['bone_heuristic'] == "BLENDER": + BlenderNode.armature_display(gltf, obj) + + else: + # Empty + name = vnode.name or vnode.default_name + obj = bpy.data.objects.new(name, None) + obj.empty_display_size = BlenderNode.calc_empty_display_size(gltf, vnode_id) + + vnode.blender_object = obj + + # Set extras (if came from a glTF node) + if isinstance(vnode_id, int): + pynode = gltf.data.nodes[vnode_id] + set_extras(obj, pynode.extras) + + # Set transform + trans, rot, scale = vnode.trs() + obj.location = trans + obj.rotation_mode = 'QUATERNION' + obj.rotation_quaternion = rot + obj.scale = scale + + # Set parent + if vnode.parent is not None: + parent_vnode = gltf.vnodes[vnode.parent] + if parent_vnode.type == VNode.Object: + obj.parent = parent_vnode.blender_object + elif parent_vnode.type == VNode.Bone: + arma_vnode = gltf.vnodes[parent_vnode.bone_arma] + obj.parent = arma_vnode.blender_object + obj.parent_type = 'BONE' + obj.parent_bone = parent_vnode.blender_bone_name + + # Nodes with a bone parent need to be translated + # backwards from the tip to the root + obj.location += Vector((0, -parent_vnode.bone_length, 0)) + + # Store Rest matrix of object + # Can't use directly matrix_world because not refreshed yet + if hasattr(obj, 'gltf2_animation_rest'): + obj.gltf2_animation_rest = Matrix.LocRotScale(obj.location, obj.rotation_quaternion, obj.scale) + + bpy.data.scenes[gltf.blender_scene].collection.objects.link(obj) + + return obj + + @staticmethod + def armature_display(gltf, obj): + obj.show_in_front = True + obj.data.relation_line_position = "HEAD" + + # Create a special collection (if not exists already) + # Content of this collection will not be exported + if BLENDER_GLTF_SPECIAL_COLLECTION not in bpy.data.collections: + bpy.data.collections.new(BLENDER_GLTF_SPECIAL_COLLECTION) + bpy.data.scenes[gltf.blender_scene].collection.children.link( + bpy.data.collections[BLENDER_GLTF_SPECIAL_COLLECTION]) + bpy.data.collections[BLENDER_GLTF_SPECIAL_COLLECTION].hide_viewport = True + bpy.data.collections[BLENDER_GLTF_SPECIAL_COLLECTION].hide_render = True + + # Create an icosphere, and assign it to the collection + bpy.ops.mesh.primitive_ico_sphere_add( + radius=1, enter_editmode=False, align='WORLD', location=( + 0, 0, 0), scale=( + 1, 1, 1)) 
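+        # (The operator links the new icosphere into the active collection and
+        # makes it the active object; it is then moved into the hidden glTF
+        # collection and its name is remembered as the shared bone shape.)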
+ bpy.data.collections[BLENDER_GLTF_SPECIAL_COLLECTION].objects.link(bpy.context.object) + gltf.bone_shape = bpy.context.object.name + bpy.context.collection.objects.unlink(bpy.context.object) + + @staticmethod + def calc_empty_display_size(gltf, vnode_id): + # Use min distance to parent/children to guess size + sizes = [] + vids = [vnode_id] + gltf.vnodes[vnode_id].children + for vid in vids: + vnode = gltf.vnodes[vid] + dist = vnode.trs()[0].length + sizes.append(dist * 0.4) + return max(min(sizes, default=1), 0.001) + + @staticmethod + def create_bones(gltf, arma_id): + arma = gltf.vnodes[arma_id] + blender_arma = arma.blender_object + armature = blender_arma.data + + # Find all bones for this arma + bone_ids = [] + + def visit(id): # Depth-first walk + if gltf.vnodes[id].type == VNode.Bone: + bone_ids.append(id) + for child in gltf.vnodes[id].children: + visit(child) + for child in arma.children: + visit(child) + + # Switch into edit mode to create all edit bones + + if bpy.context.mode != 'OBJECT': + bpy.ops.object.mode_set(mode='OBJECT') + bpy.context.window.scene = bpy.data.scenes[gltf.blender_scene] + bpy.context.view_layer.objects.active = blender_arma + bpy.ops.object.mode_set(mode="EDIT") + + for id in bone_ids: + vnode = gltf.vnodes[id] + editbone = armature.edit_bones.new(vnode.name or vnode.default_name) + vnode.blender_bone_name = editbone.name + editbone.use_connect = False # TODO? + + # Give the position of the bone in armature space + arma_mat = vnode.editbone_arma_mat + editbone.head = arma_mat @ Vector((0, 0, 0)) + editbone.tail = arma_mat @ Vector((0, 1, 0)) + if gltf.import_settings['bone_heuristic'] == "BLENDER": + editbone.length = vnode.bone_length / max(blender_arma.scale) + else: + editbone.length = vnode.bone_length + editbone.align_roll(arma_mat @ Vector((0, 0, 1)) - editbone.head) + + if isinstance(id, int): + pynode = gltf.data.nodes[id] + set_extras(editbone, pynode.extras) + + # Set all bone parents + for id in bone_ids: + vnode = gltf.vnodes[id] + parent_vnode = gltf.vnodes[vnode.parent] + if parent_vnode.type == VNode.Bone: + editbone = armature.edit_bones[vnode.blender_bone_name] + parent_editbone = armature.edit_bones[parent_vnode.blender_bone_name] + editbone.parent = parent_editbone + + # Switch back to object mode and do pose bones + bpy.ops.object.mode_set(mode="OBJECT") + + for id in bone_ids: + vnode = gltf.vnodes[id] + pose_bone = blender_arma.pose.bones[vnode.blender_bone_name] + + # BoneTRS = EditBone * PoseBone + # Set PoseBone to make BoneTRS = vnode.trs. 
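+        # With EditBone = (et, er) and no edit-bone scale, solving
+        # EditBone * PoseBone = (t, r, s) gives location = er^-1 * (t - et),
+        # rotation = er^-1 * r and scale = s; er is a unit quaternion, so its
+        # conjugate below acts as its inverse.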
+ t, r, s = vnode.trs() + et, er = vnode.editbone_trans, vnode.editbone_rot + pose_bone.location = er.conjugated() @ (t - et) + pose_bone.rotation_mode = 'QUATERNION' + pose_bone.rotation_quaternion = er.conjugated() @ r + pose_bone.scale = s + + if isinstance(id, int): + pynode = gltf.data.nodes[id] + set_extras(pose_bone, pynode.extras) + + if gltf.import_settings['bone_heuristic'] == "BLENDER": + pose_bone.custom_shape = bpy.data.objects[gltf.bone_shape] + armature_min_dim = min([blender_arma.dimensions[0] / + blender_arma.scale[0], blender_arma.dimensions[1] / + blender_arma.scale[1], blender_arma.dimensions[2] / + blender_arma.scale[2]]) + pose_bone.custom_shape_scale_xyz = Vector([armature_min_dim * 0.05] * 3) + pose_bone.use_custom_shape_bone_size = False + + @staticmethod + def create_mesh_object(gltf, vnode): + if vnode.type != VNode.Inst: + # Regular case + pynode = gltf.data.nodes[vnode.mesh_node_idx] + else: + class DummyPyNode: + pass + pynode = DummyPyNode() + pynode.mesh = vnode.mesh_idx + pynode.skin = None + pynode.weights = None + + if not (0 <= pynode.mesh < len(gltf.data.meshes)): + # Avoid traceback for invalid gltf file: invalid reference to meshes array + # So return an empty blender object) + return bpy.data.objects.new(vnode.name or "Invalid Mesh Index", None) + pymesh = gltf.data.meshes[pynode.mesh] + + # Key to cache the Blender mesh by. + # Same cache key = instances of the same Blender mesh. + cache_key = None + if not pymesh.shapekey_names: + cache_key = (pynode.skin,) + else: + # Unlike glTF, all instances of a Blender mesh share shapekeys. + # So two instances that might have different morph weights need + # different cache keys. + if pynode.weight_animation is False: + cache_key = (pynode.skin, tuple(pynode.weights or [])) + else: + cache_key = None # don't use the cache at all + + if cache_key is not None and cache_key in pymesh.blender_name: + mesh = bpy.data.meshes[pymesh.blender_name[cache_key]] + else: + gltf.log.info("Blender create Mesh node {}".format(pymesh.name or pynode.mesh)) + mesh = BlenderMesh.create(gltf, pynode.mesh, pynode.skin) + if cache_key is not None: + pymesh.blender_name[cache_key] = mesh.name + + name = vnode.name or mesh.name + obj = bpy.data.objects.new(name, mesh) + + if pymesh.shapekey_names: + BlenderNode.set_morph_weights(gltf, pynode, obj) + + if pynode.skin is not None: + BlenderNode.setup_skinning(gltf, pynode, obj) + + return obj + + @staticmethod + def set_morph_weights(gltf, pynode, obj): + pymesh = gltf.data.meshes[pynode.mesh] + weights = pynode.weights or pymesh.weights or [] + for i, weight in enumerate(weights): + if pymesh.shapekey_names[i] is not None: + kb = obj.data.shape_keys.key_blocks[pymesh.shapekey_names[i]] + # extend range if needed + if weight < kb.slider_min: + kb.slider_min = weight + if weight > kb.slider_max: + kb.slider_max = weight + kb.value = weight + + # Store default weight + if hasattr(obj, 'gltf2_animation_weight_rest'): + w = obj.gltf2_animation_weight_rest.add() + w.val = weight + + @staticmethod + def setup_skinning(gltf, pynode, obj): + pyskin = gltf.data.skins[pynode.skin] + + # Armature/bones should have already been created. 
+ + # Create an Armature modifier + first_bone = gltf.vnodes[pyskin.joints[0]] + arma = gltf.vnodes[first_bone.bone_arma] + mod = obj.modifiers.new(name="Armature", type="ARMATURE") + mod.object = arma.blender_object diff --git a/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_pbrMetallicRoughness.py b/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_pbrMetallicRoughness.py new file mode 100755 index 00000000000..1edf4e1ff4f --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_pbrMetallicRoughness.py @@ -0,0 +1,849 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +from re import M +import bpy +from ...io.com.gltf2_io_constants import GLTF_IOR, BLENDER_COAT_ROUGHNESS +from ...io.com.gltf2_io import TextureInfo, MaterialPBRMetallicRoughness +from ..com.gltf2_blender_material_helpers import get_gltf_node_name, create_settings_group +from .gltf2_blender_texture import texture +from .gltf2_blender_KHR_materials_anisotropy import anisotropy +from .gltf2_blender_material_utils import \ + MaterialHelper, scalar_factor_and_texture, color_factor_and_texture, normal_map + + +def pbr_metallic_roughness(mh: MaterialHelper): + """Creates node tree for pbrMetallicRoughness materials.""" + pbr_node = mh.nodes.new('ShaderNodeBsdfPrincipled') + out_node = mh.nodes.new('ShaderNodeOutputMaterial') + pbr_node.location = 10, 300 + out_node.location = 300, 300 + mh.links.new(pbr_node.outputs[0], out_node.inputs[0]) + + need_volume_node = False # need a place to attach volume? + need_settings_node = False # need a place to attach occlusion/thickness? + + if mh.pymat.occlusion_texture is not None: + need_settings_node = True + + if volume_ext := mh.get_ext('KHR_materials_volume'): + if volume_ext.get('thicknessFactor', 0) != 0: + need_volume_node = True + need_settings_node = True + + # We also need volume node for KHR_animation_pointer + if mh.gltf.data.extensions_used is not None and "KHR_animation_pointer" in mh.gltf.data.extensions_used: + if mh.pymat.extensions and "KHR_materials_volume" in mh.pymat.extensions and len( + mh.pymat.extensions["KHR_materials_volume"]["animations"]) > 0: + for anim_idx in mh.pymat.extensions["KHR_materials_volume"]["animations"].keys(): + for channel_idx in mh.pymat.extensions["KHR_materials_volume"]["animations"][anim_idx]: + channel = mh.gltf.data.animations[anim_idx].channels[channel_idx] + pointer_tab = channel.target.extensions["KHR_animation_pointer"]["pointer"].split("/") + if len(pointer_tab) == 6 and pointer_tab[1] == "materials" and \ + pointer_tab[3] == "extensions" and \ + pointer_tab[4] == "KHR_materials_volume" and \ + pointer_tab[5] in ["thicknessFactor", "attenuationDistance", "attenuationColor"]: + need_volume_node = True + need_settings_node = True + + if need_settings_node: + mh.settings_node = make_settings_node(mh) + mh.settings_node.location = 40, -370 + mh.settings_node.width = 180 + + if need_volume_node: + volume_node = mh.nodes.new('ShaderNodeVolumeAbsorption') + volume_node.location = 40, -520 if need_settings_node else -370 + mh.links.new(out_node.inputs[1], volume_node.outputs[0]) + + locs = calc_locations(mh) + + emission( + mh, + location=locs['emission'], + color_socket=pbr_node.inputs['Emission Color'], + strength_socket=pbr_node.inputs['Emission Strength'], + ) + + base_color( + mh, + location=locs['base_color'], + color_socket=pbr_node.inputs['Base Color'], + alpha_socket=pbr_node.inputs['Alpha'] if not mh.is_opaque() else None, + 
) + + metallic_roughness( + mh, + location=locs['metallic_roughness'], + metallic_socket=pbr_node.inputs['Metallic'], + roughness_socket=pbr_node.inputs['Roughness'], + ) + + normal( + mh, + location=locs['normal'], + normal_socket=pbr_node.inputs['Normal'], + ) + + if mh.pymat.occlusion_texture is not None: + occlusion( + mh, + location=locs['occlusion'], + occlusion_socket=mh.settings_node.inputs['Occlusion'], + ) + + clearcoat(mh, locs, pbr_node) + + transmission(mh, locs, pbr_node) + + if need_volume_node: + volume( + mh, + location=locs['volume_thickness'], + volume_node=volume_node, + thickness_socket=mh.settings_node.inputs[1] if mh.settings_node else None + ) + + specular(mh, locs, pbr_node) + + anisotropy( + mh, + location=locs['anisotropy'], + anisotropy_socket=pbr_node.inputs['Anisotropic'], + anisotropy_rotation_socket=pbr_node.inputs['Anisotropic Rotation'], + anisotropy_tangent_socket=pbr_node.inputs['Tangent'] + ) + + sheen(mh, locs, pbr_node) + + # IOR + ior_ext = mh.get_ext('KHR_materials_ior', {}) + ior = ior_ext.get('ior', GLTF_IOR) + pbr_node.inputs['IOR'].default_value = ior + + if len(ior_ext) > 0: + mh.pymat.extensions['KHR_materials_ior']['blender_nodetree'] = mh.node_tree # Needed for KHR_animation_pointer + mh.pymat.extensions['KHR_materials_ior']['blender_mat'] = mh.mat # Needed for KHR_animation_pointer + + +def clearcoat(mh, locs, pbr_node): + ext = mh.get_ext('KHR_materials_clearcoat', {}) + if len(ext) > 0: + # Needed for KHR_animation_pointer + mh.pymat.extensions['KHR_materials_clearcoat']['blender_nodetree'] = mh.node_tree + mh.pymat.extensions['KHR_materials_clearcoat']['blender_mat'] = mh.mat # Needed for KHR_animation_pointer + + # We will need clearcoat factor (Mix node) if animated by KHR_animation_pointer (and standard case if clearcoatFactor != 1) + # Check if animated by KHR_animation_pointer + force_clearcoat_factor = False + if mh.gltf.data.extensions_used is not None and "KHR_animation_pointer" in mh.gltf.data.extensions_used: + if mh.pymat.extensions and "KHR_materials_clearcoat" in mh.pymat.extensions and len( + mh.pymat.extensions["KHR_materials_clearcoat"]["animations"]) > 0: + for anim_idx in mh.pymat.extensions["KHR_materials_clearcoat"]["animations"].keys(): + for channel_idx in mh.pymat.extensions["KHR_materials_clearcoat"]["animations"][anim_idx]: + channel = mh.gltf.data.animations[anim_idx].channels[channel_idx] + pointer_tab = channel.target.extensions["KHR_animation_pointer"]["pointer"].split("/") + if len(pointer_tab) == 6 and pointer_tab[1] == "materials" and \ + pointer_tab[3] == "extensions" and \ + pointer_tab[4] == "KHR_materials_clearcoat" and \ + pointer_tab[5] == "clearcoatFactor": + force_clearcoat_factor = True + + scalar_factor_and_texture( + mh, + location=locs['clearcoat'], + label='Clearcoat', + socket=pbr_node.inputs['Coat Weight'], + factor=ext.get('clearcoatFactor', 0), + tex_info=ext.get('clearcoatTexture'), + channel=0, # Red + force_mix_node=force_clearcoat_factor + ) + + if len(ext) > 0: + tex_info = TextureInfo.from_dict(ext.get('clearcoatTexture')) if ext.get( + 'clearcoatTexture') is not None else None + # Because extensions are dict, they are not passed by reference + # So we need to update the dict of the KHR_texture_transform extension if needed + if tex_info is not None and tex_info.extensions is not None and "KHR_texture_transform" in tex_info.extensions: + mh.pymat.extensions['KHR_materials_clearcoat']['clearcoatTexture']['extensions']['KHR_texture_transform'] = 
tex_info.extensions["KHR_texture_transform"] + + # We will need clearcoatRoughness factor (Mix node) if animated by + # KHR_animation_pointer (and standard case if clearcoatRoughnessFactor != + # 1) + force_clearcoat_roughness_factor = False + # Check if animated by KHR_animation_pointer + if mh.gltf.data.extensions_used is not None and "KHR_animation_pointer" in mh.gltf.data.extensions_used: + if mh.pymat.extensions and "KHR_materials_clearcoat" in mh.pymat.extensions and len( + mh.pymat.extensions["KHR_materials_clearcoat"]["animations"]) > 0: + for anim_idx in mh.pymat.extensions["KHR_materials_clearcoat"]["animations"].keys(): + for channel_idx in mh.pymat.extensions["KHR_materials_clearcoat"]["animations"][anim_idx]: + channel = mh.gltf.data.animations[anim_idx].channels[channel_idx] + pointer_tab = channel.target.extensions["KHR_animation_pointer"]["pointer"].split("/") + if len(pointer_tab) == 6 and pointer_tab[1] == "materials" and \ + pointer_tab[3] == "extensions" and \ + pointer_tab[4] == "KHR_materials_clearcoat" and \ + pointer_tab[5] == "clearcoatRoughnessFactor": + force_clearcoat_roughness_factor = True + + scalar_factor_and_texture( + mh, + location=locs['clearcoat_roughness'], + label='Clearcoat Roughness', + socket=pbr_node.inputs['Coat Roughness'], + factor=ext.get('clearcoatRoughnessFactor', BLENDER_COAT_ROUGHNESS if ext.get( + 'clearcoatRoughnessTexture') is None else 0), + tex_info=ext.get('clearcoatRoughnessTexture'), + channel=1, # Green + force_mix_node=force_clearcoat_roughness_factor + ) + + if len(ext) > 0: + tex_info = TextureInfo.from_dict(ext.get('clearcoatRoughnessTexture')) if ext.get( + 'clearcoatRoughnessTexture') is not None else None + # Because extensions are dict, they are not passed by reference + # So we need to update the dict of the KHR_texture_transform extension if needed + if tex_info is not None and tex_info.extensions is not None and "KHR_texture_transform" in tex_info.extensions: + mh.pymat.extensions['KHR_materials_clearcoat']['clearcoatRoughnessTexture']['extensions']['KHR_texture_transform'] = tex_info.extensions["KHR_texture_transform"] + + normal_map( + mh, + location=locs['clearcoat_normal'], + label='Clearcoat Normal', + socket=pbr_node.inputs['Coat Normal'], + tex_info=ext.get('clearcoatNormalTexture'), + ) + + +def transmission(mh, locs, pbr_node): + ext = mh.get_ext('KHR_materials_transmission', {}) + factor = ext.get('transmissionFactor', 0) + + if len(ext) > 0: + # Needed for KHR_animation_pointer + mh.pymat.extensions['KHR_materials_transmission']['blender_nodetree'] = mh.node_tree + mh.pymat.extensions['KHR_materials_transmission']['blender_mat'] = mh.mat # Needed for KHR_animation_pointer + + # We need transmission if animated by KHR_animation_pointer + force_transmission = False + if mh.gltf.data.extensions_used is not None and "KHR_animation_pointer" in mh.gltf.data.extensions_used: + if mh.pymat.extensions and "KHR_materials_transmission" in mh.pymat.extensions and len( + mh.pymat.extensions["KHR_materials_transmission"]["animations"]) > 0: + for anim_idx in mh.pymat.extensions["KHR_materials_transmission"]["animations"].keys(): + for channel_idx in mh.pymat.extensions["KHR_materials_transmission"]["animations"][anim_idx]: + channel = mh.gltf.data.animations[anim_idx].channels[channel_idx] + pointer_tab = channel.target.extensions["KHR_animation_pointer"]["pointer"].split("/") + if len(pointer_tab) == 6 and pointer_tab[1] == "materials" and \ + pointer_tab[3] == "extensions" and \ + pointer_tab[4] == 
"KHR_materials_transmission" and \ + pointer_tab[5] == "transmissionFactor": + force_transmission = True + + if factor > 0 or force_transmission is True: + # Activate screen refraction (for Eevee) + mh.mat.use_screen_refraction = True + + scalar_factor_and_texture( + mh, + location=locs['transmission'], + label='Transmission', + socket=pbr_node.inputs['Transmission Weight'], + factor=factor, + tex_info=ext.get('transmissionTexture'), + channel=0, # Red + force_mix_node=force_transmission, + ) + + if len(ext) > 0: + tex_info = TextureInfo.from_dict(ext.get('transmissionTexture')) if ext.get( + 'transmissionTexture') is not None else None + # Because extensions are dict, they are not passed by reference + # So we need to update the dict of the KHR_texture_transform extension if needed + if tex_info is not None and tex_info.extensions is not None and "KHR_texture_transform" in tex_info.extensions: + mh.pymat.extensions['KHR_materials_transmission']['transmissionTexture']['extensions']['KHR_texture_transform'] = tex_info.extensions["KHR_texture_transform"] + + +def volume(mh, location, volume_node, thickness_socket): + # Based on https://github.com/KhronosGroup/glTF-Blender-IO/issues/1454#issuecomment-928319444 + ext = mh.get_ext('KHR_materials_volume', {}) + + if len(ext) > 0: + # Needed for KHR_animation_pointer + mh.pymat.extensions['KHR_materials_volume']['blender_nodetree'] = mh.node_tree + mh.pymat.extensions['KHR_materials_volume']['blender_mat'] = mh.mat # Needed for KHR_animation_pointer + + color = ext.get('attenuationColor', [1, 1, 1]) + volume_node.inputs[0].default_value = [*color, 1] + + distance = ext.get('attenuationDistance', float('inf')) + density = 1 / distance + volume_node.inputs[1].default_value = density + + # We also need math node if thickness factor is animated in KHR_animation_pointer + force_math_node = False + if mh.gltf.data.extensions_used is not None and "KHR_animation_pointer" in mh.gltf.data.extensions_used: + if len(mh.pymat.extensions["KHR_materials_volume"]["animations"]) > 0: + for anim_idx in mh.pymat.extensions["KHR_materials_volume"]["animations"].keys(): + for channel_idx in mh.pymat.extensions["KHR_materials_volume"]["animations"][anim_idx]: + channel = mh.gltf.data.animations[anim_idx].channels[channel_idx] + pointer_tab = channel.target.extensions["KHR_animation_pointer"]["pointer"].split("/") + if len(pointer_tab) == 6 and pointer_tab[1] == "materials" and \ + pointer_tab[3] == "extensions" and \ + pointer_tab[4] == "KHR_materials_volume" and \ + pointer_tab[5] == "thicknessFactor": + force_math_node = True + + scalar_factor_and_texture( + mh, + location=location, + label='Thickness', + socket=thickness_socket, + factor=ext.get('thicknessFactor', 0), + tex_info=ext.get('thicknessTexture'), + channel=1, # Green + force_mix_node=force_math_node, + ) + + if len(ext) > 0: + tex_info = TextureInfo.from_dict(ext.get('thicknessTexture')) if ext.get( + 'thicknessTexture') is not None else None + # Because extensions are dict, they are not passed by reference + # So we need to update the dict of the KHR_texture_transform extension if needed + if tex_info is not None and tex_info.extensions is not None and "KHR_texture_transform" in tex_info.extensions: + mh.pymat.extensions['KHR_materials_volume']['thicknessTexture']['extensions']['KHR_texture_transform'] = tex_info.extensions["KHR_texture_transform"] + + +def specular(mh, locs, pbr_node): + ext = mh.get_ext('KHR_materials_specular', {}) + + if len(ext) > 0: + # Needed for KHR_animation_pointer + 
mh.pymat.extensions['KHR_materials_specular']['blender_nodetree'] = mh.node_tree + mh.pymat.extensions['KHR_materials_specular']['blender_mat'] = mh.mat # Needed for KHR_animation_pointer + + # blender.IORLevel = 0.5 * gltf.specular + scalar_factor_and_texture( + mh, + location=locs['specularTexture'], + label='Specular', + socket=pbr_node.inputs['Specular IOR Level'], + factor=0.5 * ext.get('specularFactor', 1), + tex_info=ext.get('specularTexture'), + channel=4, # Alpha + ) + + if len(ext) > 0: + tex_info = TextureInfo.from_dict(ext.get('specularTexture')) if ext.get('specularTexture') is not None else None + # Because extensions are dict, they are not passed by reference + # So we need to update the dict of the KHR_texture_transform extension if needed + if tex_info is not None and tex_info.extensions is not None and "KHR_texture_transform" in tex_info.extensions: + mh.pymat.extensions['KHR_materials_specular']['specularTexture']['extensions']['KHR_texture_transform'] = tex_info.extensions["KHR_texture_transform"] + + color_factor_and_texture( + mh, + location=locs['specularColorTexture'], + label='Specular Color', + socket=pbr_node.inputs['Specular Tint'], + factor=ext.get('specularColorFactor', [1, 1, 1]), + tex_info=ext.get('specularColorTexture'), + ) + + if len(ext) > 0: + tex_info = TextureInfo.from_dict(ext.get('specularColorTexture')) if ext.get( + 'specularColorTexture') is not None else None + # Because extensions are dict, they are not passed by reference + # So we need to update the dict of the KHR_texture_transform extension if needed + if tex_info is not None and tex_info.extensions is not None and "KHR_texture_transform" in tex_info.extensions: + mh.pymat.extensions['KHR_materials_specular']['specularColorTexture']['extensions']['KHR_texture_transform'] = tex_info.extensions["KHR_texture_transform"] + + +def sheen(mh, locs, pbr_node): + ext = mh.get_ext('KHR_materials_sheen') + if ext is None: + return + + mh.pymat.extensions['KHR_materials_sheen']['blender_nodetree'] = mh.node_tree # Needed for KHR_animation_pointer + mh.pymat.extensions['KHR_materials_sheen']['blender_mat'] = mh.mat # Needed for KHR_animation_pointer + + pbr_node.inputs['Sheen Weight'].default_value = 1 + + color_factor_and_texture( + mh, + location=locs['sheenColorTexture'], + label='Sheen Color', + socket=pbr_node.inputs['Sheen Tint'], + factor=ext.get('sheenColorFactor', [0, 0, 0]), + tex_info=ext.get('sheenColorTexture'), + ) + + if len(ext) > 0: + tex_info = TextureInfo.from_dict(ext.get('sheenColorTexture')) if ext.get( + 'sheenColorTexture') is not None else None + # Because extensions are dict, they are not passed by reference + # So we need to update the dict of the KHR_texture_transform extension if needed + if tex_info is not None and tex_info.extensions is not None and "KHR_texture_transform" in tex_info.extensions: + mh.pymat.extensions['KHR_materials_sheen']['sheenColorTexture']['extensions']['KHR_texture_transform'] = tex_info.extensions["KHR_texture_transform"] + + scalar_factor_and_texture( + mh, + location=locs['sheenRoughnessTexture'], + label='Sheen Roughness', + socket=pbr_node.inputs['Sheen Roughness'], + factor=ext.get('sheenRoughnessFactor', 0), + tex_info=ext.get('sheenRoughnessTexture'), + channel=4, # Alpha + ) + + if len(ext) > 0: + tex_info = TextureInfo.from_dict(ext.get('sheenRoughnessTexture')) if ext.get( + 'sheenRoughnessTexture') is not None else None + # Because extensions are dict, they are not passed by reference + # So we need to update the dict of the 
KHR_texture_transform extension if needed + if tex_info is not None and tex_info.extensions is not None and "KHR_texture_transform" in tex_info.extensions: + mh.pymat.extensions['KHR_materials_sheen']['sheenRoughnessTexture']['extensions']['KHR_texture_transform'] = tex_info.extensions["KHR_texture_transform"] + + +def calc_locations(mh): + """Calculate locations to place each bit of the node graph at.""" + # Lay the blocks out top-to-bottom, aligned on the right + x = -200 + y = 0 + height = 460 # height of each block + locs = {} + + clearcoat_ext = mh.get_ext('KHR_materials_clearcoat', {}) + transmission_ext = mh.get_ext('KHR_materials_transmission', {}) + volume_ext = mh.get_ext('KHR_materials_volume', {}) + specular_ext = mh.get_ext('KHR_materials_specular', {}) + anisotropy_ext = mh.get_ext('KHR_materials_anisotropy', {}) + sheen_ext = mh.get_ext('KHR_materials_sheen', {}) + + locs['base_color'] = (x, y) + if mh.pymat.pbr_metallic_roughness.base_color_texture is not None or mh.vertex_color: + y -= height + locs['metallic_roughness'] = (x, y) + if mh.pymat.pbr_metallic_roughness.metallic_roughness_texture is not None: + y -= height + locs['transmission'] = (x, y) + if 'transmissionTexture' in transmission_ext: + y -= height + locs['normal'] = (x, y) + if mh.pymat.normal_texture is not None: + y -= height + locs['specularTexture'] = (x, y) + if 'specularTexture' in specular_ext: + y -= height + locs['specularColorTexture'] = (x, y) + if 'specularColorTexture' in specular_ext: + y -= height + locs['anisotropy'] = (x, y) + if 'anisotropyTexture' in anisotropy_ext: + y -= height + locs['sheenRoughnessTexture'] = (x, y) + if 'sheenRoughnessTexture' in sheen_ext: + y -= height + locs['sheenColorTexture'] = (x, y) + if 'sheenColorTexture' in sheen_ext: + y -= height + locs['clearcoat'] = (x, y) + if 'clearcoatTexture' in clearcoat_ext: + y -= height + locs['clearcoat_roughness'] = (x, y) + if 'clearcoatRoughnessTexture' in clearcoat_ext: + y -= height + locs['clearcoat_normal'] = (x, y) + if 'clearcoatNormalTexture' in clearcoat_ext: + y -= height + locs['emission'] = (x, y) + if mh.pymat.emissive_texture is not None: + y -= height + locs['occlusion'] = (x, y) + if mh.pymat.occlusion_texture is not None: + y -= height + locs['volume_thickness'] = (x, y) + if 'thicknessTexture' in volume_ext: + y -= height + + # Center things + total_height = -y + y_offset = total_height / 2 - 20 + for key in locs: + x, y = locs[key] + locs[key] = (x, y + y_offset) + + return locs + + +# These functions each create one piece of the node graph, slotting +# their outputs into the given socket, or setting its default value. +# location is roughly the upper-right corner of where to put nodes. 
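As a rough orientation for the helpers that follow, a minimal sketch of that slot-or-link pattern, assuming a color-type socket; the function name and parameters here are illustrative, not part of this patch:

import bpy

def fill_color_socket(node_tree, socket, factor, image=None, location=(0, 0)):
    # No texture: write the constant factor (RGBA for color sockets) into the socket.
    if image is None:
        socket.default_value = factor
        return
    # Texture: create an Image Texture node and link its Color output into
    # the socket (the real helpers also mix the factor in before the socket).
    tex_node = node_tree.nodes.new('ShaderNodeTexImage')
    tex_node.image = image
    tex_node.location = location
    node_tree.links.new(socket, tex_node.outputs['Color'])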
+ + +# [Texture] => [Emissive Factor] => +def emission(mh: MaterialHelper, location, color_socket, strength_socket): + factor = mh.pymat.emissive_factor or [0, 0, 0] + ext = mh.get_ext('KHR_materials_emissive_strength', {}) + strength = ext.get('emissiveStrength', 1) + if len(ext) > 0: + # Needed for KHR_animation_pointer + mh.pymat.extensions['KHR_materials_emissive_strength']['blender_nodetree'] = mh.node_tree + # Needed for KHR_animation_pointer + mh.pymat.extensions['KHR_materials_emissive_strength']['blender_mat'] = mh.mat + + if factor[0] == factor[1] == factor[2]: + # Fold greyscale factor into strength + strength *= factor[0] + factor = [1, 1, 1] + + # We need to check if emissive factor is animated via KHR_animation_pointer + # Because if not, we can use direct socket or mix node, depending if there + # is a texture or not, or if factor is grayscale + force_mix_node = False + if mh.gltf.data.extensions_used is not None and "KHR_animation_pointer" in mh.gltf.data.extensions_used: + if len(mh.pymat.animations) > 0: + for anim_idx in mh.pymat.animations.keys(): + for channel_idx in mh.pymat.animations[anim_idx]: + channel = mh.gltf.data.animations[anim_idx].channels[channel_idx] + pointer_tab = channel.target.extensions["KHR_animation_pointer"]["pointer"].split("/") + if len(pointer_tab) == 4 and pointer_tab[1] == "materials" and \ + pointer_tab[3] == "emissiveFactor": + force_mix_node = True + + color_factor_and_texture( + mh, + location, + label='Emissive', + socket=color_socket, + factor=factor, + tex_info=mh.pymat.emissive_texture, + force_mix_node=force_mix_node, + ) + strength_socket.default_value = strength + + +# [Texture] => [Mix Colors] => [Color Factor] => +# [Vertex Color] => [Mix Alphas] => [Alpha Factor] => +def base_color( + mh: MaterialHelper, + location, + color_socket, + alpha_socket=None, + is_diffuse=False, +): + """Handle base color (= baseColorTexture * vertexColor * baseColorFactor).""" + x, y = location + pbr = mh.pymat.pbr_metallic_roughness + if not is_diffuse: + base_color_factor = pbr.base_color_factor + base_color_texture = pbr.base_color_texture + else: + # Handle pbrSpecularGlossiness's diffuse with this function too, + # since it's almost exactly the same as base color. 
+ base_color_factor = \ + mh.pymat.extensions['KHR_materials_pbrSpecularGlossiness'] \ + .get('diffuseFactor', [1, 1, 1, 1]) + base_color_texture = \ + mh.pymat.extensions['KHR_materials_pbrSpecularGlossiness'] \ + .get('diffuseTexture', None) + if base_color_texture is not None: + base_color_texture = TextureInfo.from_dict(base_color_texture) + + if base_color_factor is None: + base_color_factor = [1, 1, 1, 1] + + if base_color_texture is None and not mh.vertex_color: + color_socket.default_value = base_color_factor[:3] + [1] + if alpha_socket is not None: + alpha_socket.default_value = base_color_factor[3] + return + + # Mix in base color factor + needs_color_factor = base_color_factor[:3] != [1, 1, 1] + needs_alpha_factor = base_color_factor[3] != 1.0 and alpha_socket is not None + + # We need to check if base color factor is animated via KHR_animation_pointer + # Because if not, we can use direct socket or mix node, depending if there is a texture or not + # If there is an animation, we need to force creation of a mix node and math node, for color and alpha + if mh.gltf.data.extensions_used is not None and "KHR_animation_pointer" in mh.gltf.data.extensions_used: + if len(mh.pymat.pbr_metallic_roughness.animations) > 0: + for anim_idx in mh.pymat.pbr_metallic_roughness.animations.keys(): + for channel_idx in mh.pymat.pbr_metallic_roughness.animations[anim_idx]: + channel = mh.gltf.data.animations[anim_idx].channels[channel_idx] + pointer_tab = channel.target.extensions["KHR_animation_pointer"]["pointer"].split("/") + if len(pointer_tab) == 5 and pointer_tab[1] == "materials" and \ + pointer_tab[3] == "pbrMetallicRoughness" and \ + pointer_tab[4] == "baseColorFactor": + needs_color_factor = True + needs_alpha_factor = True if alpha_socket is not None else False + + if needs_color_factor or needs_alpha_factor: + if needs_color_factor: + node = mh.node_tree.nodes.new('ShaderNodeMix') + node.label = 'Color Factor' + node.data_type = "RGBA" + node.location = x - 140, y + node.blend_type = 'MULTIPLY' + # Outputs + mh.node_tree.links.new(color_socket, node.outputs[2]) + # Inputs + node.inputs['Factor'].default_value = 1.0 + color_socket = node.inputs[6] + node.inputs[7].default_value = base_color_factor[:3] + [1] + + if needs_alpha_factor: + node = mh.node_tree.nodes.new('ShaderNodeMath') + node.label = 'Alpha Factor' + node.location = x - 140, y - 230 + # Outputs + mh.node_tree.links.new(alpha_socket, node.outputs[0]) + # Inputs + node.operation = 'MULTIPLY' + alpha_socket = node.inputs[0] + node.inputs[1].default_value = base_color_factor[3] + + x -= 200 + + # These are where the texture/vertex color node will put its output. 
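+    # (They start out aimed at the factor/BSDF sockets set up above; when both
+    # a texture and vertex colors are present, the Mix nodes created below
+    # become the new targets instead.)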
+ texture_color_socket = color_socket + texture_alpha_socket = alpha_socket + vcolor_color_socket = color_socket + vcolor_alpha_socket = alpha_socket + + # Mix texture and vertex color together + if base_color_texture is not None and mh.vertex_color: + node = mh.node_tree.nodes.new('ShaderNodeMix') + node.label = 'Mix Vertex Color' + node.data_type = 'RGBA' + node.location = x - 140, y + node.blend_type = 'MULTIPLY' + # Outputs + mh.node_tree.links.new(color_socket, node.outputs[2]) + # Inputs + node.inputs['Factor'].default_value = 1.0 + texture_color_socket = node.inputs[6] + vcolor_color_socket = node.inputs[7] + + if alpha_socket is not None: + node = mh.node_tree.nodes.new('ShaderNodeMath') + node.label = 'Mix Vertex Alpha' + node.location = x - 140, y - 230 + node.operation = 'MULTIPLY' + # Outputs + mh.node_tree.links.new(alpha_socket, node.outputs[0]) + # Inputs + texture_alpha_socket = node.inputs[0] + vcolor_alpha_socket = node.inputs[1] + + x -= 200 + + # Vertex Color + if mh.vertex_color: + node = mh.node_tree.nodes.new('ShaderNodeVertexColor') + # Do not set the layer name, so rendered one will be used (At import => The first one) + node.location = x - 250, y - 240 + # Outputs + mh.node_tree.links.new(vcolor_color_socket, node.outputs['Color']) + if vcolor_alpha_socket is not None: + mh.node_tree.links.new(vcolor_alpha_socket, node.outputs['Alpha']) + + x -= 280 + + # Texture + if base_color_texture is not None: + texture( + mh, + tex_info=base_color_texture, + label='BASE COLOR' if not is_diffuse else 'DIFFUSE', + location=(x, y), + color_socket=texture_color_socket, + alpha_socket=texture_alpha_socket, + ) + + +# [Texture] => [Separate GB] => [Metal/Rough Factor] => +def metallic_roughness(mh: MaterialHelper, location, metallic_socket, roughness_socket): + x, y = location + pbr = mh.pymat.pbr_metallic_roughness + metal_factor = pbr.metallic_factor + rough_factor = pbr.roughness_factor + if metal_factor is None: + metal_factor = 1.0 + if rough_factor is None: + rough_factor = 1.0 + + if pbr.metallic_roughness_texture is None: + metallic_socket.default_value = metal_factor + roughness_socket.default_value = rough_factor + return + + need_metal_factor = metal_factor != 1.0 + need_rough_factor = rough_factor != 1.0 + + # We need to check if factor is animated via KHR_animation_pointer + # Because if not, we can use direct socket or mix node, depending if there is a texture or not + # If there is an animation, we need to force creation of a mix node and math node, for metal or rough + if mh.gltf.data.extensions_used is not None and "KHR_animation_pointer" in mh.gltf.data.extensions_used: + if len(mh.pymat.pbr_metallic_roughness.animations) > 0: + for anim_idx in mh.pymat.pbr_metallic_roughness.animations.keys(): + for channel_idx in mh.pymat.pbr_metallic_roughness.animations[anim_idx]: + channel = mh.gltf.data.pbr_metallic_roughness.animations[anim_idx].channels[channel_idx] + pointer_tab = channel.target.extensions["KHR_animation_pointer"]["pointer"].split("/") + if len(pointer_tab) == 5 and pointer_tab[1] == "materials" and \ + pointer_tab[3] == "pbrMetallicRoughness" and \ + pointer_tab[4] == "roughnessFactor": + need_rough_factor = True + if len(pointer_tab) == 5 and pointer_tab[1] == "materials" and \ + pointer_tab[3] == "pbrMetallicRoughness" and \ + pointer_tab[4] == "metallicFactor": + need_metal_factor = True + + if need_metal_factor or need_rough_factor: + # Mix metal factor + if need_metal_factor: + node = mh.node_tree.nodes.new('ShaderNodeMath') + node.label = 
'Metallic Factor' + node.location = x - 140, y + node.operation = 'MULTIPLY' + # Outputs + mh.node_tree.links.new(metallic_socket, node.outputs[0]) + # Inputs + metallic_socket = node.inputs[0] + node.inputs[1].default_value = metal_factor + + # Mix rough factor + if need_rough_factor: + node = mh.node_tree.nodes.new('ShaderNodeMath') + node.label = 'Roughness Factor' + node.location = x - 140, y - 200 + node.operation = 'MULTIPLY' + # Outputs + mh.node_tree.links.new(roughness_socket, node.outputs[0]) + # Inputs + roughness_socket = node.inputs[0] + node.inputs[1].default_value = rough_factor + + x -= 200 + + # Separate RGB + node = mh.node_tree.nodes.new('ShaderNodeSeparateColor') + node.location = x - 150, y - 75 + # Outputs + mh.node_tree.links.new(metallic_socket, node.outputs['Blue']) + mh.node_tree.links.new(roughness_socket, node.outputs['Green']) + # Inputs + color_socket = node.inputs[0] + + x -= 200 + + texture( + mh, + tex_info=pbr.metallic_roughness_texture, + label='METALLIC ROUGHNESS', + location=(x, y), + is_data=True, + color_socket=color_socket, + ) + + +# [Texture] => [Normal Map] => +def normal(mh: MaterialHelper, location, normal_socket): + tex_info = mh.pymat.normal_texture + if tex_info is not None: + tex_info.blender_nodetree = mh.mat.node_tree # Used in case of for KHR_animation_pointer + tex_info.blender_mat = mh.mat # Used in case of for KHR_animation_pointer #TODOPointer Vertex Color... + + normal_map( + mh, + location=location, + label='Normal Map', + socket=normal_socket, + tex_info=tex_info, + ) + + +# [Texture] => [Separate R] => [Mix Strength] => +def occlusion(mh: MaterialHelper, location, occlusion_socket): + x, y = location + + if mh.pymat.occlusion_texture is None: + return + + strength = mh.pymat.occlusion_texture.strength + if strength is None: + strength = 1.0 + + strength_needed = strength != 1.0 + + # We need to check if occlusion strength is animated via KHR_animation_pointer + # Because if not, we can use direct socket or mix node, depending if there is a texture or not + # If there is an animation, we need to force creation of a mix node and math node, for strength + if mh.gltf.data.extensions_used is not None and "KHR_animation_pointer" in mh.gltf.data.extensions_used: + if len(mh.pymat.occlusion_texture.animations) > 0: + for anim_idx in mh.pymat.occlusion_texture.animations.keys(): + for channel_idx in mh.pymat.occlusion_texture.animations[anim_idx]: + channel = mh.gltf.data.animations[anim_idx].channels[channel_idx] + pointer_tab = channel.target.extensions["KHR_animation_pointer"]["pointer"].split("/") + if len(pointer_tab) == 5 and pointer_tab[1] == "materials" and \ + pointer_tab[3] == "occlusionTexture" and \ + pointer_tab[4] == "strength": + strength_needed = True + + if strength_needed: + # Mix with white + node = mh.node_tree.nodes.new('ShaderNodeMix') + node.label = 'Occlusion Strength' + node.data_type = 'RGBA' + node.location = x - 140, y + node.blend_type = 'MIX' + # Outputs + mh.node_tree.links.new(occlusion_socket, node.outputs[2]) + # Inputs + node.inputs['Factor'].default_value = strength + node.inputs[6].default_value = [1, 1, 1, 1] + occlusion_socket = node.inputs[7] + + x -= 200 + + # Separate RGB + node = mh.node_tree.nodes.new('ShaderNodeSeparateColor') + node.location = x - 150, y - 75 + # Outputs + mh.node_tree.links.new(occlusion_socket, node.outputs['Red']) + # Inputs + color_socket = node.inputs[0] + + x -= 200 + + mh.pymat.occlusion_texture.blender_nodetree = mh.mat.node_tree # Used in case of for 
KHR_animation_pointer + # Used in case of for KHR_animation_pointer #TODOPointer Vertex Color... + mh.pymat.occlusion_texture.blender_mat = mh.mat + + texture( + mh, + tex_info=mh.pymat.occlusion_texture, + label='OCCLUSION', + location=(x, y), + is_data=True, + color_socket=color_socket, + ) + + +def make_settings_node(mh): + """ + Make a Group node with a hookup for Occlusion. No effect in Blender, but + used to tell the exporter what the occlusion map should be. + """ + node = mh.node_tree.nodes.new('ShaderNodeGroup') + node.node_tree = get_settings_group() + return node + + +def get_settings_group(): + gltf_node_group_name = get_gltf_node_name() + if gltf_node_group_name in bpy.data.node_groups: + gltf_node_group = bpy.data.node_groups[gltf_node_group_name] + else: + # Create a new node group + gltf_node_group = create_settings_group(gltf_node_group_name) + return gltf_node_group diff --git a/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_scene.py b/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_scene.py new file mode 100755 index 00000000000..e4731420cb9 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_scene.py @@ -0,0 +1,121 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy + +from .gltf2_blender_node import BlenderNode +from .gltf2_blender_animation import BlenderAnimation +from .gltf2_blender_vnode import VNode, compute_vnodes +from ..com.gltf2_blender_extras import set_extras +from ...io.imp.gltf2_io_user_extensions import import_user_extensions + + +class BlenderScene(): + """Blender Scene.""" + def __new__(cls, *args, **kwargs): + raise RuntimeError("%s should not be instantiated" % cls) + + @staticmethod + def create(gltf): + """Scene creation.""" + scene = bpy.context.scene + gltf.blender_scene = scene.name + if bpy.context.collection.name in bpy.data.collections: # avoid master collection + gltf.blender_active_collection = bpy.context.collection.name + + if gltf.data.scene is not None: + import_user_extensions('gather_import_scene_before_hook', gltf, gltf.data.scenes[gltf.data.scene], scene) + pyscene = gltf.data.scenes[gltf.data.scene] + set_extras(scene, pyscene.extras) + + compute_vnodes(gltf) + + gltf.display_current_node = 0 # for debugging + BlenderNode.create_vnode(gltf, 'root') + + # User extensions before scene creation + gltf_scene = None + if gltf.data.scene is not None: + gltf_scene = gltf.data.scenes[gltf.data.scene] + import_user_extensions('gather_import_scene_after_nodes_hook', gltf, gltf_scene, scene) + + BlenderScene.create_animations(gltf) + + # User extensions after scene creation + gltf_scene = None + if gltf.data.scene is not None: + gltf_scene = gltf.data.scenes[gltf.data.scene] + import_user_extensions('gather_import_scene_after_animation_hook', gltf, gltf_scene, scene) + + if bpy.context.mode != 'OBJECT': + bpy.ops.object.mode_set(mode='OBJECT') + BlenderScene.select_imported_objects(gltf) + BlenderScene.set_active_object(gltf) + + @staticmethod + def create_animations(gltf): + """Create animations.""" + + # Use a class here, to be able to pass data by reference to hook (to be able to change them inside hook) + class IMPORT_animation_options: + def __init__(self, restore_first_anim: bool = True): + self.restore_first_anim = restore_first_anim + + animation_options = IMPORT_animation_options() + import_user_extensions('gather_import_animations', gltf, gltf.data.animations, animation_options) + + if gltf.data.animations: 
+ # NLA tracks are added bottom to top, so create animations in + # reverse so the first winds up on top + for anim_idx in reversed(range(len(gltf.data.animations))): + BlenderAnimation.anim(gltf, anim_idx) + + # Restore first animation + if animation_options.restore_first_anim: + anim_name = gltf.data.animations[0].track_name + BlenderAnimation.restore_animation(gltf, anim_name) + + if hasattr(bpy.data.scenes[0], "gltf2_animation_applied"): + bpy.data.scenes[0].gltf2_animation_applied = bpy.data.scenes[0].gltf2_animation_tracks.find( + gltf.data.animations[0].track_name) + + @staticmethod + def select_imported_objects(gltf): + """Select all (and only) the imported objects.""" + if bpy.ops.object.select_all.poll(): + bpy.ops.object.select_all(action='DESELECT') + + for vnode in gltf.vnodes.values(): + if vnode.type == VNode.Object: + vnode.blender_object.select_set(state=True) + + @staticmethod + def set_active_object(gltf): + """Make the first root object from the default glTF scene active. + If no default scene, use the first scene, or just any root object. + """ + vnode = None + + if gltf.data.scene is not None: + pyscene = gltf.data.scenes[gltf.data.scene] + if pyscene.nodes: + vnode = gltf.vnodes[pyscene.nodes[0]] + + if not vnode: + for pyscene in gltf.data.scenes or []: + if pyscene.nodes: + vnode = gltf.vnodes[pyscene.nodes[0]] + break + + if not vnode: + vnode = gltf.vnodes['root'] + if vnode.type == VNode.DummyRoot: + if not vnode.children: + return # no nodes + vnode = gltf.vnodes[vnode.children[0]] + + if vnode.type == VNode.Bone: + vnode = gltf.vnodes[vnode.bone_arma] + + bpy.context.view_layer.objects.active = vnode.blender_object diff --git a/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_texture.py b/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_texture.py new file mode 100644 index 00000000000..afc72cb3942 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_texture.py @@ -0,0 +1,233 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy +from ...io.com.gltf2_io import Sampler +from ...io.com.gltf2_io_constants import TextureFilter, TextureWrap +from ...io.imp.gltf2_io_user_extensions import import_user_extensions +from ..com.gltf2_blender_conversion import texture_transform_gltf_to_blender +from .gltf2_blender_image import BlenderImage + + +def texture( + mh, + tex_info, + location, # Upper-right corner of the TexImage node + label, # Label for the TexImg node + color_socket, + alpha_socket=None, + is_data=False, + forced_image=None +): + """Creates nodes for a TextureInfo and hooks up the color/alpha outputs.""" + x, y = location + pytexture = mh.gltf.data.textures[tex_info.index] + + import_user_extensions('gather_import_texture_before_hook', mh.gltf, pytexture, mh, + tex_info, location, label, color_socket, alpha_socket, is_data) + + if pytexture.sampler is not None: + pysampler = mh.gltf.data.samplers[pytexture.sampler] + else: + pysampler = Sampler.from_dict({}) + + needs_uv_map = False # whether to create UVMap node + + # Image Texture + tex_img = mh.node_tree.nodes.new('ShaderNodeTexImage') + tex_img.location = x - 240, y + tex_img.label = label + + # Get image + if forced_image is None: + source = get_source(mh, pytexture) + if source is not None: + BlenderImage.create(mh.gltf, source) + pyimg = mh.gltf.data.images[source] + blender_image_name = pyimg.blender_image_name + if blender_image_name: + tex_img.image = 
bpy.data.images[blender_image_name] + else: + tex_img.image = forced_image + # Set colorspace for data images + if is_data: + if tex_img.image: + tex_img.image.colorspace_settings.is_data = True + # Set filtering + set_filtering(tex_img, pysampler) + # Outputs + if color_socket is not None: + mh.node_tree.links.new(color_socket, tex_img.outputs['Color']) + if alpha_socket is not None: + mh.node_tree.links.new(alpha_socket, tex_img.outputs['Alpha']) + # Inputs + uv_socket = tex_img.inputs[0] + + x -= 340 + + # Do wrapping + wrap_s = pysampler.wrap_s + wrap_t = pysampler.wrap_t + if wrap_s is None: + wrap_s = TextureWrap.Repeat + if wrap_t is None: + wrap_t = TextureWrap.Repeat + # If wrapping is the same in both directions, just set tex_img.extension + if wrap_s == wrap_t == TextureWrap.Repeat: + tex_img.extension = 'REPEAT' + elif wrap_s == wrap_t == TextureWrap.ClampToEdge: + tex_img.extension = 'EXTEND' + elif wrap_s == wrap_t == TextureWrap.MirroredRepeat: + tex_img.extension = 'MIRROR' + else: + # Otherwise separate the UV components and use math nodes to compute + # the wrapped UV coordinates + # => [Separate XYZ] => [Wrap for S] => [Combine XYZ] => + # => [Wrap for T] => + + tex_img.extension = 'EXTEND' # slightly better errors near the edge than REPEAT + + # Combine XYZ + com_uv = mh.node_tree.nodes.new('ShaderNodeCombineXYZ') + com_uv.location = x - 140, y - 100 + mh.node_tree.links.new(uv_socket, com_uv.outputs[0]) + u_socket = com_uv.inputs[0] + v_socket = com_uv.inputs[1] + x -= 200 + + for i in [0, 1]: + wrap = [wrap_s, wrap_t][i] + socket = [u_socket, v_socket][i] + if wrap == TextureWrap.Repeat: + # WRAP node for REPEAT + math = mh.node_tree.nodes.new('ShaderNodeMath') + math.location = x - 140, y + 30 - i * 200 + math.operation = 'WRAP' + math.inputs[1].default_value = 0 + math.inputs[2].default_value = 1 + mh.node_tree.links.new(socket, math.outputs[0]) + socket = math.inputs[0] + elif wrap == TextureWrap.MirroredRepeat: + # PINGPONG node for MIRRORED_REPEAT + math = mh.node_tree.nodes.new('ShaderNodeMath') + math.location = x - 140, y + 30 - i * 200 + math.operation = 'PINGPONG' + math.inputs[1].default_value = 1 + mh.node_tree.links.new(socket, math.outputs[0]) + socket = math.inputs[0] + else: + # Pass-through CLAMP since the tex_img node is set to EXTEND + pass + if i == 0: + u_socket = socket + else: + v_socket = socket + x -= 200 + + # Separate XYZ + sep_uv = mh.node_tree.nodes.new('ShaderNodeSeparateXYZ') + sep_uv.location = x - 140, y - 100 + mh.node_tree.links.new(u_socket, sep_uv.outputs[0]) + mh.node_tree.links.new(v_socket, sep_uv.outputs[1]) + uv_socket = sep_uv.inputs[0] + x -= 200 + + needs_uv_map = True + + # UV Transform (for KHR_texture_transform) + needs_tex_transform = 'KHR_texture_transform' in (tex_info.extensions or {}) + + # We also need to create tex transform if this property is animated in KHR_animation_pointer + if mh.gltf.data.extensions_used is not None and "KHR_animation_pointer" in mh.gltf.data.extensions_used: + if tex_info.extensions is not None and "KHR_texture_transform" in tex_info.extensions: + if len(tex_info.extensions["KHR_texture_transform"]["animations"]) > 0: + for anim_idx in tex_info.extensions["KHR_texture_transform"]["animations"].keys(): + for channel_idx in tex_info.extensions["KHR_texture_transform"]["animations"][anim_idx]: + channel = mh.gltf.data.animations[anim_idx].channels[channel_idx] + pointer_tab = channel.target.extensions["KHR_animation_pointer"]["pointer"].split("/") + if len(pointer_tab) >= 7 and pointer_tab[1] 
== "materials" and \ + pointer_tab[-3] == "extensions" and \ + pointer_tab[-2] == "KHR_texture_transform" and \ + pointer_tab[-1] in ["scale", "offset", "rotation"]: + needs_tex_transform = True + # Store multiple channel data, as we will need all channels to convert to + # blender data when animated + if "multiple_channels" not in tex_info.extensions['KHR_texture_transform'].keys(): + tex_info.extensions['KHR_texture_transform']["multiple_channels"] = {} + tex_info.extensions['KHR_texture_transform']["multiple_channels"][pointer_tab[-1] + ] = (anim_idx, channel_idx) + + if needs_tex_transform: + mapping = mh.node_tree.nodes.new('ShaderNodeMapping') + mapping.location = x - 160, y + 30 + mapping.vector_type = 'POINT' + # Outputs + mh.node_tree.links.new(uv_socket, mapping.outputs[0]) + # Inputs + uv_socket = mapping.inputs[0] + + transform = tex_info.extensions['KHR_texture_transform'] + transform = texture_transform_gltf_to_blender(transform) + mapping.inputs['Location'].default_value[0] = transform['offset'][0] + mapping.inputs['Location'].default_value[1] = transform['offset'][1] + mapping.inputs['Rotation'].default_value[2] = transform['rotation'] + mapping.inputs['Scale'].default_value[0] = transform['scale'][0] + mapping.inputs['Scale'].default_value[1] = transform['scale'][1] + + x -= 260 + needs_uv_map = True + + # Needed for KHR_animation_pointer + tex_info.extensions['KHR_texture_transform']['blender_nodetree'] = mh.node_tree + + # UV Map + uv_idx = tex_info.tex_coord or 0 + try: + uv_idx = tex_info.extensions['KHR_texture_transform']['texCoord'] + except Exception: + pass + if uv_idx != 0 or needs_uv_map: + uv_map = mh.node_tree.nodes.new('ShaderNodeUVMap') + uv_map.location = x - 160, y - 70 + uv_map.uv_map = 'UVMap' if uv_idx == 0 else 'UVMap.%03d' % uv_idx + # Outputs + mh.node_tree.links.new(uv_socket, uv_map.outputs[0]) + + import_user_extensions('gather_import_texture_after_hook', mh.gltf, pytexture, mh.node_tree, + mh, tex_info, location, label, color_socket, alpha_socket, is_data) + + +def get_source(mh, pytexture): + src = pytexture.source + try: + webp_src = pytexture.extensions['EXT_texture_webp']['source'] + except Exception: + webp_src = None + + if mh.gltf.import_settings['import_webp_texture']: + return webp_src if webp_src is not None else src + else: + return src if src is not None else webp_src + + +def set_filtering(tex_img, pysampler): + """Set the filtering/interpolation on an Image Texture from the glTf sampler.""" + minf = pysampler.min_filter + magf = pysampler.mag_filter + + # Ignore mipmapping + if minf in [TextureFilter.NearestMipmapNearest, TextureFilter.NearestMipmapLinear]: + minf = TextureFilter.Nearest + elif minf in [TextureFilter.LinearMipmapNearest, TextureFilter.LinearMipmapLinear]: + minf = TextureFilter.Linear + + # If both are nearest or the only specified one was nearest, use nearest. 
+ if (minf, magf) in [ + (TextureFilter.Nearest, TextureFilter.Nearest), + (TextureFilter.Nearest, None), + (None, TextureFilter.Nearest), + ]: + tex_img.interpolation = 'Closest' + else: + tex_img.interpolation = 'Linear' diff --git a/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_vnode.py b/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_vnode.py new file mode 100644 index 00000000000..dea74a979d2 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_blender_vnode.py @@ -0,0 +1,584 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import bpy +from mathutils import Vector, Quaternion, Matrix +from ...io.imp.gltf2_io_binary import BinaryData +from ..com.gltf2_blender_math import scale_rot_swap_matrix, nearby_signed_perm_matrix + + +def compute_vnodes(gltf): + """Computes the tree of virtual nodes. + Copies the glTF nodes into a tree of VNodes, then performs a series of + passes to transform it into a form that we can import into Blender. + """ + init_vnodes(gltf) + mark_bones_and_armas(gltf) + move_skinned_meshes(gltf) + fixup_multitype_nodes(gltf) + correct_cameras_and_lights(gltf) + pick_bind_pose(gltf) + prettify_bones(gltf) + calc_bone_matrices(gltf) + + +class VNode: + """A "virtual" node. + These are what eventually get turned into nodes + in the Blender scene. + """ + # Types + Object = 0 + Bone = 1 + DummyRoot = 2 + Inst = 3 + + def __init__(self): + self.name = None + self.default_name = 'Node' # fallback when no name + self.children = [] + self.parent = None + self.type = VNode.Object + self.is_arma = False + self.base_trs = ( + Vector((0, 0, 0)), + Quaternion((1, 0, 0, 0)), + Vector((1, 1, 1)), + ) + # Additional rotations before/after the base TRS. + # Allows per-vnode axis adjustment. See local_rotation. + self.rotation_after = Quaternion((1, 0, 0, 0)) + self.rotation_before = Quaternion((1, 0, 0, 0)) + + # Indices of the glTF node where the mesh, etc. came from. + # (They can get moved around.) + self.mesh_node_idx = None + self.camera_node_idx = None + self.light_node_idx = None + + def trs(self): + # (final TRS) = (rotation after) (base TRS) (rotation before) + t, r, s = self.base_trs + m = scale_rot_swap_matrix(self.rotation_before) + return ( + self.rotation_after @ t, + self.rotation_after @ r @ self.rotation_before, + m @ s, + ) + + def base_locs_to_final_locs(self, base_locs): + ra = self.rotation_after + return [ra @ loc for loc in base_locs] + + def base_rots_to_final_rots(self, base_rots): + ra, rb = self.rotation_after, self.rotation_before + return [ra @ rot @ rb for rot in base_rots] + + def base_scales_to_final_scales(self, base_scales): + m = scale_rot_swap_matrix(self.rotation_before) + return [m @ scale for scale in base_scales] + + +def local_rotation(gltf, vnode_id, rot): + """Appends a local rotation to vnode's world transform: + (new world transform) = (old world transform) @ (rot) + without changing the world transform of vnode's children. + + For correctness, rot must be a signed permutation of the axes + (eg. (X Y Z)->(X -Z Y)) OR vnode's scale must always be uniform. + """ + gltf.vnodes[vnode_id].rotation_before @= rot + + # Append the inverse rotation after children's TRS to cancel it out. + rot_inv = rot.conjugated() + for child in gltf.vnodes[vnode_id].children: + gltf.vnodes[child].rotation_after = \ + rot_inv @ gltf.vnodes[child].rotation_after + + +def init_vnodes(gltf): + # Map of all VNodes. The keys are arbitrary IDs. 
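# [Editor's note] Illustrative sketch, not part of the patch: VNode.trs() above composes
# the final local transform as (rotation_after) @ (base TRS) @ (rotation_before), which
# is what lets local_rotation() re-orient one node without moving its children. A quick
# check of the translation/rotation part (assumes Blender's mathutils is importable):
from math import radians
from mathutils import Quaternion, Vector

base_t = Vector((1.0, 0.0, 0.0))
base_r = Quaternion((1.0, 0.0, 0.0, 0.0))             # identity rotation
rot_after = Quaternion((0.0, 0.0, 1.0), radians(90))  # +90 degrees about Z
final_t = rot_after @ base_t                          # the translation is rotated too
final_r = rot_after @ base_r                          # rotation_before is identity here
print(final_t)                                        # approximately (0, 1, 0)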
+ # Nodes coming from glTF use the index into gltf.data.nodes for an ID. + gltf.vnodes = {} + + for i, pynode in enumerate(gltf.data.nodes or []): + vnode = VNode() + gltf.vnodes[i] = vnode + vnode.name = pynode.name + vnode.default_name = 'Node_%d' % i + vnode.children = list(pynode.children or []) + vnode.base_trs = get_node_trs(gltf, pynode) + if pynode.mesh is not None: + # Check if there is gpu_instancing extension + if pynode.extensions and "EXT_mesh_gpu_instancing" in pynode.extensions.keys(): + manage_gpu_instancing(gltf, vnode, i, pynode.extensions['EXT_mesh_gpu_instancing'], pynode.mesh) + else: + vnode.mesh_node_idx = i + if pynode.camera is not None: + vnode.camera_node_idx = i + if 'KHR_lights_punctual' in (pynode.extensions or {}): + vnode.light_node_idx = i + + for id in gltf.vnodes: + for child in gltf.vnodes[id].children: + assert gltf.vnodes[child].parent is None + gltf.vnodes[child].parent = id + + # Inserting a root node will simplify things. + roots = [id for id in gltf.vnodes if gltf.vnodes[id].parent is None] + gltf.vnodes['root'] = VNode() + gltf.vnodes['root'].type = VNode.DummyRoot + gltf.vnodes['root'].default_name = 'Root' + gltf.vnodes['root'].children = roots + for root in roots: + gltf.vnodes[root].parent = 'root' + + +def manage_gpu_instancing(gltf, vnode, i, ext, mesh_id): + + trans_list = BinaryData.get_data_from_accessor(gltf, ext['attributes'].get('TRANSLATION', None)) \ + if ext['attributes'].get('TRANSLATION', None) is not None else None + + rot_list = BinaryData.get_data_from_accessor(gltf, ext['attributes'].get('ROTATION', None)) \ + if ext['attributes'].get('ROTATION', None) is not None else None + + scale_list = BinaryData.get_data_from_accessor(gltf, ext['attributes'].get('SCALE', None)) \ + if ext['attributes'].get('SCALE', None) is not None else None + + # Retrieve the first available attribute to get the number of children + val = next((elem for elem in [ + trans_list, + rot_list, + scale_list, + ] if elem is not None), None) + + # Wwe can't have only custom properties + if not val: + return + + length = len(val) + + if trans_list is None: + trans_list = [None] * length + if rot_list is None: + rot_list = [None] * length + if scale_list is None: + scale_list = [None] * length + + assert len(trans_list) == len(rot_list) == len(scale_list) + + for inst in range(length): + inst_id = '%d' % i + "." + '%d' % inst + inst_vnode = VNode() + inst_vnode.type = VNode.Inst + gltf.vnodes[inst_id] = inst_vnode + inst_vnode.name = None + inst_vnode.default_name = 'Node_' + inst_id + inst_vnode.children = [] + inst_vnode.base_trs = get_inst_trs(gltf, trans_list[inst], rot_list[inst], scale_list[inst]) + inst_vnode.mesh_idx = mesh_id + + vnode.children.append(inst_id) + + +def get_inst_trs(gltf, trans, rot, scale): + t = gltf.loc_gltf_to_blender(trans or [0, 0, 0]) + r = gltf.quaternion_gltf_to_blender(rot or [0, 0, 0, 1]) + s = gltf.scale_gltf_to_blender(scale or [1, 1, 1]) + return t, r, s + + +def get_node_trs(gltf, pynode): + if pynode.matrix is not None: + m = gltf.matrix_gltf_to_blender(pynode.matrix) + return m.decompose() + + t = gltf.loc_gltf_to_blender(pynode.translation or [0, 0, 0]) + r = gltf.quaternion_gltf_to_blender(pynode.rotation or [0, 0, 0, 1]) + s = gltf.scale_gltf_to_blender(pynode.scale or [1, 1, 1]) + return t, r, s + + +def mark_bones_and_armas(gltf): + """ + Mark nodes as armatures so that every node that is used as joint is a + descendant of an armature. Mark everything between an armature and a + joint as a bone. 
+ """ + for skin in gltf.data.skins or []: + descendants = list(skin.joints) + if skin.skeleton is not None: + descendants.append(skin.skeleton) + arma_id = deepest_common_ancestor(gltf, descendants) + + if arma_id in skin.joints: + arma_id = gltf.vnodes[arma_id].parent + + if gltf.vnodes[arma_id].type != VNode.Bone: + gltf.vnodes[arma_id].type = VNode.Object + gltf.vnodes[arma_id].is_arma = True + gltf.vnodes[arma_id].arma_name = skin.name or 'Armature' + + for joint in skin.joints: + while joint != arma_id: + gltf.vnodes[joint].type = VNode.Bone + gltf.vnodes[joint].is_arma = False + joint = gltf.vnodes[joint].parent + + # Mark the armature each bone is a descendant of. + + def visit(vnode_id, cur_arma): # Depth-first walk + vnode = gltf.vnodes[vnode_id] + + if vnode.is_arma: + cur_arma = vnode_id + elif vnode.type == VNode.Bone: + vnode.bone_arma = cur_arma + else: + cur_arma = None + + for child in vnode.children: + visit(child, cur_arma) + + visit('root', cur_arma=None) + + +def deepest_common_ancestor(gltf, vnode_ids): + """Find the deepest (improper) ancestor of a set of vnodes.""" + path_to_ancestor = [] # path to deepest ancestor so far + for vnode_id in vnode_ids: + path = path_from_root(gltf, vnode_id) + if not path_to_ancestor: + path_to_ancestor = path + else: + path_to_ancestor = longest_common_prefix(path, path_to_ancestor) + return path_to_ancestor[-1] + + +def path_from_root(gltf, vnode_id): + """Returns the ids of all vnodes from the root to vnode_id.""" + path = [] + while vnode_id is not None: + path.append(vnode_id) + vnode_id = gltf.vnodes[vnode_id].parent + path.reverse() + return path + + +def longest_common_prefix(list1, list2): + i = 0 + while i != min(len(list1), len(list2)): + if list1[i] != list2[i]: + break + i += 1 + return list1[:i] + + +def move_skinned_meshes(gltf): + """ + In glTF, where in the node hierarchy a skinned mesh is instantiated has + no effect on its world space position: only the world transforms of the + joints in its skin affect it. + + To do this in Blender: + * Move a skinned mesh to become a child of the armature that skins it. + Have to ensure the mesh and arma have the same world transform. + * When we do mesh creation, we will also need to put all the verts in + the bind pose in arma space. + """ + ids = list(gltf.vnodes.keys()) + for id in ids: + vnode = gltf.vnodes[id] + + if vnode.mesh_node_idx is None: + continue + + skin = gltf.data.nodes[vnode.mesh_node_idx].skin + if skin is None: + continue + + pyskin = gltf.data.skins[skin] + arma = gltf.vnodes[pyskin.joints[0]].bone_arma + + # First try moving the whole node if we can do it without + # messing anything up. + is_animated = ( + gltf.data.animations and + isinstance(id, int) and + gltf.data.nodes[id].animations + ) + ok_to_move = ( + not is_animated and + vnode.type == VNode.Object and + not vnode.is_arma and + not vnode.children and + vnode.camera_node_idx is None and + vnode.light_node_idx is None + ) + if ok_to_move: + reparent(gltf, id, new_parent=arma) + vnode.base_trs = ( + Vector((0, 0, 0)), + Quaternion((1, 0, 0, 0)), + Vector((1, 1, 1)), + ) + continue + + # Otherwise, create a new child of the arma and move + # the mesh instance there, leaving the node behind. 
+ new_id = str(id) + '.skinned' + gltf.vnodes[new_id] = VNode() + gltf.vnodes[new_id].parent = arma + gltf.vnodes[arma].children.append(new_id) + gltf.vnodes[new_id].mesh_node_idx = vnode.mesh_node_idx + vnode.mesh_node_idx = None + + +def reparent(gltf, vnode_id, new_parent): + """Moves a VNode to a new parent.""" + vnode = gltf.vnodes[vnode_id] + if vnode.parent == new_parent: + return + if vnode.parent is not None: + parent_vnode = gltf.vnodes[vnode.parent] + index = parent_vnode.children.index(vnode_id) + del parent_vnode.children[index] + vnode.parent = new_parent + gltf.vnodes[new_parent].children.append(vnode_id) + + +def fixup_multitype_nodes(gltf): + """ + Blender only lets each object have one of: an armature, a mesh, a + camera, a light. Also bones cannot have any of these either. Find any + nodes like this and move the mesh/camera/light onto new children. + """ + ids = list(gltf.vnodes.keys()) + for id in ids: + vnode = gltf.vnodes[id] + + needs_move = False + + if vnode.is_arma or vnode.type == VNode.Bone: + needs_move = True + + if vnode.mesh_node_idx is not None: + if needs_move: + new_id = str(id) + '.mesh' + gltf.vnodes[new_id] = VNode() + gltf.vnodes[new_id].mesh_node_idx = vnode.mesh_node_idx + gltf.vnodes[new_id].parent = id + vnode.children.append(new_id) + vnode.mesh_node_idx = None + needs_move = True + + if vnode.camera_node_idx is not None: + if needs_move: + new_id = str(id) + '.camera' + gltf.vnodes[new_id] = VNode() + gltf.vnodes[new_id].camera_node_idx = vnode.camera_node_idx + gltf.vnodes[new_id].parent = id + vnode.children.append(new_id) + vnode.camera_node_idx = None + needs_move = True + + if vnode.light_node_idx is not None: + if needs_move: + new_id = str(id) + '.light' + gltf.vnodes[new_id] = VNode() + gltf.vnodes[new_id].light_node_idx = vnode.light_node_idx + gltf.vnodes[new_id].parent = id + vnode.children.append(new_id) + vnode.light_node_idx = None + needs_move = True + + +def correct_cameras_and_lights(gltf): + """ + Depending on the coordinate change, lights and cameras might need to be + rotated to match Blender conventions for which axes they point along. + """ + if gltf.camera_correction is None: + return + + for id, vnode in gltf.vnodes.items(): + needs_correction = \ + vnode.camera_node_idx is not None or \ + vnode.light_node_idx is not None + + if needs_correction: + local_rotation(gltf, id, gltf.camera_correction) + + +def pick_bind_pose(gltf): + """ + Pick the bind pose for all bones. Skinned meshes will be retargeted onto + this bind pose during mesh creation. + """ + if gltf.import_settings['guess_original_bind_pose']: + # Record inverse bind matrices. We're going to milk them for information + # about the original bind pose. 
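# [Editor's note] Illustrative sketch, not part of the patch: the bind-pose guess below
# solves (joint bind matrix) = (parent bind matrix) @ (bind local) for the local
# transform, using the inverse bind matrices shipped in the skin:
#     bind_local = inv_bind[parent] @ inv_bind[joint]^-1
# A quick numeric check with translations only (assumes Blender's mathutils):
from mathutils import Matrix

parent_bind = Matrix.Translation((0.0, 1.0, 0.0))
joint_bind = Matrix.Translation((0.0, 3.0, 0.0))   # joint sits 2 units above its parent
inv_parent = parent_bind.inverted()
inv_joint = joint_bind.inverted()
bind_local = inv_parent @ inv_joint.inverted()
print(bind_local.to_translation())                 # approximately (0, 2, 0)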
+ inv_binds = {'root': Matrix.Identity(4)} + for skin in gltf.data.skins or []: + if skin.inverse_bind_matrices is None: + continue + + # Assume inverse bind matrices are calculated relative to the skeleton + skel = skin.skeleton + if skel is not None: + if skel in skin.joints: + skel = gltf.vnodes[skel].parent + if skel not in inv_binds: + inv_binds[skel] = Matrix.Identity(4) + + skin_inv_binds = BinaryData.get_data_from_accessor(gltf, skin.inverse_bind_matrices) + skin_inv_binds = [gltf.matrix_gltf_to_blender(m) for m in skin_inv_binds] + for i, joint in enumerate(skin.joints): + inv_binds[joint] = skin_inv_binds[i] + + for vnode_id in gltf.vnodes: + vnode = gltf.vnodes[vnode_id] + if vnode.type == VNode.Bone: + # Initialize bind pose to default pose (from gltf.data.nodes) + vnode.bind_trans = Vector(vnode.base_trs[0]) + vnode.bind_rot = Quaternion(vnode.base_trs[1]) + + if gltf.import_settings['guess_original_bind_pose']: + # Try to guess bind pose from inverse bind matrices + if vnode_id in inv_binds and vnode.parent in inv_binds: + # (bind matrix) = (parent bind matrix) (bind local). Solve for bind local... + bind_local = inv_binds[vnode.parent] @ inv_binds[vnode_id].inverted_safe() + t, r, _s = bind_local.decompose() + vnode.bind_trans = t + vnode.bind_rot = r + + # Initialize editbones to match bind pose + vnode.editbone_trans = Vector(vnode.bind_trans) + vnode.editbone_rot = Quaternion(vnode.bind_rot) + + +def prettify_bones(gltf): + """ + Prettify bone lengths/directions. + """ + def visit(vnode_id, parent_rot=None): # Depth-first walk + vnode = gltf.vnodes[vnode_id] + rot = None + + if vnode.type == VNode.Bone: + vnode.bone_length = pick_bone_length(gltf, vnode_id) + rot = pick_bone_rotation(gltf, vnode_id, parent_rot) + if rot is not None: + rotate_edit_bone(gltf, vnode_id, rot) + + for child in vnode.children: + visit(child, parent_rot=rot) + + visit('root') + + +MIN_BONE_LENGTH = 0.004 # too small and bones get deleted + + +def pick_bone_length(gltf, bone_id): + """Heuristic for bone length.""" + vnode = gltf.vnodes[bone_id] + + child_locs = [ + gltf.vnodes[child].editbone_trans + for child in vnode.children + if gltf.vnodes[child].type == VNode.Bone + ] + child_locs = [loc for loc in child_locs if loc.length > MIN_BONE_LENGTH] + if child_locs: + return min(loc.length for loc in child_locs) + + if gltf.vnodes[vnode.parent].type == VNode.Bone: + return gltf.vnodes[vnode.parent].bone_length + + if vnode.editbone_trans.length > MIN_BONE_LENGTH: + return vnode.editbone_trans.length + + return 1 + + +def pick_bone_rotation(gltf, bone_id, parent_rot): + """Heuristic for bone rotation. + A bone's tip lies on its local +Y axis so rotating a bone let's us + adjust the bone direction. 
+ """ + if bpy.app.debug_value == 100: + return None + + if gltf.import_settings['bone_heuristic'] == 'BLENDER': + return Quaternion((2**0.5 / 2, 2**0.5 / 2, 0, 0)) + elif gltf.import_settings['bone_heuristic'] in ['TEMPERANCE', 'FORTUNE']: + return temperance(gltf, bone_id, parent_rot) + + +def temperance(gltf, bone_id, parent_rot): + vnode = gltf.vnodes[bone_id] + + # Try to put our tip at the centroid of our children + child_locs = [ + gltf.vnodes[child].editbone_trans + for child in vnode.children + if gltf.vnodes[child].type == VNode.Bone + ] + child_locs = [loc for loc in child_locs if loc.length > MIN_BONE_LENGTH] + if child_locs: + centroid = sum(child_locs, Vector((0, 0, 0))) + rot = Vector((0, 1, 0)).rotation_difference(centroid) + if gltf.import_settings['bone_heuristic'] == 'TEMPERANCE': + # Snap to the local axes; required for local_rotation to be + # accurate when vnode has a non-uniform scaling. + # FORTUNE skips this, so it may look better, but may have errors. + rot = nearby_signed_perm_matrix(rot).to_quaternion() + return rot + + return parent_rot + + +def rotate_edit_bone(gltf, bone_id, rot): + """Rotate one edit bone without affecting anything else.""" + gltf.vnodes[bone_id].editbone_rot @= rot + # Cancel out the rotation so children aren't affected. + rot_inv = rot.conjugated() + for child_id in gltf.vnodes[bone_id].children: + child = gltf.vnodes[child_id] + if child.type == VNode.Bone: + child.editbone_trans = rot_inv @ child.editbone_trans + child.editbone_rot = rot_inv @ child.editbone_rot + # Need to rotate the bone's final TRS by the same amount so skinning + # isn't affected. + local_rotation(gltf, bone_id, rot) + + +def calc_bone_matrices(gltf): + """ + Calculate the transformations from bone space to arma space in the bind + pose and in the edit bone pose. + """ + def visit(vnode_id): # Depth-first walk + vnode = gltf.vnodes[vnode_id] + if vnode.type == VNode.Bone: + if gltf.vnodes[vnode.parent].type == VNode.Bone: + parent_bind_mat = gltf.vnodes[vnode.parent].bind_arma_mat + parent_editbone_mat = gltf.vnodes[vnode.parent].editbone_arma_mat + else: + parent_bind_mat = Matrix.Identity(4) + parent_editbone_mat = Matrix.Identity(4) + + t, r = vnode.bind_trans, vnode.bind_rot + local_to_parent = Matrix.Translation(t) @ Quaternion(r).to_matrix().to_4x4() + vnode.bind_arma_mat = parent_bind_mat @ local_to_parent + + t, r = vnode.editbone_trans, vnode.editbone_rot + local_to_parent = Matrix.Translation(t) @ Quaternion(r).to_matrix().to_4x4() + vnode.editbone_arma_mat = parent_editbone_mat @ local_to_parent + + for child in vnode.children: + visit(child) + + visit('root') diff --git a/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_io_draco_compression_extension.py b/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_io_draco_compression_extension.py new file mode 100644 index 00000000000..e8d4595fc47 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/blender/imp/gltf2_io_draco_compression_extension.py @@ -0,0 +1,138 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +from ctypes import * + +from ...io.com.gltf2_io import BufferView +from ...io.imp.gltf2_io_binary import BinaryData +from ...io.com.gltf2_io_draco_compression_extension import dll_path + + +def decode_primitive(gltf, prim): + """ + Handles draco compression. + Moves decoded data into new buffers and buffer views held by the accessors of the given primitive. + """ + + # Load DLL and setup function signatures. 
+ dll = cdll.LoadLibrary(str(dll_path().resolve())) + + dll.decoderCreate.restype = c_void_p + dll.decoderCreate.argtypes = [] + + dll.decoderRelease.restype = None + dll.decoderRelease.argtypes = [c_void_p] + + dll.decoderDecode.restype = c_bool + dll.decoderDecode.argtypes = [c_void_p, c_void_p, c_size_t] + + dll.decoderReadAttribute.restype = c_bool + dll.decoderReadAttribute.argtypes = [c_void_p, c_uint32, c_size_t, c_char_p] + + dll.decoderGetVertexCount.restype = c_uint32 + dll.decoderGetVertexCount.argtypes = [c_void_p] + + dll.decoderGetIndexCount.restype = c_uint32 + dll.decoderGetIndexCount.argtypes = [c_void_p] + + dll.decoderAttributeIsNormalized.restype = c_bool + dll.decoderAttributeIsNormalized.argtypes = [c_void_p, c_uint32] + + dll.decoderGetAttributeByteLength.restype = c_size_t + dll.decoderGetAttributeByteLength.argtypes = [c_void_p, c_uint32] + + dll.decoderCopyAttribute.restype = None + dll.decoderCopyAttribute.argtypes = [c_void_p, c_uint32, c_void_p] + + dll.decoderReadIndices.restype = c_bool + dll.decoderReadIndices.argtypes = [c_void_p, c_size_t] + + dll.decoderGetIndicesByteLength.restype = c_size_t + dll.decoderGetIndicesByteLength.argtypes = [c_void_p] + + dll.decoderCopyIndices.restype = None + dll.decoderCopyIndices.argtypes = [c_void_p, c_void_p] + + decoder = dll.decoderCreate() + extension = prim.extensions['KHR_draco_mesh_compression'] + + name = prim.name if hasattr(prim, 'name') else '[unnamed]' + + # Create Draco decoder. + draco_buffer = bytes(BinaryData.get_buffer_view(gltf, extension['bufferView'])) + if not dll.decoderDecode(decoder, draco_buffer, len(draco_buffer)): + gltf.log.error('Draco Decoder: Unable to decode. Skipping primitive {}.'.format(name)) + return + + # Choose a buffer index which does not yet exist, skipping over existing glTF buffers yet to be loaded + # and buffers which were generated and did not exist in the initial glTF file, like this decoder does. + base_buffer_idx = len(gltf.data.buffers) + for existing_buffer_idx in gltf.buffers: + if base_buffer_idx <= existing_buffer_idx: + base_buffer_idx = existing_buffer_idx + 1 + + # Read indices. + index_accessor = gltf.data.accessors[prim.indices] + if dll.decoderGetIndexCount(decoder) != index_accessor.count: + gltf.log.warning( + 'Draco Decoder: Index count of accessor and decoded index count does not match. Updating accessor.') + index_accessor.count = dll.decoderGetIndexCount(decoder) + if not dll.decoderReadIndices(decoder, index_accessor.component_type): + gltf.log.error('Draco Decoder: Unable to decode indices. Skipping primitive {}.'.format(name)) + return + + indices_byte_length = dll.decoderGetIndicesByteLength(decoder) + decoded_data = bytes(indices_byte_length) + dll.decoderCopyIndices(decoder, decoded_data) + + # Generate a new buffer holding the decoded indices. + gltf.buffers[base_buffer_idx] = decoded_data + + # Create a buffer view referencing the new buffer. + gltf.data.buffer_views.append(BufferView.from_dict({ + 'buffer': base_buffer_idx, + 'byteLength': indices_byte_length + })) + + # Update accessor to point to the new buffer view. + index_accessor.buffer_view = len(gltf.data.buffer_views) - 1 + + # Read each attribute. + for attr_idx, attr in enumerate(extension['attributes']): + dracoId = extension['attributes'][attr] + if attr not in prim.attributes: + gltf.log.error( + 'Draco Decoder: Draco attribute {} not in primitive attributes. 
Skipping primitive {}.'.format( + attr, name)) + return + + accessor = gltf.data.accessors[prim.attributes[attr]] + if dll.decoderGetVertexCount(decoder) != accessor.count: + gltf.log.warning( + 'Draco Decoder: Vertex count of accessor and decoded vertex count does not match for attribute {}. Updating accessor.'.format( + attr, + name)) + accessor.count = dll.decoderGetVertexCount(decoder) + if not dll.decoderReadAttribute(decoder, dracoId, accessor.component_type, accessor.type.encode()): + gltf.log.error('Draco Decoder: Could not decode attribute {}. Skipping primitive {}.'.format(attr, name)) + return + + byte_length = dll.decoderGetAttributeByteLength(decoder, dracoId) + decoded_data = bytes(byte_length) + dll.decoderCopyAttribute(decoder, dracoId, decoded_data) + + # Generate a new buffer holding the decoded vertex data. + buffer_idx = base_buffer_idx + 1 + attr_idx + gltf.buffers[buffer_idx] = decoded_data + + # Create a buffer view referencing the new buffer. + gltf.data.buffer_views.append(BufferView.from_dict({ + 'buffer': buffer_idx, + 'byteLength': byte_length + })) + + # Update accessor to point to the new buffer view. + accessor.buffer_view = len(gltf.data.buffer_views) - 1 + + dll.decoderRelease(decoder) diff --git a/scripts/addons_core/io_scene_gltf2/io/__init__.py b/scripts/addons_core/io_scene_gltf2/io/__init__.py new file mode 100755 index 00000000000..6c885812820 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/io/__init__.py @@ -0,0 +1,5 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +from .imp import * diff --git a/scripts/addons_core/io_scene_gltf2/io/com/gltf2_io.py b/scripts/addons_core/io_scene_gltf2/io/com/gltf2_io.py new file mode 100755 index 00000000000..6048e595ee0 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/io/com/gltf2_io.py @@ -0,0 +1,1215 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +# NOTE: Generated from latest glTF 2.0 JSON Scheme specs using quicktype (https://github.com/quicktype/quicktype) +# command used: +# quicktype --src glTF.schema.json --src-lang schema -t gltf --lang python --python-version 3.5 + +# TODO: add __slots__ to all classes by extending the generator + +# TODO: REMOVE traceback import + +# NOTE: this file is modified for addonExtension use. 
See +# https://github.com/KhronosGroup/glTF-Blender-IO/commit/62ff119d8ffeab48f66e9d2699741407d532fe0f + +import sys +import traceback + +from ...io.com import gltf2_io_debug + + +def from_int(x): + assert isinstance(x, int) and not isinstance(x, bool) + return x + + +def from_none(x): + assert x is None + return x + + +def from_union(fs, x): + tracebacks = [] + for f in fs: + try: + return f(x) + except AssertionError: + _, _, tb = sys.exc_info() + tracebacks.append(tb) + for tb in tracebacks: + traceback.print_tb(tb) # Fixed format + tb_info = traceback.extract_tb(tb) + for tbi in tb_info: + filename, line, func, text = tbi + print('ERROR', 'An error occurred on line {} in statement {}'.format(line, text)) + assert False + + +def from_dict(f, x): + assert isinstance(x, dict) + return {k: f(v) for (k, v) in x.items()} + + +def to_class(c, x): + assert isinstance(x, c) + return x.to_dict() + + +def from_list(f, x): + assert isinstance(x, list) + return [f(y) for y in x] + + +def from_float(x): + assert isinstance(x, (float, int)) and not isinstance(x, bool) + return float(x) + + +def from_str(x): + assert isinstance(x, str) + return x + + +def from_bool(x): + assert isinstance(x, bool) + return x + + +def to_float(x): + assert isinstance(x, float) + return x + + +def extension_to_dict(obj): + if hasattr(obj, 'to_list'): + obj = obj.to_list() + if hasattr(obj, 'to_dict'): + obj = obj.to_dict() + if isinstance(obj, list): + return [extension_to_dict(x) for x in obj] + elif isinstance(obj, dict): + return {k: extension_to_dict(v) for (k, v) in obj.items()} + return obj + + +def from_extension(x): + x = extension_to_dict(x) + assert isinstance(x, dict) + return x + + +def from_extra(x): + return extension_to_dict(x) + + +class AccessorSparseIndices: + """Index array of size `count` that points to those accessor attributes that deviate from + their initialization value. Indices must strictly increase. + + Indices of those attributes that deviate from their initialization value. + """ + + def __init__(self, buffer_view, byte_offset, component_type, extensions, extras): + self.buffer_view = buffer_view + self.byte_offset = byte_offset + self.component_type = component_type + self.extensions = extensions + self.extras = extras + + @staticmethod + def from_dict(obj): + assert isinstance(obj, dict) + buffer_view = from_int(obj.get("bufferView")) + byte_offset = from_union([from_int, from_none], obj.get("byteOffset")) + component_type = from_int(obj.get("componentType")) + extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none], + obj.get("extensions")) + extras = obj.get("extras") + return AccessorSparseIndices(buffer_view, byte_offset, component_type, extensions, extras) + + def to_dict(self): + result = {} + result["bufferView"] = from_int(self.buffer_view) + result["byteOffset"] = from_union([from_int, from_none], self.byte_offset) + result["componentType"] = from_int(self.component_type) + result["extensions"] = from_union([lambda x: from_dict(from_extension, x), from_none], + self.extensions) + result["extras"] = from_extra(self.extras) + return result + + +class AccessorSparseValues: + """Array of size `count` times number of components, storing the displaced accessor + attributes pointed by `indices`. Substituted values must have the same `componentType` + and number of components as the base accessor. + + Array of size `accessor.sparse.count` times number of components storing the displaced + accessor attributes pointed by `accessor.sparse.indices`. 
+ """ + + def __init__(self, buffer_view, byte_offset, extensions, extras): + self.buffer_view = buffer_view + self.byte_offset = byte_offset + self.extensions = extensions + self.extras = extras + + @staticmethod + def from_dict(obj): + assert isinstance(obj, dict) + buffer_view = from_int(obj.get("bufferView")) + byte_offset = from_union([from_int, from_none], obj.get("byteOffset")) + extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none], + obj.get("extensions")) + extras = obj.get("extras") + return AccessorSparseValues(buffer_view, byte_offset, extensions, extras) + + def to_dict(self): + result = {} + result["bufferView"] = from_int(self.buffer_view) + result["byteOffset"] = from_union([from_int, from_none], self.byte_offset) + result["extensions"] = from_union([lambda x: from_dict(from_extension, x), from_none], + self.extensions) + result["extras"] = from_extra(self.extras) + return result + + +class AccessorSparse: + """Sparse storage of attributes that deviate from their initialization value.""" + + def __init__(self, count, extensions, extras, indices, values): + self.count = count + self.extensions = extensions + self.extras = extras + self.indices = indices + self.values = values + + @staticmethod + def from_dict(obj): + assert isinstance(obj, dict) + count = from_int(obj.get("count")) + extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none], + obj.get("extensions")) + extras = obj.get("extras") + indices = AccessorSparseIndices.from_dict(obj.get("indices")) + values = AccessorSparseValues.from_dict(obj.get("values")) + return AccessorSparse(count, extensions, extras, indices, values) + + def to_dict(self): + result = {} + result["count"] = from_int(self.count) + result["extensions"] = from_union([lambda x: from_dict(from_extension, x), from_none], + self.extensions) + result["extras"] = from_extra(self.extras) + result["indices"] = to_class(AccessorSparseIndices, self.indices) + result["values"] = to_class(AccessorSparseValues, self.values) + return result + + +class Accessor: + """A typed view into a bufferView. A bufferView contains raw binary data. An accessor + provides a typed view into a bufferView or a subset of a bufferView similar to how + WebGL's `vertexAttribPointer()` defines an attribute in a buffer. 
+ """ + + def __init__(self, buffer_view, byte_offset, component_type, count, extensions, extras, max, min, name, normalized, + sparse, type): + self.buffer_view = buffer_view + self.byte_offset = byte_offset + self.component_type = component_type + self.count = count + self.extensions = extensions + self.extras = extras + self.max = max + self.min = min + self.name = name + self.normalized = normalized + self.sparse = sparse + self.type = type + + @staticmethod + def from_dict(obj): + assert isinstance(obj, dict) + buffer_view = from_union([from_int, from_none], obj.get("bufferView")) + byte_offset = from_union([from_int, from_none], obj.get("byteOffset")) + component_type = from_int(obj.get("componentType")) + count = from_int(obj.get("count")) + extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none], + obj.get("extensions")) + extras = obj.get("extras") + max = from_union([lambda x: from_list(from_float, x), from_none], obj.get("max")) + min = from_union([lambda x: from_list(from_float, x), from_none], obj.get("min")) + name = from_union([from_str, from_none], obj.get("name")) + normalized = from_union([from_bool, from_none], obj.get("normalized")) + sparse = from_union([AccessorSparse.from_dict, from_none], obj.get("sparse")) + type = from_str(obj.get("type")) + return Accessor(buffer_view, byte_offset, component_type, count, extensions, extras, max, min, name, normalized, + sparse, type) + + def to_dict(self): + result = {} + result["bufferView"] = from_union([from_int, from_none], self.buffer_view) + result["byteOffset"] = from_union([from_int, from_none], self.byte_offset) + result["componentType"] = from_int(self.component_type) + result["count"] = from_int(self.count) + result["extensions"] = from_union([lambda x: from_dict(from_extension, x), from_none], + self.extensions) + result["extras"] = from_extra(self.extras) + result["max"] = from_union([lambda x: from_list(to_float, x), from_none], self.max) + result["min"] = from_union([lambda x: from_list(to_float, x), from_none], self.min) + result["name"] = from_union([from_str, from_none], self.name) + result["normalized"] = from_union([from_bool, from_none], self.normalized) + result["sparse"] = from_union([lambda x: to_class(AccessorSparse, x), from_none], self.sparse) + result["type"] = from_str(self.type) + return result + + +class AnimationChannelTarget: + """The index of the node and TRS property to target. + + The index of the node and TRS property that an animation channel targets. 
+ """ + + def __init__(self, extensions, extras, node, path): + self.extensions = extensions + self.extras = extras + self.node = node + self.path = path + + @staticmethod + def from_dict(obj): + assert isinstance(obj, dict) + extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none], + obj.get("extensions")) + extras = obj.get("extras") + node = from_union([from_int, from_none], obj.get("node")) + path = from_str(obj.get("path")) + return AnimationChannelTarget(extensions, extras, node, path) + + def to_dict(self): + result = {} + result["extensions"] = from_union([lambda x: from_dict(from_extension, x), from_none], + self.extensions) + result["extras"] = from_extra(self.extras) + result["node"] = from_union([from_int, from_none], self.node) + result["path"] = from_str(self.path) + return result + + +class AnimationChannel: + """Targets an animation's sampler at a node's property.""" + + def __init__(self, extensions, extras, sampler, target): + self.extensions = extensions + self.extras = extras + self.sampler = sampler + self.target = target + + @staticmethod + def from_dict(obj): + assert isinstance(obj, dict) + extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none], + obj.get("extensions")) + extras = obj.get("extras") + sampler = from_int(obj.get("sampler")) + target = AnimationChannelTarget.from_dict(obj.get("target")) + return AnimationChannel(extensions, extras, sampler, target) + + def to_dict(self): + result = {} + result["extensions"] = from_union([lambda x: from_dict(from_extension, x), from_none], + self.extensions) + result["extras"] = from_extra(self.extras) + result["sampler"] = from_int(self.sampler) + result["target"] = to_class(AnimationChannelTarget, self.target) + return result + + +class AnimationSampler: + """Combines input and output accessors with an interpolation algorithm to define a keyframe + graph (but not its target). 
+ """ + + def __init__(self, extensions, extras, input, interpolation, output): + self.extensions = extensions + self.extras = extras + self.input = input + self.interpolation = interpolation + self.output = output + + @staticmethod + def from_dict(obj): + assert isinstance(obj, dict) + extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none], + obj.get("extensions")) + extras = obj.get("extras") + input = from_int(obj.get("input")) + interpolation = from_union([from_str, from_none], obj.get("interpolation")) + output = from_int(obj.get("output")) + return AnimationSampler(extensions, extras, input, interpolation, output) + + def to_dict(self): + result = {} + result["extensions"] = from_union([lambda x: from_dict(from_extension, x), from_none], + self.extensions) + result["extras"] = from_extra(self.extras) + result["input"] = from_int(self.input) + result["interpolation"] = from_union([from_str, from_none], self.interpolation) + result["output"] = from_int(self.output) + return result + + +class Animation: + """A keyframe animation.""" + + def __init__(self, channels, extensions, extras, name, samplers): + self.channels = channels + self.extensions = extensions + self.extras = extras + self.name = name + self.samplers = samplers + + @staticmethod + def from_dict(obj): + assert isinstance(obj, dict) + channels = from_list(AnimationChannel.from_dict, obj.get("channels")) + extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none], + obj.get("extensions")) + extras = obj.get("extras") + name = from_union([from_str, from_none], obj.get("name")) + samplers = from_list(AnimationSampler.from_dict, obj.get("samplers")) + return Animation(channels, extensions, extras, name, samplers) + + def to_dict(self): + result = {} + result["channels"] = from_list(lambda x: to_class(AnimationChannel, x), self.channels) + result["extensions"] = from_union([lambda x: from_dict(from_extension, x), from_none], + self.extensions) + result["extras"] = from_extra(self.extras) + result["name"] = from_union([from_str, from_none], self.name) + result["samplers"] = from_list(lambda x: to_class(AnimationSampler, x), self.samplers) + return result + + +class Asset: + """Metadata about the glTF asset.""" + + def __init__(self, copyright, extensions, extras, generator, min_version, version): + self.copyright = copyright + self.extensions = extensions + self.extras = extras + self.generator = generator + self.min_version = min_version + self.version = version + + @staticmethod + def from_dict(obj): + assert isinstance(obj, dict) + copyright = from_union([from_str, from_none], obj.get("copyright")) + extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none], + obj.get("extensions")) + extras = obj.get("extras") + generator = from_union([from_str, from_none], obj.get("generator")) + min_version = from_union([from_str, from_none], obj.get("minVersion")) + version = from_str(obj.get("version")) + return Asset(copyright, extensions, extras, generator, min_version, version) + + def to_dict(self): + result = {} + result["copyright"] = from_union([from_str, from_none], self.copyright) + result["extensions"] = from_union([lambda x: from_dict(from_extension, x), from_none], + self.extensions) + result["extras"] = from_extra(self.extras) + result["generator"] = from_union([from_str, from_none], self.generator) + result["minVersion"] = from_union([from_str, from_none], self.min_version) + result["version"] = 
from_str(self.version) + return result + + +class BufferView: + """A view into a buffer generally representing a subset of the buffer.""" + + def __init__(self, buffer, byte_length, byte_offset, byte_stride, extensions, extras, name, target): + self.buffer = buffer + self.byte_length = byte_length + self.byte_offset = byte_offset + self.byte_stride = byte_stride + self.extensions = extensions + self.extras = extras + self.name = name + self.target = target + + @staticmethod + def from_dict(obj): + assert isinstance(obj, dict) + buffer = from_int(obj.get("buffer")) + byte_length = from_int(obj.get("byteLength")) + byte_offset = from_union([from_int, from_none], obj.get("byteOffset")) + byte_stride = from_union([from_int, from_none], obj.get("byteStride")) + extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none], + obj.get("extensions")) + extras = obj.get("extras") + name = from_union([from_str, from_none], obj.get("name")) + target = from_union([from_int, from_none], obj.get("target")) + return BufferView(buffer, byte_length, byte_offset, byte_stride, extensions, extras, name, target) + + def to_dict(self): + result = {} + result["buffer"] = from_int(self.buffer) + result["byteLength"] = from_int(self.byte_length) + result["byteOffset"] = from_union([from_int, from_none], self.byte_offset) + result["byteStride"] = from_union([from_int, from_none], self.byte_stride) + result["extensions"] = from_union([lambda x: from_dict(from_extension, x), from_none], + self.extensions) + result["extras"] = from_extra(self.extras) + result["name"] = from_union([from_str, from_none], self.name) + result["target"] = from_union([from_int, from_none], self.target) + return result + + +class Buffer: + """A buffer points to binary geometry, animation, or skins.""" + + def __init__(self, byte_length, extensions, extras, name, uri): + self.byte_length = byte_length + self.extensions = extensions + self.extras = extras + self.name = name + self.uri = uri + + @staticmethod + def from_dict(obj): + assert isinstance(obj, dict) + byte_length = from_int(obj.get("byteLength")) + extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none], + obj.get("extensions")) + extras = obj.get("extras") + name = from_union([from_str, from_none], obj.get("name")) + uri = from_union([from_str, from_none], obj.get("uri")) + return Buffer(byte_length, extensions, extras, name, uri) + + def to_dict(self): + result = {} + result["byteLength"] = from_int(self.byte_length) + result["extensions"] = from_union([lambda x: from_dict(from_extension, x), from_none], + self.extensions) + result["extras"] = from_extra(self.extras) + result["name"] = from_union([from_str, from_none], self.name) + result["uri"] = from_union([from_str, from_none], self.uri) + return result + + +class CameraOrthographic: + """An orthographic camera containing properties to create an orthographic projection matrix.""" + + def __init__(self, extensions, extras, xmag, ymag, zfar, znear): + self.extensions = extensions + self.extras = extras + self.xmag = xmag + self.ymag = ymag + self.zfar = zfar + self.znear = znear + + @staticmethod + def from_dict(obj): + assert isinstance(obj, dict) + extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none], + obj.get("extensions")) + extras = obj.get("extras") + xmag = from_float(obj.get("xmag")) + ymag = from_float(obj.get("ymag")) + zfar = from_float(obj.get("zfar")) + znear = from_float(obj.get("znear")) + return 
CameraOrthographic(extensions, extras, xmag, ymag, zfar, znear) + + def to_dict(self): + result = {} + result["extensions"] = from_union([lambda x: from_dict(from_extension, x), from_none], + self.extensions) + result["extras"] = from_extra(self.extras) + result["xmag"] = to_float(self.xmag) + result["ymag"] = to_float(self.ymag) + result["zfar"] = to_float(self.zfar) + result["znear"] = to_float(self.znear) + return result + + +class CameraPerspective: + """A perspective camera containing properties to create a perspective projection matrix.""" + + def __init__(self, aspect_ratio, extensions, extras, yfov, zfar, znear): + self.aspect_ratio = aspect_ratio + self.extensions = extensions + self.extras = extras + self.yfov = yfov + self.zfar = zfar + self.znear = znear + + @staticmethod + def from_dict(obj): + assert isinstance(obj, dict) + aspect_ratio = from_union([from_float, from_none], obj.get("aspectRatio")) + extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none], + obj.get("extensions")) + extras = obj.get("extras") + yfov = from_float(obj.get("yfov")) + zfar = from_union([from_float, from_none], obj.get("zfar")) + znear = from_float(obj.get("znear")) + return CameraPerspective(aspect_ratio, extensions, extras, yfov, zfar, znear) + + def to_dict(self): + result = {} + result["aspectRatio"] = from_union([to_float, from_none], self.aspect_ratio) + result["extensions"] = from_union([lambda x: from_dict(from_extension, x), from_none], + self.extensions) + result["extras"] = from_extra(self.extras) + result["yfov"] = to_float(self.yfov) + result["zfar"] = from_union([to_float, from_none], self.zfar) + result["znear"] = to_float(self.znear) + return result + + +class Camera: + """A camera's projection. A node can reference a camera to apply a transform to place the + camera in the scene. + """ + + def __init__(self, extensions, extras, name, orthographic, perspective, type): + self.extensions = extensions + self.extras = extras + self.name = name + self.orthographic = orthographic + self.perspective = perspective + self.type = type + + @staticmethod + def from_dict(obj): + assert isinstance(obj, dict) + extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none], + obj.get("extensions")) + extras = obj.get("extras") + name = from_union([from_str, from_none], obj.get("name")) + orthographic = from_union([CameraOrthographic.from_dict, from_none], obj.get("orthographic")) + perspective = from_union([CameraPerspective.from_dict, from_none], obj.get("perspective")) + type = from_str(obj.get("type")) + return Camera(extensions, extras, name, orthographic, perspective, type) + + def to_dict(self): + result = {} + result["extensions"] = from_union([lambda x: from_dict(from_extension, x), from_none], + self.extensions) + result["extras"] = from_extra(self.extras) + result["name"] = from_union([from_str, from_none], self.name) + result["orthographic"] = from_union([lambda x: to_class(CameraOrthographic, x), from_none], self.orthographic) + result["perspective"] = from_union([lambda x: to_class(CameraPerspective, x), from_none], self.perspective) + result["type"] = from_str(self.type) + return result + + +class Image: + """Image data used to create a texture. Image can be referenced by URI or `bufferView` + index. `mimeType` is required in the latter case. 
+ """ + + def __init__(self, buffer_view, extensions, extras, mime_type, name, uri): + self.buffer_view = buffer_view + self.extensions = extensions + self.extras = extras + self.mime_type = mime_type + self.name = name + self.uri = uri + + @staticmethod + def from_dict(obj): + assert isinstance(obj, dict) + buffer_view = from_union([from_int, from_none], obj.get("bufferView")) + extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none], + obj.get("extensions")) + extras = obj.get("extras") + mime_type = from_union([from_str, from_none], obj.get("mimeType")) + name = from_union([from_str, from_none], obj.get("name")) + uri = from_union([from_str, from_none], obj.get("uri")) + return Image(buffer_view, extensions, extras, mime_type, name, uri) + + def to_dict(self): + result = {} + result["bufferView"] = from_union([from_int, from_none], self.buffer_view) + result["extensions"] = from_union([lambda x: from_dict(from_extension, x), from_none], + self.extensions) + result["extras"] = from_extra(self.extras) + result["mimeType"] = from_union([from_str, from_none], self.mime_type) + result["name"] = from_union([from_str, from_none], self.name) + result["uri"] = from_union([from_str, from_none], self.uri) + return result + + +class TextureInfo: + """The emissive map texture. + + The base color texture. + + The metallic-roughness texture. + + Reference to a texture. + """ + + def __init__(self, extensions, extras, index, tex_coord): + self.extensions = extensions + self.extras = extras + self.index = index + self.tex_coord = tex_coord + + @staticmethod + def from_dict(obj): + assert isinstance(obj, dict) + extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none], + obj.get("extensions")) + extras = obj.get("extras") + index = from_int(obj.get("index")) + tex_coord = from_union([from_int, from_none], obj.get("texCoord")) + return TextureInfo(extensions, extras, index, tex_coord) + + def to_dict(self): + result = {} + result["extensions"] = from_union([lambda x: from_dict(from_extension, x), from_none], + self.extensions) + result["extras"] = from_extra(self.extras) + result["index"] = from_int(self.index) + result["texCoord"] = from_union([from_int, from_none], self.tex_coord) + return result + + +class MaterialNormalTextureInfoClass: + """The normal map texture. + + Reference to a texture. + """ + + def __init__(self, extensions, extras, index, scale, tex_coord): + self.extensions = extensions + self.extras = extras + self.index = index + self.scale = scale + self.tex_coord = tex_coord + + @staticmethod + def from_dict(obj): + assert isinstance(obj, dict) + extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none], + obj.get("extensions")) + extras = obj.get("extras") + index = from_int(obj.get("index")) + scale = from_union([from_float, from_none], obj.get("scale")) + tex_coord = from_union([from_int, from_none], obj.get("texCoord")) + return MaterialNormalTextureInfoClass(extensions, extras, index, scale, tex_coord) + + def to_dict(self): + result = {} + result["extensions"] = from_union([lambda x: from_dict(from_extension, x), from_none], + self.extensions) + result["extras"] = from_extra(self.extras) + result["index"] = from_int(self.index) + result["scale"] = from_union([to_float, from_none], self.scale) + result["texCoord"] = from_union([from_int, from_none], self.tex_coord) + return result + + +class MaterialOcclusionTextureInfoClass: + """The occlusion map texture. 
+ + Reference to a texture. + """ + + def __init__(self, extensions, extras, index, strength, tex_coord): + self.extensions = extensions + self.extras = extras + self.index = index + self.strength = strength + self.tex_coord = tex_coord + + @staticmethod + def from_dict(obj): + assert isinstance(obj, dict) + extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none], + obj.get("extensions")) + extras = obj.get("extras") + index = from_int(obj.get("index")) + strength = from_union([from_float, from_none], obj.get("strength")) + tex_coord = from_union([from_int, from_none], obj.get("texCoord")) + return MaterialOcclusionTextureInfoClass(extensions, extras, index, strength, tex_coord) + + def to_dict(self): + result = {} + result["extensions"] = from_union([lambda x: from_dict(from_extension, x), from_none], + self.extensions) + result["extras"] = from_extra(self.extras) + result["index"] = from_int(self.index) + result["strength"] = from_union([to_float, from_none], self.strength) + result["texCoord"] = from_union([from_int, from_none], self.tex_coord) + return result + + +class MaterialPBRMetallicRoughness: + """A set of parameter values that are used to define the metallic-roughness material model + from Physically-Based Rendering (PBR) methodology. When not specified, all the default + values of `pbrMetallicRoughness` apply. + + A set of parameter values that are used to define the metallic-roughness material model + from Physically-Based Rendering (PBR) methodology. + """ + + def __init__(self, base_color_factor, base_color_texture, extensions, extras, metallic_factor, + metallic_roughness_texture, roughness_factor): + self.base_color_factor = base_color_factor + self.base_color_texture = base_color_texture + self.extensions = extensions + self.extras = extras + self.metallic_factor = metallic_factor + self.metallic_roughness_texture = metallic_roughness_texture + self.roughness_factor = roughness_factor + + @staticmethod + def from_dict(obj): + assert isinstance(obj, dict) + base_color_factor = from_union([lambda x: from_list(from_float, x), from_none], obj.get("baseColorFactor")) + base_color_texture = from_union([TextureInfo.from_dict, from_none], obj.get("baseColorTexture")) + extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none], + obj.get("extensions")) + extras = obj.get("extras") + metallic_factor = from_union([from_float, from_none], obj.get("metallicFactor")) + metallic_roughness_texture = from_union([TextureInfo.from_dict, from_none], obj.get("metallicRoughnessTexture")) + roughness_factor = from_union([from_float, from_none], obj.get("roughnessFactor")) + return MaterialPBRMetallicRoughness(base_color_factor, base_color_texture, extensions, extras, metallic_factor, + metallic_roughness_texture, roughness_factor) + + def to_dict(self): + result = {} + result["baseColorFactor"] = from_union([lambda x: from_list(to_float, x), from_none], self.base_color_factor) + result["baseColorTexture"] = from_union([lambda x: to_class(TextureInfo, x), from_none], + self.base_color_texture) + result["extensions"] = from_union([lambda x: from_dict(from_extension, x), from_none], + self.extensions) + result["extras"] = from_extra(self.extras) + result["metallicFactor"] = from_union([to_float, from_none], self.metallic_factor) + result["metallicRoughnessTexture"] = from_union([lambda x: to_class(TextureInfo, x), from_none], + self.metallic_roughness_texture) + result["roughnessFactor"] = from_union([to_float, 
from_none], self.roughness_factor) + return result + + +class Material: + """The material appearance of a primitive.""" + + def __init__(self, alpha_cutoff, alpha_mode, double_sided, emissive_factor, emissive_texture, extensions, extras, + name, normal_texture, occlusion_texture, pbr_metallic_roughness): + self.alpha_cutoff = alpha_cutoff + self.alpha_mode = alpha_mode + self.double_sided = double_sided + self.emissive_factor = emissive_factor + self.emissive_texture = emissive_texture + self.extensions = extensions + self.extras = extras + self.name = name + self.normal_texture = normal_texture + self.occlusion_texture = occlusion_texture + self.pbr_metallic_roughness = pbr_metallic_roughness + + @staticmethod + def from_dict(obj): + assert isinstance(obj, dict) + alpha_cutoff = from_union([from_float, from_none], obj.get("alphaCutoff")) + alpha_mode = from_union([from_str, from_none], obj.get("alphaMode")) + double_sided = from_union([from_bool, from_none], obj.get("doubleSided")) + emissive_factor = from_union([lambda x: from_list(from_float, x), from_none], obj.get("emissiveFactor")) + emissive_texture = from_union([TextureInfo.from_dict, from_none], obj.get("emissiveTexture")) + extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none], + obj.get("extensions")) + extras = obj.get("extras") + name = from_union([from_str, from_none], obj.get("name")) + normal_texture = from_union([MaterialNormalTextureInfoClass.from_dict, from_none], obj.get("normalTexture")) + occlusion_texture = from_union([MaterialOcclusionTextureInfoClass.from_dict, from_none], + obj.get("occlusionTexture")) + pbr_metallic_roughness = from_union([MaterialPBRMetallicRoughness.from_dict, from_none], + obj.get("pbrMetallicRoughness")) + return Material(alpha_cutoff, alpha_mode, double_sided, emissive_factor, emissive_texture, extensions, extras, + name, normal_texture, occlusion_texture, pbr_metallic_roughness) + + def to_dict(self): + result = {} + result["alphaCutoff"] = from_union([to_float, from_none], self.alpha_cutoff) + result["alphaMode"] = from_union([from_str, from_none], self.alpha_mode) + result["doubleSided"] = from_union([from_bool, from_none], self.double_sided) + result["emissiveFactor"] = from_union([lambda x: from_list(to_float, x), from_none], self.emissive_factor) + result["emissiveTexture"] = from_union([lambda x: to_class(TextureInfo, x), from_none], self.emissive_texture) + result["extensions"] = from_union([lambda x: from_dict(from_extension, x), from_none], + self.extensions) + result["extras"] = from_extra(self.extras) + result["name"] = from_union([from_str, from_none], self.name) + result["normalTexture"] = from_union([lambda x: to_class(MaterialNormalTextureInfoClass, x), from_none], + self.normal_texture) + result["occlusionTexture"] = from_union([lambda x: to_class(MaterialOcclusionTextureInfoClass, x), from_none], + self.occlusion_texture) + result["pbrMetallicRoughness"] = from_union([lambda x: to_class(MaterialPBRMetallicRoughness, x), from_none], + self.pbr_metallic_roughness) + return result + + +class MeshPrimitive: + """Geometry to be rendered with the given material.""" + + def __init__(self, attributes, extensions, extras, indices, material, mode, targets): + self.attributes = attributes + self.extensions = extensions + self.extras = extras + self.indices = indices + self.material = material + self.mode = mode + self.targets = targets + + @staticmethod + def from_dict(obj): + assert isinstance(obj, dict) + attributes = from_dict(from_int, 
obj.get("attributes")) + extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none], + obj.get("extensions")) + extras = obj.get("extras") + indices = from_union([from_int, from_none], obj.get("indices")) + material = from_union([from_int, from_none], obj.get("material")) + mode = from_union([from_int, from_none], obj.get("mode")) + targets = from_union([lambda x: from_list(lambda x: from_dict(from_int, x), x), from_none], obj.get("targets")) + return MeshPrimitive(attributes, extensions, extras, indices, material, mode, targets) + + def to_dict(self): + result = {} + result["attributes"] = from_dict(from_int, self.attributes) + result["extensions"] = from_union([lambda x: from_dict(from_extension, x), from_none], + self.extensions) + result["extras"] = from_extra(self.extras) + result["indices"] = from_union([from_int, from_none], self.indices) + result["material"] = from_union([from_int, from_none], self.material) + result["mode"] = from_union([from_int, from_none], self.mode) + result["targets"] = from_union([lambda x: from_list(lambda x: from_dict(from_int, x), x), from_none], + self.targets) + return result + + +class Mesh: + """A set of primitives to be rendered. A node can contain one mesh. A node's transform + places the mesh in the scene. + """ + + def __init__(self, extensions, extras, name, primitives, weights): + self.extensions = extensions + self.extras = extras + self.name = name + self.primitives = primitives + self.weights = weights + + @staticmethod + def from_dict(obj): + assert isinstance(obj, dict) + extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none], + obj.get("extensions")) + extras = obj.get("extras") + name = from_union([from_str, from_none], obj.get("name")) + primitives = from_list(MeshPrimitive.from_dict, obj.get("primitives")) + weights = from_union([lambda x: from_list(from_float, x), from_none], obj.get("weights")) + return Mesh(extensions, extras, name, primitives, weights) + + def to_dict(self): + result = {} + result["extensions"] = from_union([lambda x: from_dict(from_extension, x), from_none], + self.extensions) + result["extras"] = from_extra(self.extras) + result["name"] = from_union([from_str, from_none], self.name) + result["primitives"] = from_list(lambda x: to_class(MeshPrimitive, x), self.primitives) + result["weights"] = from_union([lambda x: from_list(to_float, x), from_none], self.weights) + return result + + +class Node: + """A node in the node hierarchy. When the node contains `skin`, all `mesh.primitives` must + contain `JOINTS_0` and `WEIGHTS_0` attributes. A node can have either a `matrix` or any + combination of `translation`/`rotation`/`scale` (TRS) properties. TRS properties are + converted to matrices and postmultiplied in the `T * R * S` order to compose the + transformation matrix; first the scale is applied to the vertices, then the rotation, and + then the translation. If none are provided, the transform is the identity. When a node is + targeted for animation (referenced by an animation.channel.target), only TRS properties + may be present; `matrix` will not be present. 
+ """ + + def __init__(self, camera, children, extensions, extras, matrix, mesh, name, rotation, scale, skin, translation, + weights): + self.camera = camera + self.children = children + self.extensions = extensions + self.extras = extras + self.matrix = matrix + self.mesh = mesh + self.name = name + self.rotation = rotation + self.scale = scale + self.skin = skin + self.translation = translation + self.weights = weights + + @staticmethod + def from_dict(obj): + assert isinstance(obj, dict) + camera = from_union([from_int, from_none], obj.get("camera")) + children = from_union([lambda x: from_list(from_int, x), from_none], obj.get("children")) + extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none], + obj.get("extensions")) + extras = obj.get("extras") + matrix = from_union([lambda x: from_list(from_float, x), from_none], obj.get("matrix")) + mesh = from_union([from_int, from_none], obj.get("mesh")) + name = from_union([from_str, from_none], obj.get("name")) + rotation = from_union([lambda x: from_list(from_float, x), from_none], obj.get("rotation")) + scale = from_union([lambda x: from_list(from_float, x), from_none], obj.get("scale")) + skin = from_union([from_int, from_none], obj.get("skin")) + translation = from_union([lambda x: from_list(from_float, x), from_none], obj.get("translation")) + weights = from_union([lambda x: from_list(from_float, x), from_none], obj.get("weights")) + return Node(camera, children, extensions, extras, matrix, mesh, name, rotation, scale, skin, translation, + weights) + + def to_dict(self): + result = {} + result["camera"] = from_union([from_int, from_none], self.camera) + result["children"] = from_union([lambda x: from_list(from_int, x), from_none], self.children) + result["extensions"] = from_union([lambda x: from_dict(from_extension, x), from_none], + self.extensions) + result["extras"] = from_extra(self.extras) + result["matrix"] = from_union([lambda x: from_list(to_float, x), from_none], self.matrix) + result["mesh"] = from_union([from_int, from_none], self.mesh) + result["name"] = from_union([from_str, from_none], self.name) + result["rotation"] = from_union([lambda x: from_list(to_float, x), from_none], self.rotation) + result["scale"] = from_union([lambda x: from_list(to_float, x), from_none], self.scale) + result["skin"] = from_union([from_int, from_none], self.skin) + result["translation"] = from_union([lambda x: from_list(to_float, x), from_none], self.translation) + result["weights"] = from_union([lambda x: from_list(to_float, x), from_none], self.weights) + return result + + +class Sampler: + """Texture sampler properties for filtering and wrapping modes.""" + + def __init__(self, extensions, extras, mag_filter, min_filter, name, wrap_s, wrap_t): + self.extensions = extensions + self.extras = extras + self.mag_filter = mag_filter + self.min_filter = min_filter + self.name = name + self.wrap_s = wrap_s + self.wrap_t = wrap_t + + @staticmethod + def from_dict(obj): + assert isinstance(obj, dict) + extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none], + obj.get("extensions")) + extras = obj.get("extras") + mag_filter = from_union([from_int, from_none], obj.get("magFilter")) + min_filter = from_union([from_int, from_none], obj.get("minFilter")) + name = from_union([from_str, from_none], obj.get("name")) + wrap_s = from_union([from_int, from_none], obj.get("wrapS")) + wrap_t = from_union([from_int, from_none], obj.get("wrapT")) + return Sampler(extensions, extras, 
mag_filter, min_filter, name, wrap_s, wrap_t) + + def to_dict(self): + result = {} + result["extensions"] = from_union([lambda x: from_dict(from_extension, x), from_none], + self.extensions) + result["extras"] = from_extra(self.extras) + result["magFilter"] = from_union([from_int, from_none], self.mag_filter) + result["minFilter"] = from_union([from_int, from_none], self.min_filter) + result["name"] = from_union([from_str, from_none], self.name) + result["wrapS"] = from_union([from_int, from_none], self.wrap_s) + result["wrapT"] = from_union([from_int, from_none], self.wrap_t) + return result + + +class Scene: + """The root nodes of a scene.""" + + def __init__(self, extensions, extras, name, nodes): + self.extensions = extensions + self.extras = extras + self.name = name + self.nodes = nodes + + @staticmethod + def from_dict(obj): + assert isinstance(obj, dict) + extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none], + obj.get("extensions")) + extras = obj.get("extras") + name = from_union([from_str, from_none], obj.get("name")) + nodes = from_union([lambda x: from_list(from_int, x), from_none], obj.get("nodes")) + return Scene(extensions, extras, name, nodes) + + def to_dict(self): + result = {} + result["extensions"] = from_union([lambda x: from_dict(from_extension, x), from_none], + self.extensions) + result["extras"] = from_extra(self.extras) + result["name"] = from_union([from_str, from_none], self.name) + result["nodes"] = from_union([lambda x: from_list(from_int, x), from_none], self.nodes) + return result + + +class Skin: + """Joints and matrices defining a skin.""" + + def __init__(self, extensions, extras, inverse_bind_matrices, joints, name, skeleton): + self.extensions = extensions + self.extras = extras + self.inverse_bind_matrices = inverse_bind_matrices + self.joints = joints + self.name = name + self.skeleton = skeleton + + @staticmethod + def from_dict(obj): + assert isinstance(obj, dict) + extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none], + obj.get("extensions")) + extras = obj.get("extras") + inverse_bind_matrices = from_union([from_int, from_none], obj.get("inverseBindMatrices")) + joints = from_list(from_int, obj.get("joints")) + name = from_union([from_str, from_none], obj.get("name")) + skeleton = from_union([from_int, from_none], obj.get("skeleton")) + return Skin(extensions, extras, inverse_bind_matrices, joints, name, skeleton) + + def to_dict(self): + result = {} + result["extensions"] = from_union([lambda x: from_dict(from_extension, x), from_none], + self.extensions) + result["extras"] = from_extra(self.extras) + result["inverseBindMatrices"] = from_union([from_int, from_none], self.inverse_bind_matrices) + result["joints"] = from_list(from_int, self.joints) + result["name"] = from_union([from_str, from_none], self.name) + result["skeleton"] = from_union([from_int, from_none], self.skeleton) + return result + + +class Texture: + """A texture and its sampler.""" + + def __init__(self, extensions, extras, name, sampler, source): + self.extensions = extensions + self.extras = extras + self.name = name + self.sampler = sampler + self.source = source + + @staticmethod + def from_dict(obj): + assert isinstance(obj, dict) + extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none], + obj.get("extensions")) + extras = obj.get("extras") + name = from_union([from_str, from_none], obj.get("name")) + sampler = from_union([from_int, from_none], 
obj.get("sampler")) + source = from_union([from_int, from_none], obj.get("source")) + return Texture(extensions, extras, name, sampler, source) + + def to_dict(self): + result = {} + result["extensions"] = from_union([lambda x: from_dict(from_extension, x), from_none], + self.extensions) + result["extras"] = from_extra(self.extras) + result["name"] = from_union([from_str, from_none], self.name) + result["sampler"] = from_union([from_int, from_none], self.sampler) + result["source"] = from_union([from_int, from_none], self.source) + return result + + +class Gltf: + """The root object for a glTF asset.""" + + def __init__(self, accessors, animations, asset, buffers, buffer_views, cameras, extensions, extensions_required, + extensions_used, extras, images, materials, meshes, nodes, samplers, scene, scenes, skins, textures): + self.accessors = accessors + self.animations = animations + self.asset = asset + self.buffers = buffers + self.buffer_views = buffer_views + self.cameras = cameras + self.extensions = extensions + self.extensions_required = extensions_required + self.extensions_used = extensions_used + self.extras = extras + self.images = images + self.materials = materials + self.meshes = meshes + self.nodes = nodes + self.samplers = samplers + self.scene = scene + self.scenes = scenes + self.skins = skins + self.textures = textures + + @staticmethod + def from_dict(obj): + assert isinstance(obj, dict) + accessors = from_union([lambda x: from_list(Accessor.from_dict, x), from_none], obj.get("accessors")) + animations = from_union([lambda x: from_list(Animation.from_dict, x), from_none], obj.get("animations")) + asset = Asset.from_dict(obj.get("asset")) + buffers = from_union([lambda x: from_list(Buffer.from_dict, x), from_none], obj.get("buffers")) + buffer_views = from_union([lambda x: from_list(BufferView.from_dict, x), from_none], obj.get("bufferViews")) + cameras = from_union([lambda x: from_list(Camera.from_dict, x), from_none], obj.get("cameras")) + extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none], + obj.get("extensions")) + extensions_required = from_union([lambda x: from_list(from_str, x), from_none], obj.get("extensionsRequired")) + extensions_used = from_union([lambda x: from_list(from_str, x), from_none], obj.get("extensionsUsed")) + extras = obj.get("extras") + images = from_union([lambda x: from_list(Image.from_dict, x), from_none], obj.get("images")) + materials = from_union([lambda x: from_list(Material.from_dict, x), from_none], obj.get("materials")) + meshes = from_union([lambda x: from_list(Mesh.from_dict, x), from_none], obj.get("meshes")) + nodes = from_union([lambda x: from_list(Node.from_dict, x), from_none], obj.get("nodes")) + samplers = from_union([lambda x: from_list(Sampler.from_dict, x), from_none], obj.get("samplers")) + scene = from_union([from_int, from_none], obj.get("scene")) + scenes = from_union([lambda x: from_list(Scene.from_dict, x), from_none], obj.get("scenes")) + skins = from_union([lambda x: from_list(Skin.from_dict, x), from_none], obj.get("skins")) + textures = from_union([lambda x: from_list(Texture.from_dict, x), from_none], obj.get("textures")) + return Gltf(accessors, animations, asset, buffers, buffer_views, cameras, extensions, extensions_required, + extensions_used, extras, images, materials, meshes, nodes, samplers, scene, scenes, skins, textures) + + def to_dict(self): + result = {} + result["accessors"] = from_union([lambda x: from_list(lambda x: to_class(Accessor, x), x), from_none], + 
self.accessors) + result["animations"] = from_union([lambda x: from_list(lambda x: to_class(Animation, x), x), from_none], + self.animations) + result["asset"] = to_class(Asset, self.asset) + result["buffers"] = from_union([lambda x: from_list(lambda x: to_class(Buffer, x), x), from_none], self.buffers) + result["bufferViews"] = from_union([lambda x: from_list(lambda x: to_class(BufferView, x), x), from_none], + self.buffer_views) + result["cameras"] = from_union([lambda x: from_list(lambda x: to_class(Camera, x), x), from_none], self.cameras) + result["extensions"] = from_union([lambda x: from_dict(from_extension, x), from_none], + self.extensions) + result["extensionsRequired"] = from_union([lambda x: from_list(from_str, x), from_none], + self.extensions_required) + result["extensionsUsed"] = from_union([lambda x: from_list(from_str, x), from_none], self.extensions_used) + result["extras"] = from_extra(self.extras) + result["images"] = from_union([lambda x: from_list(lambda x: to_class(Image, x), x), from_none], self.images) + result["materials"] = from_union([lambda x: from_list(lambda x: to_class(Material, x), x), from_none], + self.materials) + result["meshes"] = from_union([lambda x: from_list(lambda x: to_class(Mesh, x), x), from_none], self.meshes) + result["nodes"] = from_union([lambda x: from_list(lambda x: to_class(Node, x), x), from_none], self.nodes) + result["samplers"] = from_union([lambda x: from_list(lambda x: to_class(Sampler, x), x), from_none], + self.samplers) + result["scene"] = from_union([from_int, from_none], self.scene) + result["scenes"] = from_union([lambda x: from_list(lambda x: to_class(Scene, x), x), from_none], self.scenes) + result["skins"] = from_union([lambda x: from_list(lambda x: to_class(Skin, x), x), from_none], self.skins) + result["textures"] = from_union([lambda x: from_list(lambda x: to_class(Texture, x), x), from_none], + self.textures) + return result + + +def gltf_from_dict(s): + return Gltf.from_dict(s) + + +def gltf_to_dict(x): + return to_class(Gltf, x) diff --git a/scripts/addons_core/io_scene_gltf2/io/com/gltf2_io_constants.py b/scripts/addons_core/io_scene_gltf2/io/com/gltf2_io_constants.py new file mode 100755 index 00000000000..fd1894f7386 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/io/com/gltf2_io_constants.py @@ -0,0 +1,159 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +from enum import IntEnum + + +class ComponentType(IntEnum): + Byte = 5120 + UnsignedByte = 5121 + Short = 5122 + UnsignedShort = 5123 + UnsignedInt = 5125 + Float = 5126 + + @classmethod + def to_type_code(cls, component_type): + return { + ComponentType.Byte: 'b', + ComponentType.UnsignedByte: 'B', + ComponentType.Short: 'h', + ComponentType.UnsignedShort: 'H', + ComponentType.UnsignedInt: 'I', + ComponentType.Float: 'f' + }[component_type] + + @classmethod + def to_numpy_dtype(cls, component_type): + import numpy as np + return { + ComponentType.Byte: np.int8, + ComponentType.UnsignedByte: np.uint8, + ComponentType.Short: np.int16, + ComponentType.UnsignedShort: np.uint16, + ComponentType.UnsignedInt: np.uint32, + ComponentType.Float: np.float32, + }[component_type] + + @classmethod + def from_legacy_define(cls, type_define): + return { + GLTF_COMPONENT_TYPE_BYTE: ComponentType.Byte, + GLTF_COMPONENT_TYPE_UNSIGNED_BYTE: ComponentType.UnsignedByte, + GLTF_COMPONENT_TYPE_SHORT: ComponentType.Short, + GLTF_COMPONENT_TYPE_UNSIGNED_SHORT: ComponentType.UnsignedShort, + 
GLTF_COMPONENT_TYPE_UNSIGNED_INT: ComponentType.UnsignedInt, + GLTF_COMPONENT_TYPE_FLOAT: ComponentType.Float + }[type_define] + + @classmethod + def get_size(cls, component_type): + return { + ComponentType.Byte: 1, + ComponentType.UnsignedByte: 1, + ComponentType.Short: 2, + ComponentType.UnsignedShort: 2, + ComponentType.UnsignedInt: 4, + ComponentType.Float: 4 + }[component_type] + + +class DataType: + Scalar = "SCALAR" + Vec2 = "VEC2" + Vec3 = "VEC3" + Vec4 = "VEC4" + Mat2 = "MAT2" + Mat3 = "MAT3" + Mat4 = "MAT4" + + def __new__(cls, *args, **kwargs): + raise RuntimeError("{} should not be instantiated".format(cls.__name__)) + + @classmethod + def num_elements(cls, data_type): + return { + DataType.Scalar: 1, + DataType.Vec2: 2, + DataType.Vec3: 3, + DataType.Vec4: 4, + DataType.Mat2: 4, + DataType.Mat3: 9, + DataType.Mat4: 16 + }[data_type] + + @classmethod + def vec_type_from_num(cls, num_elems): + if not (0 < num_elems < 5): + raise ValueError("No vector type with {} elements".format(num_elems)) + return { + 1: DataType.Scalar, + 2: DataType.Vec2, + 3: DataType.Vec3, + 4: DataType.Vec4 + }[num_elems] + + @classmethod + def mat_type_from_num(cls, num_elems): + if not (4 <= num_elems <= 16): + raise ValueError("No matrix type with {} elements".format(num_elems)) + return { + 4: DataType.Mat2, + 9: DataType.Mat3, + 16: DataType.Mat4 + }[num_elems] + + +class TextureFilter(IntEnum): + Nearest = 9728 + Linear = 9729 + NearestMipmapNearest = 9984 + LinearMipmapNearest = 9985 + NearestMipmapLinear = 9986 + LinearMipmapLinear = 9987 + + +class TextureWrap(IntEnum): + ClampToEdge = 33071 + MirroredRepeat = 33648 + Repeat = 10497 + + +class BufferViewTarget(IntEnum): + ARRAY_BUFFER = 34962 + ELEMENT_ARRAY_BUFFER = 34963 + +################# +# LEGACY DEFINES + + +GLTF_VERSION = "2.0" + +# +# Component Types +# +GLTF_COMPONENT_TYPE_BYTE = "BYTE" +GLTF_COMPONENT_TYPE_UNSIGNED_BYTE = "UNSIGNED_BYTE" +GLTF_COMPONENT_TYPE_SHORT = "SHORT" +GLTF_COMPONENT_TYPE_UNSIGNED_SHORT = "UNSIGNED_SHORT" +GLTF_COMPONENT_TYPE_UNSIGNED_INT = "UNSIGNED_INT" +GLTF_COMPONENT_TYPE_FLOAT = "FLOAT" + + +# +# Data types +# +GLTF_DATA_TYPE_SCALAR = "SCALAR" +GLTF_DATA_TYPE_VEC2 = "VEC2" +GLTF_DATA_TYPE_VEC3 = "VEC3" +GLTF_DATA_TYPE_VEC4 = "VEC4" +GLTF_DATA_TYPE_MAT2 = "MAT2" +GLTF_DATA_TYPE_MAT3 = "MAT3" +GLTF_DATA_TYPE_MAT4 = "MAT4" + +GLTF_IOR = 1.5 +BLENDER_COAT_ROUGHNESS = 0.03 + +# Rounding digit used for normal/tangent rounding +ROUNDING_DIGIT = 4 diff --git a/scripts/addons_core/io_scene_gltf2/io/com/gltf2_io_debug.py b/scripts/addons_core/io_scene_gltf2/io/com/gltf2_io_debug.py new file mode 100755 index 00000000000..889e93bbae4 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/io/com/gltf2_io_debug.py @@ -0,0 +1,119 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +# +# Imports +# + +import time +import logging +import logging.handlers + +# +# Globals +# + +g_profile_started = False +g_profile_start = 0.0 +g_profile_end = 0.0 +g_profile_delta = 0.0 + + +def get_timestamp(): + current_time = time.gmtime() + return time.strftime("%H:%M:%S", current_time) + + +def profile_start(): + """Start profiling by storing the current time.""" + global g_profile_start + global g_profile_started + + if g_profile_started: + print('ERROR', 'Profiling already started') + return + + g_profile_started = True + + g_profile_start = time.time() + + +def profile_end(label=None): + """Stop profiling and printing out the delta time since profile start.""" + global 
g_profile_end + global g_profile_delta + global g_profile_started + + if not g_profile_started: + print('ERROR', 'Profiling not started') + return + + g_profile_started = False + + g_profile_end = time.time() + g_profile_delta = g_profile_end - g_profile_start + + output = 'Delta time: ' + str(g_profile_delta) + + if label is not None: + output = output + ' (' + label + ')' + + print('PROFILE', output) + + +class Log: + def __init__(self, loglevel): + self.logger = logging.getLogger('glTFImporter') + + # For console display + self.console_handler = logging.StreamHandler() + formatter = logging.Formatter('%(asctime)s | %(levelname)s: %(message)s', "%H:%M:%S") + self.console_handler.setFormatter(formatter) + + # For popup display + self.popup_handler = logging.handlers.MemoryHandler(1024 * 10) + + self.logger.addHandler(self.console_handler) + # self.logger.addHandler(self.popup_handler) => Make sure to not attach the popup handler to the logger + + self.logger.setLevel(int(loglevel)) + + def error(self, message, popup=False): + self.logger.error(message) + if popup: + self.popup_handler.buffer.append(('ERROR', message)) + + def warning(self, message, popup=False): + self.logger.warning(message) + if popup: + self.popup_handler.buffer.append(('WARNING', message)) + + def info(self, message, popup=False): + self.logger.info(message) + if popup: + self.popup_handler.buffer.append(('INFO', message)) + + def debug(self, message, popup=False): + self.logger.debug(message) + if popup: + self.popup_handler.buffer.append(('DEBUG', message)) + + def critical(self, message, popup=False): + self.logger.critical(message) + if popup: + # There is no Critical level in Blender, so we use error + self.popup_handler.buffer.append(('ERROR', message)) + + def profile(self, message, popup=False): # There is no profile level in logging, so we use info + self.logger.info(message) + if popup: + self.popup_handler.buffer.append(('PROFILE', message)) + + def messages(self): + return self.popup_handler.buffer + + def flush(self): + self.logger.removeHandler(self.console_handler) + self.popup_handler.flush() + self.logger.removeHandler(self.popup_handler) diff --git a/scripts/addons_core/io_scene_gltf2/io/com/gltf2_io_draco_compression_extension.py b/scripts/addons_core/io_scene_gltf2/io/com/gltf2_io_draco_compression_extension.py new file mode 100644 index 00000000000..046417b42ad --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/io/com/gltf2_io_draco_compression_extension.py @@ -0,0 +1,58 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import os +import sys +from pathlib import Path +import bpy + + +def dll_path() -> Path: + """ + Get the DLL path depending on the underlying platform. + :return: DLL path. 
+ """ + lib_name = 'extern_draco' + blender_root = Path(bpy.app.binary_path).parent + python_lib = Path('{v[0]}.{v[1]}/python/lib'.format(v=bpy.app.version)) + python_version = 'python{v[0]}.{v[1]}'.format(v=sys.version_info) + + path = os.environ.get('BLENDER_EXTERN_DRACO_LIBRARY_PATH') + if path is None: + path = { + 'win32': blender_root / python_lib / 'site-packages', + 'linux': blender_root / python_lib / python_version / 'site-packages', + 'darwin': blender_root.parent / 'Resources' / python_lib / python_version / 'site-packages' + }.get(sys.platform) + else: + return Path(path) + + library_name = { + 'win32': '{}.dll'.format(lib_name), + 'linux': 'lib{}.so'.format(lib_name), + 'darwin': 'lib{}.dylib'.format(lib_name) + }.get(sys.platform) + + if path is None or library_name is None: + print('WARNING', 'Unsupported platform {}, Draco mesh compression is unavailable'.format(sys.platform)) + + return path / library_name + + +def dll_exists(quiet=False) -> bool: + """ + Checks whether the DLL path exists. + :return: True if the DLL exists. + """ + path = dll_path() + exists = path.exists() and path.is_file() + if quiet is False: + if exists: + print('INFO', 'Draco mesh compression is available, use library at %s' % dll_path().absolute()) + else: + print( + 'ERROR', + 'Draco mesh compression is not available because library could not be found at %s' % + dll_path().absolute()) + return exists diff --git a/scripts/addons_core/io_scene_gltf2/io/com/gltf2_io_extensions.py b/scripts/addons_core/io_scene_gltf2/io/com/gltf2_io_extensions.py new file mode 100644 index 00000000000..8c3c06f668b --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/io/com/gltf2_io_extensions.py @@ -0,0 +1,30 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +from typing import List, Dict, Any + + +class Extension: + """Container for extensions. Allows to specify requiredness""" + extension = True # class method used to check Extension class at traversal (after reloading script, isinstance is not working) + + def __init__(self, name: str, extension: Dict[str, Any], required: bool = True): + self.name = name + self.extension = extension + self.required = required + + +class ChildOfRootExtension(Extension): + """Container object for extensions that should be appended to the root extensions""" + + def __init__(self, path: List[str], name: str, extension: Dict[str, Any], required: bool = True): + """ + Wrap a local extension entity into an object that will later be inserted into a root extension and converted + to a reference. + :param path: The path of the extension object in the root extension. E.g. ['lights'] for + KHR_lights_punctual. Must be a path to a list in the extensions dict. 
+ :param extension: The data that should be placed into the extension list + """ + self.path = path + super().__init__(name, extension, required) diff --git a/scripts/addons_core/io_scene_gltf2/io/com/gltf2_io_lights_punctual.py b/scripts/addons_core/io_scene_gltf2/io/com/gltf2_io_lights_punctual.py new file mode 100644 index 00000000000..c903f9476bf --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/io/com/gltf2_io_lights_punctual.py @@ -0,0 +1,68 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +from ...io.com.gltf2_io import from_dict, from_union, from_none, from_float, from_str, from_list +from ...io.com.gltf2_io import to_float, to_class + + +class LightSpot: + """light/spot""" + + def __init__(self, inner_cone_angle, outer_cone_angle): + self.inner_cone_angle = inner_cone_angle + self.outer_cone_angle = outer_cone_angle + + @staticmethod + def from_dict(obj): + assert isinstance(obj, dict) + inner_cone_angle = from_union([from_float, from_none], obj.get("innerConeAngle")) + outer_cone_angle = from_union([from_float, from_none], obj.get("outerConeAngle")) + return LightSpot(inner_cone_angle, outer_cone_angle) + + def to_dict(self): + result = {} + result["innerConeAngle"] = from_union([from_float, from_none], self.inner_cone_angle) + result["outerConeAngle"] = from_union([from_float, from_none], self.outer_cone_angle) + return result + + +class Light: + """defines a set of lights for use with glTF 2.0. Lights define light sources within a scene""" + + def __init__(self, color, intensity, spot, type, range, name, extensions, extras): + self.color = color + self.intensity = intensity + self.spot = spot + self.type = type + self.range = range + self.name = name + self.extensions = extensions + self.extras = extras + + @staticmethod + def from_dict(obj): + assert isinstance(obj, dict) + color = from_union([lambda x: from_list(from_float, x), from_none], obj.get("color")) + intensity = from_union([from_float, from_none], obj.get("intensity")) + spot = LightSpot.from_dict(obj.get("spot")) + type = from_str(obj.get("type")) + range = from_union([from_float, from_none], obj.get("range")) + name = from_union([from_str, from_none], obj.get("name")) + extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none], + obj.get("extensions")) + extras = obj.get("extras") + return Light(color, intensity, spot, type, range, name, extensions, extras) + + def to_dict(self): + result = {} + result["color"] = from_union([lambda x: from_list(to_float, x), from_none], self.color) + result["intensity"] = from_union([from_float, from_none], self.intensity) + result["spot"] = from_union([lambda x: to_class(LightSpot, x), from_none], self.spot) + result["type"] = from_str(self.type) + result["range"] = from_union([from_float, from_none], self.range) + result["name"] = from_union([from_str, from_none], self.name) + result["extensions"] = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none], + self.extensions) + result["extras"] = self.extras + return result diff --git a/scripts/addons_core/io_scene_gltf2/io/com/gltf2_io_path.py b/scripts/addons_core/io_scene_gltf2/io/com/gltf2_io_path.py new file mode 100644 index 00000000000..a01a8b780e8 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/io/com/gltf2_io_path.py @@ -0,0 +1,19 @@ +# SPDX-FileCopyrightText: 2018-2023 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +from urllib.parse import unquote, quote 
+from os.path import normpath +from os import sep + + +def uri_to_path(uri): + uri = uri.replace('\\', '/') # Some files come with \\ as dir separator + uri = unquote(uri) + return normpath(uri) + + +def path_to_uri(path): + path = normpath(path) + path = path.replace(sep, '/') + return quote(path) diff --git a/scripts/addons_core/io_scene_gltf2/io/com/gltf2_io_variants.py b/scripts/addons_core/io_scene_gltf2/io/com/gltf2_io_variants.py new file mode 100644 index 00000000000..89b3de78c69 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/io/com/gltf2_io_variants.py @@ -0,0 +1,32 @@ +# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +from ...io.com.gltf2_io import from_dict, from_union, from_none, from_float, from_str, from_list +from ...io.com.gltf2_io import to_float, to_class + + +class Variant: + """defines variant for use with glTF 2.0.""" + + def __init__(self, name, extensions, extras): + self.name = name + self.extensions = extensions + self.extras = extras + + @staticmethod + def from_dict(obj): + assert isinstance(obj, dict) + name = from_union([from_str, from_none], obj.get("name")) + extensions = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none], + obj.get("extensions")) + extras = obj.get("extras") + return Variant(name, extensions, extras) + + def to_dict(self): + result = {} + result["name"] = from_union([from_str, from_none], self.name) + result["extensions"] = from_union([lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none], + self.extensions) + result["extras"] = self.extras + return result diff --git a/scripts/addons_core/io_scene_gltf2/io/exp/gltf2_io_binary_data.py b/scripts/addons_core/io_scene_gltf2/io/exp/gltf2_io_binary_data.py new file mode 100755 index 00000000000..309b8c2afa0 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/io/exp/gltf2_io_binary_data.py @@ -0,0 +1,35 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import typing +import array +from ...io.com import gltf2_io_constants + + +class BinaryData: + """Store for gltf binary data that can later be stored in a buffer.""" + + def __init__(self, data: bytes, bufferViewTarget=None): + if not isinstance(data, bytes): + raise TypeError("Data is not a bytes array") + self.data = data + self.bufferViewTarget = bufferViewTarget + + def __eq__(self, other): + return self.data == other.data + + def __hash__(self): + return hash(self.data) + + @classmethod + def from_list(cls, + lst: typing.List[typing.Any], + gltf_component_type: gltf2_io_constants.ComponentType, + bufferViewTarget=None): + format_char = gltf2_io_constants.ComponentType.to_type_code(gltf_component_type) + return BinaryData(array.array(format_char, lst).tobytes(), bufferViewTarget) + + @property + def byte_length(self): + return len(self.data) diff --git a/scripts/addons_core/io_scene_gltf2/io/exp/gltf2_io_buffer.py b/scripts/addons_core/io_scene_gltf2/io/exp/gltf2_io_buffer.py new file mode 100755 index 00000000000..4d62c4aee19 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/io/exp/gltf2_io_buffer.py @@ -0,0 +1,54 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import base64 + +from ...io.com import gltf2_io +from ...io.exp import gltf2_io_binary_data + + +class Buffer: + """Class representing binary data for use in a glTF file as 'buffer' property.""" + + def __init__(self, buffer_index=0, 
initial_data=None): + self.__data = bytearray(b"") + if initial_data is not None: + self.__data = bytearray(initial_data.tobytes()) + self.__buffer_index = buffer_index + + def add_and_get_view(self, binary_data: gltf2_io_binary_data.BinaryData) -> gltf2_io.BufferView: + """Add binary data to the buffer. Return a glTF BufferView.""" + offset = len(self.__data) + self.__data.extend(binary_data.data) + + length = binary_data.byte_length + + # offsets should be a multiple of 4 --> therefore add padding if necessary + padding = (4 - (length % 4)) % 4 + self.__data.extend(b"\x00" * padding) + + buffer_view = gltf2_io.BufferView( + buffer=self.__buffer_index, + byte_length=length, + byte_offset=offset, + byte_stride=None, + extensions=None, + extras=None, + name=None, + target=binary_data.bufferViewTarget + ) + return buffer_view + + @property + def byte_length(self): + return len(self.__data) + + def to_bytes(self): + return self.__data + + def clear(self): + self.__data = b"" + + def to_embed_string(self): + return 'data:application/octet-stream;base64,' + base64.b64encode(self.__data).decode('ascii') diff --git a/scripts/addons_core/io_scene_gltf2/io/exp/gltf2_io_draco_compression_extension.py b/scripts/addons_core/io_scene_gltf2/io/exp/gltf2_io_draco_compression_extension.py new file mode 100644 index 00000000000..0053980db3c --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/io/exp/gltf2_io_draco_compression_extension.py @@ -0,0 +1,173 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +from ctypes import * +from pathlib import Path + +from ...io.exp.gltf2_io_binary_data import BinaryData +from ...io.com.gltf2_io_draco_compression_extension import dll_path + + +def encode_scene_primitives(scenes, export_settings): + """ + Handles draco compression. + Moves position, normal and texture coordinate attributes into a Draco encoded buffer. + """ + + # Load DLL and setup function signatures. + dll = cdll.LoadLibrary(str(dll_path().resolve())) + + dll.encoderCreate.restype = c_void_p + dll.encoderCreate.argtypes = [c_uint32] + + dll.encoderRelease.restype = None + dll.encoderRelease.argtypes = [c_void_p] + + dll.encoderSetCompressionLevel.restype = None + dll.encoderSetCompressionLevel.argtypes = [c_void_p, c_uint32] + + dll.encoderSetQuantizationBits.restype = None + dll.encoderSetQuantizationBits.argtypes = [c_void_p, c_uint32, c_uint32, c_uint32, c_uint32, c_uint32] + + dll.encoderSetIndices.restype = None + dll.encoderSetIndices.argtypes = [c_void_p, c_size_t, c_uint32, c_void_p] + + dll.encoderSetAttribute.restype = c_uint32 + dll.encoderSetAttribute.argtypes = [c_void_p, c_char_p, c_size_t, c_char_p, c_void_p] + + dll.encoderEncode.restype = c_bool + dll.encoderEncode.argtypes = [c_void_p, c_uint8] + + dll.encoderGetEncodedVertexCount.restype = c_uint32 + dll.encoderGetEncodedVertexCount.argtypes = [c_void_p] + + dll.encoderGetEncodedIndexCount.restype = c_uint32 + dll.encoderGetEncodedIndexCount.argtypes = [c_void_p] + + dll.encoderGetByteLength.restype = c_uint64 + dll.encoderGetByteLength.argtypes = [c_void_p] + + dll.encoderCopy.restype = None + dll.encoderCopy.argtypes = [c_void_p, c_void_p] + + # Don't encode the same primitive multiple times. + encoded_primitives_cache = {} + + # Compress meshes into Draco buffers. 
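+ # First pass: recursively visit every node of each scene and Draco-encode its mesh primitives in place; primitives shared by duplicated nodes are encoded only once via encoded_primitives_cache.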
+ for scene in scenes: + for node in scene.nodes: + __traverse_node(node, lambda node: __encode_node(node, dll, export_settings, encoded_primitives_cache)) + + # Release uncompressed index and attribute buffers. + # Since those buffers may be shared across nodes, this step must happen after all meshes have been compressed. + for scene in scenes: + for node in scene.nodes: + __traverse_node(node, lambda node: __cleanup_node(node)) + + +def __cleanup_node(node): + if node.mesh is None: + return + + for primitive in node.mesh.primitives: + if primitive.extensions is None or primitive.extensions['KHR_draco_mesh_compression'] is None: + continue + + primitive.indices.buffer_view = None + for attr_name in primitive.attributes: + attr = primitive.attributes[attr_name] + attr.buffer_view = None + + +def __traverse_node(node, f): + f(node) + if node.children is not None: + for child in node.children: + __traverse_node(child, f) + + +def __encode_node(node, dll, export_settings, encoded_primitives_cache): + if node.mesh is not None: + export_settings['log'].info('Draco encoder: Encoding mesh {}.'.format(node.name)) + for primitive in node.mesh.primitives: + __encode_primitive(primitive, dll, export_settings, encoded_primitives_cache) + + +def __encode_primitive(primitive, dll, export_settings, encoded_primitives_cache): + attributes = primitive.attributes + indices = primitive.indices + + # Check if this primitive has already been encoded. + # This usually happens when nodes are duplicated in Blender, thus their indices/attributes are shared data. + if primitive in encoded_primitives_cache: + if primitive.extensions is None: + primitive.extensions = {} + primitive.extensions['KHR_draco_mesh_compression'] = encoded_primitives_cache[primitive] + return + + # Only do TRIANGLES primitives + if primitive.mode not in [None, 4]: + return + + if 'POSITION' not in attributes: + export_settings['log'].warning('Draco encoder: Primitive without positions encountered. Skipping.') + return + + positions = attributes['POSITION'] + + # Skip nodes without a position buffer, e.g. a primitive from a Blender shared instance. + if attributes['POSITION'].buffer_view is None: + return + + encoder = dll.encoderCreate(positions.count) + + draco_ids = {} + for attr_name in attributes: + attr = attributes[attr_name] + draco_id = dll.encoderSetAttribute( + encoder, + attr_name.encode(), + attr.component_type, + attr.type.encode(), + attr.buffer_view.data) + draco_ids[attr_name] = draco_id + + dll.encoderSetIndices(encoder, indices.component_type, indices.count, indices.buffer_view.data) + + dll.encoderSetCompressionLevel(encoder, export_settings['gltf_draco_mesh_compression_level']) + dll.encoderSetQuantizationBits(encoder, + export_settings['gltf_draco_position_quantization'], + export_settings['gltf_draco_normal_quantization'], + export_settings['gltf_draco_texcoord_quantization'], + export_settings['gltf_draco_color_quantization'], + export_settings['gltf_draco_generic_quantization']) + + preserve_triangle_order = primitive.targets is not None and len(primitive.targets) > 0 + if not dll.encoderEncode(encoder, preserve_triangle_order): + export_settings['log'].error('Could not encode primitive. 
Skipping primitive.') + + byte_length = dll.encoderGetByteLength(encoder) + encoded_data = bytes(byte_length) + dll.encoderCopy(encoder, encoded_data) + + if primitive.extensions is None: + primitive.extensions = {} + + extension_info = { + 'bufferView': BinaryData(encoded_data), + 'attributes': draco_ids + } + primitive.extensions['KHR_draco_mesh_compression'] = extension_info + encoded_primitives_cache[primitive] = extension_info + + # Set to triangle list mode. + primitive.mode = 4 + + # Update accessors to match encoded data. + indices.count = dll.encoderGetEncodedIndexCount(encoder) + encoded_vertices = dll.encoderGetEncodedVertexCount(encoder) + for attr_name in attributes: + attributes[attr_name].count = encoded_vertices + + dll.encoderRelease(encoder) diff --git a/scripts/addons_core/io_scene_gltf2/io/exp/gltf2_io_export.py b/scripts/addons_core/io_scene_gltf2/io/exp/gltf2_io_export.py new file mode 100755 index 00000000000..e848b936295 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/io/exp/gltf2_io_export.py @@ -0,0 +1,122 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +# +# Imports +# + +import json +import struct +from ...io.exp.gltf2_io_user_extensions import export_user_extensions + +# +# Globals +# + +# +# Functions +# +from collections import OrderedDict + + +def save_gltf(gltf, export_settings, encoder, glb_buffer): + # Use a class here, to be able to pass data by reference to hook (to be able to change them inside hook) + class GlTF_format: + def __init__(self, indent, separators): + self.indent = indent + self.separators = separators + + gltf_format = GlTF_format(None, (',', ':')) + + if export_settings['gltf_format'] != 'GLB': + gltf_format.indent = "\t" + # The comma is typically followed by a newline, so no trailing whitespace is needed on it. 
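+ # json.dumps() interprets separators as (item_separator, key_separator).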
+ # No space before and after ':' to save space + gltf_format.separators = (',', ':') + + sort_order = [ + "asset", + "extensionsUsed", + "extensionsRequired", + "extensions", + "extras", + "scene", + "scenes", + "nodes", + "cameras", + "animations", + "materials", + "meshes", + "textures", + "images", + "skins", + "accessors", + "bufferViews", + "samplers", + "buffers" + ] + + export_user_extensions('gather_gltf_encoded_hook', export_settings, gltf_format, sort_order) + + gltf_ordered = OrderedDict(sorted(gltf.items(), key=lambda item: sort_order.index(item[0]))) + gltf_encoded = json.dumps( + gltf_ordered, + indent=gltf_format.indent, + separators=gltf_format.separators, + cls=encoder, + allow_nan=False) + + # + + if export_settings['gltf_format'] != 'GLB': + file = open(export_settings['gltf_filepath'], "w", encoding="utf8", newline="\n") + file.write(gltf_encoded) + file.write("\n") + file.close() + + binary = export_settings['gltf_binary'] + if len(binary) > 0 and not export_settings['gltf_embed_buffers']: + file = open(export_settings['gltf_filedirectory'] + export_settings['gltf_binaryfilename'], "wb") + file.write(binary) + file.close() + + else: + file = open(export_settings['gltf_filepath'], "wb") + + gltf_data = gltf_encoded.encode() + binary = glb_buffer + + length_gltf = len(gltf_data) + spaces_gltf = (4 - (length_gltf & 3)) & 3 + length_gltf += spaces_gltf + + length_bin = len(binary) + zeros_bin = (4 - (length_bin & 3)) & 3 + length_bin += zeros_bin + + length = 12 + 8 + length_gltf + if length_bin > 0: + length += 8 + length_bin + + # Header (Version 2) + file.write('glTF'.encode()) + file.write(struct.pack("I", 2)) + file.write(struct.pack("I", length)) + + # Chunk 0 (JSON) + file.write(struct.pack("I", length_gltf)) + file.write('JSON'.encode()) + file.write(gltf_data) + file.write(b' ' * spaces_gltf) + + # Chunk 1 (BIN) + if length_bin > 0: + file.write(struct.pack("I", length_bin)) + file.write('BIN\0'.encode()) + file.write(binary) + file.write(b'\0' * zeros_bin) + + file.close() + + return True diff --git a/scripts/addons_core/io_scene_gltf2/io/exp/gltf2_io_image_data.py b/scripts/addons_core/io_scene_gltf2/io/exp/gltf2_io_image_data.py new file mode 100755 index 00000000000..559641af4bb --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/io/exp/gltf2_io_image_data.py @@ -0,0 +1,48 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import re + + +class ImageData: + """Contains encoded images""" + # FUTURE_WORK: as a method to allow the node graph to be better supported, we could model some of + # the node graph elements with numpy functions + + def __init__(self, data: bytes, mime_type: str, name: str): + self._data = data + self._mime_type = mime_type + self._name = name + + def __eq__(self, other): + return self._data == other.data + + def __hash__(self): + return hash(self._data) + + def adjusted_name(self): + regex_dot = re.compile("\\.") + adjusted_name = re.sub(regex_dot, "_", self.name) + new_name = "".join([char for char in adjusted_name if char not in "!#$&'()*+,/:;<>?@[\\]^`{|}~"]) + return new_name + + @property + def data(self): + return self._data + + @property + def name(self): + return self._name + + @property + def file_extension(self): + if self._mime_type == "image/jpeg": + return ".jpg" + elif self._mime_type == "image/webp": + return ".webp" + return ".png" + + @property + def byte_length(self): + return len(self._data) diff --git 
a/scripts/addons_core/io_scene_gltf2/io/exp/gltf2_io_user_extensions.py b/scripts/addons_core/io_scene_gltf2/io/exp/gltf2_io_user_extensions.py new file mode 100644 index 00000000000..30a7f3ad768 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/io/exp/gltf2_io_user_extensions.py @@ -0,0 +1,17 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +def export_user_extensions(hook_name, export_settings, *args): + if args and hasattr(args[0], "extensions"): + if args[0].extensions is None: + args[0].extensions = {} + + for extension in export_settings['gltf_user_extensions']: + hook = getattr(extension, hook_name, None) + if hook is not None: + try: + hook(*args, export_settings) + except Exception as e: + export_settings['log'].error("Extension hook " + hook_name + " fails on " + extension) + export_settings['log'].error(str(e)) diff --git a/scripts/addons_core/io_scene_gltf2/io/imp/__init__.py b/scripts/addons_core/io_scene_gltf2/io/imp/__init__.py new file mode 100755 index 00000000000..83c7609f0d8 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/io/imp/__init__.py @@ -0,0 +1,5 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +"""IO imp package.""" diff --git a/scripts/addons_core/io_scene_gltf2/io/imp/gltf2_io_binary.py b/scripts/addons_core/io_scene_gltf2/io/imp/gltf2_io_binary.py new file mode 100755 index 00000000000..9af613fd12f --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/io/imp/gltf2_io_binary.py @@ -0,0 +1,215 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +import struct +import numpy as np + +from ..com.gltf2_io import Accessor +from ..com.gltf2_io_constants import ComponentType, DataType + + +class BinaryData(): + """Binary reader.""" + def __new__(cls, *args, **kwargs): + raise RuntimeError("%s should not be instantiated" % cls) + + # Note that this function is not used in Blender importer, but is kept in + # Source code to be used in any pipeline that want to manage gltf/glb file in python + @staticmethod + def get_binary_from_accessor(gltf, accessor_idx): + """Get binary from accessor.""" + accessor = gltf.data.accessors[accessor_idx] + if accessor.buffer_view is None: + return None + + data = BinaryData.get_buffer_view(gltf, accessor.buffer_view) + + accessor_offset = accessor.byte_offset + if accessor_offset is None: + accessor_offset = 0 + + return data[accessor_offset:] + + @staticmethod + def get_buffer_view(gltf, buffer_view_idx): + """Get binary data for buffer view.""" + buffer_view = gltf.data.buffer_views[buffer_view_idx] + + if buffer_view.buffer in gltf.buffers.keys(): + buffer = gltf.buffers[buffer_view.buffer] + else: + # load buffer + gltf.load_buffer(buffer_view.buffer) + buffer = gltf.buffers[buffer_view.buffer] + + byte_offset = buffer_view.byte_offset + if byte_offset is None: + byte_offset = 0 + + return buffer[byte_offset:byte_offset + buffer_view.byte_length] + + @staticmethod + def get_data_from_accessor(gltf, accessor_idx, cache=False): + """Get data from accessor.""" + if accessor_idx in gltf.accessor_cache: + return gltf.accessor_cache[accessor_idx] + + data = BinaryData.decode_accessor(gltf, accessor_idx).tolist() + + if cache: + gltf.accessor_cache[accessor_idx] = data + + return data + + @staticmethod + def decode_accessor(gltf, accessor_idx, cache=False): + """Decodes accessor to 2D numpy array (count x num_components).""" + if accessor_idx in 
gltf.decode_accessor_cache: + return gltf.accessor_cache[accessor_idx] + + accessor = gltf.data.accessors[accessor_idx] + array = BinaryData.decode_accessor_obj(gltf, accessor) + + if cache: + gltf.accessor_cache[accessor_idx] = array + # Prevent accidentally modifying cached arrays + array.flags.writeable = False + + return array + + @staticmethod + def decode_accessor_internal(accessor): + # Is use internally when accessor binary data is not yet in a glTF buffer_view + # MAT2/3 have special alignment requirements that aren't handled. But it + # doesn't matter because nothing uses them. + assert accessor.type not in ['MAT2', 'MAT3'] + + dtype = ComponentType.to_numpy_dtype(accessor.component_type) + component_nb = DataType.num_elements(accessor.type) + + buffer_data = accessor.buffer_view.data + + accessor_offset = accessor.byte_offset or 0 + buffer_data = buffer_data[accessor_offset:] + + bytes_per_elem = dtype(1).nbytes + default_stride = bytes_per_elem * component_nb + stride = default_stride + + array = np.frombuffer( + buffer_data, + dtype=np.dtype(dtype).newbyteorder('<'), + count=accessor.count * component_nb, + ) + array = array.reshape(accessor.count, component_nb) + + return array + + @staticmethod + def decode_accessor_obj(gltf, accessor): + # MAT2/3 have special alignment requirements that aren't handled. But it + # doesn't matter because nothing uses them. + assert accessor.type not in ['MAT2', 'MAT3'] + + dtype = ComponentType.to_numpy_dtype(accessor.component_type) + component_nb = DataType.num_elements(accessor.type) + + if accessor.buffer_view is not None: + bufferView = gltf.data.buffer_views[accessor.buffer_view] + buffer_data = BinaryData.get_buffer_view(gltf, accessor.buffer_view) + + accessor_offset = accessor.byte_offset or 0 + buffer_data = buffer_data[accessor_offset:] + + bytes_per_elem = dtype(1).nbytes + default_stride = bytes_per_elem * component_nb + stride = bufferView.byte_stride or default_stride + + if stride == default_stride: + array = np.frombuffer( + buffer_data, + dtype=np.dtype(dtype).newbyteorder('<'), + count=accessor.count * component_nb, + ) + array = array.reshape(accessor.count, component_nb) + + else: + # The data looks like + # XXXppXXXppXXXppXXX + # where X are the components and p are padding. + # One XXXpp group is one stride's worth of data. 
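+ # as_strided() re-views the flat buffer as (count, component_nb), stepping by the byte stride so the padding bytes are skipped without copying.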
+ assert stride % bytes_per_elem == 0 + elems_per_stride = stride // bytes_per_elem + num_elems = (accessor.count - 1) * elems_per_stride + component_nb + + array = np.frombuffer( + buffer_data, + dtype=np.dtype(dtype).newbyteorder('<'), + count=num_elems, + ) + assert array.strides[0] == bytes_per_elem + array = np.lib.stride_tricks.as_strided( + array, + shape=(accessor.count, component_nb), + strides=(stride, bytes_per_elem), + ) + + else: + # No buffer view; initialize to zeros + array = np.zeros((accessor.count, component_nb), dtype=dtype) + + if accessor.sparse: + sparse_indices_obj = Accessor.from_dict({ + 'count': accessor.sparse.count, + 'bufferView': accessor.sparse.indices.buffer_view, + 'byteOffset': accessor.sparse.indices.byte_offset or 0, + 'componentType': accessor.sparse.indices.component_type, + 'type': 'SCALAR', + }) + sparse_indices = BinaryData.decode_accessor_obj(gltf, sparse_indices_obj) + sparse_indices = sparse_indices.reshape(len(sparse_indices)) + + sparse_values_obj = Accessor.from_dict({ + 'count': accessor.sparse.count, + 'bufferView': accessor.sparse.values.buffer_view, + 'byteOffset': accessor.sparse.values.byte_offset or 0, + 'componentType': accessor.component_type, + 'type': accessor.type, + }) + sparse_values = BinaryData.decode_accessor_obj(gltf, sparse_values_obj) + + if not array.flags.writeable: + array = array.copy() + array[sparse_indices] = sparse_values + + # Normalization + if accessor.normalized: + if accessor.component_type == 5120: # int8 + array = np.maximum(-1.0, array / 127.0) + elif accessor.component_type == 5121: # uint8 + array = array / 255.0 + elif accessor.component_type == 5122: # int16 + array = np.maximum(-1.0, array / 32767.0) + elif accessor.component_type == 5123: # uint16 + array = array / 65535.0 + + array = array.astype(np.float32, copy=False) + + return array + + @staticmethod + def get_image_data(gltf, img_idx): + """Get data from image.""" + pyimage = gltf.data.images[img_idx] + + assert not ( + pyimage.uri is not None and + pyimage.buffer_view is not None + ) + + if pyimage.uri is not None: + return gltf.load_uri(pyimage.uri) + if pyimage.buffer_view is not None: + return BinaryData.get_buffer_view(gltf, pyimage.buffer_view) + return None diff --git a/scripts/addons_core/io_scene_gltf2/io/imp/gltf2_io_gltf.py b/scripts/addons_core/io_scene_gltf2/io/imp/gltf2_io_gltf.py new file mode 100755 index 00000000000..1a25b157be9 --- /dev/null +++ b/scripts/addons_core/io_scene_gltf2/io/imp/gltf2_io_gltf.py @@ -0,0 +1,198 @@ +# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors +# +# SPDX-License-Identifier: Apache-2.0 + +from ...io.com.gltf2_io_path import uri_to_path +from ..com.gltf2_io import gltf_from_dict +from ..com.gltf2_io_debug import Log +import logging +import json +import struct +import base64 +from os.path import dirname, join, isfile + + +# Raise this error to have the importer report an error message. 
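+ # Note: this class shadows Python's built-in ImportError within this module.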
+class ImportError(RuntimeError): + pass + + +class glTFImporter(): + """glTF Importer class.""" + + def __init__(self, filename, import_settings): + """initialization.""" + self.filename = filename + self.import_settings = import_settings + self.glb_buffer = None + self.buffers = {} + self.accessor_cache = {} + self.decode_accessor_cache = {} + self.import_user_extensions = import_settings['import_user_extensions'] + self.variant_mapping = {} # Used to map between mgltf material idx and blender material, for Variants + + if 'loglevel' not in self.import_settings.keys(): + self.import_settings['loglevel'] = logging.CRITICAL + + self.log = Log(import_settings['loglevel']) + + # TODO: move to a com place? + self.extensions_managed = [ + 'KHR_materials_pbrSpecularGlossiness', + 'KHR_lights_punctual', + 'KHR_materials_unlit', + 'KHR_texture_transform', + 'KHR_materials_clearcoat', + 'KHR_mesh_quantization', + 'EXT_mesh_gpu_instancing', + 'KHR_draco_mesh_compression', + 'KHR_materials_variants', + 'KHR_materials_emissive_strength', + 'KHR_materials_transmission', + 'KHR_materials_specular', + 'KHR_materials_sheen', + 'KHR_materials_ior', + 'KHR_animation_pointer', + 'KHR_materials_volume', + 'EXT_texture_webp', + 'KHR_materials_anisotropy' + ] + + # Add extensions required supported by custom import extensions + for import_extension in self.import_user_extensions: + if hasattr(import_extension, "extensions"): + for custom_extension in import_extension.extensions: + if custom_extension.required: + self.extensions_managed.append(custom_extension.name) + + @staticmethod + def load_json(content): + def bad_constant(val): + raise ImportError('Bad glTF: json contained %s' % val) + try: + text = str(content, encoding='utf-8') + return json.loads(text, parse_constant=bad_constant) + except ValueError as e: + raise ImportError('Bad glTF: json error: %s' % e.args[0]) + + @staticmethod + def check_version(gltf): + """Check version. This is done *before* gltf_from_dict.""" + if not isinstance(gltf, dict) or 'asset' not in gltf: + raise ImportError("Bad glTF: no asset in json") + if 'version' not in gltf['asset']: + raise ImportError("Bad glTF: no version") + if gltf['asset']['version'] != "2.0": + raise ImportError("glTF version must be 2.0; got %s" % gltf['asset']['version']) + + def checks(self): + """Some checks.""" + if self.data.extensions_required is not None: + for extension in self.data.extensions_required: + if extension not in self.data.extensions_used: + raise ImportError("Extension required must be in Extension Used too") + if extension not in self.extensions_managed: + raise ImportError("Extension %s is not available on this addon version" % extension) + + if self.data.extensions_used is not None: + for extension in self.data.extensions_used: + if extension not in self.extensions_managed: + # Non blocking error #TODO log + pass + + def load_glb(self, content): + """Load binary glb.""" + magic = content[:4] + if magic != b'glTF': + raise ImportError("This file is not a glTF/glb file") + + version, file_size = struct.unpack_from(' Animations, and 3D Viewport -> Animation panel", + "doc_url": "{BLENDER_MANUAL_URL}/animation/armatures/posing/editing/pose_library.html", + "support": "OFFICIAL", + "category": "Animation", +} + +from typing import List, Tuple + +_need_reload = "operators" in locals() +from . 
import gui, keymaps, operators, conversion + +if _need_reload: + import importlib + + gui = importlib.reload(gui) + keymaps = importlib.reload(keymaps) + operators = importlib.reload(operators) + conversion = importlib.reload(conversion) + +import bpy + +addon_keymaps: List[Tuple[bpy.types.KeyMap, bpy.types.KeyMapItem]] = [] + + +def register() -> None: + bpy.types.WindowManager.poselib_previous_action = bpy.props.PointerProperty(type=bpy.types.Action) + + operators.register() + keymaps.register() + gui.register() + + +def unregister() -> None: + gui.unregister() + keymaps.unregister() + operators.unregister() + + try: + del bpy.types.WindowManager.poselib_previous_action + except AttributeError: + pass diff --git a/scripts/addons_core/pose_library/asset_browser.py b/scripts/addons_core/pose_library/asset_browser.py new file mode 100644 index 00000000000..e714f5e05d0 --- /dev/null +++ b/scripts/addons_core/pose_library/asset_browser.py @@ -0,0 +1,99 @@ +# SPDX-FileCopyrightText: 2021-2023 Blender Foundation +# +# SPDX-License-Identifier: GPL-2.0-or-later + +"""Functions for finding and working with Asset Browsers.""" + +from typing import Iterable, Optional, Tuple + +import bpy +from bpy_extras import asset_utils + + +if "functions" not in locals(): + from . import functions +else: + import importlib + + functions = importlib.reload(functions) + + +def biggest_asset_browser_area(screen: bpy.types.Screen) -> Optional[bpy.types.Area]: + """Return the asset browser Area that's largest on screen. + + :param screen: context.window.screen + + :return: the Area, or None if no Asset Browser area exists. + """ + + def area_sorting_key(area: bpy.types.Area) -> Tuple[bool, int]: + """Return area size in pixels.""" + return area.width * area.height + + areas = list(suitable_areas(screen)) + if not areas: + return None + + return max(areas, key=area_sorting_key) + + +def suitable_areas(screen: bpy.types.Screen) -> Iterable[bpy.types.Area]: + """Generator, yield Asset Browser areas.""" + + for area in screen.areas: + space_data = area.spaces[0] + if not asset_utils.SpaceAssetInfo.is_asset_browser(space_data): + continue + yield area + + +def area_from_context(context: bpy.types.Context) -> Optional[bpy.types.Area]: + """Return an Asset Browser suitable for the given category. + + Prefers the current Asset Browser if available, otherwise the biggest. + """ + + space_data = context.space_data + if asset_utils.SpaceAssetInfo.is_asset_browser(space_data): + return context.area + + # Try the current screen first. 
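+ # Fall back to the largest Asset Browser on the current screen, then search the screens of other windows.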
+ browser_area = biggest_asset_browser_area(context.screen) + if browser_area: + return browser_area + + for win in context.window_manager.windows: + if win.screen == context.screen: + continue + browser_area = biggest_asset_browser_area(win.screen) + if browser_area: + return browser_area + + return None + + +def activate_asset(asset: bpy.types.Action, asset_browser: bpy.types.Area, *, deferred: bool) -> None: + """Select & focus the asset in the browser.""" + + space_data = asset_browser.spaces[0] + assert asset_utils.SpaceAssetInfo.is_asset_browser(space_data) + space_data.activate_asset_by_id(asset, deferred=deferred) + + +def active_catalog_id(asset_browser: bpy.types.Area) -> str: + """Return the ID of the catalog shown in the asset browser.""" + return params(asset_browser).catalog_id + + +def params(asset_browser: bpy.types.Area) -> bpy.types.FileAssetSelectParams: + """Return the asset browser parameters given its Area.""" + space_data = asset_browser.spaces[0] + assert asset_utils.SpaceAssetInfo.is_asset_browser(space_data) + return space_data.params + + +def tag_redraw(screen: bpy.types.Screen) -> None: + """Tag all asset browsers for redrawing.""" + + for area in suitable_areas(screen): + area.tag_redraw() diff --git a/scripts/addons_core/pose_library/conversion.py b/scripts/addons_core/pose_library/conversion.py new file mode 100644 index 00000000000..08c250e578a --- /dev/null +++ b/scripts/addons_core/pose_library/conversion.py @@ -0,0 +1,62 @@ +# SPDX-FileCopyrightText: 2021-2023 Blender Foundation +# +# SPDX-License-Identifier: GPL-2.0-or-later + +""" +Pose Library - Conversion of old pose libraries. +""" + +from typing import Optional +from collections.abc import Collection + +if "pose_creation" not in locals(): + from . import pose_creation +else: + import importlib + + pose_creation = importlib.reload(pose_creation) + +import bpy +from bpy.types import ( + Action, + TimelineMarker, +) + + +def convert_old_poselib(old_poselib: Action) -> Collection[Action]: + """Convert an old-style pose library to a set of pose Actions. + + Old pose libraries were one Action with multiple pose markers. Each pose + marker will be converted to an Action by itself and marked as asset. + """ + + pose_assets = [action for marker in old_poselib.pose_markers if (action := convert_old_pose(old_poselib, marker))] + + # Mark all Actions as assets in one go. Ideally this would be done on an + # appropriate frame in the scene (to set up things like the background + # colour), but the old-style poselib doesn't contain such information. All + # we can do is just render on the current frame. 
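+ # temp_override() exposes the new Actions as the operator's selection, so a single asset.mark call marks them all.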
+ context_override = {'selected_ids': pose_assets} + with bpy.context.temp_override(**context_override): + bpy.ops.asset.mark() + + return pose_assets + + +def convert_old_pose(old_poselib: Action, marker: TimelineMarker) -> Optional[Action]: + """Convert an old-style pose library pose to a pose action.""" + + frame: int = marker.frame + action: Optional[Action] = None + + for fcurve in old_poselib.fcurves: + key = pose_creation.find_keyframe(fcurve, frame) + if not key: + continue + + if action is None: + action = bpy.data.actions.new(marker.name) + + pose_creation.create_single_key_fcurve(action, fcurve, key) + + return action diff --git a/scripts/addons_core/pose_library/functions.py b/scripts/addons_core/pose_library/functions.py new file mode 100644 index 00000000000..1e5971d952a --- /dev/null +++ b/scripts/addons_core/pose_library/functions.py @@ -0,0 +1,57 @@ +# SPDX-FileCopyrightText: 2021-2022 Blender Foundation +# +# SPDX-License-Identifier: GPL-2.0-or-later + +""" +Pose Library - functions. +""" + +from pathlib import Path +from typing import Any, List, Iterable + +Datablock = Any + +import bpy + + +def load_assets_from(filepath: Path) -> List[Datablock]: + if not has_assets(filepath): + # Avoid loading any datablocks when there are none marked as asset. + return [] + + # Append everything from the file. + with bpy.data.libraries.load(str(filepath)) as ( + data_from, + data_to, + ): + for attr in dir(data_to): + setattr(data_to, attr, getattr(data_from, attr)) + + # Iterate over the appended datablocks to find assets. + def loaded_datablocks() -> Iterable[Datablock]: + for attr in dir(data_to): + datablocks = getattr(data_to, attr) + for datablock in datablocks: + yield datablock + + loaded_assets = [] + for datablock in loaded_datablocks(): + if not getattr(datablock, "asset_data", None): + continue + + # Fake User is lost when appending from another file. + datablock.use_fake_user = True + loaded_assets.append(datablock) + return loaded_assets + + +def has_assets(filepath: Path) -> bool: + with bpy.data.libraries.load(str(filepath), assets_only=True) as ( + data_from, + _, + ): + for attr in dir(data_from): + data_names = getattr(data_from, attr) + if data_names: + return True + return False diff --git a/scripts/addons_core/pose_library/gui.py b/scripts/addons_core/pose_library/gui.py new file mode 100644 index 00000000000..390dd172808 --- /dev/null +++ b/scripts/addons_core/pose_library/gui.py @@ -0,0 +1,254 @@ +# SPDX-FileCopyrightText: 2021-2023 Blender Foundation +# +# SPDX-License-Identifier: GPL-2.0-or-later + +""" +Pose Library - GUI definition. +""" + +import bpy +from bpy.types import ( + AssetHandle, + AssetRepresentation, + Context, + Menu, + Panel, + UILayout, + UIList, + WindowManager, + WorkSpace, +) +from bl_ui_utils.layout import operator_context + + +class PoseLibraryPanel: + @classmethod + def pose_library_panel_poll(cls, context: Context) -> bool: + return context.mode == 'POSE' + + @classmethod + def poll(cls, context: Context) -> bool: + return cls.pose_library_panel_poll(context) + + +class VIEW3D_AST_pose_library(bpy.types.AssetShelf): + bl_space_type = "VIEW_3D" + # We have own keymap items to add custom drag behavior (pose blending), disable the default + # asset dragging. 
+ bl_options = {'NO_ASSET_DRAG'} + + @classmethod + def poll(cls, context: Context) -> bool: + return PoseLibraryPanel.poll(context) + + @classmethod + def asset_poll(cls, asset: AssetRepresentation) -> bool: + return asset.id_type == 'ACTION' + + @classmethod + def draw_context_menu(cls, _context: Context, _asset: AssetRepresentation, layout: UILayout): + # Make sure these operator properties match those used in `VIEW3D_PT_pose_library_legacy`. + layout.operator("poselib.apply_pose_asset", text="Apply Pose").flipped = False + layout.operator("poselib.apply_pose_asset", text="Apply Pose Flipped").flipped = True + + with operator_context(layout, 'INVOKE_DEFAULT'): + layout.operator("poselib.blend_pose_asset", text="Blend Pose") + + layout.separator() + props = layout.operator("poselib.pose_asset_select_bones", text="Select Pose Bones") + props.select = True + props = layout.operator("poselib.pose_asset_select_bones", text="Deselect Pose Bones") + props.select = False + + layout.separator() + layout.operator("asset.open_containing_blend_file") + + +class VIEW3D_PT_pose_library_legacy(PoseLibraryPanel, Panel): + bl_space_type = "VIEW_3D" + bl_region_type = "UI" + bl_category = "Animation" + bl_label = "Pose Library" + + def draw(self, _context: Context) -> None: + layout = self.layout + layout.label(text="The pose library moved.", icon='INFO') + sub = layout.column(align=True) + sub.label(text="Pose assets are now available") + sub.label(text="in the asset shelf.") + layout.operator("screen.region_toggle", text="Toggle Asset Shelf").region_type = 'ASSET_SHELF' + + +def pose_library_list_item_context_menu(self: UIList, context: Context) -> None: + def is_pose_asset_view() -> bool: + # Important: Must check context first, or the menu is added for every kind of list. + list = getattr(context, "ui_list", None) + if not list or list.bl_idname != "UI_UL_asset_view" or list.list_id != "pose_assets": + return False + if not context.active_file: + return False + return True + + def is_pose_library_asset_browser() -> bool: + asset_library_ref = getattr(context, "asset_library_reference", None) + if not asset_library_ref: + return False + asset = getattr(context, "asset", None) + if not asset: + return False + return bool(asset.id_type == 'ACTION') + + if not is_pose_asset_view() and not is_pose_library_asset_browser(): + return + + layout = self.layout + + layout.separator() + + # Make sure these operator properties match those used in `VIEW3D_PT_pose_library_legacy`. 
+ layout.operator("poselib.apply_pose_asset", text="Apply Pose").flipped = False + layout.operator("poselib.apply_pose_asset", text="Apply Pose Flipped").flipped = True + + with operator_context(layout, 'INVOKE_DEFAULT'): + layout.operator("poselib.blend_pose_asset", text="Blend Pose") + + layout.separator() + props = layout.operator("poselib.pose_asset_select_bones", text="Select Pose Bones") + props.select = True + props = layout.operator("poselib.pose_asset_select_bones", text="Deselect Pose Bones") + props.select = False + + if not is_pose_asset_view(): + layout.separator() + layout.operator("asset.assign_action") + + layout.separator() + if is_pose_asset_view(): + layout.operator("asset.open_containing_blend_file") + + props.select = False + + +class DOPESHEET_PT_asset_panel(PoseLibraryPanel, Panel): + bl_space_type = "DOPESHEET_EDITOR" + bl_region_type = "UI" + bl_label = "Create Pose Asset" + bl_category = "Action" + + def draw(self, context: Context) -> None: + layout = self.layout + col = layout.column(align=True) + row = col.row(align=True) + row.operator("poselib.create_pose_asset").activate_new_action = True + if bpy.types.POSELIB_OT_restore_previous_action.poll(context): + row.operator("poselib.restore_previous_action", text="", icon='LOOP_BACK') + col.operator("poselib.copy_as_asset", icon="COPYDOWN") + + layout.operator("poselib.convert_old_poselib") + + +def pose_library_list_item_asset_menu(self: UIList, context: Context) -> None: + layout = self.layout + layout.menu("ASSETBROWSER_MT_asset") + + +class ASSETBROWSER_MT_asset(Menu): + bl_label = "Asset" + + @classmethod + def poll(cls, context): + from bpy_extras.asset_utils import SpaceAssetInfo + + return SpaceAssetInfo.is_asset_browser_poll(context) + + def draw(self, context: Context) -> None: + layout = self.layout + + layout.operator("poselib.paste_asset", icon='PASTEDOWN') + layout.separator() + layout.operator("poselib.create_pose_asset").activate_new_action = False + + +# Messagebus subscription to monitor asset library changes. +_msgbus_owner = object() + + +def _on_asset_library_changed() -> None: + """Update areas when a different asset library is selected.""" + refresh_area_types = {'DOPESHEET_EDITOR', 'VIEW_3D'} + for win in bpy.context.window_manager.windows: + for area in win.screen.areas: + if area.type not in refresh_area_types: + continue + + area.tag_redraw() + + +def register_message_bus() -> None: + bpy.msgbus.subscribe_rna( + key=(bpy.types.FileAssetSelectParams, "asset_library_reference"), + owner=_msgbus_owner, + args=(), + notify=_on_asset_library_changed, + options={'PERSISTENT'}, + ) + + +def unregister_message_bus() -> None: + bpy.msgbus.clear_by_owner(_msgbus_owner) + + +@bpy.app.handlers.persistent +def _on_blendfile_load_pre(none, other_none) -> None: + # The parameters are required, but both are None. + unregister_message_bus() + + +@bpy.app.handlers.persistent +def _on_blendfile_load_post(none, other_none) -> None: + # The parameters are required, but both are None. + register_message_bus() + + +classes = ( + DOPESHEET_PT_asset_panel, + VIEW3D_PT_pose_library_legacy, + ASSETBROWSER_MT_asset, + VIEW3D_AST_pose_library, +) + +_register, _unregister = bpy.utils.register_classes_factory(classes) + + +def register() -> None: + _register() + + WorkSpace.active_pose_asset_index = bpy.props.IntProperty( + name="Active Pose Asset", + # TODO explain which list the index belongs to, or how it can be used to get the pose. 
+ description="Per workspace index of the active pose asset", + ) + # Register for window-manager. This is a global property that shouldn't be + # written to files. + WindowManager.pose_assets = bpy.props.CollectionProperty(type=AssetHandle) + + bpy.types.UI_MT_list_item_context_menu.prepend(pose_library_list_item_context_menu) + bpy.types.ASSETBROWSER_MT_context_menu.prepend(pose_library_list_item_context_menu) + bpy.types.ASSETBROWSER_MT_editor_menus.append(pose_library_list_item_asset_menu) + + register_message_bus() + bpy.app.handlers.load_pre.append(_on_blendfile_load_pre) + bpy.app.handlers.load_post.append(_on_blendfile_load_post) + + +def unregister() -> None: + _unregister() + + unregister_message_bus() + + del WorkSpace.active_pose_asset_index + del WindowManager.pose_assets + + bpy.types.UI_MT_list_item_context_menu.remove(pose_library_list_item_context_menu) + bpy.types.ASSETBROWSER_MT_context_menu.remove(pose_library_list_item_context_menu) + bpy.types.ASSETBROWSER_MT_editor_menus.remove(pose_library_list_item_asset_menu) diff --git a/scripts/addons_core/pose_library/keymaps.py b/scripts/addons_core/pose_library/keymaps.py new file mode 100644 index 00000000000..4541d988b29 --- /dev/null +++ b/scripts/addons_core/pose_library/keymaps.py @@ -0,0 +1,38 @@ +# SPDX-FileCopyrightText: 2010-2023 Blender Foundation +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from typing import List, Tuple + +import bpy + +addon_keymaps: List[Tuple[bpy.types.KeyMap, bpy.types.KeyMapItem]] = [] + + +def register() -> None: + wm = bpy.context.window_manager + if wm.keyconfigs.addon is None: + # This happens when Blender is running in the background. + return + + km = wm.keyconfigs.addon.keymaps.new(name="File Browser Main", space_type="FILE_BROWSER") + + # DblClick to apply pose. + kmi = km.keymap_items.new("poselib.apply_pose_asset", "LEFTMOUSE", "DOUBLE_CLICK") + addon_keymaps.append((km, kmi)) + + # Asset Shelf + km = wm.keyconfigs.addon.keymaps.new(name="Asset Shelf") + # Click to apply pose. + kmi = km.keymap_items.new("poselib.apply_pose_asset", "LEFTMOUSE", "CLICK") + addon_keymaps.append((km, kmi)) + # Drag to blend pose. + kmi = km.keymap_items.new("poselib.blend_pose_asset", "LEFTMOUSE", "CLICK_DRAG") + addon_keymaps.append((km, kmi)) + + +def unregister() -> None: + # Clear shortcuts from the keymap. + for km, kmi in addon_keymaps: + km.keymap_items.remove(kmi) + addon_keymaps.clear() diff --git a/scripts/addons_core/pose_library/operators.py b/scripts/addons_core/pose_library/operators.py new file mode 100644 index 00000000000..624a785f11a --- /dev/null +++ b/scripts/addons_core/pose_library/operators.py @@ -0,0 +1,460 @@ +# SPDX-FileCopyrightText: 2021-2023 Blender Foundation +# +# SPDX-License-Identifier: GPL-2.0-or-later + +""" +Pose Library - operators. +""" + +from pathlib import Path +from typing import Optional, Set + +_need_reload = "functions" in locals() +from . 
import asset_browser, functions, pose_creation, pose_usage + +if _need_reload: + import importlib + + asset_browser = importlib.reload(asset_browser) + functions = importlib.reload(functions) + pose_creation = importlib.reload(pose_creation) + pose_usage = importlib.reload(pose_usage) + + +import bpy +from bpy.props import BoolProperty, StringProperty +from bpy.types import ( + Action, + AssetRepresentation, + Context, + Event, + Object, + Operator, +) +from bpy_extras import asset_utils +from bpy.app.translations import pgettext_tip as tip_ + + +class PoseAssetCreator: + @classmethod + def poll(cls, context: Context) -> bool: + return bool( + # There must be an object. + context.object + # It must be in pose mode with selected bones. + and context.object.mode == "POSE" + and context.object.pose + and context.selected_pose_bones_from_active_object + ) + + +class POSELIB_OT_create_pose_asset(PoseAssetCreator, Operator): + bl_idname = "poselib.create_pose_asset" + bl_label = "Create Pose Asset" + bl_description = ( + "Create a new Action that contains the pose of the selected bones, and mark it as Asset. " + "The asset will be stored in the current blend file" + ) + bl_options = {'REGISTER', 'UNDO'} + + pose_name: StringProperty(name="Pose Name") # type: ignore + activate_new_action: BoolProperty(name="Activate New Action", default=True) # type: ignore + + @classmethod + def poll(cls, context: Context) -> bool: + if context.object is None or context.object.mode != "POSE": + # The operator assumes pose mode, so that bone selection is visible. + cls.poll_message_set("An active armature object in pose mode is needed") + return False + + # Make sure that if there is an asset browser open, the artist can see the newly created pose asset. + asset_browse_area: Optional[bpy.types.Area] = asset_browser.area_from_context(context) + if not asset_browse_area: + # No asset browser is visible, so there also aren't any expectations + # that this asset will be visible. + return True + + asset_space_params = asset_browser.params(asset_browse_area) + if asset_space_params.asset_library_reference != 'LOCAL': + cls.poll_message_set("Asset Browser must be set to the Current File library") + return False + + return True + + def execute(self, context: Context) -> Set[str]: + pose_name = self.pose_name or context.object.name + asset = pose_creation.create_pose_asset_from_context(context, pose_name) + if not asset: + self.report({"WARNING"}, "No keyframes were found for this pose") + return {"CANCELLED"} + + if self.activate_new_action: + self._set_active_action(context, asset) + self._activate_asset_in_browser(context, asset) + return {'FINISHED'} + + def _set_active_action(self, context: Context, asset: Action) -> None: + self._prevent_action_loss(context.object) + + anim_data = context.object.animation_data_create() + context.window_manager.poselib_previous_action = anim_data.action + anim_data.action = asset + + def _activate_asset_in_browser(self, context: Context, asset: Action) -> None: + """Activate the new asset in the appropriate Asset Browser. + + This makes it possible to immediately check & edit the created pose asset. + """ + + asset_browse_area: Optional[bpy.types.Area] = asset_browser.area_from_context(context) + if not asset_browse_area: + return + + # After creating an asset, the window manager has to process the + # notifiers before editors should be manipulated. 
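+ # assign_from_asset_browser() copies the browser's active catalog ID onto the new asset.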
+ pose_creation.assign_from_asset_browser(asset, asset_browse_area) + + # Pass deferred=True, because we just created a new asset that isn't + # known to the Asset Browser space yet. That requires the processing of + # notifiers, which will only happen after this code has finished + # running. + asset_browser.activate_asset(asset, asset_browse_area, deferred=True) + + def _prevent_action_loss(self, object: Object) -> None: + """Mark the action with Fake User if necessary. + + This is to prevent action loss when we reduce its reference counter by one. + """ + + if not object.animation_data: + return + + action = object.animation_data.action + if not action: + return + + if action.use_fake_user or action.users > 1: + # Removing one user won't GC it. + return + + action.use_fake_user = True + self.report({'WARNING'}, tip_("Action %s marked Fake User to prevent loss") % action.name) + + +class POSELIB_OT_restore_previous_action(Operator): + bl_idname = "poselib.restore_previous_action" + bl_label = "Restore Previous Action" + bl_description = "Switch back to the previous Action, after creating a pose asset" + bl_options = {'REGISTER', 'UNDO'} + + @classmethod + def poll(cls, context: Context) -> bool: + return bool( + context.window_manager.poselib_previous_action + and context.object + and context.object.animation_data + and context.object.animation_data.action + and context.object.animation_data.action.asset_data is not None + ) + + def execute(self, context: Context) -> Set[str]: + # This is the Action that was just created with "Create Pose Asset". + # It has to be re-applied after switching to the previous action, + # to ensure the character keeps the same pose. + self.pose_action = context.object.animation_data.action + + prev_action = context.window_manager.poselib_previous_action + context.object.animation_data.action = prev_action + context.window_manager.poselib_previous_action = None + + # Wait a bit for the action assignment to be handled, before applying the pose. 
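+ # A short modal timer defers apply_pose_from_action() until the event loop has processed the action switch.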
+ wm = context.window_manager + self._timer = wm.event_timer_add(0.001, window=context.window) + wm.modal_handler_add(self) + + return {'RUNNING_MODAL'} + + def modal(self, context, event): + if event.type != 'TIMER': + return {'RUNNING_MODAL'} + + wm = context.window_manager + wm.event_timer_remove(self._timer) + + context.object.pose.apply_pose_from_action(self.pose_action) + return {'FINISHED'} + + +class ASSET_OT_assign_action(Operator): + bl_idname = "asset.assign_action" + bl_label = "Assign Action" + bl_description = "Set this pose Action as active Action on the active Object" + bl_options = {'REGISTER', 'UNDO'} + + @classmethod + def poll(cls, context: Context) -> bool: + return bool(isinstance(getattr(context, "id", None), Action) and context.object) + + def execute(self, context: Context) -> Set[str]: + context.object.animation_data_create().action = context.id + return {"FINISHED"} + + +class POSELIB_OT_copy_as_asset(PoseAssetCreator, Operator): + bl_idname = "poselib.copy_as_asset" + bl_label = "Copy Pose as Asset" + bl_description = "Create a new pose asset on the clipboard, to be pasted into an Asset Browser" + bl_options = {'REGISTER'} + + CLIPBOARD_ASSET_MARKER = "ASSET-BLEND=" + + def execute(self, context: Context) -> Set[str]: + asset = pose_creation.create_pose_asset_from_context(context, new_asset_name=context.object.name) + if asset is None: + self.report({"WARNING"}, "No animation data found to create asset from") + return {"CANCELLED"} + + filepath = self.save_datablock(asset) + + context.window_manager.clipboard = "%s%s" % ( + self.CLIPBOARD_ASSET_MARKER, + filepath, + ) + asset_browser.tag_redraw(context.screen) + self.report({"INFO"}, "Pose Asset copied, use Paste As New Asset in any Asset Browser to paste") + + # The asset has been saved to disk, so to clean up it has to loose its asset & fake user status. + asset.asset_clear() + asset.use_fake_user = False + + # The asset can be removed from the main DB, as it was purely created to + # be stored to disk, and not to be used in this file. + if asset.users > 0: + # This should never happen, and indicates a bug in the code. Having a warning about it is nice, + # but it shouldn't stand in the way of actually cleaning up the meant-to-be-temporary datablock. + self.report({"WARNING"}, "Unexpected non-zero user count for the asset, please report this as a bug") + + bpy.data.actions.remove(asset) + return {"FINISHED"} + + def save_datablock(self, action: Action) -> Path: + tempdir = Path(bpy.app.tempdir) + filepath = tempdir / "copied_asset.blend" + bpy.data.libraries.write( + str(filepath), + datablocks={action}, + path_remap="NONE", + fake_user=True, + compress=True, # Single-datablock blend file, likely little need to diff. + ) + return filepath + + +class POSELIB_OT_paste_asset(Operator): + bl_idname = "poselib.paste_asset" + bl_label = "Paste as New Asset" + bl_description = "Paste the Asset that was previously copied using Copy As Asset" + bl_options = {'REGISTER', 'UNDO'} + + @classmethod + def poll(cls, context: Context) -> bool: + if not asset_utils.SpaceAssetInfo.is_asset_browser(context.space_data): + cls.poll_message_set("Current editor is not an asset browser") + return False + + asset_lib_ref = context.space_data.params.asset_library_reference + if asset_lib_ref != 'LOCAL': + cls.poll_message_set("Asset Browser must be set to the Current File library") + return False + + # Delay checking the clipboard as much as possible, as it's CPU-heavier than the other checks. 
+ clipboard: str = context.window_manager.clipboard + if not clipboard: + cls.poll_message_set("Clipboard is empty") + return False + + marker = POSELIB_OT_copy_as_asset.CLIPBOARD_ASSET_MARKER + if not clipboard.startswith(marker): + cls.poll_message_set("Clipboard does not contain an asset") + return False + + return True + + def execute(self, context: Context) -> Set[str]: + clipboard = context.window_manager.clipboard + marker_len = len(POSELIB_OT_copy_as_asset.CLIPBOARD_ASSET_MARKER) + filepath = Path(clipboard[marker_len:]) + + assets = functions.load_assets_from(filepath) + if not assets: + self.report({"ERROR"}, "Did not find any assets on clipboard") + return {"CANCELLED"} + + self.report({"INFO"}, tip_("Pasted %d assets") % len(assets)) + + bpy.ops.asset.library_refresh() + + asset_browser_area = asset_browser.area_from_context(context) + if not asset_browser_area: + return {"FINISHED"} + + # Assign same catalog as in asset browser. + catalog_id = asset_browser.active_catalog_id(asset_browser_area) + for asset in assets: + asset.asset_data.catalog_id = catalog_id + asset_browser.activate_asset(assets[0], asset_browser_area, deferred=True) + + return {"FINISHED"} + + +class PoseAssetUser: + @classmethod + def poll(cls, context: Context) -> bool: + if not ( + context.object + and context.object.mode == "POSE" # This condition may not be desired. + and context.asset + ): + return False + return context.asset.id_type == 'ACTION' + + def execute(self, context: Context) -> Set[str]: + asset: AssetRepresentation = context.asset + if asset.local_id: + return self.use_pose(context, asset.local_id) + return self._load_and_use_pose(context) + + def use_pose(self, context: Context, asset: bpy.types.ID) -> Set[str]: + # Implement in subclass. + pass + + def _load_and_use_pose(self, context: Context) -> Set[str]: + asset = context.asset + asset_lib_path = asset.full_library_path + + if not asset_lib_path: + self.report( # type: ignore + {"ERROR"}, + # TODO: Add some way to get the library name from the library reference + # (just asset_library_reference.name?). 
+ tip_("Selected asset %s could not be located inside the asset library") % asset.name, + ) + return {"CANCELLED"} + if asset.id_type != 'ACTION': + self.report( # type: ignore + {"ERROR"}, + tip_("Selected asset %s is not an Action") % asset.name, + ) + return {"CANCELLED"} + + with bpy.types.BlendData.temp_data() as temp_data: + with temp_data.libraries.load(asset_lib_path) as (data_from, data_to): + data_to.actions = [asset.name] + + action: Action = data_to.actions[0] + return self.use_pose(context, action) + + +class POSELIB_OT_pose_asset_select_bones(PoseAssetUser, Operator): + bl_idname = "poselib.pose_asset_select_bones" + bl_label = "Select Bones" + bl_description = "Select those bones that are used in this pose" + bl_options = {'REGISTER', 'UNDO'} + + select: BoolProperty(name="Select", default=True) # type: ignore + flipped: BoolProperty(name="Flipped", default=False) # type: ignore + + def use_pose(self, context: Context, pose_asset: Action) -> Set[str]: + arm_object: Object = context.object + pose_usage.select_bones(arm_object, pose_asset, select=self.select, flipped=self.flipped) + if self.select: + msg = tip_("Selected bones from %s") % pose_asset.name + else: + msg = tip_("Deselected bones from %s") % pose_asset.name + self.report({"INFO"}, msg) + return {"FINISHED"} + + @classmethod + def description(cls, _context: Context, properties: 'POSELIB_OT_pose_asset_select_bones') -> str: + if properties.select: + return cls.bl_description + return cls.bl_description.replace("Select", "Deselect") + + +class POSELIB_OT_convert_old_poselib(Operator): + bl_idname = "poselib.convert_old_poselib" + bl_label = "Convert Legacy Pose Library" + bl_description = "Create a pose asset for each pose marker in the current action" + bl_options = {'REGISTER', 'UNDO'} + + @classmethod + def poll(cls, context: Context) -> bool: + action = context.object and context.object.animation_data and context.object.animation_data.action + if not action: + cls.poll_message_set("Active object has no Action") + return False + if not action.pose_markers: + cls.poll_message_set(tip_("Action %r is not a legacy pose library") % action.name) + return False + return True + + def execute(self, context: Context) -> Set[str]: + from . import conversion + + old_poselib = context.object.animation_data.action + new_actions = conversion.convert_old_poselib(old_poselib) + + if not new_actions: + self.report({'ERROR'}, "Unable to convert to pose assets") + return {'CANCELLED'} + + self.report({'INFO'}, tip_("Converted %d poses to pose assets") % len(new_actions)) + return {'FINISHED'} + + +class POSELIB_OT_convert_old_object_poselib(Operator): + bl_idname = "poselib.convert_old_object_poselib" + bl_label = "Convert Legacy Pose Library" + bl_description = "Create a pose asset for each pose marker in this legacy pose library data-block" + + # Mark this one as "internal", as it converts `context.object.pose_library` + # instead of its current animation Action. + bl_options = {'REGISTER', 'UNDO', 'INTERNAL'} + + @classmethod + def poll(cls, context: Context) -> bool: + action = context.object and context.object.pose_library + if not action: + cls.poll_message_set("Active object has no pose library Action") + return False + if not action.pose_markers: + cls.poll_message_set(tip_("Action %r is not a legacy pose library") % action.name) + return False + return True + + def execute(self, context: Context) -> Set[str]: + from . 
import conversion + + old_poselib = context.object.pose_library + new_actions = conversion.convert_old_poselib(old_poselib) + + if not new_actions: + self.report({'ERROR'}, "Unable to convert to pose assets") + return {'CANCELLED'} + + self.report({'INFO'}, tip_("Converted %d poses to pose assets") % len(new_actions)) + return {'FINISHED'} + + +classes = ( + ASSET_OT_assign_action, + POSELIB_OT_convert_old_poselib, + POSELIB_OT_convert_old_object_poselib, + POSELIB_OT_copy_as_asset, + POSELIB_OT_create_pose_asset, + POSELIB_OT_paste_asset, + POSELIB_OT_pose_asset_select_bones, + POSELIB_OT_restore_previous_action, +) + +register, unregister = bpy.utils.register_classes_factory(classes) diff --git a/scripts/addons_core/pose_library/pose_creation.py b/scripts/addons_core/pose_library/pose_creation.py new file mode 100644 index 00000000000..c7384041bc0 --- /dev/null +++ b/scripts/addons_core/pose_library/pose_creation.py @@ -0,0 +1,411 @@ +# SPDX-FileCopyrightText: 2021-2023 Blender Foundation +# +# SPDX-License-Identifier: GPL-2.0-or-later + +""" +Pose Library - creation functions. +""" + +import dataclasses +import functools +import re + +from typing import Optional, FrozenSet, Set, Union, Iterable, cast + +if "functions" not in locals(): + from . import asset_browser, functions +else: + import importlib + + asset_browser = importlib.reload(asset_browser) + functions = importlib.reload(functions) + +import bpy +from bpy.types import ( + Action, + Bone, + Context, + FCurve, + Keyframe, +) + +FCurveValue = Union[float, int] + +pose_bone_re = re.compile(r'pose.bones\["([^"]+)"\]') +"""RegExp for matching FCurve data paths.""" + + +@dataclasses.dataclass(unsafe_hash=True, frozen=True) +class PoseCreationParams: + armature_ob: bpy.types.Object + src_action: Optional[Action] + src_frame_nr: float + bone_names: FrozenSet[str] + new_asset_name: str + + +class UnresolvablePathError(ValueError): + """Raised when a data_path cannot be resolved to a current value.""" + + +@dataclasses.dataclass(unsafe_hash=True) +class PoseActionCreator: + """Create an Action that's suitable for marking as Asset. + + Does not mark as asset yet, nor does it add asset metadata. + """ + + params: PoseCreationParams + + # These were taken from Blender's Action baking code in `anim_utils.py`. + # Items are (name, array_length) tuples. + _bbone_props = [ + ("bbone_curveinx", None), + ("bbone_curveoutx", None), + ("bbone_curveinz", None), + ("bbone_curveoutz", None), + ("bbone_rollin", None), + ("bbone_rollout", None), + ("bbone_scalein", 3), + ("bbone_scaleout", 3), + ("bbone_easein", None), + ("bbone_easeout", None), + ] + + def create(self) -> Optional[Action]: + """Create a single-frame Action containing only the given bones, or None if no anim data was found.""" + + try: + dst_action = self._create_new_action() + self._store_pose(dst_action) + finally: + # Prevent next instantiations of this class from reusing pointers to + # bones. They may not be valid by then any more. + self._find_bone.cache_clear() + + if len(dst_action.fcurves) == 0: + bpy.data.actions.remove(dst_action) + return None + + return dst_action + + def _create_new_action(self) -> Action: + dst_action = bpy.data.actions.new(self.params.new_asset_name) + if self.params.src_action: + dst_action.id_root = self.params.src_action.id_root + dst_action.user_clear() # actions.new() sets users=1, but marking as asset also increments user count. 
+ return dst_action + + def _store_pose(self, dst_action: Action) -> None: + """Store the current pose into the given action.""" + self._store_bone_pose_parameters(dst_action) + self._store_animated_parameters(dst_action) + + def _store_bone_pose_parameters(self, dst_action: Action) -> None: + """Store loc/rot/scale/bbone values in the Action.""" + + for bone_name in sorted(self.params.bone_names): + self._store_location(dst_action, bone_name) + self._store_rotation(dst_action, bone_name) + self._store_scale(dst_action, bone_name) + self._store_bbone(dst_action, bone_name) + + def _store_animated_parameters(self, dst_action: Action) -> None: + """Store the current value of any animated bone properties.""" + if self.params.src_action is None: + return + + armature_ob = self.params.armature_ob + for fcurve in self.params.src_action.fcurves: + match = pose_bone_re.match(fcurve.data_path) + if not match: + # Not animating a bone property. + continue + + bone_name = match.group(1) + if bone_name not in self.params.bone_names: + # Bone is not our export set. + continue + + if dst_action.fcurves.find(fcurve.data_path, index=fcurve.array_index): + # This property is already handled by a previous _store_xxx() call. + continue + + # Only include in the pose if there is a key on this frame. + if not self._has_key_on_frame(fcurve): + continue + + try: + value = self._current_value(armature_ob, fcurve.data_path, fcurve.array_index) + except UnresolvablePathError: + # A once-animated property no longer exists. + continue + + dst_fcurve = dst_action.fcurves.new(fcurve.data_path, index=fcurve.array_index, action_group=bone_name) + dst_fcurve.keyframe_points.insert(self.params.src_frame_nr, value=value) + dst_fcurve.update() + + def _store_location(self, dst_action: Action, bone_name: str) -> None: + """Store bone location.""" + self._store_bone_array(dst_action, bone_name, "location", 3) + + def _store_rotation(self, dst_action: Action, bone_name: str) -> None: + """Store bone rotation given current rotation mode.""" + bone = self._find_bone(bone_name) + if bone.rotation_mode == "QUATERNION": + self._store_bone_array(dst_action, bone_name, "rotation_quaternion", 4) + elif bone.rotation_mode == "AXIS_ANGLE": + self._store_bone_array(dst_action, bone_name, "rotation_axis_angle", 4) + else: + self._store_bone_array(dst_action, bone_name, "rotation_euler", 3) + + def _store_scale(self, dst_action: Action, bone_name: str) -> None: + """Store bone scale.""" + self._store_bone_array(dst_action, bone_name, "scale", 3) + + def _store_bbone(self, dst_action: Action, bone_name: str) -> None: + """Store bendy-bone parameters.""" + for prop_name, array_length in self._bbone_props: + if array_length: + self._store_bone_array(dst_action, bone_name, prop_name, array_length) + else: + self._store_bone_property(dst_action, bone_name, prop_name) + + def _store_bone_array(self, dst_action: Action, bone_name: str, property_name: str, array_length: int) -> None: + """Store all elements of an array property.""" + for array_index in range(array_length): + self._store_bone_property(dst_action, bone_name, property_name, array_index) + + def _store_bone_property( + self, + dst_action: Action, + bone_name: str, + property_path: str, + array_index: int = -1, + ) -> None: + """Store the current value of a single bone property.""" + + bone = self._find_bone(bone_name) + value = self._current_value(bone, property_path, array_index) + + # Get the full 'pose.bones["bone_name"].blablabla' path suitable for FCurves. 
+ rna_path = bone.path_from_id(property_path) + + fcurve: Optional[FCurve] = dst_action.fcurves.find(rna_path, index=array_index) + if fcurve is None: + fcurve = dst_action.fcurves.new(rna_path, index=array_index, action_group=bone_name) + + fcurve.keyframe_points.insert(self.params.src_frame_nr, value=value) + fcurve.update() + + @classmethod + def _current_value(cls, datablock: bpy.types.ID, data_path: str, array_index: int) -> FCurveValue: + """Resolve an RNA path + array index to an actual value.""" + value_or_array = cls._path_resolve(datablock, data_path) + + # Both indices -1 and 0 are used for non-array properties. + # -1 cannot be used in arrays, whereas 0 can be used in both arrays and non-arrays. + + if array_index == -1: + return cast(FCurveValue, value_or_array) + + if array_index == 0: + value_or_array = cls._path_resolve(datablock, data_path) + try: + # MyPy doesn't understand this try/except is to determine the type. + value = value_or_array[array_index] # type: ignore + except TypeError: + # Not an array after all. + return cast(FCurveValue, value_or_array) + return cast(FCurveValue, value) + + # MyPy doesn't understand that array_index>0 implies this is indexable. + return cast(FCurveValue, value_or_array[array_index]) # type: ignore + + @staticmethod + def _path_resolve(datablock: bpy.types.ID, data_path: str) -> Union[FCurveValue, Iterable[FCurveValue]]: + """Wrapper for datablock.path_resolve(data_path). + + Raise UnresolvablePathError when the path cannot be resolved. + This is easier to deal with upstream than the generic ValueError raised + by Blender. + """ + try: + return datablock.path_resolve(data_path) # type: ignore + except ValueError as ex: + raise UnresolvablePathError(str(ex)) from ex + + @functools.lru_cache(maxsize=1024) + def _find_bone(self, bone_name: str) -> Bone: + """Find a bone by name. + + Assumes the named bone exists, as the bones this class handles comes + from the user's selection, and you can't select a non-existent bone. + """ + + bone: Bone = self.params.armature_ob.pose.bones[bone_name] + return bone + + def _has_key_on_frame(self, fcurve: FCurve) -> bool: + """Return True iff the FCurve has a key on the source frame.""" + + points = fcurve.keyframe_points + if not points: + return False + + frame_to_find = self.params.src_frame_nr + margin = 0.001 + high = len(points) - 1 + low = 0 + while low <= high: + mid = (high + low) // 2 + diff = points[mid].co.x - frame_to_find + if abs(diff) < margin: + return True + if diff < 0: + # Frame to find is bigger than the current middle. + low = mid + 1 + else: + # Frame to find is smaller than the current middle + high = mid - 1 + return False + + +def create_pose_asset( + params: PoseCreationParams, +) -> Optional[Action]: + """Create a single-frame Action containing only the pose of the given bones. + + DOES mark as asset, DOES NOT configure asset metadata. 
+ """ + + creator = PoseActionCreator(params) + pose_action = creator.create() + if pose_action is None: + return None + + pose_action.asset_mark() + pose_action.asset_generate_preview() + return pose_action + + +def create_pose_asset_from_context(context: Context, new_asset_name: str) -> Optional[Action]: + """Create Action asset from active object & selected bones.""" + + bones = context.selected_pose_bones_from_active_object + bone_names = {bone.name for bone in bones} + + params = PoseCreationParams( + context.object, + getattr(context.object.animation_data, "action", None), + context.scene.frame_current, + frozenset(bone_names), + new_asset_name, + ) + + return create_pose_asset(params) + + +def copy_fcurves( + dst_action: Action, + src_action: Action, + src_frame_nr: float, + bone_names: Set[str], +) -> int: + """Copy FCurves, returning number of curves copied.""" + num_fcurves_copied = 0 + for fcurve in src_action.fcurves: + match = pose_bone_re.match(fcurve.data_path) + if not match: + continue + + bone_name = match.group(1) + if bone_name not in bone_names: + continue + + # Check if there is a keyframe on this frame. + keyframe = find_keyframe(fcurve, src_frame_nr) + if keyframe is None: + continue + create_single_key_fcurve(dst_action, fcurve, keyframe) + num_fcurves_copied += 1 + return num_fcurves_copied + + +def create_single_key_fcurve(dst_action: Action, src_fcurve: FCurve, src_keyframe: Keyframe) -> FCurve: + """Create a copy of the source FCurve, but only for the given keyframe. + + Returns a new FCurve with just one keyframe. + """ + + dst_fcurve = copy_fcurve_without_keys(dst_action, src_fcurve) + copy_keyframe(dst_fcurve, src_keyframe) + return dst_fcurve + + +def copy_fcurve_without_keys(dst_action: Action, src_fcurve: FCurve) -> FCurve: + """Create a new FCurve and copy some properties.""" + + src_group_name = src_fcurve.group.name if src_fcurve.group else "" + dst_fcurve = dst_action.fcurves.new(src_fcurve.data_path, index=src_fcurve.array_index, action_group=src_group_name) + for propname in {"auto_smoothing", "color", "color_mode", "extrapolation"}: + setattr(dst_fcurve, propname, getattr(src_fcurve, propname)) + return dst_fcurve + + +def copy_keyframe(dst_fcurve: FCurve, src_keyframe: Keyframe) -> Keyframe: + """Copy a keyframe from one FCurve to the other.""" + + dst_keyframe = dst_fcurve.keyframe_points.insert( + src_keyframe.co.x, src_keyframe.co.y, options={'FAST'}, keyframe_type=src_keyframe.type + ) + + for propname in { + "amplitude", + "back", + "easing", + "handle_left", + "handle_left_type", + "handle_right", + "handle_right_type", + "interpolation", + "period", + }: + setattr(dst_keyframe, propname, getattr(src_keyframe, propname)) + dst_fcurve.update() + return dst_keyframe + + +def find_keyframe(fcurve: FCurve, frame: float) -> Optional[Keyframe]: + # Binary search adapted from https://pythonguides.com/python-binary-search/ + keyframes = fcurve.keyframe_points + low = 0 + high = len(keyframes) - 1 + mid = 0 + + # Accept any keyframe that's within 'epsilon' of the requested frame. + # This should account for rounding errors and the likes. 
+ epsilon = 1e-4 + frame_lowerbound = frame - epsilon + frame_upperbound = frame + epsilon + while low <= high: + mid = (high + low) // 2 + keyframe = keyframes[mid] + if keyframe.co.x < frame_lowerbound: + low = mid + 1 + elif keyframe.co.x > frame_upperbound: + high = mid - 1 + else: + return keyframe + return None + + +def assign_from_asset_browser(asset: Action, asset_browser_area: bpy.types.Area) -> None: + """Assign some things from the asset browser to the asset. + + This sets the current catalog ID, and in the future could include tags + from the active dynamic catalog, etc. + """ + + cat_id = asset_browser.active_catalog_id(asset_browser_area) + asset.asset_data.catalog_id = cat_id diff --git a/scripts/addons_core/pose_library/pose_usage.py b/scripts/addons_core/pose_library/pose_usage.py new file mode 100644 index 00000000000..1a85f56ec55 --- /dev/null +++ b/scripts/addons_core/pose_library/pose_usage.py @@ -0,0 +1,51 @@ +# SPDX-FileCopyrightText: 2021-2022 Blender Foundation +# +# SPDX-License-Identifier: GPL-2.0-or-later + +""" +Pose Library - usage functions. +""" + +from typing import Set +import re +import bpy + +from bpy.types import ( + Action, + Object, +) + + +def select_bones(arm_object: Object, action: Action, *, select: bool, flipped: bool) -> None: + pose_bone_re = re.compile(r'pose.bones\["([^"]+)"\]') + pose = arm_object.pose + + seen_bone_names: Set[str] = set() + + for fcurve in action.fcurves: + data_path: str = fcurve.data_path + match = pose_bone_re.match(data_path) + if not match: + continue + + bone_name = match.group(1) + + if bone_name in seen_bone_names: + continue + seen_bone_names.add(bone_name) + + if flipped: + bone_name = bpy.utils.flip_name(bone_name) + + try: + pose_bone = pose.bones[bone_name] + except KeyError: + continue + + pose_bone.bone.select = select + + +if __name__ == '__main__': + import doctest + + print(f"Test result: {doctest.testmod()}") diff --git a/scripts/addons_core/ui_translate/__init__.py b/scripts/addons_core/ui_translate/__init__.py new file mode 100644 index 00000000000..2c994b4ea7a --- /dev/null +++ b/scripts/addons_core/ui_translate/__init__.py @@ -0,0 +1,61 @@ +# SPDX-FileCopyrightText: 2012-2023 Blender Foundation +# +# SPDX-License-Identifier: GPL-2.0-or-later + +bl_info = { + "name": "Manage UI translations", + "author": "Bastien Montagne", + "version": (2, 0, 0), + "blender": (4, 0, 0), + "location": "Main \"File\" menu, text editor, any UI control", + "description": "Allows managing UI translations directly from Blender " + "(update main .po files, update scripts' translations, etc.)", + "warning": "Still in development, not all features are fully implemented yet!", + "doc_url": "https://developer.blender.org/docs/handbook/translating/translator_guide/", + "support": 'OFFICIAL', + "category": "System", +} + + +from . 
import ( + settings, + edit_translation, + update_repo, + update_addon, + update_ui, +) +if "bpy" in locals(): + import importlib + importlib.reload(settings) + importlib.reload(edit_translation) + importlib.reload(update_repo) + importlib.reload(update_addon) + importlib.reload(update_ui) + +import bpy + + +classes = settings.classes + edit_translation.classes + update_repo.classes + update_addon.classes + update_ui.classes + + +def register(): + for cls in classes: + bpy.utils.register_class(cls) + + bpy.types.WindowManager.i18n_update_settings = \ + bpy.props.PointerProperty(type=update_ui.I18nUpdateTranslationSettings) + + # Init addon's preferences (unfortunately, as we are using an external storage for the properties, + # the load/save user preferences process has no effect on them :( ). + if __name__ in bpy.context.preferences.addons: + pref = bpy.context.preferences.addons[__name__].preferences + import os + if os.path.isfile(pref.persistent_data_path): + pref._settings.load(pref.persistent_data_path, reset=True) + + +def unregister(): + for cls in classes: + bpy.utils.unregister_class(cls) + + del bpy.types.WindowManager.i18n_update_settings diff --git a/scripts/addons_core/ui_translate/edit_translation.py b/scripts/addons_core/ui_translate/edit_translation.py new file mode 100644 index 00000000000..f35b46c707f --- /dev/null +++ b/scripts/addons_core/ui_translate/edit_translation.py @@ -0,0 +1,408 @@ +# SPDX-FileCopyrightText: 2012-2022 Blender Foundation +# +# SPDX-License-Identifier: GPL-2.0-or-later + +import os +import shutil +if "bpy" in locals(): + import importlib + importlib.reload(settings) + importlib.reload(utils_i18n) +else: + import bpy + from bpy.types import Operator + from bpy.props import ( + BoolProperty, + EnumProperty, + StringProperty, + ) + from . import settings + from bl_i18n_utils import utils as utils_i18n + + +# A global cache for I18nMessages objects, as parsing po files takes a few seconds. 
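+# The cache is keyed by the po file path (see _get_messages below), so editing
+# several controls in the same language reuses one parsed I18nMessages object
+# instead of re-parsing the po file each time.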
+PO_CACHE = {} + + +def _get_messages(lang, fname): + if fname not in PO_CACHE: + PO_CACHE[fname] = utils_i18n.I18nMessages(uid=lang, kind='PO', key=fname, src=fname, settings=settings.settings) + return PO_CACHE[fname] + + +class UI_OT_i18n_edittranslation_update_mo(Operator): + """Try to "compile" given po file into relevant blender.mo file""" + """(WARNING: it will replace the official mo file in your user dir!)""" + bl_idname = "ui.i18n_edittranslation_update_mo" + bl_label = "Edit Translation Update Mo" + + # Operator Arguments + lang: StringProperty( + description="Current (translated) language", + options={'SKIP_SAVE'}, + ) + + po_file: StringProperty( + description="Path to the matching po file", + subtype='FILE_PATH', + options={'SKIP_SAVE'}, + ) + + clean_mo: BoolProperty( + description="Remove all local translation files, to be able to use the system ones again", + default=False, + options={'SKIP_SAVE'} + ) + # /End Operator Arguments + + def execute(self, context): + if self.clean_mo: + root = bpy.utils.user_resource('DATAFILES', path=settings.settings.MO_PATH_ROOT_RELATIVE) + if root: + shutil.rmtree(root) + elif not (self.lang and self.po_file): + return {'CANCELLED'} + else: + mo_dir = bpy.utils.user_resource( + 'DATAFILES', + path=settings.settings.MO_PATH_TEMPLATE_RELATIVE.format(self.lang), + create=True, + ) + mo_file = os.path.join(mo_dir, settings.settings.MO_FILE_NAME) + _get_messages(self.lang, self.po_file).write(kind='MO', dest=mo_file) + + bpy.ops.ui.reloadtranslation() + return {'FINISHED'} + + +class UI_OT_i18n_edittranslation(Operator): + """Translate the label and tooltip of the given property""" + bl_idname = "ui.edittranslation" + bl_label = "Edit Translation" + + # Operator Arguments + but_label: StringProperty( + description="Label of the control", + options={'SKIP_SAVE'}, + ) + + rna_label: StringProperty( + description="RNA-defined label of the control, if any", + options={'SKIP_SAVE'}, + ) + + enum_label: StringProperty( + description="Label of the enum item of the control, if any", + options={'SKIP_SAVE'}, + ) + + but_tip: StringProperty( + description="Tip of the control", + options={'SKIP_SAVE'}, + ) + + rna_tip: StringProperty( + description="RNA-defined tip of the control, if any", + options={'SKIP_SAVE'}, + ) + + enum_tip: StringProperty( + description="Tip of the enum item of the control, if any", + options={'SKIP_SAVE'}, + ) + + rna_struct: StringProperty( + description="Identifier of the RNA struct, if any", + options={'SKIP_SAVE'}, + ) + + rna_prop: StringProperty( + description="Identifier of the RNA property, if any", + options={'SKIP_SAVE'}, + ) + + rna_enum: StringProperty( + description="Identifier of the RNA enum item, if any", + options={'SKIP_SAVE'}, + ) + + rna_ctxt: StringProperty( + description="RNA context for label", + options={'SKIP_SAVE'}, + ) + + lang: StringProperty( + description="Current (translated) language", + options={'SKIP_SAVE'}, + ) + + po_file: StringProperty( + description="Path to the matching po file", + subtype='FILE_PATH', + options={'SKIP_SAVE'}, + ) + + # Found in po file. 
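+    # The "org_*" properties below mirror the editable ones above: they receive
+    # the original (untranslated) text found in the po file and are drawn
+    # read-only next to each translated field in draw().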
+ org_but_label: StringProperty( + description="Original label of the control", + options={'SKIP_SAVE'}, + ) + + org_rna_label: StringProperty( + description="Original RNA-defined label of the control, if any", + options={'SKIP_SAVE'}, + ) + + org_enum_label: StringProperty( + description="Original label of the enum item of the control, if any", + options={'SKIP_SAVE'}, + ) + + org_but_tip: StringProperty( + description="Original tip of the control", + options={'SKIP_SAVE'}, + ) + + org_rna_tip: StringProperty( + description="Original RNA-defined tip of the control, if any", options={'SKIP_SAVE'} + ) + + org_enum_tip: StringProperty( + description="Original tip of the enum item of the control, if any", + options={'SKIP_SAVE'}, + ) + + flag_items = ( + ('FUZZY', "Fuzzy", "Message is marked as fuzzy in po file"), + ('ERROR', "Error", "Some error occurred with this message"), + ) + + but_label_flags: EnumProperty( + description="Flags about the label of the button", + items=flag_items, + options={'SKIP_SAVE', 'ENUM_FLAG'}, + ) + + rna_label_flags: EnumProperty( + description="Flags about the RNA-defined label of the button", + items=flag_items, + options={'SKIP_SAVE', 'ENUM_FLAG'}, + ) + + enum_label_flags: EnumProperty( + description="Flags about the RNA enum item label of the button", + items=flag_items, + options={'SKIP_SAVE', 'ENUM_FLAG'}, + ) + + but_tip_flags: EnumProperty( + description="Flags about the tip of the button", + items=flag_items, + options={'SKIP_SAVE', 'ENUM_FLAG'}, + ) + + rna_tip_flags: EnumProperty( + description="Flags about the RNA-defined tip of the button", + items=flag_items, + options={'SKIP_SAVE', 'ENUM_FLAG'}, + ) + + enum_tip_flags: EnumProperty( + description="Flags about the RNA enum item tip of the button", + items=flag_items, + options={'SKIP_SAVE', 'ENUM_FLAG'}, + ) + + stats_str: StringProperty( + description="Stats from opened po", options={'SKIP_SAVE'}) + + update_po: BoolProperty( + description="Update po file, try to rebuild mo file, and refresh Blender's UI", + default=False, + options={'SKIP_SAVE'}, + ) + + update_mo: BoolProperty( + description="Try to rebuild mo file, and refresh Blender's UI", + default=False, + options={'SKIP_SAVE'}, + ) + + clean_mo: BoolProperty( + description="Remove all local translation files, to be able to use the system ones again", + default=False, + options={'SKIP_SAVE'}, + ) + # /End Operator Arguments + + def execute(self, context): + if not hasattr(self, "msgmap"): + self.report('ERROR', "invoke() needs to be called before execute()") + return {'CANCELLED'} + + msgs = _get_messages(self.lang, self.po_file) + done_keys = set() + for mmap in self.msgmap.values(): + if 'ERROR' in getattr(self, mmap["msg_flags"]): + continue + k = mmap["key"] + if k not in done_keys and len(k) == 1: + k = tuple(k)[0] + msgs.msgs[k].msgstr = getattr(self, mmap["msgstr"]) + msgs.msgs[k].is_fuzzy = 'FUZZY' in getattr(self, mmap["msg_flags"]) + done_keys.add(k) + + if self.update_po: + # Try to overwrite .po file, may fail if there are no permissions. + try: + msgs.write(kind='PO', dest=self.po_file) + except Exception as e: + self.report('ERROR', "Could not write to po file ({})".format(str(e))) + # Always invalidate reverse messages cache afterward! 
+ msgs.invalidate_reverse_cache() + if self.update_mo: + lang = os.path.splitext(os.path.basename(self.po_file))[0] + bpy.ops.ui.i18n_edittranslation_update_mo(po_file=self.po_file, lang=lang) + elif self.clean_mo: + bpy.ops.ui.i18n_edittranslation_update_mo(clean_mo=True) + return {'FINISHED'} + + def invoke(self, context, event): + self.msgmap = { + "but_label": { + "msgstr": "but_label", "msgid": "org_but_label", "msg_flags": "but_label_flags", "key": set()}, + "rna_label": { + "msgstr": "rna_label", "msgid": "org_rna_label", "msg_flags": "rna_label_flags", "key": set()}, + "enum_label": { + "msgstr": "enum_label", "msgid": "org_enum_label", "msg_flags": "enum_label_flags", "key": set()}, + "but_tip": { + "msgstr": "but_tip", "msgid": "org_but_tip", "msg_flags": "but_tip_flags", "key": set()}, + "rna_tip": { + "msgstr": "rna_tip", "msgid": "org_rna_tip", "msg_flags": "rna_tip_flags", "key": set()}, + "enum_tip": { + "msgstr": "enum_tip", "msgid": "org_enum_tip", "msg_flags": "enum_tip_flags", "key": set()}, + } + + msgs = _get_messages(self.lang, self.po_file) + msgs.find_best_messages_matches(self, self.msgmap, self.rna_ctxt, self.rna_struct, self.rna_prop, self.rna_enum) + msgs.update_info() + self.stats_str = "{}: {} messages, {} translated.".format(os.path.basename(self.po_file), msgs.nbr_msgs, + msgs.nbr_trans_msgs) + + for mmap in self.msgmap.values(): + k = tuple(mmap["key"]) + if k: + if len(k) == 1: + k = k[0] + ctxt, msgid = k + setattr(self, mmap["msgstr"], msgs.msgs[k].msgstr) + setattr(self, mmap["msgid"], msgid) + if msgs.msgs[k].is_fuzzy: + setattr(self, mmap["msg_flags"], {'FUZZY'}) + else: + setattr(self, mmap["msgid"], + "ERROR: Button label “{}” matches several messages in po file ({})!" + "".format(self.but_label, k)) + setattr(self, mmap["msg_flags"], {'ERROR'}) + else: + setattr(self, mmap["msgstr"], "") + setattr(self, mmap["msgid"], "") + + wm = context.window_manager + return wm.invoke_props_dialog(self, width=600) + + def draw(self, context): + layout = self.layout + layout.label(text=self.stats_str) + src, _a, _b = bpy.utils.make_rna_paths(self.rna_struct, self.rna_prop, self.rna_enum) + if src: + layout.label(text=" RNA Path: bpy.types." 
+ src) + if self.rna_ctxt: + layout.label(text=" RNA Context: " + self.rna_ctxt) + + if self.org_but_label or self.org_rna_label or self.org_enum_label: + # XXX Can't use box, labels are not enough readable in them :/ + box = layout.box() + box.label(text="Labels:") + split = box.split(factor=0.15) + col1 = split.column() + col2 = split.column() + if self.org_but_label: + col1.label(text="Button Label:") + row = col2.row() + row.enabled = False + if 'ERROR' in self.but_label_flags: + row.alert = True + else: + col1.prop_enum(self, "but_label_flags", 'FUZZY', text="Fuzzy") + col2.prop(self, "but_label", text="") + row.prop(self, "org_but_label", text="") + if self.org_rna_label: + col1.label(text="RNA Label:") + row = col2.row() + row.enabled = False + if 'ERROR' in self.rna_label_flags: + row.alert = True + else: + col1.prop_enum(self, "rna_label_flags", 'FUZZY', text="Fuzzy") + col2.prop(self, "rna_label", text="") + row.prop(self, "org_rna_label", text="") + if self.org_enum_label: + col1.label(text="Enum Item Label:") + row = col2.row() + row.enabled = False + if 'ERROR' in self.enum_label_flags: + row.alert = True + else: + col1.prop_enum(self, "enum_label_flags", 'FUZZY', text="Fuzzy") + col2.prop(self, "enum_label", text="") + row.prop(self, "org_enum_label", text="") + + if self.org_but_tip or self.org_rna_tip or self.org_enum_tip: + # XXX Can't use box, labels are not enough readable in them :/ + box = layout.box() + box.label(text="Tool Tips:") + split = box.split(factor=0.15) + col1 = split.column() + col2 = split.column() + if self.org_but_tip: + col1.label(text="Button Tip:") + row = col2.row() + row.enabled = False + if 'ERROR' in self.but_tip_flags: + row.alert = True + else: + col1.prop_enum(self, "but_tip_flags", 'FUZZY', text="Fuzzy") + col2.prop(self, "but_tip", text="") + row.prop(self, "org_but_tip", text="") + if self.org_rna_tip: + col1.label(text="RNA Tip:") + row = col2.row() + row.enabled = False + if 'ERROR' in self.rna_tip_flags: + row.alert = True + else: + col1.prop_enum(self, "rna_tip_flags", 'FUZZY', text="Fuzzy") + col2.prop(self, "rna_tip", text="") + row.prop(self, "org_rna_tip", text="") + if self.org_enum_tip: + col1.label(text="Enum Item Tip:") + row = col2.row() + row.enabled = False + if 'ERROR' in self.enum_tip_flags: + row.alert = True + else: + col1.prop_enum(self, "enum_tip_flags", 'FUZZY', text="Fuzzy") + col2.prop(self, "enum_tip", text="") + row.prop(self, "org_enum_tip", text="") + + row = layout.row() + row.prop(self, "update_po", text="Save to PO File", toggle=True) + row.prop(self, "update_mo", text="Rebuild MO File", toggle=True) + row.prop(self, "clean_mo", text="Erase Local MO files", toggle=True) + + +classes = ( + UI_OT_i18n_edittranslation_update_mo, + UI_OT_i18n_edittranslation, +) diff --git a/scripts/addons_core/ui_translate/settings.py b/scripts/addons_core/ui_translate/settings.py new file mode 100644 index 00000000000..23d3b23aa91 --- /dev/null +++ b/scripts/addons_core/ui_translate/settings.py @@ -0,0 +1,194 @@ +# SPDX-FileCopyrightText: 2013-2022 Blender Foundation +# +# SPDX-License-Identifier: GPL-2.0-or-later + +import os + +if "bpy" in locals(): + import importlib + importlib.reload(settings_i18n) +else: + import bpy + from bpy.types import ( + Operator, + AddonPreferences, + ) + from bpy.props import ( + BoolProperty, + StringProperty, + ) + from bl_i18n_utils import settings as settings_i18n + + +settings = settings_i18n.I18nSettings() + + +# Operators ################################################################### + 
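+# The two operators below load/save the shared I18nSettings object from/to a
+# JSON file; the preferences panel calls into them both directly (passing
+# persistent_data_path) and through the "Save/Load Persistent" file-selector buttons.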
+class UI_OT_i18n_settings_load(Operator): + """Load translations' settings from a persistent JSon file""" + bl_idname = "ui.i18n_settings_load" + bl_label = "I18n Load Settings" + bl_option = {'REGISTER'} + + # Operator Arguments + filepath: StringProperty( + subtype='FILE_PATH', + description="Path to the saved settings file", + ) + + filter_glob: StringProperty( + default="*.json", + options={'HIDDEN'} + ) + # /End Operator Arguments + + def invoke(self, context, event): + if not self.properties.is_property_set("filepath"): + context.window_manager.fileselect_add(self) + return {'RUNNING_MODAL'} + else: + return self.execute(context) + + def execute(self, context): + if not (self.filepath and settings): + return {'CANCELLED'} + settings.load(self.filepath, reset=True) + return {'FINISHED'} + + +class UI_OT_i18n_settings_save(Operator): + """Save translations' settings in a persistent JSon file""" + bl_idname = "ui.i18n_settings_save" + bl_label = "I18n Save Settings" + bl_option = {'REGISTER'} + + # Operator Arguments + filepath: StringProperty( + description="Path to the saved settings file", + subtype='FILE_PATH', + ) + + filter_glob: StringProperty( + default="*.json", + options={'HIDDEN'}, + ) + # /End Operator Arguments + + def invoke(self, context, event): + if not self.properties.is_property_set("filepath"): + context.window_manager.fileselect_add(self) + return {'RUNNING_MODAL'} + else: + return self.execute(context) + + def execute(self, context): + if not (self.filepath and settings): + return {'CANCELLED'} + settings.save(self.filepath) + return {'FINISHED'} + + +# Addon Preferences ########################################################### + +def _setattr(self, name, val): + print(self, name, val) + setattr(self, name, val) + + +class UI_AP_i18n_settings(AddonPreferences): + bl_idname = __name__.split(".")[0] # We want "top" module name! + bl_option = {'REGISTER'} + + _settings = settings + + WARN_MSGID_NOT_CAPITALIZED: BoolProperty( + name="Warn Msgid Not Capitalized", + description="Warn about messages not starting by a capitalized letter (with a few allowed exceptions!)", + default=True, + get=lambda self: self._settings.WARN_MSGID_NOT_CAPITALIZED, + set=lambda self, val: _setattr(self._settings, "WARN_MSGID_NOT_CAPITALIZED", val), + ) + + FRIBIDI_LIB: StringProperty( + name="Fribidi Library", + description="The FriBidi C compiled library (.so under Linux, .dll under windows...), you’ll likely have " + "to edit it if you’re under Windows, e.g. 
using the one included in Blender libraries repository", + subtype='FILE_PATH', + default="libfribidi.so.0", + get=lambda self: self._settings.FRIBIDI_LIB, + set=lambda self, val: setattr(self._settings, "FRIBIDI_LIB", val), + ) + + SOURCE_DIR: StringProperty( + name="Source Root", + description="The Blender source root path", + subtype='FILE_PATH', + default="blender", + get=lambda self: self._settings.SOURCE_DIR, + set=lambda self, val: setattr(self._settings, "SOURCE_DIR", val), + ) + + I18N_DIR: StringProperty( + name="Translation Root", + description="The bf-translation repository", + subtype='FILE_PATH', + default="i18n", + get=lambda self: self._settings.I18N_DIR, + set=lambda self, val: setattr(self._settings, "I18N_DIR", val), + ) + + SPELL_CACHE: StringProperty( + name="Spell Cache", + description="A cache storing validated msgids, to avoid re-spellchecking them", + subtype='FILE_PATH', + default=os.path.join("/tmp", ".spell_cache"), + get=lambda self: self._settings.SPELL_CACHE, + set=lambda self, val: setattr(self._settings, "SPELL_CACHE", val), + ) + + PY_SYS_PATHS: StringProperty( + name="Import Paths", + description="Additional paths to add to sys.path (';' separated)", + default="", + get=lambda self: self._settings.PY_SYS_PATHS, + set=lambda self, val: setattr(self._settings, "PY_SYS_PATHS", val), + ) + + persistent_data_path: StringProperty( + name="Persistent Data Path", + description="The name of a json file storing those settings (unfortunately, Blender's system " + "does not work here)", + subtype='FILE_PATH', + default=os.path.join("ui_translate_settings.json"), + ) + _is_init = False + + def draw(self, context): + layout = self.layout + layout.label(text="WARNING: preferences are lost when add-on is disabled, be sure to use \"Save Persistent\" " + "if you want to keep your settings!") + layout.prop(self, "WARN_MSGID_NOT_CAPITALIZED") + layout.prop(self, "FRIBIDI_LIB") + layout.prop(self, "SOURCE_DIR") + layout.prop(self, "I18N_DIR") + layout.prop(self, "SPELL_CACHE") + layout.prop(self, "PY_SYS_PATHS") + + layout.separator() + split = layout.split(factor=0.75) + col = split.column() + col.prop(self, "persistent_data_path") + row = col.row() + row.operator("ui.i18n_settings_save", text="Save").filepath = self.persistent_data_path + row.operator("ui.i18n_settings_load", text="Load").filepath = self.persistent_data_path + col = split.column() + col.operator("ui.i18n_settings_save", text="Save Persistent To...") + col.operator("ui.i18n_settings_load", text="Load Persistent From...") + + +classes = ( + UI_OT_i18n_settings_load, + UI_OT_i18n_settings_save, + UI_AP_i18n_settings, +) diff --git a/scripts/addons_core/ui_translate/update_addon.py b/scripts/addons_core/ui_translate/update_addon.py new file mode 100644 index 00000000000..dcf47321eaa --- /dev/null +++ b/scripts/addons_core/ui_translate/update_addon.py @@ -0,0 +1,377 @@ +# SPDX-FileCopyrightText: 2013-2022 Blender Foundation +# +# SPDX-License-Identifier: GPL-2.0-or-later + +if "bpy" in locals(): + import importlib + importlib.reload(settings) + importlib.reload(utils_i18n) + importlib.reload(bl_extract_messages) +else: + import bpy + from bpy.types import Operator + from bpy.props import ( + BoolProperty, + EnumProperty, + StringProperty, + ) + from . 
import settings + from bl_i18n_utils import utils as utils_i18n + from bl_i18n_utils import bl_extract_messages + +from bpy.app.translations import pgettext_iface as iface_ +import addon_utils + +import io +import os +import shutil +import subprocess +import tempfile + + +# Helpers ################################################################### + +def validate_module(op, context): + module_name = op.module_name + addon = getattr(context, "active_addon", None) + if addon: + module_name = addon.module + + if not module_name: + op.report({'ERROR'}, "No add-on module given!") + return None, None + + mod = utils_i18n.enable_addons(addons={module_name}, check_only=True) + if not mod: + op.report({'ERROR'}, "Add-on '{}' not found!".format(module_name)) + return None, None + return module_name, mod[0] + + +# As it's a bit time heavy, I'd like to cache that enum, but this does not seem easy to do! :/ +# That "self" is not the same thing as the "self" that operators get in their invoke/execute/etc. funcs... :( +_cached_enum_addons = [] + + +def enum_addons(self, context): + global _cached_enum_addons + setts = getattr(self, "settings", settings.settings) + if not _cached_enum_addons: + for mod in addon_utils.modules(module_cache=addon_utils.addons_fake_modules): + mod_info = addon_utils.module_bl_info(mod) + # Skip OFFICIAL addons, they are already translated in main i18n system (together with Blender itself). + if mod_info["support"] in {'OFFICIAL'}: + continue + src = mod.__file__ + if src.endswith("__init__.py"): + src = os.path.dirname(src) + has_translation, _ = utils_i18n.I18n.check_py_module_has_translations(src, setts) + name = mod_info["name"] + if has_translation: + name = name + " *" + _cached_enum_addons.append((mod.__name__, name, mod_info["description"])) + _cached_enum_addons.sort(key=lambda i: i[1]) + return _cached_enum_addons + + +# Operators ################################################################### + +# This one is a helper one, as we sometimes need another invoke function (like e.g. file selection)... 
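+# Typical use (see update_ui.py): a UI button invokes this wrapper with op_id
+# pointing at the real operator, e.g.
+#   op = row.operator("ui.i18n_addon_translation_invoke", text="Export PO...")
+#   op.op_id = "ui.i18n_addon_translation_export"
+# The user then picks the add-on in a search popup, and the chosen operator is
+# run with module_name already filled in.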
+class UI_OT_i18n_addon_translation_invoke(Operator): + """Wrapper operator which will invoke given op after setting its module_name""" + bl_idname = "ui.i18n_addon_translation_invoke" + bl_label = "Update I18n Add-on" + bl_property = "module_name" + + # Operator Arguments + module_name: EnumProperty( + name="Add-on", + description="Add-on to process", + items=enum_addons, + options=set(), + ) + op_id: StringProperty( + name="Operator Name", + description="Name (id) of the operator to invoke", + ) + # /End Operator Arguments + + def invoke(self, context, event): + global _cached_enum_addons + _cached_enum_addons[:] = [] + context.window_manager.invoke_search_popup(self) + return {'RUNNING_MODAL'} + + def execute(self, context): + global _cached_enum_addons + _cached_enum_addons[:] = [] + if not self.op_id: + return {'CANCELLED'} + op = bpy.ops + for item in self.op_id.split('.'): + op = getattr(op, item, None) + if op is None: + return {'CANCELLED'} + return op('INVOKE_DEFAULT', module_name=self.module_name) + + +class UI_OT_i18n_addon_translation_update(Operator): + """Update given add-on's translation data (found as a py tuple in the add-on's source code)""" + bl_idname = "ui.i18n_addon_translation_update" + bl_label = "Update I18n Add-on" + + # Operator Arguments + module_name: EnumProperty( + name="Add-on", + description="Add-on to process", + items=enum_addons, + options=set() + ) + # /End Operator Arguments + + def execute(self, context): + global _cached_enum_addons + _cached_enum_addons[:] = [] + if not hasattr(self, "settings"): + self.settings = settings.settings + i18n_sett = context.window_manager.i18n_update_settings + + module_name, mod = validate_module(self, context) + + # Generate addon-specific messages (no need for another blender instance here, this should not have any + # influence over the final result). + pot = bl_extract_messages.dump_addon_messages(module_name, False, self.settings) + + # Now (try to) get current i18n data from the addon... + path = mod.__file__ + if path.endswith("__init__.py"): + path = os.path.dirname(path) + + trans = utils_i18n.I18n(kind='PY', src=path, settings=self.settings) + + uids = set() + for lng in i18n_sett.langs: + if lng.uid in self.settings.IMPORT_LANGUAGES_SKIP: + print("Skipping {} language ({}), edit settings if you want to enable it.".format(lng.name, lng.uid)) + continue + if not lng.use: + print("Skipping {} language ({}).".format(lng.name, lng.uid)) + continue + uids.add(lng.uid) + # For now, add to processed uids all those not found in "official" list, minus "tech" ones. + uids |= (trans.trans.keys() - {lng.uid for lng in i18n_sett.langs} - + {self.settings.PARSER_TEMPLATE_ID, self.settings.PARSER_PY_ID}) + + # And merge! + for uid in uids: + if uid not in trans.trans: + trans.trans[uid] = utils_i18n.I18nMessages(uid=uid, settings=self.settings) + trans.trans[uid].update(pot, keep_old_commented=False) + trans.trans[self.settings.PARSER_TEMPLATE_ID] = pot + + # For now we write all languages found in this trans! 
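+        # Writing with kind='PY' serializes every language in trans.trans back
+        # into the add-on's source as its translations tuple (the py tuple
+        # mentioned in this operator's docstring), template included.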
+ trans.write(kind='PY') + + return {'FINISHED'} + + +class UI_OT_i18n_addon_translation_import(Operator): + """Import given add-on's translation data from PO files""" + bl_idname = "ui.i18n_addon_translation_import" + bl_label = "I18n Add-on Import" + + # Operator Arguments + module_name: EnumProperty( + name="Add-on", + description="Add-on to process", options=set(), + items=enum_addons, + ) + + directory: StringProperty( + subtype='FILE_PATH', maxlen=1024, + options={'HIDDEN', 'SKIP_SAVE'} + ) + # /End Operator Arguments + + def _dst(self, trans, path, uid, kind): + if kind == 'PO': + if uid == self.settings.PARSER_TEMPLATE_ID: + return os.path.join(self.directory, "blender.pot") + path = os.path.join(self.directory, uid) + if os.path.isdir(path): + return os.path.join(path, uid + ".po") + return path + ".po" + elif kind == 'PY': + return trans._dst(trans, path, uid, kind) + return path + + def invoke(self, context, event): + global _cached_enum_addons + _cached_enum_addons[:] = [] + if not hasattr(self, "settings"): + self.settings = settings.settings + module_name, mod = validate_module(self, context) + if mod: + self.directory = os.path.dirname(mod.__file__) + self.module_name = module_name + context.window_manager.fileselect_add(self) + return {'RUNNING_MODAL'} + + def execute(self, context): + global _cached_enum_addons + _cached_enum_addons[:] = [] + if not hasattr(self, "settings"): + self.settings = settings.settings + i18n_sett = context.window_manager.i18n_update_settings + + module_name, mod = validate_module(self, context) + if not (module_name and mod): + return {'CANCELLED'} + + path = mod.__file__ + if path.endswith("__init__.py"): + path = os.path.dirname(path) + + trans = utils_i18n.I18n(kind='PY', src=path, settings=self.settings) + + # Now search given dir, to find po's matching given languages... + # Mapping po_uid: po_file. + po_files = dict(utils_i18n.get_po_files_from_dir(self.directory)) + + # Note: uids in i18n_sett.langs and addon's py code should be the same (both taken from the locale's languages + # file). So we just try to find the best match in po's for each enabled uid. 
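+        # Illustrative example (matching rules are up to find_best_isocode_matches):
+        # an enabled "fr_FR" entry would pick up "fr_FR.po" from the selected
+        # directory, or fall back to the closest ISO match such as "fr.po".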
+ for lng in i18n_sett.langs: + if lng.uid in self.settings.IMPORT_LANGUAGES_SKIP: + print("Skipping {} language ({}), edit settings if you want to enable it.".format(lng.name, lng.uid)) + continue + if not lng.use: + print("Skipping {} language ({}).".format(lng.name, lng.uid)) + continue + uid = lng.uid + po_uid = utils_i18n.find_best_isocode_matches(uid, po_files.keys()) + if not po_uid: + print("Skipping {} language, no PO file found for it ({}).".format(lng.name, uid)) + continue + po_uid = po_uid[0] + msgs = utils_i18n.I18nMessages(uid=uid, kind='PO', key=uid, src=po_files[po_uid], settings=self.settings) + if uid in trans.trans: + trans.trans[uid].merge(msgs, replace=True) + else: + trans.trans[uid] = msgs + + trans.write(kind='PY') + + return {'FINISHED'} + + +class UI_OT_i18n_addon_translation_export(Operator): + """Export given add-on's translation data as PO files""" + + bl_idname = "ui.i18n_addon_translation_export" + bl_label = "I18n Add-on Export" + + # Operator Arguments + module_name: EnumProperty( + name="Add-on", + description="Add-on to process", + items=enum_addons, + options=set() + ) + + use_export_pot: BoolProperty( + name="Export POT", + description="Export (generate) a POT file too", + default=True, + ) + + use_update_existing: BoolProperty( + name="Update Existing", + description="Update existing po files, if any, instead of overwriting them", + default=True, + ) + + directory: StringProperty( + subtype='FILE_PATH', maxlen=1024, + options={'HIDDEN', 'SKIP_SAVE'} + ) + # /End Operator Arguments + + def _dst(self, trans, path, uid, kind): + if kind == 'PO': + if uid == self.settings.PARSER_TEMPLATE_ID: + return os.path.join(self.directory, "blender.pot") + path = os.path.join(self.directory, uid) + if os.path.isdir(path): + return os.path.join(path, uid + ".po") + return path + ".po" + elif kind == 'PY': + return trans._dst(trans, path, uid, kind) + return path + + def invoke(self, context, event): + global _cached_enum_addons + _cached_enum_addons[:] = [] + if not hasattr(self, "settings"): + self.settings = settings.settings + module_name, mod = validate_module(self, context) + if mod: + self.directory = os.path.dirname(mod.__file__) + self.module_name = module_name + context.window_manager.fileselect_add(self) + return {'RUNNING_MODAL'} + + def execute(self, context): + global _cached_enum_addons + _cached_enum_addons[:] = [] + if not hasattr(self, "settings"): + self.settings = settings.settings + i18n_sett = context.window_manager.i18n_update_settings + + module_name, mod = validate_module(self, context) + if not (module_name and mod): + return {'CANCELLED'} + + path = mod.__file__ + if path.endswith("__init__.py"): + path = os.path.dirname(path) + + trans = utils_i18n.I18n(kind='PY', src=path, settings=self.settings) + trans.dst = self._dst + + uids = [self.settings.PARSER_TEMPLATE_ID] if self.use_export_pot else [] + for lng in i18n_sett.langs: + if lng.uid in self.settings.IMPORT_LANGUAGES_SKIP: + print("Skipping {} language ({}), edit settings if you want to enable it.".format(lng.name, lng.uid)) + continue + if not lng.use: + print("Skipping {} language ({}).".format(lng.name, lng.uid)) + continue + translation_keys = {k for k in trans.trans.keys() + if k != self.settings.PARSER_TEMPLATE_ID} + uid = utils_i18n.find_best_isocode_matches(lng.uid, translation_keys) + if uid: + uids.append(uid[0]) + + # Try to update existing POs instead of overwriting them, if asked to do so! 
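+        # "Updating" here means: load the po already on disk, sync it against the
+        # current template (PARSER_TEMPLATE_ID) and put the result back into
+        # trans.trans, so translations that already exist on disk are preserved.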
+ if self.use_update_existing: + for uid in uids: + if uid == self.settings.PARSER_TEMPLATE_ID: + continue + path = trans.dst(trans, trans.src[uid], uid, 'PO') + if not os.path.isfile(path): + continue + msgs = utils_i18n.I18nMessages(kind='PO', src=path, settings=self.settings) + msgs.update(trans.trans[self.settings.PARSER_TEMPLATE_ID]) + trans.trans[uid] = msgs + + trans.write(kind='PO', langs=set(uids)) + + return {'FINISHED'} + + +classes = ( + UI_OT_i18n_addon_translation_invoke, + UI_OT_i18n_addon_translation_update, + UI_OT_i18n_addon_translation_import, + UI_OT_i18n_addon_translation_export, +) diff --git a/scripts/addons_core/ui_translate/update_repo.py b/scripts/addons_core/ui_translate/update_repo.py new file mode 100644 index 00000000000..e634ec27864 --- /dev/null +++ b/scripts/addons_core/ui_translate/update_repo.py @@ -0,0 +1,256 @@ +# SPDX-FileCopyrightText: 2013-2023 Blender Foundation +# +# SPDX-License-Identifier: GPL-2.0-or-later + +if "bpy" in locals(): + import importlib + importlib.reload(settings) + importlib.reload(utils_i18n) + importlib.reload(utils_languages_menu) +else: + import bpy + from bpy.types import Operator + from bpy.props import ( + BoolProperty, + EnumProperty, + ) + from . import settings + from bl_i18n_utils import utils as utils_i18n + from bl_i18n_utils import utils_languages_menu + +import concurrent.futures +import io +import os +import shutil +import subprocess +import tempfile + + +# Operators ################################################################### + +def i18n_updatetranslation_work_repo_callback(pot, lng, settings): + if not lng['use']: + return + if os.path.isfile(lng['po_path']): + po = utils_i18n.I18nMessages(uid=lng['uid'], kind='PO', src=lng['po_path'], settings=settings) + po.update(pot) + else: + po = pot + po.write(kind="PO", dest=lng['po_path']) + print("{} PO written!".format(lng['uid'])) + + +class UI_OT_i18n_updatetranslation_work_repo(Operator): + """Update i18n working repository (po files)""" + bl_idname = "ui.i18n_updatetranslation_work_repo" + bl_label = "Update I18n Work Repository" + + use_skip_pot_gen: BoolProperty( + name="Skip POT", + description="Skip POT file generation", + default=False, + ) + + def execute(self, context): + if not hasattr(self, "settings"): + self.settings = settings.settings + i18n_sett = context.window_manager.i18n_update_settings + self.settings.FILE_NAME_POT = i18n_sett.pot_path + + context.window_manager.progress_begin(0, len(i18n_sett.langs) + 1) + context.window_manager.progress_update(0) + if not self.use_skip_pot_gen: + env = os.environ.copy() + env["ASAN_OPTIONS"] = "exitcode=0:" + os.environ.get("ASAN_OPTIONS", "") + # Generate base pot from RNA messages (we use another blender instance here, to be able to perfectly + # control our environment (factory startup, specific addons enabled/disabled...)). + # However, we need to export current user settings about this addon! + cmmd = ( + bpy.app.binary_path, + "--background", + "--factory-startup", + "--python", + os.path.join(os.path.dirname(utils_i18n.__file__), "bl_extract_messages.py"), + "--", + "--no_checks", + "--settings", + self.settings.to_json(), + ) + # Not working (UI is not refreshed...). 
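+            # (Presumably because report() output only becomes visible once the
+            # operator returns, while this operator blocks on the subprocess below;
+            # the window-manager progress counter is used as feedback instead.)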
+ #self.report({'INFO'}, "Extracting messages, this will take some time...") + context.window_manager.progress_update(1) + ret = subprocess.run(cmmd, env=env) + if ret.returncode != 0: + self.report({'ERROR'}, "Message extraction process failed!") + context.window_manager.progress_end() + return {'CANCELLED'} + + # Now we should have a valid POT file, we have to merge it in all languages po's... + with concurrent.futures.ProcessPoolExecutor() as exctr: + pot = utils_i18n.I18nMessages(kind='PO', src=self.settings.FILE_NAME_POT, settings=self.settings) + num_langs = len(i18n_sett.langs) + for progress, _ in enumerate(exctr.map(i18n_updatetranslation_work_repo_callback, + (pot,) * num_langs, + [dict(lng.items()) for lng in i18n_sett.langs], + (self.settings,) * num_langs, + chunksize=4)): + context.window_manager.progress_update(progress + 2) + context.window_manager.progress_end() + return {'FINISHED'} + + def invoke(self, context, event): + wm = context.window_manager + return wm.invoke_props_dialog(self) + + +def i18n_cleanuptranslation_work_repo_callback(lng, settings): + if not lng['use']: + print("Skipping {} language ({}).".format(lng['name'], lng['uid'])) + return + po = utils_i18n.I18nMessages(uid=lng['uid'], kind='PO', src=lng['po_path'], settings=settings) + errs = po.check(fix=True) + cleanedup_commented = po.clean_commented() + po.write(kind="PO", dest=lng['po_path']) + print("Processing {} language ({}).\n" + "Cleaned up {} commented messages.\n".format(lng['name'], lng['uid'], cleanedup_commented) + + ("Errors in this po, solved as best as possible!\n\t" + "\n\t".join(errs) if errs else "") + "\n") + + +class UI_OT_i18n_cleanuptranslation_work_repo(Operator): + """Clean up i18n working repository (po files)""" + bl_idname = "ui.i18n_cleanuptranslation_work_repo" + bl_label = "Clean up I18n Work Repository" + + def execute(self, context): + if not hasattr(self, "settings"): + self.settings = settings.settings + i18n_sett = context.window_manager.i18n_update_settings + # 'DEFAULT' and en_US are always valid, fully-translated "languages"! 
+ stats = {"DEFAULT": 1.0, "en_US": 1.0} + + context.window_manager.progress_begin(0, len(i18n_sett.langs) + 1) + context.window_manager.progress_update(0) + with concurrent.futures.ProcessPoolExecutor() as exctr: + num_langs = len(i18n_sett.langs) + for progress, _ in enumerate(exctr.map(i18n_cleanuptranslation_work_repo_callback, + [dict(lng.items()) for lng in i18n_sett.langs], + (self.settings,) * num_langs, + chunksize=4)): + context.window_manager.progress_update(progress + 1) + + context.window_manager.progress_end() + + return {'FINISHED'} + + +def i18n_updatetranslation_blender_repo_callback(lng, settings): + reports = [] + if lng['uid'] in settings.IMPORT_LANGUAGES_SKIP: + reports.append( + "Skipping {} language ({}), edit settings if you want to enable it.".format( + lng['name'], lng['uid'])) + return lng['uid'], 0.0, reports + if not lng['use']: + reports.append("Skipping {} language ({}).".format(lng['name'], lng['uid'])) + return lng['uid'], 0.0, reports + po = utils_i18n.I18nMessages(uid=lng['uid'], kind='PO', src=lng['po_path'], settings=settings) + errs = po.check(fix=True) + reports.append("Processing {} language ({}).\n" + "Cleaned up {} commented messages.\n".format(lng['name'], lng['uid'], po.clean_commented()) + + ("Errors in this po, solved as best as possible!\n\t" + "\n\t".join(errs) if errs else "")) + if lng['uid'] in settings.IMPORT_LANGUAGES_RTL: + po.rtl_process() + po.write(kind="PO_COMPACT", dest=lng['po_path_blender']) + po.update_info() + return lng['uid'], po.nbr_trans_msgs / po.nbr_msgs, reports + + +class UI_OT_i18n_updatetranslation_blender_repo(Operator): + """Update i18n data (po files) in Blender source code repository""" + bl_idname = "ui.i18n_updatetranslation_blender_repo" + bl_label = "Update I18n Blender Repository" + + def execute(self, context): + if not hasattr(self, "settings"): + self.settings = settings.settings + i18n_sett = context.window_manager.i18n_update_settings + # 'DEFAULT' and en_US are always valid, fully-translated "languages"! 
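+        # stats maps a language uid to its fraction of translated messages (the
+        # callback above returns po.nbr_trans_msgs / po.nbr_msgs); it is passed to
+        # utils_languages_menu.gen_menu_file() below to rebuild the languages menu.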
+ stats = {"DEFAULT": 1.0, "en_US": 1.0} + + context.window_manager.progress_begin(0, len(i18n_sett.langs) + 1) + context.window_manager.progress_update(0) + with concurrent.futures.ProcessPoolExecutor() as exctr: + num_langs = len(i18n_sett.langs) + for progress, (lng_uid, stats_val, reports) in enumerate(exctr.map(i18n_updatetranslation_blender_repo_callback, [ + dict(lng.items()) for lng in i18n_sett.langs], (self.settings,) * num_langs, chunksize=4)): + context.window_manager.progress_update(progress + 1) + stats[lng_uid] = stats_val + print("".join(reports) + "\n") + + print("Generating languages' menu...") + context.window_manager.progress_update(progress + 2) + languages_menu_lines = utils_languages_menu.gen_menu_file(stats, self.settings) + with open(os.path.join(self.settings.BLENDER_I18N_ROOT, self.settings.LANGUAGES_FILE), 'w', encoding="utf8") as f: + f.write("\n".join(languages_menu_lines)) + context.window_manager.progress_end() + + return {'FINISHED'} + + +class UI_OT_i18n_updatetranslation_statistics(Operator): + """Create or extend a 'i18n_info.txt' Text datablock""" + """(it will contain statistics and checks about current working repository PO files)""" + bl_idname = "ui.i18n_updatetranslation_statistics" + bl_label = "Update I18n Statistics" + + report_name = "i18n_info.txt" + + def execute(self, context): + if not hasattr(self, "settings"): + self.settings = settings.settings + i18n_sett = context.window_manager.i18n_update_settings + + buff = io.StringIO() + lst = [(lng, lng.po_path) for lng in i18n_sett.langs] + + context.window_manager.progress_begin(0, len(lst)) + context.window_manager.progress_update(0) + for progress, (lng, path) in enumerate(lst): + context.window_manager.progress_update(progress + 1) + if not lng.use: + print("Skipping {} language ({}).".format(lng.name, lng.uid)) + continue + buff.write("Processing {} language ({}, {}).\n".format(lng.name, lng.uid, path)) + po = utils_i18n.I18nMessages(uid=lng.uid, kind='PO', src=path, settings=self.settings) + po.print_info(prefix=" ", output=buff.write) + errs = po.check(fix=False) + if errs: + buff.write(" WARNING! Po contains following errors:\n") + buff.write(" " + "\n ".join(errs)) + buff.write("\n") + buff.write("\n\n") + + text = None + if self.report_name not in bpy.data.texts: + text = bpy.data.texts.new(self.report_name) + else: + text = bpy.data.texts[self.report_name] + data = text.as_string() + data = data + "\n" + buff.getvalue() + text.from_string(data) + self.report({'INFO'}, "Info written to %s text datablock!" 
% self.report_name) + context.window_manager.progress_end() + + return {'FINISHED'} + + def invoke(self, context, event): + wm = context.window_manager + return wm.invoke_props_dialog(self) + + +classes = ( + UI_OT_i18n_updatetranslation_work_repo, + UI_OT_i18n_cleanuptranslation_work_repo, + UI_OT_i18n_updatetranslation_blender_repo, + UI_OT_i18n_updatetranslation_statistics, +) diff --git a/scripts/addons_core/ui_translate/update_ui.py b/scripts/addons_core/ui_translate/update_ui.py new file mode 100644 index 00000000000..e316989904b --- /dev/null +++ b/scripts/addons_core/ui_translate/update_ui.py @@ -0,0 +1,272 @@ +# SPDX-FileCopyrightText: 2013-2022 Blender Foundation +# +# SPDX-License-Identifier: GPL-2.0-or-later + +import os + +if "bpy" in locals(): + import importlib + importlib.reload(settings) + importlib.reload(utils_i18n) +else: + import bpy + from bpy.types import ( + Operator, + Panel, + PropertyGroup, + UIList, + ) + from bpy.props import ( + BoolProperty, + IntProperty, + StringProperty, + CollectionProperty, + ) + from . import settings + from bl_i18n_utils import utils as utils_i18n + +from bpy.app.translations import pgettext_iface as iface_ + + +# Data ######################################################################## + +class I18nUpdateTranslationLanguage(PropertyGroup): + """Settings/info about a language""" + + uid: StringProperty( + name="Language ID", + description="ISO code (eg. \"fr_FR\")", + default="", + ) + + num_id: IntProperty( + name="Numeric ID", + description="Numeric ID (read only!)", + default=0, min=0, + ) + + name: StringProperty( + name="Language Name", + description="Language label (eg. \"French (Français)\")", + default="", + ) + + use: BoolProperty( + name="Use", + description="If this language should be used in the current operator", + default=True, + ) + + po_path: StringProperty( + name="PO Work File Path", + description="Path to the relevant po file in the work repository", + subtype='FILE_PATH', + default="", + ) + + po_path_blender: StringProperty( + name="PO Blender File Path", + description="Path to the relevant po file in Blender's source repository", + subtype='FILE_PATH', + default="", + ) + + +class I18nUpdateTranslationSettings(PropertyGroup): + """Settings/info about a language""" + + langs: CollectionProperty( + name="Languages", + type=I18nUpdateTranslationLanguage, + description="Languages to update in work repository", + ) + + active_lang: IntProperty( + name="Active Language", + default=0, + description="Index of active language in langs collection", + ) + + pot_path: StringProperty( + name="POT File Path", + description="Path to the pot template file", + subtype='FILE_PATH', + default="", + ) + + is_init: BoolProperty( + description="Whether these settings have already been auto-set or not", + default=False, + options={'HIDDEN'}, + ) + + +# UI ########################################################################## + +class UI_UL_i18n_languages(UIList): + """ """ + + def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index): + if self.layout_type in {'DEFAULT', 'COMPACT'}: + layout.label(text=item.name, icon_value=icon) + layout.prop(item, "use", text="") + elif self.layout_type in {'GRID'}: + layout.alignment = 'CENTER' + layout.label(text=item.uid) + layout.prop(item, "use", text="") + + +class UI_PT_i18n_update_translations_settings(Panel): + """ """ + + bl_label = "I18n Update Translation" + bl_space_type = "PROPERTIES" + bl_region_type = "WINDOW" + bl_context = "render" + + def 
draw(self, context): + layout = self.layout + i18n_sett = context.window_manager.i18n_update_settings + + if not i18n_sett.is_init and bpy.ops.ui.i18n_updatetranslation_init_settings.poll(): + # Cannot call the operator from here, this code might run while `pyrna_write_check()` returns False + # (which prevents any operator call from Python), during initialization of Blender. + UI_OT_i18n_updatetranslation_init_settings.execute_static(context, settings.settings) + + if not i18n_sett.is_init: + layout.label(text="Could not init languages data!") + layout.label(text="Please edit the preferences of the UI Translate add-on") + layout.operator("ui.i18n_updatetranslation_init_settings", text="Init Settings") + else: + split = layout.split(factor=0.75) + split.template_list("UI_UL_i18n_languages", "", i18n_sett, "langs", i18n_sett, "active_lang", rows=8) + col = split.column() + col.operator("ui.i18n_updatetranslation_init_settings", text="Reset Settings") + deselect = any(l.use for l in i18n_sett.langs) + op = col.operator("ui.i18n_updatetranslation_settings_select", + text="Deselect All" if deselect else "Select All") + op.use_invert = False + op.use_select = not deselect + col.operator("ui.i18n_updatetranslation_settings_select", text="Invert Selection").use_invert = True + col.separator() + col.operator("ui.i18n_updatetranslation_work_repo", text="Update Work Repository") + col.operator("ui.i18n_cleanuptranslation_work_repo", text="Clean up Work Repository") + col.separator() + col.operator("ui.i18n_updatetranslation_blender_repo", text="Update Blender Repository") + col.separator() + col.operator("ui.i18n_updatetranslation_statistics", text="Statistics") + + if i18n_sett.active_lang >= 0 and i18n_sett.active_lang < len(i18n_sett.langs): + lng = i18n_sett.langs[i18n_sett.active_lang] + col = layout.column() + col.active = lng.use + row = col.row() + row.label(text="[{}]: \"{}\" ({})".format(lng.uid, iface_(lng.name), lng.num_id), translate=False) + row.prop(lng, "use", text="") + col.prop(lng, "po_path") + col.prop(lng, "po_path_blender") + layout.separator() + layout.prop(i18n_sett, "pot_path") + + layout.separator() + layout.label(text="Add-ons:") + row = layout.row() + op = row.operator("ui.i18n_addon_translation_invoke", text="Refresh I18n Data...") + op.op_id = "ui.i18n_addon_translation_update" + op = row.operator("ui.i18n_addon_translation_invoke", text="Export PO...") + op.op_id = "ui.i18n_addon_translation_export" + op = row.operator("ui.i18n_addon_translation_invoke", text="Import PO...") + op.op_id = "ui.i18n_addon_translation_import" + + +# Operators ################################################################### + +class UI_OT_i18n_updatetranslation_init_settings(Operator): + """Init settings for i18n files update operators""" + + bl_idname = "ui.i18n_updatetranslation_init_settings" + bl_label = "Init I18n Update Settings" + bl_option = {'REGISTER'} + + @classmethod + def poll(cls, context): + return context.window_manager is not None + + @staticmethod + def execute_static(context, self_settings): + i18n_sett = context.window_manager.i18n_update_settings + + # First, create the list of languages from settings. 
+ i18n_sett.langs.clear() + root_work = self_settings.WORK_DIR + root_blender_po = self_settings.BLENDER_I18N_PO_DIR + print(root_work) + print(root_blender_po) + print(self_settings.FILE_NAME_POT) + if not (os.path.isdir(root_work) and os.path.isdir(root_blender_po)): + i18n_sett.is_init = False + return + for can_use, uid, num_id, name, isocode, po_path_work in utils_i18n.list_po_dir(root_work, self_settings): + lng = i18n_sett.langs.add() + lng.use = can_use + lng.uid = uid + lng.num_id = num_id + lng.name = name + if can_use: + lng.po_path = po_path_work + lng.po_path_blender = os.path.join(root_blender_po, isocode + ".po") + + i18n_sett.pot_path = self_settings.FILE_NAME_POT + i18n_sett.is_init = True + + def execute(self, context): + if not hasattr(self, "settings"): + self.settings = settings.settings + + self.execute_static(context, self.settings) + + if context.window_manager.i18n_update_settings.is_init is False: + return {'CANCELLED'} + return {'FINISHED'} + + +class UI_OT_i18n_updatetranslation_settings_select(Operator): + """(De)select (or invert selection of) all languages for i18n files update operators""" + + bl_idname = "ui.i18n_updatetranslation_settings_select" + bl_label = "Init I18n Update Select Languages" + + use_select: BoolProperty( + name="Select All", + description="Select all if True, else deselect all", + default=True, + ) + + use_invert: BoolProperty( + name="Invert Selection", + description="Inverse selection (overrides 'Select All' when True)", + default=False, + ) + + @classmethod + def poll(cls, context): + return context.window_manager is not None + + def execute(self, context): + if self.use_invert: + for lng in context.window_manager.i18n_update_settings.langs: + lng.use = not lng.use + else: + for lng in context.window_manager.i18n_update_settings.langs: + lng.use = self.use_select + return {'FINISHED'} + + +classes = ( + I18nUpdateTranslationLanguage, + I18nUpdateTranslationSettings, + UI_UL_i18n_languages, + UI_PT_i18n_update_translations_settings, + UI_OT_i18n_updatetranslation_init_settings, + UI_OT_i18n_updatetranslation_settings_select, +) diff --git a/scripts/addons_core/viewport_vr_preview/__init__.py b/scripts/addons_core/viewport_vr_preview/__init__.py new file mode 100644 index 00000000000..e5eea8d8ac5 --- /dev/null +++ b/scripts/addons_core/viewport_vr_preview/__init__.py @@ -0,0 +1,52 @@ +# SPDX-FileCopyrightText: 2021-2023 Blender Foundation +# +# SPDX-License-Identifier: GPL-2.0-or-later + +bl_info = { + "name": "VR Scene Inspection", + "author": "Julian Eisel (Severin), Sebastian Koenig, Peter Kim (muxed-reality)", + "version": (0, 11, 2), + "blender": (3, 2, 0), + "location": "3D View > Sidebar > VR", + "description": ("View the viewport with virtual reality glasses " + "(head-mounted displays)"), + "support": "OFFICIAL", + "warning": "This is an early, limited preview of in development " + "VR support for Blender.", + "doc_url": "{BLENDER_MANUAL_URL}/addons/3d_view/vr_scene_inspection.html", + "category": "3D View", +} + + +if "bpy" in locals(): + import importlib + importlib.reload(action_map) + importlib.reload(gui) + importlib.reload(operators) + importlib.reload(properties) +else: + from . 
import action_map, gui, operators, properties + +import bpy + + +def register(): + if not bpy.app.build_options.xr_openxr: + bpy.utils.register_class(gui.VIEW3D_PT_vr_info) + return + + action_map.register() + gui.register() + operators.register() + properties.register() + + +def unregister(): + if not bpy.app.build_options.xr_openxr: + bpy.utils.unregister_class(gui.VIEW3D_PT_vr_info) + return + + action_map.unregister() + gui.unregister() + operators.unregister() + properties.unregister() diff --git a/scripts/addons_core/viewport_vr_preview/action_map.py b/scripts/addons_core/viewport_vr_preview/action_map.py new file mode 100644 index 00000000000..cd95ea4fa89 --- /dev/null +++ b/scripts/addons_core/viewport_vr_preview/action_map.py @@ -0,0 +1,165 @@ +# SPDX-FileCopyrightText: 2021-2022 Blender Foundation +# +# SPDX-License-Identifier: GPL-2.0-or-later + +if "bpy" in locals(): + import importlib + importlib.reload(defaults) +else: + from . import action_map_io, defaults + +import bpy +from bpy.app.handlers import persistent +from bpy_extras.io_utils import ExportHelper, ImportHelper +import importlib.util +import os.path + + +def vr_actionset_active_update(context): + session_state = context.window_manager.xr_session_state + if not session_state or len(session_state.actionmaps) < 1: + return + + scene = context.scene + + if scene.vr_actions_use_gamepad and session_state.actionmaps.find( + session_state, defaults.VRDefaultActionmaps.GAMEPAD.value): + session_state.active_action_set_set(context, defaults.VRDefaultActionmaps.GAMEPAD.value) + else: + # Use first action map. + session_state.active_action_set_set(context, session_state.actionmaps[0].name) + + +def vr_actions_use_gamepad_update(self, context): + vr_actionset_active_update(context) + + +@persistent +def vr_create_actions(context: bpy.context): + context = bpy.context + session_state = context.window_manager.xr_session_state + if not session_state: + return + + # Check if actions are enabled. + scene = context.scene + if not scene.vr_actions_enable: + return + + # Ensure default action maps. + if not defaults.vr_ensure_default_actionmaps(session_state): + return + + for am in session_state.actionmaps: + if len(am.actionmap_items) < 1: + continue + + ok = session_state.action_set_create(context, am) + if not ok: + return + + controller_grip_name = "" + controller_aim_name = "" + + for ami in am.actionmap_items: + if len(ami.bindings) < 1: + continue + + ok = session_state.action_create(context, am, ami) + if not ok: + return + + if ami.type == 'POSE': + if ami.pose_is_controller_grip: + controller_grip_name = ami.name + if ami.pose_is_controller_aim: + controller_aim_name = ami.name + + for amb in ami.bindings: + # Check for bindings that require OpenXR extensions. + if amb.name == defaults.VRDefaultActionbindings.REVERB_G2.value: + if not scene.vr_actions_enable_reverb_g2: + continue + elif amb.name == defaults.VRDefaultActionbindings.VIVE_COSMOS.value: + if not scene.vr_actions_enable_vive_cosmos: + continue + elif amb.name == defaults.VRDefaultActionbindings.VIVE_FOCUS.value: + if not scene.vr_actions_enable_vive_focus: + continue + elif amb.name == defaults.VRDefaultActionbindings.HUAWEI.value: + if not scene.vr_actions_enable_huawei: + continue + + ok = session_state.action_binding_create(context, am, ami, amb) + if not ok: + return + + # Set controller pose actions. 
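+        # Controller poses are only registered when the action map defines both a
+        # grip and an aim pose action, since controller_pose_actions_set() needs
+        # both names.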
+ if controller_grip_name and controller_aim_name: + session_state.controller_pose_actions_set(context, am.name, controller_grip_name, controller_aim_name) + + # Set active action set. + vr_actionset_active_update(context) + + +def vr_load_actionmaps(session_state, filepath): + if not os.path.exists(filepath): + return False + + spec = importlib.util.spec_from_file_location(os.path.basename(filepath), filepath) + file = importlib.util.module_from_spec(spec) + spec.loader.exec_module(file) + + action_map_io.actionconfig_init_from_data(session_state, file.actionconfig_data, file.actionconfig_version) + + return True + + +def vr_save_actionmaps(session_state, filepath, sort=False): + action_map_io.actionconfig_export_as_data(session_state, filepath, sort=sort) + + print("Saved XR actionmaps: " + filepath) + + return True + + +def register(): + bpy.types.Scene.vr_actions_enable = bpy.props.BoolProperty( + name="Use Controller Actions", + description="Enable default VR controller actions, including controller poses and haptics", + default=True, + ) + bpy.types.Scene.vr_actions_use_gamepad = bpy.props.BoolProperty( + description="Use input from gamepad (Microsoft Xbox Controller) instead of motion controllers", + default=False, + update=vr_actions_use_gamepad_update, + ) + bpy.types.Scene.vr_actions_enable_huawei = bpy.props.BoolProperty( + description="Enable bindings for the Huawei controllers. Note that this may not be supported by all OpenXR runtimes", + default=False, + ) + bpy.types.Scene.vr_actions_enable_reverb_g2 = bpy.props.BoolProperty( + description="Enable bindings for the HP Reverb G2 controllers. Note that this may not be supported by all OpenXR runtimes", + default=False, + ) + bpy.types.Scene.vr_actions_enable_vive_cosmos = bpy.props.BoolProperty( + description="Enable bindings for the HTC Vive Cosmos controllers. Note that this may not be supported by all OpenXR runtimes", + default=False, + ) + bpy.types.Scene.vr_actions_enable_vive_focus = bpy.props.BoolProperty( + description="Enable bindings for the HTC Vive Focus 3 controllers. 
Note that this may not be supported by all OpenXR runtimes", + default=False, + ) + + bpy.app.handlers.xr_session_start_pre.append(vr_create_actions) + + +def unregister(): + del bpy.types.Scene.vr_actions_enable + del bpy.types.Scene.vr_actions_use_gamepad + del bpy.types.Scene.vr_actions_enable_huawei + del bpy.types.Scene.vr_actions_enable_reverb_g2 + del bpy.types.Scene.vr_actions_enable_vive_cosmos + del bpy.types.Scene.vr_actions_enable_vive_focus + + bpy.app.handlers.xr_session_start_pre.remove(vr_create_actions) diff --git a/scripts/addons_core/viewport_vr_preview/action_map_io.py b/scripts/addons_core/viewport_vr_preview/action_map_io.py new file mode 100644 index 00000000000..938e217ddce --- /dev/null +++ b/scripts/addons_core/viewport_vr_preview/action_map_io.py @@ -0,0 +1,346 @@ +# SPDX-FileCopyrightText: 2021-2022 Blender Foundation +# +# SPDX-License-Identifier: GPL-2.0-or-later + +# ----------------------------------------------------------------------------- +# Export Functions + +__all__ = ( + "actionconfig_export_as_data", + "actionconfig_import_from_data", + "actionconfig_init_from_data", + "actionmap_init_from_data", + "actionmap_item_init_from_data", +) + + +def indent(levels): + return levels * " " + + +def round_float_32(f): + from struct import pack, unpack + return unpack("f", pack("f", f))[0] + + +def repr_f32(f): + f_round = round_float_32(f) + f_str = repr(f) + f_str_frac = f_str.partition(".")[2] + if not f_str_frac: + return f_str + for i in range(1, len(f_str_frac)): + f_test = round(f, i) + f_test_round = round_float_32(f_test) + if f_test_round == f_round: + return "%.*f" % (i, f_test) + return f_str + + +def ami_args_as_data(ami): + s = [ + f"\"type\": '{ami.type}'", + ] + + sup = f"\"user_paths\": [" + for user_path in ami.user_paths: + sup += f"'{user_path.path}', " + if len(ami.user_paths) > 0: + sup = sup[:-2] + sup += "]" + s.append(sup) + + if ami.type == 'FLOAT' or ami.type == 'VECTOR2D': + s.append(f"\"op\": '{ami.op}'") + s.append(f"\"op_mode\": '{ami.op_mode}'") + s.append(f"\"bimanual\": '{ami.bimanual}'") + s.append(f"\"haptic_name\": '{ami.haptic_name}'") + s.append(f"\"haptic_match_user_paths\": '{ami.haptic_match_user_paths}'") + s.append(f"\"haptic_duration\": '{ami.haptic_duration}'") + s.append(f"\"haptic_frequency\": '{ami.haptic_frequency}'") + s.append(f"\"haptic_amplitude\": '{ami.haptic_amplitude}'") + s.append(f"\"haptic_mode\": '{ami.haptic_mode}'") + elif ami.type == 'POSE': + s.append(f"\"pose_is_controller_grip\": '{ami.pose_is_controller_grip}'") + s.append(f"\"pose_is_controller_aim\": '{ami.pose_is_controller_aim}'") + + return "{" + ", ".join(s) + "}" + + +def ami_data_from_args(ami, args): + ami.type = args["type"] + + for path in args["user_paths"]: + ami.user_paths.new(path) + + if ami.type == 'FLOAT' or ami.type == 'VECTOR2D': + ami.op = args["op"] + ami.op_mode = args["op_mode"] + ami.bimanual = True if (args["bimanual"] == 'True') else False + ami.haptic_name = args["haptic_name"] + ami.haptic_match_user_paths = True if (args["haptic_match_user_paths"] == 'True') else False + ami.haptic_duration = float(args["haptic_duration"]) + ami.haptic_frequency = float(args["haptic_frequency"]) + ami.haptic_amplitude = float(args["haptic_amplitude"]) + ami.haptic_mode = args["haptic_mode"] + elif ami.type == 'POSE': + ami.pose_is_controller_grip = True if (args["pose_is_controller_grip"] == 'True') else False + ami.pose_is_controller_aim = True if (args["pose_is_controller_aim"] == 'True') else False + + +def 
_ami_properties_to_lines_recursive(level, properties, lines): + from bpy.types import OperatorProperties + + def string_value(value): + if isinstance(value, (str, bool, int, set)): + return repr(value) + elif isinstance(value, float): + return repr_f32(value) + elif getattr(value, '__len__', False): + return repr(tuple(value)) + raise Exception(f"Export action configuration: can't write {value!r}") + + for pname in properties.bl_rna.properties.keys(): + if pname != "rna_type": + value = getattr(properties, pname) + if isinstance(value, OperatorProperties): + lines_test = [] + _ami_properties_to_lines_recursive(level + 2, value, lines_test) + if lines_test: + lines.append(f"(") + lines.append(f"\"{pname}\",\n") + lines.append(f"{indent(level + 3)}" "[") + lines.extend(lines_test) + lines.append("],\n") + lines.append(f"{indent(level + 3)}" "),\n" f"{indent(level + 2)}") + del lines_test + elif properties.is_property_set(pname): + value = string_value(value) + lines.append((f"(\"{pname}\", {value:s}),\n" f"{indent(level + 2)}")) + + +def _ami_properties_to_lines(level, ami_props, lines): + if ami_props is None: + return + + lines_test = [f"\"op_properties\":\n" f"{indent(level + 1)}" "["] + _ami_properties_to_lines_recursive(level, ami_props, lines_test) + if len(lines_test) > 1: + lines_test.append("],\n") + lines.extend(lines_test) + + +def _ami_attrs_or_none(level, ami): + lines = [] + _ami_properties_to_lines(level + 1, ami.op_properties, lines) + if not lines: + return None + return "".join(lines) + + +def amb_args_as_data(amb, type): + s = [ + f"\"profile\": '{amb.profile}'", + ] + + scp = f"\"component_paths\": [" + for component_path in amb.component_paths: + scp += f"'{component_path.path}', " + if len(amb.component_paths) > 0: + scp = scp[:-2] + scp += "]" + s.append(scp) + + if type == 'FLOAT' or type == 'VECTOR2D': + s.append(f"\"threshold\": '{amb.threshold}'") + if type == 'FLOAT': + s.append(f"\"axis_region\": '{amb.axis0_region}'") + else: # type == 'VECTOR2D': + s.append(f"\"axis0_region\": '{amb.axis0_region}'") + s.append(f"\"axis1_region\": '{amb.axis1_region}'") + elif type == 'POSE': + s.append(f"\"pose_location\": '{amb.pose_location.x, amb.pose_location.y, amb.pose_location.z}'") + s.append(f"\"pose_rotation\": '{amb.pose_rotation.x, amb.pose_rotation.y, amb.pose_rotation.z}'") + + return "{" + ", ".join(s) + "}" + + +def amb_data_from_args(amb, args, type): + amb.profile = args["profile"] + + for path in args["component_paths"]: + amb.component_paths.new(path) + + if type == 'FLOAT' or type == 'VECTOR2D': + amb.threshold = float(args["threshold"]) + if type == 'FLOAT': + amb.axis0_region = args["axis_region"] + else: # type == 'VECTOR2D': + amb.axis0_region = args["axis0_region"] + amb.axis1_region = args["axis1_region"] + elif type == 'POSE': + l = args["pose_location"].strip(')(').split(', ') + amb.pose_location.x = float(l[0]) + amb.pose_location.y = float(l[1]) + amb.pose_location.z = float(l[2]) + l = args["pose_rotation"].strip(')(').split(', ') + amb.pose_rotation.x = float(l[0]) + amb.pose_rotation.y = float(l[1]) + amb.pose_rotation.z = float(l[2]) + + +def actionconfig_export_as_data(session_state, filepath, *, sort=False): + export_actionmaps = [] + + for am in session_state.actionmaps: + export_actionmaps.append(am) + + if sort: + export_actionmaps.sort(key=lambda k: k.name) + + with open(filepath, "w", encoding="utf-8") as fh: + fw = fh.write + + # Use the file version since it includes the sub-version + # which we can bump multiple times between 
releases. + from bpy.app import version_file + fw(f"actionconfig_version = {version_file!r}\n") + del version_file + + fw("actionconfig_data = \\\n[") + + for am in export_actionmaps: + fw("(") + fw(f"\"{am.name:s}\",\n") + + fw(f"{indent(2)}" "{") + fw(f"\"items\":\n") + fw(f"{indent(3)}[") + for ami in am.actionmap_items: + fw(f"(") + fw(f"\"{ami.name:s}\"") + ami_args = ami_args_as_data(ami) + ami_data = _ami_attrs_or_none(4, ami) + if ami_data is None: + fw(f", ") + else: + fw(",\n" f"{indent(5)}") + + fw(ami_args) + if ami_data is None: + fw(", None,\n") + else: + fw(",\n") + fw(f"{indent(5)}" "{") + fw(ami_data) + fw(f"{indent(6)}") + fw("}," f"{indent(5)}") + fw("\n") + + fw(f"{indent(5)}" "{") + fw(f"\"bindings\":\n") + fw(f"{indent(6)}[") + for amb in ami.bindings: + fw(f"(") + fw(f"\"{amb.name:s}\"") + fw(f", ") + amb_args = amb_args_as_data(amb, ami.type) + fw(amb_args) + fw("),\n" f"{indent(7)}") + fw("],\n" f"{indent(6)}") + fw("},\n" f"{indent(5)}") + fw("),\n" f"{indent(4)}") + + fw("],\n" f"{indent(3)}") + fw("},\n" f"{indent(2)}") + fw("),\n" f"{indent(1)}") + + fw("]\n") + fw("\n\n") + fw("if __name__ == \"__main__\":\n") + + # We could remove this in the future, as loading new action-maps in older Blender versions + # makes less and less sense as Blender changes. + fw(" # Only add keywords that are supported.\n") + fw(" from bpy.app import version as blender_version\n") + fw(" keywords = {}\n") + fw(" if blender_version >= (3, 0, 0):\n") + fw(" keywords[\"actionconfig_version\"] = actionconfig_version\n") + + fw(" import os\n") + fw(" from viewport_vr_preview.io import actionconfig_import_from_data\n") + fw(" actionconfig_import_from_data(\n") + fw(" os.path.splitext(os.path.basename(__file__))[0],\n") + fw(" actionconfig_data,\n") + fw(" **keywords,\n") + fw(" )\n") + + +# ----------------------------------------------------------------------------- +# Import Functions + +def _ami_props_setattr(ami_name, ami_props, attr, value): + if type(value) is list: + ami_subprop = getattr(ami_props, attr) + for subattr, subvalue in value: + _ami_props_setattr(ami_subprop, subattr, subvalue) + return + + try: + setattr(ami_props, attr, value) + except AttributeError: + print(f"Warning: property '{attr}' not found in action map item '{ami_name}'") + except Exception as ex: + print(f"Warning: {ex!r}") + + +def actionmap_item_init_from_data(ami, ami_bindings): + new_fn = getattr(ami.bindings, "new") + for (amb_name, amb_args) in ami_bindings: + amb = new_fn(amb_name, True) + amb_data_from_args(amb, amb_args, ami.type) + + +def actionmap_init_from_data(am, am_items): + new_fn = getattr(am.actionmap_items, "new") + for (ami_name, ami_args, ami_data, ami_content) in am_items: + ami = new_fn(ami_name, True) + ami_data_from_args(ami, ami_args) + if ami_data is not None: + ami_props_data = ami_data.get("op_properties", None) + if ami_props_data is not None: + ami_props = ami.op_properties + assert type(ami_props_data) is list + for attr, value in ami_props_data: + _ami_props_setattr(ami_name, ami_props, attr, value) + ami_bindings = ami_content["bindings"] + assert type(ami_bindings) is list + actionmap_item_init_from_data(ami, ami_bindings) + + +def actionconfig_init_from_data(session_state, actionconfig_data, actionconfig_version): + # Load data in the format defined above. + # + # Runs at load time, keep this fast! 
+ if actionconfig_version is not None: + from .versioning import actionconfig_update + actionconfig_data = actionconfig_update(actionconfig_data, actionconfig_version) + + for (am_name, am_content) in actionconfig_data: + am = session_state.actionmaps.new(session_state, am_name, True) + am_items = am_content["items"] + # Check here instead of inside 'actionmap_init_from_data' + # because we want to allow both tuple & list types in that case. + # + # For full action maps, ensure these are always lists to allow for extending them + # in a generic way that doesn't have to check for the type each time. + assert type(am_items) is list + actionmap_init_from_data(am, am_items) + + +def actionconfig_import_from_data(session_state, actionconfig_data, *, actionconfig_version=(0, 0, 0)): + # Load data in the format defined above. + # + # Runs at load time, keep this fast! + import bpy + actionconfig_init_from_data(session_state, actionconfig_data, actionconfig_version) diff --git a/scripts/addons_core/viewport_vr_preview/configs/default.py b/scripts/addons_core/viewport_vr_preview/configs/default.py new file mode 100644 index 00000000000..3c87aab2e12 --- /dev/null +++ b/scripts/addons_core/viewport_vr_preview/configs/default.py @@ -0,0 +1,424 @@ +# SPDX-FileCopyrightText: 2021-2022 Blender Foundation +# +# SPDX-License-Identifier: GPL-2.0-or-later + +actionconfig_version = (3, 2, 3) +actionconfig_data = \ + [("blender_default", + {"items": + [("controller_grip", {"type": 'POSE', "user_paths": ['/user/hand/left', '/user/hand/right'], "pose_is_controller_grip": 'True', "pose_is_controller_aim": 'False'}, None, + {"bindings": + [("huawei", {"profile": '/interaction_profiles/huawei/controller', "component_paths": ['/input/grip/pose', '/input/grip/pose'], "pose_location": '(0.0, 0.0, 0.0)', "pose_rotation": '(0.0, 0.0, 0.0)'}), + ("index", {"profile": '/interaction_profiles/valve/index_controller', "component_paths": ['/input/grip/pose', '/input/grip/pose'], "pose_location": '(0.0, 0.0, 0.0)', "pose_rotation": '(0.0, 0.0, 0.0)'}), + ("oculus", {"profile": '/interaction_profiles/oculus/touch_controller', "component_paths": ['/input/grip/pose', '/input/grip/pose'], "pose_location": '(0.0, 0.0, 0.0)', "pose_rotation": '(0.0, 0.0, 0.0)'}), + ("reverb_g2", {"profile": '/interaction_profiles/hp/mixed_reality_controller', "component_paths": ['/input/grip/pose', '/input/grip/pose'], "pose_location": '(0.0, 0.0, 0.0)', "pose_rotation": '(0.0, 0.0, 0.0)'}), + ("simple", {"profile": '/interaction_profiles/khr/simple_controller', "component_paths": ['/input/grip/pose', '/input/grip/pose'], "pose_location": '(0.0, 0.0, 0.0)', "pose_rotation": '(0.0, 0.0, 0.0)'}), + ("vive", {"profile": '/interaction_profiles/htc/vive_controller', "component_paths": ['/input/grip/pose', '/input/grip/pose'], "pose_location": '(0.0, 0.0, 0.0)', "pose_rotation": '(0.0, 0.0, 0.0)'}), + ("vive_cosmos", {"profile": '/interaction_profiles/htc/vive_cosmos_controller', "component_paths": ['/input/grip/pose', '/input/grip/pose'], "pose_location": '(0.0, 0.0, 0.0)', "pose_rotation": '(0.0, 0.0, 0.0)'}), + ("vive_focus", {"profile": '/interaction_profiles/htc/vive_focus3_controller', "component_paths": ['/input/grip/pose', '/input/grip/pose'], "pose_location": '(0.0, 0.0, 0.0)', "pose_rotation": '(0.0, 0.0, 0.0)'}), + ("wmr", {"profile": '/interaction_profiles/microsoft/motion_controller', "component_paths": ['/input/grip/pose', '/input/grip/pose'], "pose_location": '(0.0, 0.0, 0.0)', "pose_rotation": '(0.0, 0.0, 0.0)'}), + ], + }, + ), + 
("controller_aim", {"type": 'POSE', "user_paths": ['/user/hand/left', '/user/hand/right'], "pose_is_controller_grip": 'False', "pose_is_controller_aim": 'True'}, None, + {"bindings": + [("huawei", {"profile": '/interaction_profiles/huawei/controller', "component_paths": ['/input/aim/pose', '/input/aim/pose'], "pose_location": '(0.0, 0.0, 0.0)', "pose_rotation": '(0.0, 0.0, 0.0)'}), + ("index", {"profile": '/interaction_profiles/valve/index_controller', "component_paths": ['/input/aim/pose', '/input/aim/pose'], "pose_location": '(0.0, 0.0, 0.0)', "pose_rotation": '(0.0, 0.0, 0.0)'}), + ("oculus", {"profile": '/interaction_profiles/oculus/touch_controller', "component_paths": ['/input/aim/pose', '/input/aim/pose'], "pose_location": '(0.0, 0.0, 0.0)', "pose_rotation": '(0.0, 0.0, 0.0)'}), + ("reverb_g2", {"profile": '/interaction_profiles/hp/mixed_reality_controller', "component_paths": ['/input/aim/pose', '/input/aim/pose'], "pose_location": '(0.0, 0.0, 0.0)', "pose_rotation": '(0.0, 0.0, 0.0)'}), + ("simple", {"profile": '/interaction_profiles/khr/simple_controller', "component_paths": ['/input/aim/pose', '/input/aim/pose'], "pose_location": '(0.0, 0.0, 0.0)', "pose_rotation": '(0.0, 0.0, 0.0)'}), + ("vive", {"profile": '/interaction_profiles/htc/vive_controller', "component_paths": ['/input/aim/pose', '/input/aim/pose'], "pose_location": '(0.0, 0.0, 0.0)', "pose_rotation": '(0.0, 0.0, 0.0)'}), + ("vive_cosmos", {"profile": '/interaction_profiles/htc/vive_cosmos_controller', "component_paths": ['/input/aim/pose', '/input/aim/pose'], "pose_location": '(0.0, 0.0, 0.0)', "pose_rotation": '(0.0, 0.0, 0.0)'}), + ("vive_focus", {"profile": '/interaction_profiles/htc/vive_focus3_controller', "component_paths": ['/input/aim/pose', '/input/aim/pose'], "pose_location": '(0.0, 0.0, 0.0)', "pose_rotation": '(0.0, 0.0, 0.0)'}), + ("wmr", {"profile": '/interaction_profiles/microsoft/motion_controller', "component_paths": ['/input/aim/pose', '/input/aim/pose'], "pose_location": '(0.0, 0.0, 0.0)', "pose_rotation": '(0.0, 0.0, 0.0)'}), + ], + }, + ), + ("teleport", {"type": 'FLOAT', "user_paths": ['/user/hand/left', '/user/hand/right'], "op": 'wm.xr_navigation_teleport', "op_mode": 'MODAL', "bimanual": 'False', "haptic_name": '', "haptic_match_user_paths": 'False', "haptic_duration": '0.0', "haptic_frequency": '0.0', "haptic_amplitude": '0.0', "haptic_mode": 'PRESS'}, + {"op_properties": + [("interpolation", 0.9), + ("color", (0.0, 1.0, 1.0, 1.0)), + ], + }, + {"bindings": + [("huawei", {"profile": '/interaction_profiles/huawei/controller', "component_paths": ['/input/trigger/value', '/input/trigger/value'], "threshold": '0.30000001192092896', "axis_region": 'ANY'}), + ("index", {"profile": '/interaction_profiles/valve/index_controller', "component_paths": ['/input/trigger/value', '/input/trigger/value'], "threshold": '0.30000001192092896', "axis_region": 'ANY'}), + ("oculus", {"profile": '/interaction_profiles/oculus/touch_controller', "component_paths": ['/input/trigger/value', '/input/trigger/value'], "threshold": '0.30000001192092896', "axis_region": 'ANY'}), + ("reverb_g2", {"profile": '/interaction_profiles/hp/mixed_reality_controller', "component_paths": ['/input/trigger/value', '/input/trigger/value'], "threshold": '0.30000001192092896', "axis_region": 'ANY'}), + ("simple", {"profile": '/interaction_profiles/khr/simple_controller', "component_paths": ['/input/select/click', '/input/select/click'], "threshold": '0.30000001192092896', "axis_region": 'ANY'}), + ("vive", {"profile": 
'/interaction_profiles/htc/vive_controller', "component_paths": ['/input/trigger/value', '/input/trigger/value'], "threshold": '0.30000001192092896', "axis_region": 'ANY'}), + ("vive_cosmos", {"profile": '/interaction_profiles/htc/vive_cosmos_controller', "component_paths": ['/input/trigger/value', '/input/trigger/value'], "threshold": '0.30000001192092896', "axis_region": 'ANY'}), + ("vive_focus", {"profile": '/interaction_profiles/htc/vive_focus3_controller', "component_paths": ['/input/trigger/value', '/input/trigger/value'], "threshold": '0.30000001192092896', "axis_region": 'ANY'}), + ("wmr", {"profile": '/interaction_profiles/microsoft/motion_controller', "component_paths": ['/input/trigger/value', '/input/trigger/value'], "threshold": '0.30000001192092896', "axis_region": 'ANY'}), + ], + }, + ), + ("nav_grab", {"type": 'FLOAT', "user_paths": ['/user/hand/left', '/user/hand/right'], "op": 'wm.xr_navigation_grab', "op_mode": 'MODAL', "bimanual": 'True', "haptic_name": '', "haptic_match_user_paths": 'False', "haptic_duration": '0.0', "haptic_frequency": '0.0', "haptic_amplitude": '0.0', "haptic_mode": 'PRESS'}, + {"op_properties": + [("lock_rotation", True), + ], + }, + {"bindings": + [("huawei", {"profile": '/interaction_profiles/huawei/controller', "component_paths": ['/input/trackpad/click', '/input/trackpad/click'], "threshold": '0.30000001192092896', "axis_region": 'ANY'}), + ("index", {"profile": '/interaction_profiles/valve/index_controller', "component_paths": ['/input/squeeze/force', '/input/squeeze/force'], "threshold": '0.5', "axis_region": 'ANY'}), + ("oculus", {"profile": '/interaction_profiles/oculus/touch_controller', "component_paths": ['/input/squeeze/value', '/input/squeeze/value'], "threshold": '0.30000001192092896', "axis_region": 'ANY'}), + ("reverb_g2", {"profile": '/interaction_profiles/hp/mixed_reality_controller', "component_paths": ['/input/squeeze/value', '/input/squeeze/value'], "threshold": '0.30000001192092896', "axis_region": 'ANY'}), + ("simple", {"profile": '/interaction_profiles/khr/simple_controller', "component_paths": ['/input/menu/click', '/input/menu/click'], "threshold": '0.30000001192092896', "axis_region": 'ANY'}), + ("vive", {"profile": '/interaction_profiles/htc/vive_controller', "component_paths": ['/input/squeeze/click', '/input/squeeze/click'], "threshold": '0.30000001192092896', "axis_region": 'ANY'}), + ("vive_cosmos", {"profile": '/interaction_profiles/htc/vive_cosmos_controller', "component_paths": ['/input/squeeze/click', '/input/squeeze/click'], "threshold": '0.30000001192092896', "axis_region": 'ANY'}), + ("vive_focus", {"profile": '/interaction_profiles/htc/vive_focus3_controller', "component_paths": ['/input/squeeze/click', '/input/squeeze/click'], "threshold": '0.30000001192092896', "axis_region": 'ANY'}), + ("wmr", {"profile": '/interaction_profiles/microsoft/motion_controller', "component_paths": ['/input/squeeze/click', '/input/squeeze/click'], "threshold": '0.30000001192092896', "axis_region": 'ANY'}), + ], + }, + ), + ("fly_forward", {"type": 'FLOAT', "user_paths": ['/user/hand/left'], "op": 'wm.xr_navigation_fly', "op_mode": 'MODAL', "bimanual": 'False', "haptic_name": '', "haptic_match_user_paths": 'False', "haptic_duration": '0.0', "haptic_frequency": '0.0', "haptic_amplitude": '0.0', "haptic_mode": 'PRESS'}, + {"op_properties": + [("mode", 'VIEWER_FORWARD'), + ("lock_location_z", True), + ], + }, + {"bindings": + [("huawei", {"profile": '/interaction_profiles/huawei/controller', "component_paths": ['/input/trackpad/y'], 
"threshold": '0.30000001192092896', "axis_region": 'POSITIVE'}), + ("index", {"profile": '/interaction_profiles/valve/index_controller', "component_paths": ['/input/thumbstick/y'], "threshold": '0.30000001192092896', "axis_region": 'POSITIVE'}), + ("oculus", {"profile": '/interaction_profiles/oculus/touch_controller', "component_paths": ['/input/thumbstick/y'], "threshold": '0.30000001192092896', "axis_region": 'POSITIVE'}), + ("reverb_g2", {"profile": '/interaction_profiles/hp/mixed_reality_controller', "component_paths": ['/input/thumbstick/y'], "threshold": '0.30000001192092896', "axis_region": 'POSITIVE'}), + ("vive", {"profile": '/interaction_profiles/htc/vive_controller', "component_paths": ['/input/trackpad/y'], "threshold": '0.30000001192092896', "axis_region": 'POSITIVE'}), + ("vive_cosmos", {"profile": '/interaction_profiles/htc/vive_cosmos_controller', "component_paths": ['/input/thumbstick/y'], "threshold": '0.30000001192092896', "axis_region": 'POSITIVE'}), + ("vive_focus", {"profile": '/interaction_profiles/htc/vive_focus3_controller', "component_paths": ['/input/thumbstick/y'], "threshold": '0.30000001192092896', "axis_region": 'POSITIVE'}), + ("wmr", {"profile": '/interaction_profiles/microsoft/motion_controller', "component_paths": ['/input/thumbstick/y'], "threshold": '0.30000001192092896', "axis_region": 'POSITIVE'}), + ], + }, + ), + ("fly_back", {"type": 'FLOAT', "user_paths": ['/user/hand/left'], "op": 'wm.xr_navigation_fly', "op_mode": 'MODAL', "bimanual": 'False', "haptic_name": '', "haptic_match_user_paths": 'False', "haptic_duration": '0.0', "haptic_frequency": '0.0', "haptic_amplitude": '0.0', "haptic_mode": 'PRESS'}, + {"op_properties": + [("mode", 'VIEWER_BACK'), + ("lock_location_z", True), + ], + }, + {"bindings": + [("huawei", {"profile": '/interaction_profiles/huawei/controller', "component_paths": ['/input/trackpad/y'], "threshold": '0.30000001192092896', "axis_region": 'NEGATIVE'}), + ("index", {"profile": '/interaction_profiles/valve/index_controller', "component_paths": ['/input/thumbstick/y'], "threshold": '0.30000001192092896', "axis_region": 'NEGATIVE'}), + ("oculus", {"profile": '/interaction_profiles/oculus/touch_controller', "component_paths": ['/input/thumbstick/y'], "threshold": '0.30000001192092896', "axis_region": 'NEGATIVE'}), + ("reverb_g2", {"profile": '/interaction_profiles/hp/mixed_reality_controller', "component_paths": ['/input/thumbstick/y'], "threshold": '0.30000001192092896', "axis_region": 'NEGATIVE'}), + ("vive", {"profile": '/interaction_profiles/htc/vive_controller', "component_paths": ['/input/trackpad/y'], "threshold": '0.30000001192092896', "axis_region": 'NEGATIVE'}), + ("vive_cosmos", {"profile": '/interaction_profiles/htc/vive_cosmos_controller', "component_paths": ['/input/thumbstick/y'], "threshold": '0.30000001192092896', "axis_region": 'NEGATIVE'}), + ("vive_focus", {"profile": '/interaction_profiles/htc/vive_focus3_controller', "component_paths": ['/input/thumbstick/y'], "threshold": '0.30000001192092896', "axis_region": 'NEGATIVE'}), + ("wmr", {"profile": '/interaction_profiles/microsoft/motion_controller', "component_paths": ['/input/thumbstick/y'], "threshold": '0.30000001192092896', "axis_region": 'NEGATIVE'}), + ], + }, + ), + ("fly_left", {"type": 'FLOAT', "user_paths": ['/user/hand/left'], "op": 'wm.xr_navigation_fly', "op_mode": 'MODAL', "bimanual": 'False', "haptic_name": '', "haptic_match_user_paths": 'False', "haptic_duration": '0.0', "haptic_frequency": '0.0', "haptic_amplitude": '0.0', "haptic_mode": 
'PRESS'}, + {"op_properties": + [("mode", 'VIEWER_LEFT'), + ("lock_location_z", True), + ], + }, + {"bindings": + [("huawei", {"profile": '/interaction_profiles/huawei/controller', "component_paths": ['/input/trackpad/x'], "threshold": '0.30000001192092896', "axis_region": 'NEGATIVE'}), + ("index", {"profile": '/interaction_profiles/valve/index_controller', "component_paths": ['/input/thumbstick/x'], "threshold": '0.30000001192092896', "axis_region": 'NEGATIVE'}), + ("oculus", {"profile": '/interaction_profiles/oculus/touch_controller', "component_paths": ['/input/thumbstick/x'], "threshold": '0.30000001192092896', "axis_region": 'NEGATIVE'}), + ("reverb_g2", {"profile": '/interaction_profiles/hp/mixed_reality_controller', "component_paths": ['/input/thumbstick/x'], "threshold": '0.30000001192092896', "axis_region": 'NEGATIVE'}), + ("vive", {"profile": '/interaction_profiles/htc/vive_controller', "component_paths": ['/input/trackpad/x'], "threshold": '0.30000001192092896', "axis_region": 'NEGATIVE'}), + ("vive_cosmos", {"profile": '/interaction_profiles/htc/vive_cosmos_controller', "component_paths": ['/input/thumbstick/x'], "threshold": '0.30000001192092896', "axis_region": 'NEGATIVE'}), + ("vive_focus", {"profile": '/interaction_profiles/htc/vive_focus3_controller', "component_paths": ['/input/thumbstick/x'], "threshold": '0.30000001192092896', "axis_region": 'NEGATIVE'}), + ("wmr", {"profile": '/interaction_profiles/microsoft/motion_controller', "component_paths": ['/input/thumbstick/x'], "threshold": '0.30000001192092896', "axis_region": 'NEGATIVE'}), + ], + }, + ), + ("fly_right", {"type": 'FLOAT', "user_paths": ['/user/hand/left'], "op": 'wm.xr_navigation_fly', "op_mode": 'MODAL', "bimanual": 'False', "haptic_name": '', "haptic_match_user_paths": 'False', "haptic_duration": '0.0', "haptic_frequency": '0.0', "haptic_amplitude": '0.0', "haptic_mode": 'PRESS'}, + {"op_properties": + [("mode", 'VIEWER_RIGHT'), + ("lock_location_z", True), + ], + }, + {"bindings": + [("huawei", {"profile": '/interaction_profiles/huawei/controller', "component_paths": ['/input/trackpad/x'], "threshold": '0.30000001192092896', "axis_region": 'POSITIVE'}), + ("index", {"profile": '/interaction_profiles/valve/index_controller', "component_paths": ['/input/thumbstick/x'], "threshold": '0.30000001192092896', "axis_region": 'POSITIVE'}), + ("oculus", {"profile": '/interaction_profiles/oculus/touch_controller', "component_paths": ['/input/thumbstick/x'], "threshold": '0.30000001192092896', "axis_region": 'POSITIVE'}), + ("reverb_g2", {"profile": '/interaction_profiles/hp/mixed_reality_controller', "component_paths": ['/input/thumbstick/x'], "threshold": '0.30000001192092896', "axis_region": 'POSITIVE'}), + ("vive", {"profile": '/interaction_profiles/htc/vive_controller', "component_paths": ['/input/trackpad/x'], "threshold": '0.30000001192092896', "axis_region": 'POSITIVE'}), + ("vive_cosmos", {"profile": '/interaction_profiles/htc/vive_cosmos_controller', "component_paths": ['/input/thumbstick/x'], "threshold": '0.30000001192092896', "axis_region": 'POSITIVE'}), + ("vive_focus", {"profile": '/interaction_profiles/htc/vive_focus3_controller', "component_paths": ['/input/thumbstick/x'], "threshold": '0.30000001192092896', "axis_region": 'POSITIVE'}), + ("wmr", {"profile": '/interaction_profiles/microsoft/motion_controller', "component_paths": ['/input/thumbstick/x'], "threshold": '0.30000001192092896', "axis_region": 'POSITIVE'}), + ], + }, + ), + ("fly_up", {"type": 'FLOAT', "user_paths": ['/user/hand/right'], 
"op": 'wm.xr_navigation_fly', "op_mode": 'MODAL', "bimanual": 'False', "haptic_name": '', "haptic_match_user_paths": 'False', "haptic_duration": '0.0', "haptic_frequency": '0.0', "haptic_amplitude": '0.0', "haptic_mode": 'PRESS'}, + {"op_properties": + [("mode", 'UP'), + ("speed_min", 0.014), + ("speed_max", 0.042), + ], + }, + {"bindings": + [("huawei", {"profile": '/interaction_profiles/huawei/controller', "component_paths": ['/input/trackpad/y'], "threshold": '0.30000001192092896', "axis_region": 'POSITIVE'}), + ("index", {"profile": '/interaction_profiles/valve/index_controller', "component_paths": ['/input/thumbstick/y'], "threshold": '0.30000001192092896', "axis_region": 'POSITIVE'}), + ("oculus", {"profile": '/interaction_profiles/oculus/touch_controller', "component_paths": ['/input/thumbstick/y'], "threshold": '0.30000001192092896', "axis_region": 'POSITIVE'}), + ("reverb_g2", {"profile": '/interaction_profiles/hp/mixed_reality_controller', "component_paths": ['/input/thumbstick/y'], "threshold": '0.30000001192092896', "axis_region": 'POSITIVE'}), + ("vive", {"profile": '/interaction_profiles/htc/vive_controller', "component_paths": ['/input/trackpad/y'], "threshold": '0.30000001192092896', "axis_region": 'POSITIVE'}), + ("vive_cosmos", {"profile": '/interaction_profiles/htc/vive_cosmos_controller', "component_paths": ['/input/thumbstick/y'], "threshold": '0.30000001192092896', "axis_region": 'POSITIVE'}), + ("vive_focus", {"profile": '/interaction_profiles/htc/vive_focus3_controller', "component_paths": ['/input/thumbstick/y'], "threshold": '0.30000001192092896', "axis_region": 'POSITIVE'}), + ("wmr", {"profile": '/interaction_profiles/microsoft/motion_controller', "component_paths": ['/input/thumbstick/y'], "threshold": '0.30000001192092896', "axis_region": 'POSITIVE'}), + ], + }, + ), + ("fly_down", {"type": 'FLOAT', "user_paths": ['/user/hand/right'], "op": 'wm.xr_navigation_fly', "op_mode": 'MODAL', "bimanual": 'False', "haptic_name": '', "haptic_match_user_paths": 'False', "haptic_duration": '0.0', "haptic_frequency": '0.0', "haptic_amplitude": '0.0', "haptic_mode": 'PRESS'}, + {"op_properties": + [("mode", 'DOWN'), + ("speed_min", 0.014), + ("speed_max", 0.042), + ], + }, + {"bindings": + [("huawei", {"profile": '/interaction_profiles/huawei/controller', "component_paths": ['/input/trackpad/y'], "threshold": '0.30000001192092896', "axis_region": 'NEGATIVE'}), + ("index", {"profile": '/interaction_profiles/valve/index_controller', "component_paths": ['/input/thumbstick/y'], "threshold": '0.30000001192092896', "axis_region": 'NEGATIVE'}), + ("oculus", {"profile": '/interaction_profiles/oculus/touch_controller', "component_paths": ['/input/thumbstick/y'], "threshold": '0.30000001192092896', "axis_region": 'NEGATIVE'}), + ("reverb_g2", {"profile": '/interaction_profiles/hp/mixed_reality_controller', "component_paths": ['/input/thumbstick/y'], "threshold": '0.30000001192092896', "axis_region": 'NEGATIVE'}), + ("vive", {"profile": '/interaction_profiles/htc/vive_controller', "component_paths": ['/input/trackpad/y'], "threshold": '0.30000001192092896', "axis_region": 'NEGATIVE'}), + ("vive_cosmos", {"profile": '/interaction_profiles/htc/vive_cosmos_controller', "component_paths": ['/input/thumbstick/y'], "threshold": '0.30000001192092896', "axis_region": 'NEGATIVE'}), + ("vive_focus", {"profile": '/interaction_profiles/htc/vive_focus3_controller', "component_paths": ['/input/thumbstick/y'], "threshold": '0.30000001192092896', "axis_region": 'NEGATIVE'}), + ("wmr", {"profile": 
'/interaction_profiles/microsoft/motion_controller', "component_paths": ['/input/thumbstick/y'], "threshold": '0.30000001192092896', "axis_region": 'NEGATIVE'}), + ], + }, + ), + ("fly_turnleft", {"type": 'FLOAT', "user_paths": ['/user/hand/right'], "op": 'wm.xr_navigation_fly', "op_mode": 'MODAL', "bimanual": 'False', "haptic_name": '', "haptic_match_user_paths": 'False', "haptic_duration": '0.0', "haptic_frequency": '0.0', "haptic_amplitude": '0.0', "haptic_mode": 'PRESS'}, + {"op_properties": + [("mode", 'TURNLEFT'), + ("speed_min", 0.01), + ("speed_max", 0.03), + ], + }, + {"bindings": + [("huawei", {"profile": '/interaction_profiles/huawei/controller', "component_paths": ['/input/trackpad/x'], "threshold": '0.30000001192092896', "axis_region": 'NEGATIVE'}), + ("index", {"profile": '/interaction_profiles/valve/index_controller', "component_paths": ['/input/thumbstick/x'], "threshold": '0.30000001192092896', "axis_region": 'NEGATIVE'}), + ("oculus", {"profile": '/interaction_profiles/oculus/touch_controller', "component_paths": ['/input/thumbstick/x'], "threshold": '0.30000001192092896', "axis_region": 'NEGATIVE'}), + ("reverb_g2", {"profile": '/interaction_profiles/hp/mixed_reality_controller', "component_paths": ['/input/thumbstick/x'], "threshold": '0.30000001192092896', "axis_region": 'NEGATIVE'}), + ("vive", {"profile": '/interaction_profiles/htc/vive_controller', "component_paths": ['/input/trackpad/x'], "threshold": '0.30000001192092896', "axis_region": 'NEGATIVE'}), + ("vive_cosmos", {"profile": '/interaction_profiles/htc/vive_cosmos_controller', "component_paths": ['/input/thumbstick/x'], "threshold": '0.30000001192092896', "axis_region": 'NEGATIVE'}), + ("vive_focus", {"profile": '/interaction_profiles/htc/vive_focus3_controller', "component_paths": ['/input/thumbstick/x'], "threshold": '0.30000001192092896', "axis_region": 'NEGATIVE'}), + ("wmr", {"profile": '/interaction_profiles/microsoft/motion_controller', "component_paths": ['/input/thumbstick/x'], "threshold": '0.30000001192092896', "axis_region": 'NEGATIVE'}), + ], + }, + ), + ("fly_turnright", {"type": 'FLOAT', "user_paths": ['/user/hand/right'], "op": 'wm.xr_navigation_fly', "op_mode": 'MODAL', "bimanual": 'False', "haptic_name": '', "haptic_match_user_paths": 'False', "haptic_duration": '0.0', "haptic_frequency": '0.0', "haptic_amplitude": '0.0', "haptic_mode": 'PRESS'}, + {"op_properties": + [("mode", 'TURNRIGHT'), + ("speed_min", 0.01), + ("speed_max", 0.03), + ], + }, + {"bindings": + [("huawei", {"profile": '/interaction_profiles/huawei/controller', "component_paths": ['/input/trackpad/x'], "threshold": '0.30000001192092896', "axis_region": 'POSITIVE'}), + ("index", {"profile": '/interaction_profiles/valve/index_controller', "component_paths": ['/input/thumbstick/x'], "threshold": '0.30000001192092896', "axis_region": 'POSITIVE'}), + ("oculus", {"profile": '/interaction_profiles/oculus/touch_controller', "component_paths": ['/input/thumbstick/x'], "threshold": '0.30000001192092896', "axis_region": 'POSITIVE'}), + ("reverb_g2", {"profile": '/interaction_profiles/hp/mixed_reality_controller', "component_paths": ['/input/thumbstick/x'], "threshold": '0.30000001192092896', "axis_region": 'POSITIVE'}), + ("vive", {"profile": '/interaction_profiles/htc/vive_controller', "component_paths": ['/input/trackpad/x'], "threshold": '0.30000001192092896', "axis_region": 'POSITIVE'}), + ("vive_cosmos", {"profile": '/interaction_profiles/htc/vive_cosmos_controller', "component_paths": ['/input/thumbstick/x'], "threshold": 
'0.30000001192092896', "axis_region": 'POSITIVE'}), + ("vive_focus", {"profile": '/interaction_profiles/htc/vive_focus3_controller', "component_paths": ['/input/thumbstick/x'], "threshold": '0.30000001192092896', "axis_region": 'POSITIVE'}), + ("wmr", {"profile": '/interaction_profiles/microsoft/motion_controller', "component_paths": ['/input/thumbstick/x'], "threshold": '0.30000001192092896', "axis_region": 'POSITIVE'}), + ], + }, + ), + ("nav_reset", {"type": 'FLOAT', "user_paths": ['/user/hand/left', '/user/hand/right'], "op": 'wm.xr_navigation_reset', "op_mode": 'PRESS', "bimanual": 'False', "haptic_name": 'haptic', "haptic_match_user_paths": 'True', "haptic_duration": '0.30000001192092896', "haptic_frequency": '3000.0', "haptic_amplitude": '0.5', "haptic_mode": 'PRESS'}, + {"op_properties": + [("location", False), + ("rotation", False), + ("scale", True), + ], + }, + {"bindings": + [("huawei", {"profile": '/interaction_profiles/huawei/controller', "component_paths": ['/input/back/click', '/input/back/click'], "threshold": '0.30000001192092896', "axis_region": 'ANY'}), + ("index", {"profile": '/interaction_profiles/valve/index_controller', "component_paths": ['/input/a/click', '/input/a/click'], "threshold": '0.30000001192092896', "axis_region": 'ANY'}), + ("oculus", {"profile": '/interaction_profiles/oculus/touch_controller', "component_paths": ['/input/x/click', '/input/a/click'], "threshold": '0.30000001192092896', "axis_region": 'ANY'}), + ("reverb_g2", {"profile": '/interaction_profiles/hp/mixed_reality_controller', "component_paths": ['/input/x/click', '/input/a/click'], "threshold": '0.30000001192092896', "axis_region": 'ANY'}), + ("vive", {"profile": '/interaction_profiles/htc/vive_controller', "component_paths": ['/input/menu/click', '/input/menu/click'], "threshold": '0.30000001192092896', "axis_region": 'ANY'}), + ("vive_cosmos", {"profile": '/interaction_profiles/htc/vive_cosmos_controller', "component_paths": ['/input/x/click', '/input/a/click'], "threshold": '0.30000001192092896', "axis_region": 'ANY'}), + ("vive_focus", {"profile": '/interaction_profiles/htc/vive_focus3_controller', "component_paths": ['/input/x/click', '/input/a/click'], "threshold": '0.30000001192092896', "axis_region": 'ANY'}), + ("wmr", {"profile": '/interaction_profiles/microsoft/motion_controller', "component_paths": ['/input/menu/click', '/input/menu/click'], "threshold": '0.30000001192092896', "axis_region": 'ANY'}), + ], + }, + ), + ("haptic", {"type": 'VIBRATION', "user_paths": ['/user/hand/left', '/user/hand/right']}, None, + {"bindings": + [("huawei", {"profile": '/interaction_profiles/huawei/controller', "component_paths": ['/output/haptic', '/output/haptic']}), + ("index", {"profile": '/interaction_profiles/valve/index_controller', "component_paths": ['/output/haptic', '/output/haptic']}), + ("oculus", {"profile": '/interaction_profiles/oculus/touch_controller', "component_paths": ['/output/haptic', '/output/haptic']}), + ("reverb_g2", {"profile": '/interaction_profiles/hp/mixed_reality_controller', "component_paths": ['/output/haptic', '/output/haptic']}), + ("simple", {"profile": '/interaction_profiles/khr/simple_controller', "component_paths": ['/output/haptic', '/output/haptic']}), + ("vive", {"profile": '/interaction_profiles/htc/vive_controller', "component_paths": ['/output/haptic', '/output/haptic']}), + ("vive_cosmos", {"profile": '/interaction_profiles/htc/vive_cosmos_controller', "component_paths": ['/output/haptic', '/output/haptic']}), + ("vive_focus", {"profile": 
'/interaction_profiles/htc/vive_focus3_controller', "component_paths": ['/output/haptic', '/output/haptic']}), + ("wmr", {"profile": '/interaction_profiles/microsoft/motion_controller', "component_paths": ['/output/haptic', '/output/haptic']}), + ], + }, + ), + ], + }, + ), + ("blender_default_gamepad", + {"items": + [("teleport", {"type": 'FLOAT', "user_paths": ['/user/gamepad'], "op": 'wm.xr_navigation_teleport', "op_mode": 'MODAL', "bimanual": 'False', "haptic_name": '', "haptic_match_user_paths": 'False', "haptic_duration": '0.0', "haptic_frequency": '0.0', "haptic_amplitude": '0.0', "haptic_mode": 'PRESS'}, + {"op_properties": + [("interpolation", 0.9), + ("from_viewer", True), + ("color", (0.0, 1.0, 1.0, 1.0)), + ], + }, + {"bindings": + [("gamepad", {"profile": '/interaction_profiles/microsoft/xbox_controller', "component_paths": ['/input/trigger_right/value'], "threshold": '0.30000001192092896', "axis_region": 'ANY'}), + ], + }, + ), + ("fly", {"type": 'FLOAT', "user_paths": ['/user/gamepad'], "op": 'wm.xr_navigation_fly', "op_mode": 'MODAL', "bimanual": 'False', "haptic_name": '', "haptic_match_user_paths": 'False', "haptic_duration": '0.0', "haptic_frequency": '0.0', "haptic_amplitude": '0.0', "haptic_mode": 'PRESS'}, None, + {"bindings": + [("gamepad", {"profile": '/interaction_profiles/microsoft/xbox_controller', "component_paths": ['/input/trigger_left/value'], "threshold": '0.30000001192092896', "axis_region": 'ANY'}), + ], + }, + ), + ("fly_forward", {"type": 'FLOAT', "user_paths": ['/user/gamepad'], "op": 'wm.xr_navigation_fly', "op_mode": 'MODAL', "bimanual": 'False', "haptic_name": '', "haptic_match_user_paths": 'False', "haptic_duration": '0.0', "haptic_frequency": '0.0', "haptic_amplitude": '0.0', "haptic_mode": 'PRESS'}, + {"op_properties": + [("mode", 'VIEWER_FORWARD'), + ("lock_location_z", True), + ], + }, + {"bindings": + [("gamepad", {"profile": '/interaction_profiles/microsoft/xbox_controller', "component_paths": ['/input/thumbstick_left/y'], "threshold": '0.30000001192092896', "axis_region": 'POSITIVE'}), + ], + }, + ), + ("fly_back", {"type": 'FLOAT', "user_paths": ['/user/gamepad'], "op": 'wm.xr_navigation_fly', "op_mode": 'MODAL', "bimanual": 'False', "haptic_name": '', "haptic_match_user_paths": 'False', "haptic_duration": '0.0', "haptic_frequency": '0.0', "haptic_amplitude": '0.0', "haptic_mode": 'PRESS'}, + {"op_properties": + [("mode", 'VIEWER_BACK'), + ("lock_location_z", True), + ], + }, + {"bindings": + [("gamepad", {"profile": '/interaction_profiles/microsoft/xbox_controller', "component_paths": ['/input/thumbstick_left/y'], "threshold": '0.30000001192092896', "axis_region": 'NEGATIVE'}), + ], + }, + ), + ("fly_left", {"type": 'FLOAT', "user_paths": ['/user/gamepad'], "op": 'wm.xr_navigation_fly', "op_mode": 'MODAL', "bimanual": 'False', "haptic_name": '', "haptic_match_user_paths": 'False', "haptic_duration": '0.0', "haptic_frequency": '0.0', "haptic_amplitude": '0.0', "haptic_mode": 'PRESS'}, + {"op_properties": + [("mode", 'VIEWER_LEFT'), + ("lock_location_z", True), + ], + }, + {"bindings": + [("gamepad", {"profile": '/interaction_profiles/microsoft/xbox_controller', "component_paths": ['/input/thumbstick_left/x'], "threshold": '0.30000001192092896', "axis_region": 'NEGATIVE'}), + ], + }, + ), + ("fly_right", {"type": 'FLOAT', "user_paths": ['/user/gamepad'], "op": 'wm.xr_navigation_fly', "op_mode": 'MODAL', "bimanual": 'False', "haptic_name": '', "haptic_match_user_paths": 'False', "haptic_duration": '0.0', "haptic_frequency": '0.0', 
"haptic_amplitude": '0.0', "haptic_mode": 'PRESS'}, + {"op_properties": + [("mode", 'VIEWER_RIGHT'), + ("lock_location_z", True), + ], + }, + {"bindings": + [("gamepad", {"profile": '/interaction_profiles/microsoft/xbox_controller', "component_paths": ['/input/thumbstick_left/x'], "threshold": '0.30000001192092896', "axis_region": 'POSITIVE'}), + ], + }, + ), + ("fly_up", {"type": 'FLOAT', "user_paths": ['/user/gamepad'], "op": 'wm.xr_navigation_fly', "op_mode": 'MODAL', "bimanual": 'False', "haptic_name": '', "haptic_match_user_paths": 'False', "haptic_duration": '0.0', "haptic_frequency": '0.0', "haptic_amplitude": '0.0', "haptic_mode": 'PRESS'}, + {"op_properties": + [("mode", 'UP'), + ("speed_min", 0.014), + ("speed_max", 0.042), + ], + }, + {"bindings": + [("gamepad", {"profile": '/interaction_profiles/microsoft/xbox_controller', "component_paths": ['/input/thumbstick_right/y'], "threshold": '0.30000001192092896', "axis_region": 'POSITIVE'}), + ], + }, + ), + ("fly_down", {"type": 'FLOAT', "user_paths": ['/user/gamepad'], "op": 'wm.xr_navigation_fly', "op_mode": 'MODAL', "bimanual": 'False', "haptic_name": '', "haptic_match_user_paths": 'False', "haptic_duration": '0.0', "haptic_frequency": '0.0', "haptic_amplitude": '0.0', "haptic_mode": 'PRESS'}, + {"op_properties": + [("mode", 'DOWN'), + ("speed_min", 0.014), + ("speed_max", 0.042), + ], + }, + {"bindings": + [("gamepad", {"profile": '/interaction_profiles/microsoft/xbox_controller', "component_paths": ['/input/thumbstick_right/y'], "threshold": '0.30000001192092896', "axis_region": 'NEGATIVE'}), + ], + }, + ), + ("fly_turnleft", {"type": 'FLOAT', "user_paths": ['/user/gamepad'], "op": 'wm.xr_navigation_fly', "op_mode": 'MODAL', "bimanual": 'False', "haptic_name": '', "haptic_match_user_paths": 'False', "haptic_duration": '0.0', "haptic_frequency": '0.0', "haptic_amplitude": '0.0', "haptic_mode": 'PRESS'}, + {"op_properties": + [("mode", 'TURNLEFT'), + ("speed_min", 0.01), + ("speed_max", 0.03), + ], + }, + {"bindings": + [("gamepad", {"profile": '/interaction_profiles/microsoft/xbox_controller', "component_paths": ['/input/thumbstick_right/x'], "threshold": '0.30000001192092896', "axis_region": 'NEGATIVE'}), + ], + }, + ), + ("fly_turnright", {"type": 'FLOAT', "user_paths": ['/user/gamepad'], "op": 'wm.xr_navigation_fly', "op_mode": 'MODAL', "bimanual": 'False', "haptic_name": '', "haptic_match_user_paths": 'False', "haptic_duration": '0.0', "haptic_frequency": '0.0', "haptic_amplitude": '0.0', "haptic_mode": 'PRESS'}, + {"op_properties": + [("mode", 'TURNRIGHT'), + ("speed_min", 0.01), + ("speed_max", 0.03), + ], + }, + {"bindings": + [("gamepad", {"profile": '/interaction_profiles/microsoft/xbox_controller', "component_paths": ['/input/thumbstick_right/x'], "threshold": '0.30000001192092896', "axis_region": 'POSITIVE'}), + ], + }, + ), + ("nav_reset", {"type": 'FLOAT', "user_paths": ['/user/gamepad'], "op": 'wm.xr_navigation_reset', "op_mode": 'PRESS', "bimanual": 'False', "haptic_name": 'haptic_right', "haptic_match_user_paths": 'True', "haptic_duration": '0.30000001192092896', "haptic_frequency": '3000.0', "haptic_amplitude": '0.5', "haptic_mode": 'PRESS'}, + {"op_properties": + [("location", False), + ("rotation", False), + ("scale", True), + ], + }, + {"bindings": + [("gamepad", {"profile": '/interaction_profiles/microsoft/xbox_controller', "component_paths": ['/input/a/click'], "threshold": '0.30000001192092896', "axis_region": 'ANY'}), + ], + }, + ), + ("haptic_left", {"type": 'VIBRATION', "user_paths": ['/user/gamepad']}, 
None, + {"bindings": + [("gamepad", {"profile": '/interaction_profiles/microsoft/xbox_controller', "component_paths": ['/output/haptic_left']}), + ], + }, + ), + ("haptic_right", {"type": 'VIBRATION', "user_paths": ['/user/gamepad']}, None, + {"bindings": + [("gamepad", {"profile": '/interaction_profiles/microsoft/xbox_controller', "component_paths": ['/output/haptic_right']}), + ], + }, + ), + ("haptic_lefttrigger", {"type": 'VIBRATION', "user_paths": ['/user/gamepad']}, None, + {"bindings": + [("gamepad", {"profile": '/interaction_profiles/microsoft/xbox_controller', "component_paths": ['/output/haptic_left_trigger']}), + ], + }, + ), + ("haptic_righttrigger", {"type": 'VIBRATION', "user_paths": ['/user/gamepad']}, None, + {"bindings": + [("gamepad", {"profile": '/interaction_profiles/microsoft/xbox_controller', "component_paths": ['/output/haptic_right_trigger']}), + ], + }, + ), + ], + }, + ), + ] + + +if __name__ == "__main__": + # Only add keywords that are supported. + from bpy.app import version as blender_version + keywords = {} + if blender_version >= (3, 0, 0): + keywords["actionconfig_version"] = actionconfig_version + import os + from viewport_vr_preview.io import actionconfig_import_from_data + actionconfig_import_from_data( + os.path.splitext(os.path.basename(__file__))[0], + actionconfig_data, + **keywords, + ) diff --git a/scripts/addons_core/viewport_vr_preview/defaults.py b/scripts/addons_core/viewport_vr_preview/defaults.py new file mode 100644 index 00000000000..efbc88eee59 --- /dev/null +++ b/scripts/addons_core/viewport_vr_preview/defaults.py @@ -0,0 +1,1501 @@ +# SPDX-FileCopyrightText: 2021-2022 Blender Foundation +# +# SPDX-License-Identifier: GPL-2.0-or-later + +if "bpy" in locals(): + import importlib + importlib.reload(action_map) +else: + from . import action_map + +import bpy +from bpy.app.handlers import persistent +from enum import Enum +import math +import os.path + + +# Default action maps. +class VRDefaultActionmaps(Enum): + DEFAULT = "blender_default" + GAMEPAD = "blender_default_gamepad" + + +# Default actions. +class VRDefaultActions(Enum): + CONTROLLER_GRIP = "controller_grip" + CONTROLLER_AIM = "controller_aim" + TELEPORT = "teleport" + NAV_GRAB = "nav_grab" + FLY = "fly" + FLY_FORWARD = "fly_forward" + FLY_BACK = "fly_back" + FLY_LEFT = "fly_left" + FLY_RIGHT = "fly_right" + FLY_UP = "fly_up" + FLY_DOWN = "fly_down" + FLY_TURNLEFT = "fly_turnleft" + FLY_TURNRIGHT = "fly_turnright" + NAV_RESET = "nav_reset" + HAPTIC = "haptic" + HAPTIC_LEFT = "haptic_left" + HAPTIC_RIGHT = "haptic_right" + HAPTIC_LEFTTRIGGER = "haptic_lefttrigger" + HAPTIC_RIGHTTRIGGER = "haptic_righttrigger" + + +# Default action bindings. 
+class VRDefaultActionbindings(Enum): + GAMEPAD = "gamepad" + HUAWEI = "huawei" + INDEX = "index" + OCULUS = "oculus" + REVERB_G2 = "reverb_g2" + SIMPLE = "simple" + VIVE = "vive" + VIVE_COSMOS = "vive_cosmos" + VIVE_FOCUS = "vive_focus" + WMR = "wmr" + + +class VRDefaultActionprofiles(Enum): + GAMEPAD = "/interaction_profiles/microsoft/xbox_controller" + HUAWEI = "/interaction_profiles/huawei/controller" + INDEX = "/interaction_profiles/valve/index_controller" + OCULUS = "/interaction_profiles/oculus/touch_controller" + REVERB_G2 = "/interaction_profiles/hp/mixed_reality_controller" + SIMPLE = "/interaction_profiles/khr/simple_controller" + VIVE = "/interaction_profiles/htc/vive_controller" + VIVE_COSMOS = "/interaction_profiles/htc/vive_cosmos_controller" + VIVE_FOCUS = "/interaction_profiles/htc/vive_focus3_controller" + WMR = "/interaction_profiles/microsoft/motion_controller" + + +def vr_defaults_actionmap_add(session_state, name): + am = session_state.actionmaps.new(session_state, name, True) + + return am + + +def vr_defaults_action_add(am, + name, + user_paths, + op, + op_mode, + bimanual, + haptic_name, + haptic_match_user_paths, + haptic_duration, + haptic_frequency, + haptic_amplitude, + haptic_mode): + + ami = am.actionmap_items.new(name, True) + if ami: + ami.type = 'FLOAT' + for path in user_paths: + ami.user_paths.new(path) + ami.op = op + ami.op_mode = op_mode + ami.bimanual = bimanual + ami.haptic_name = haptic_name + ami.haptic_match_user_paths = haptic_match_user_paths + ami.haptic_duration = haptic_duration + ami.haptic_frequency = haptic_frequency + ami.haptic_amplitude = haptic_amplitude + ami.haptic_mode = haptic_mode + + return ami + + +def vr_defaults_pose_action_add(am, + name, + user_paths, + is_controller_grip, + is_controller_aim): + ami = am.actionmap_items.new(name, True) + if ami: + ami.type = 'POSE' + for path in user_paths: + ami.user_paths.new(path) + ami.pose_is_controller_grip = is_controller_grip + ami.pose_is_controller_aim = is_controller_aim + + return ami + + +def vr_defaults_haptic_action_add(am, + name, + user_paths): + ami = am.actionmap_items.new(name, True) + if ami: + ami.type = 'VIBRATION' + for path in user_paths: + ami.user_paths.new(path) + + return ami + + +def vr_defaults_actionbinding_add(ami, + name, + profile, + component_paths, + threshold, + axis0_region, + axis1_region): + amb = ami.bindings.new(name, True) + if amb: + amb.profile = profile + for path in component_paths: + amb.component_paths.new(path) + amb.threshold = threshold + amb.axis0_region = axis0_region + amb.axis1_region = axis1_region + + return amb + + +def vr_defaults_pose_actionbinding_add(ami, + name, + profile, + component_paths, + location, + rotation): + amb = ami.bindings.new(name, True) + if amb: + amb.profile = profile + for path in component_paths: + amb.component_paths.new(path) + amb.pose_location = location + amb.pose_rotation = rotation + + return amb + + +def vr_defaults_haptic_actionbinding_add(ami, + name, + profile, + component_paths): + amb = ami.bindings.new(name, True) + if amb: + amb.profile = profile + for path in component_paths: + amb.component_paths.new(path) + + return amb + + +def vr_defaults_create_default(session_state): + am = vr_defaults_actionmap_add(session_state, + VRDefaultActionmaps.DEFAULT.value) + if not am: + return + + ami = vr_defaults_pose_action_add(am, + VRDefaultActions.CONTROLLER_GRIP.value, + ["/user/hand/left", + "/user/hand/right"], + True, + False) + if ami: + vr_defaults_pose_actionbinding_add(ami, + 
VRDefaultActionbindings.HUAWEI.value, + VRDefaultActionprofiles.HUAWEI.value, + ["/input/grip/pose", + "/input/grip/pose"], + (0, 0, 0), + (0, 0, 0)) + vr_defaults_pose_actionbinding_add(ami, + VRDefaultActionbindings.INDEX.value, + VRDefaultActionprofiles.INDEX.value, + ["/input/grip/pose", + "/input/grip/pose"], + (0, 0, 0), + (0, 0, 0)) + vr_defaults_pose_actionbinding_add(ami, + VRDefaultActionbindings.OCULUS.value, + VRDefaultActionprofiles.OCULUS.value, + ["/input/grip/pose", + "/input/grip/pose"], + (0, 0, 0), + (0, 0, 0)) + vr_defaults_pose_actionbinding_add(ami, + VRDefaultActionbindings.REVERB_G2.value, + VRDefaultActionprofiles.REVERB_G2.value, + ["/input/grip/pose", + "/input/grip/pose"], + (0, 0, 0), + (0, 0, 0)) + vr_defaults_pose_actionbinding_add(ami, + VRDefaultActionbindings.SIMPLE.value, + VRDefaultActionprofiles.SIMPLE.value, + ["/input/grip/pose", + "/input/grip/pose"], + (0, 0, 0), + (0, 0, 0)) + vr_defaults_pose_actionbinding_add(ami, + VRDefaultActionbindings.VIVE.value, + VRDefaultActionprofiles.VIVE.value, + ["/input/grip/pose", + "/input/grip/pose"], + (0, 0, 0), + (0, 0, 0)) + vr_defaults_pose_actionbinding_add(ami, + VRDefaultActionbindings.VIVE_COSMOS.value, + VRDefaultActionprofiles.VIVE_COSMOS.value, + ["/input/grip/pose", + "/input/grip/pose"], + (0, 0, 0), + (0, 0, 0)) + vr_defaults_pose_actionbinding_add(ami, + VRDefaultActionbindings.VIVE_FOCUS.value, + VRDefaultActionprofiles.VIVE_FOCUS.value, + ["/input/grip/pose", + "/input/grip/pose"], + (0, 0, 0), + (0, 0, 0)) + vr_defaults_pose_actionbinding_add(ami, + VRDefaultActionbindings.WMR.value, + VRDefaultActionprofiles.WMR.value, + ["/input/grip/pose", + "/input/grip/pose"], + (0, 0, 0), + (0, 0, 0)) + + ami = vr_defaults_pose_action_add(am, + VRDefaultActions.CONTROLLER_AIM.value, + ["/user/hand/left", + "/user/hand/right"], + False, + True) + if ami: + vr_defaults_pose_actionbinding_add(ami, + VRDefaultActionbindings.HUAWEI.value, + VRDefaultActionprofiles.HUAWEI.value, + ["/input/aim/pose", + "/input/aim/pose"], + (0, 0, 0), + (0, 0, 0)) + vr_defaults_pose_actionbinding_add(ami, + VRDefaultActionbindings.INDEX.value, + VRDefaultActionprofiles.INDEX.value, + ["/input/aim/pose", + "/input/aim/pose"], + (0, 0, 0), + (0, 0, 0)) + vr_defaults_pose_actionbinding_add(ami, + VRDefaultActionbindings.OCULUS.value, + VRDefaultActionprofiles.OCULUS.value, + ["/input/aim/pose", + "/input/aim/pose"], + (0, 0, 0), + (0, 0, 0)) + vr_defaults_pose_actionbinding_add(ami, + VRDefaultActionbindings.REVERB_G2.value, + VRDefaultActionprofiles.REVERB_G2.value, + ["/input/aim/pose", + "/input/aim/pose"], + (0, 0, 0), + (0, 0, 0)) + vr_defaults_pose_actionbinding_add(ami, + VRDefaultActionbindings.SIMPLE.value, + VRDefaultActionprofiles.SIMPLE.value, + ["/input/aim/pose", + "/input/aim/pose"], + (0, 0, 0), + (0, 0, 0)) + vr_defaults_pose_actionbinding_add(ami, + VRDefaultActionbindings.VIVE.value, + VRDefaultActionprofiles.VIVE.value, + ["/input/aim/pose", + "/input/aim/pose"], + (0, 0, 0), + (0, 0, 0)) + vr_defaults_pose_actionbinding_add(ami, + VRDefaultActionbindings.VIVE_COSMOS.value, + VRDefaultActionprofiles.VIVE_COSMOS.value, + ["/input/aim/pose", + "/input/aim/pose"], + (0, 0, 0), + (0, 0, 0)) + vr_defaults_pose_actionbinding_add(ami, + VRDefaultActionbindings.VIVE_FOCUS.value, + VRDefaultActionprofiles.VIVE_FOCUS.value, + ["/input/aim/pose", + "/input/aim/pose"], + (0, 0, 0), + (0, 0, 0)) + vr_defaults_pose_actionbinding_add(ami, + VRDefaultActionbindings.WMR.value, + VRDefaultActionprofiles.WMR.value, + 
["/input/aim/pose", + "/input/aim/pose"], + (0, 0, 0), + (0, 0, 0)) + + ami = vr_defaults_action_add(am, + VRDefaultActions.TELEPORT.value, + ["/user/hand/left", + "/user/hand/right"], + "wm.xr_navigation_teleport", + 'MODAL', + False, + "", + False, + 0.0, + 0.0, + 0.0, + 'PRESS') + if ami: + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.HUAWEI.value, + VRDefaultActionprofiles.HUAWEI.value, + ["/input/trigger/value", + "/input/trigger/value"], + 0.3, + 'ANY', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.INDEX.value, + VRDefaultActionprofiles.INDEX.value, + ["/input/trigger/value", + "/input/trigger/value"], + 0.3, + 'ANY', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.OCULUS.value, + VRDefaultActionprofiles.OCULUS.value, + ["/input/trigger/value", + "/input/trigger/value"], + 0.3, + 'ANY', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.REVERB_G2.value, + VRDefaultActionprofiles.REVERB_G2.value, + ["/input/trigger/value", + "/input/trigger/value"], + 0.3, + 'ANY', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.SIMPLE.value, + VRDefaultActionprofiles.SIMPLE.value, + ["/input/select/click", + "/input/select/click"], + 0.3, + 'ANY', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.VIVE.value, + VRDefaultActionprofiles.VIVE.value, + ["/input/trigger/value", + "/input/trigger/value"], + 0.3, + 'ANY', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.VIVE_COSMOS.value, + VRDefaultActionprofiles.VIVE_COSMOS.value, + ["/input/trigger/value", + "/input/trigger/value"], + 0.3, + 'ANY', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.VIVE_FOCUS.value, + VRDefaultActionprofiles.VIVE_FOCUS.value, + ["/input/trigger/value", + "/input/trigger/value"], + 0.3, + 'ANY', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.WMR.value, + VRDefaultActionprofiles.WMR.value, + ["/input/trigger/value", + "/input/trigger/value"], + 0.3, + 'ANY', + 'ANY') + + ami = vr_defaults_action_add(am, + VRDefaultActions.NAV_GRAB.value, + ["/user/hand/left", + "/user/hand/right"], + "wm.xr_navigation_grab", + 'MODAL', + True, + "", + False, + 0.0, + 0.0, + 0.0, + 'PRESS') + if ami: + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.HUAWEI.value, + VRDefaultActionprofiles.HUAWEI.value, + ["/input/trackpad/click", + "/input/trackpad/click"], + 0.3, + 'ANY', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.INDEX.value, + VRDefaultActionprofiles.INDEX.value, + ["/input/squeeze/force", + "/input/squeeze/force"], + 0.5, + 'ANY', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.OCULUS.value, + VRDefaultActionprofiles.OCULUS.value, + ["/input/squeeze/value", + "/input/squeeze/value"], + 0.3, + 'ANY', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.REVERB_G2.value, + VRDefaultActionprofiles.REVERB_G2.value, + ["/input/squeeze/value", + "/input/squeeze/value"], + 0.3, + 'ANY', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.SIMPLE.value, + VRDefaultActionprofiles.SIMPLE.value, + ["/input/menu/click", + "/input/menu/click"], + 0.3, + 'ANY', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.VIVE.value, + VRDefaultActionprofiles.VIVE.value, + ["/input/squeeze/click", + "/input/squeeze/click"], + 0.3, + 'ANY', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.VIVE_COSMOS.value, + 
VRDefaultActionprofiles.VIVE_COSMOS.value, + ["/input/squeeze/click", + "/input/squeeze/click"], + 0.3, + 'ANY', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.VIVE_FOCUS.value, + VRDefaultActionprofiles.VIVE_FOCUS.value, + ["/input/squeeze/click", + "/input/squeeze/click"], + 0.3, + 'ANY', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.WMR.value, + VRDefaultActionprofiles.WMR.value, + ["/input/squeeze/click", + "/input/squeeze/click"], + 0.3, + 'ANY', + 'ANY') + + ami = vr_defaults_action_add(am, + VRDefaultActions.FLY_FORWARD.value, + ["/user/hand/left"], + "wm.xr_navigation_fly", + 'MODAL', + False, + "", + False, + 0.0, + 0.0, + 0.0, + 'PRESS') + if ami: + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.HUAWEI.value, + VRDefaultActionprofiles.HUAWEI.value, + ["/input/trackpad/y"], + 0.3, + 'POSITIVE', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.INDEX.value, + VRDefaultActionprofiles.INDEX.value, + ["/input/thumbstick/y"], + 0.3, + 'POSITIVE', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.OCULUS.value, + VRDefaultActionprofiles.OCULUS.value, + ["/input/thumbstick/y"], + 0.3, + 'POSITIVE', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.REVERB_G2.value, + VRDefaultActionprofiles.REVERB_G2.value, + ["/input/thumbstick/y"], + 0.3, + 'POSITIVE', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.VIVE.value, + VRDefaultActionprofiles.VIVE.value, + ["/input/trackpad/y"], + 0.3, + 'POSITIVE', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.VIVE_COSMOS.value, + VRDefaultActionprofiles.VIVE_COSMOS.value, + ["/input/thumbstick/y"], + 0.3, + 'POSITIVE', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.VIVE_FOCUS.value, + VRDefaultActionprofiles.VIVE_FOCUS.value, + ["/input/thumbstick/y"], + 0.3, + 'POSITIVE', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.WMR.value, + VRDefaultActionprofiles.WMR.value, + ["/input/thumbstick/y"], + 0.3, + 'POSITIVE', + 'ANY') + + ami = vr_defaults_action_add(am, + VRDefaultActions.FLY_BACK.value, + ["/user/hand/left"], + "wm.xr_navigation_fly", + 'MODAL', + False, + "", + False, + 0.0, + 0.0, + 0.0, + 'PRESS') + if ami: + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.HUAWEI.value, + VRDefaultActionprofiles.HUAWEI.value, + ["/input/trackpad/y"], + 0.3, + 'NEGATIVE', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.INDEX.value, + VRDefaultActionprofiles.INDEX.value, + ["/input/thumbstick/y"], + 0.3, + 'NEGATIVE', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.OCULUS.value, + VRDefaultActionprofiles.OCULUS.value, + ["/input/thumbstick/y"], + 0.3, + 'NEGATIVE', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.REVERB_G2.value, + VRDefaultActionprofiles.REVERB_G2.value, + ["/input/thumbstick/y"], + 0.3, + 'NEGATIVE', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.VIVE.value, + VRDefaultActionprofiles.VIVE.value, + ["/input/trackpad/y"], + 0.3, + 'NEGATIVE', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.VIVE_COSMOS.value, + VRDefaultActionprofiles.VIVE_COSMOS.value, + ["/input/thumbstick/y"], + 0.3, + 'NEGATIVE', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.VIVE_FOCUS.value, + VRDefaultActionprofiles.VIVE_FOCUS.value, + ["/input/thumbstick/y"], + 0.3, + 'NEGATIVE', + 'ANY') + 
vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.WMR.value, + VRDefaultActionprofiles.WMR.value, + ["/input/thumbstick/y"], + 0.3, + 'NEGATIVE', + 'ANY') + + ami = vr_defaults_action_add(am, + VRDefaultActions.FLY_LEFT.value, + ["/user/hand/left"], + "wm.xr_navigation_fly", + 'MODAL', + False, + "", + False, + 0.0, + 0.0, + 0.0, + 'PRESS') + if ami: + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.HUAWEI.value, + VRDefaultActionprofiles.HUAWEI.value, + ["/input/trackpad/x"], + 0.3, + 'NEGATIVE', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.INDEX.value, + VRDefaultActionprofiles.INDEX.value, + ["/input/thumbstick/x"], + 0.3, + 'NEGATIVE', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.OCULUS.value, + VRDefaultActionprofiles.OCULUS.value, + ["/input/thumbstick/x"], + 0.3, + 'NEGATIVE', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.REVERB_G2.value, + VRDefaultActionprofiles.REVERB_G2.value, + ["/input/thumbstick/x"], + 0.3, + 'NEGATIVE', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.VIVE.value, + VRDefaultActionprofiles.VIVE.value, + ["/input/trackpad/x"], + 0.3, + 'NEGATIVE', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.VIVE_COSMOS.value, + VRDefaultActionprofiles.VIVE_COSMOS.value, + ["/input/thumbstick/x"], + 0.3, + 'NEGATIVE', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.VIVE_FOCUS.value, + VRDefaultActionprofiles.VIVE_FOCUS.value, + ["/input/thumbstick/x"], + 0.3, + 'NEGATIVE', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.WMR.value, + VRDefaultActionprofiles.WMR.value, + ["/input/thumbstick/x"], + 0.3, + 'NEGATIVE', + 'ANY') + + ami = vr_defaults_action_add(am, + VRDefaultActions.FLY_RIGHT.value, + ["/user/hand/left"], + "wm.xr_navigation_fly", + 'MODAL', + False, + "", + False, + 0.0, + 0.0, + 0.0, + 'PRESS') + if ami: + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.HUAWEI.value, + VRDefaultActionprofiles.HUAWEI.value, + ["/input/trackpad/x"], + 0.3, + 'POSITIVE', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.INDEX.value, + VRDefaultActionprofiles.INDEX.value, + ["/input/thumbstick/x"], + 0.3, + 'POSITIVE', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.OCULUS.value, + VRDefaultActionprofiles.OCULUS.value, + ["/input/thumbstick/x"], + 0.3, + 'POSITIVE', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.REVERB_G2.value, + VRDefaultActionprofiles.REVERB_G2.value, + ["/input/thumbstick/x"], + 0.3, + 'POSITIVE', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.VIVE.value, + VRDefaultActionprofiles.VIVE.value, + ["/input/trackpad/x"], + 0.3, + 'POSITIVE', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.VIVE_COSMOS.value, + VRDefaultActionprofiles.VIVE_COSMOS.value, + ["/input/thumbstick/x"], + 0.3, + 'POSITIVE', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.VIVE_FOCUS.value, + VRDefaultActionprofiles.VIVE_FOCUS.value, + ["/input/thumbstick/x"], + 0.3, + 'POSITIVE', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.WMR.value, + VRDefaultActionprofiles.WMR.value, + ["/input/thumbstick/x"], + 0.3, + 'POSITIVE', + 'ANY') + + ami = vr_defaults_action_add(am, + VRDefaultActions.FLY_UP.value, + ["/user/hand/right"], + "wm.xr_navigation_fly", + 'MODAL', + False, + "", + False, + 0.0, + 0.0, + 0.0, + 'PRESS') + if 
ami: + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.HUAWEI.value, + VRDefaultActionprofiles.HUAWEI.value, + ["/input/trackpad/y"], + 0.3, + 'POSITIVE', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.INDEX.value, + VRDefaultActionprofiles.INDEX.value, + ["/input/thumbstick/y"], + 0.3, + 'POSITIVE', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.OCULUS.value, + VRDefaultActionprofiles.OCULUS.value, + ["/input/thumbstick/y"], + 0.3, + 'POSITIVE', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.REVERB_G2.value, + VRDefaultActionprofiles.REVERB_G2.value, + ["/input/thumbstick/y"], + 0.3, + 'POSITIVE', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.VIVE.value, + VRDefaultActionprofiles.VIVE.value, + ["/input/trackpad/y"], + 0.3, + 'POSITIVE', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.VIVE_COSMOS.value, + VRDefaultActionprofiles.VIVE_COSMOS.value, + ["/input/thumbstick/y"], + 0.3, + 'POSITIVE', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.VIVE_FOCUS.value, + VRDefaultActionprofiles.VIVE_FOCUS.value, + ["/input/thumbstick/y"], + 0.3, + 'POSITIVE', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.WMR.value, + VRDefaultActionprofiles.WMR.value, + ["/input/thumbstick/y"], + 0.3, + 'POSITIVE', + 'ANY') + + ami = vr_defaults_action_add(am, + VRDefaultActions.FLY_DOWN.value, + ["/user/hand/right"], + "wm.xr_navigation_fly", + 'MODAL', + False, + "", + False, + 0.0, + 0.0, + 0.0, + 'PRESS') + if ami: + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.HUAWEI.value, + VRDefaultActionprofiles.HUAWEI.value, + ["/input/trackpad/y"], + 0.3, + 'NEGATIVE', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.INDEX.value, + VRDefaultActionprofiles.INDEX.value, + ["/input/thumbstick/y"], + 0.3, + 'NEGATIVE', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.OCULUS.value, + VRDefaultActionprofiles.OCULUS.value, + ["/input/thumbstick/y"], + 0.3, + 'NEGATIVE', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.REVERB_G2.value, + VRDefaultActionprofiles.REVERB_G2.value, + ["/input/thumbstick/y"], + 0.3, + 'NEGATIVE', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.VIVE.value, + VRDefaultActionprofiles.VIVE.value, + ["/input/trackpad/y"], + 0.3, + 'NEGATIVE', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.VIVE_COSMOS.value, + VRDefaultActionprofiles.VIVE_COSMOS.value, + ["/input/thumbstick/y"], + 0.3, + 'NEGATIVE', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.VIVE_FOCUS.value, + VRDefaultActionprofiles.VIVE_FOCUS.value, + ["/input/thumbstick/y"], + 0.3, + 'NEGATIVE', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.WMR.value, + VRDefaultActionprofiles.WMR.value, + ["/input/thumbstick/y"], + 0.3, + 'NEGATIVE', + 'ANY') + + ami = vr_defaults_action_add(am, + VRDefaultActions.FLY_TURNLEFT.value, + ["/user/hand/right"], + "wm.xr_navigation_fly", + 'MODAL', + False, + "", + False, + 0.0, + 0.0, + 0.0, + 'PRESS') + if ami: + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.HUAWEI.value, + VRDefaultActionprofiles.HUAWEI.value, + ["/input/trackpad/x"], + 0.3, + 'NEGATIVE', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.INDEX.value, + VRDefaultActionprofiles.INDEX.value, + ["/input/thumbstick/x"], + 0.3, + 'NEGATIVE', + 'ANY') + 
vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.OCULUS.value, + VRDefaultActionprofiles.OCULUS.value, + ["/input/thumbstick/x"], + 0.3, + 'NEGATIVE', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.REVERB_G2.value, + VRDefaultActionprofiles.REVERB_G2.value, + ["/input/thumbstick/x"], + 0.3, + 'NEGATIVE', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.VIVE.value, + VRDefaultActionprofiles.VIVE.value, + ["/input/trackpad/x"], + 0.3, + 'NEGATIVE', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.VIVE_COSMOS.value, + VRDefaultActionprofiles.VIVE_COSMOS.value, + ["/input/thumbstick/x"], + 0.3, + 'NEGATIVE', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.VIVE_FOCUS.value, + VRDefaultActionprofiles.VIVE_FOCUS.value, + ["/input/thumbstick/x"], + 0.3, + 'NEGATIVE', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.WMR.value, + VRDefaultActionprofiles.WMR.value, + ["/input/thumbstick/x"], + 0.3, + 'NEGATIVE', + 'ANY') + + ami = vr_defaults_action_add(am, + VRDefaultActions.FLY_TURNRIGHT.value, + ["/user/hand/right"], + "wm.xr_navigation_fly", + 'MODAL', + False, + "", + False, + 0.0, + 0.0, + 0.0, + 'PRESS') + if ami: + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.HUAWEI.value, + VRDefaultActionprofiles.HUAWEI.value, + ["/input/trackpad/x"], + 0.3, + 'POSITIVE', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.INDEX.value, + VRDefaultActionprofiles.INDEX.value, + ["/input/thumbstick/x"], + 0.3, + 'POSITIVE', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.OCULUS.value, + VRDefaultActionprofiles.OCULUS.value, + ["/input/thumbstick/x"], + 0.3, + 'POSITIVE', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.REVERB_G2.value, + VRDefaultActionprofiles.REVERB_G2.value, + ["/input/thumbstick/x"], + 0.3, + 'POSITIVE', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.VIVE.value, + VRDefaultActionprofiles.VIVE.value, + ["/input/trackpad/x"], + 0.3, + 'POSITIVE', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.VIVE_COSMOS.value, + VRDefaultActionprofiles.VIVE_COSMOS.value, + ["/input/thumbstick/x"], + 0.3, + 'POSITIVE', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.VIVE_FOCUS.value, + VRDefaultActionprofiles.VIVE_FOCUS.value, + ["/input/thumbstick/x"], + 0.3, + 'POSITIVE', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.WMR.value, + VRDefaultActionprofiles.WMR.value, + ["/input/thumbstick/x"], + 0.3, + 'POSITIVE', + 'ANY') + + ami = vr_defaults_action_add(am, + VRDefaultActions.NAV_RESET.value, + ["/user/hand/left", + "/user/hand/right"], + "wm.xr_navigation_reset", + 'PRESS', + False, + "haptic", + True, + 0.3, + 3000.0, + 0.5, + 'PRESS') + if ami: + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.HUAWEI.value, + VRDefaultActionprofiles.HUAWEI.value, + ["/input/back/click", + "/input/back/click"], + 0.3, + 'ANY', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.INDEX.value, + VRDefaultActionprofiles.INDEX.value, + ["/input/a/click", + "/input/a/click"], + 0.3, + 'ANY', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.OCULUS.value, + VRDefaultActionprofiles.OCULUS.value, + ["/input/x/click", + "/input/a/click"], + 0.3, + 'ANY', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.REVERB_G2.value, + 
VRDefaultActionprofiles.REVERB_G2.value, + ["/input/x/click", + "/input/a/click"], + 0.3, + 'ANY', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.VIVE.value, + VRDefaultActionprofiles.VIVE.value, + ["/input/menu/click", + "/input/menu/click"], + 0.3, + 'ANY', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.VIVE_COSMOS.value, + VRDefaultActionprofiles.VIVE_COSMOS.value, + ["/input/x/click", + "/input/a/click"], + 0.3, + 'ANY', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.VIVE_FOCUS.value, + VRDefaultActionprofiles.VIVE_FOCUS.value, + ["/input/x/click", + "/input/a/click"], + 0.3, + 'ANY', + 'ANY') + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.WMR.value, + VRDefaultActionprofiles.WMR.value, + ["/input/menu/click", + "/input/menu/click"], + 0.3, + 'ANY', + 'ANY') + + ami = vr_defaults_haptic_action_add(am, + VRDefaultActions.HAPTIC.value, + ["/user/hand/left", + "/user/hand/right"]) + if ami: + vr_defaults_haptic_actionbinding_add(ami, + VRDefaultActionbindings.HUAWEI.value, + VRDefaultActionprofiles.HUAWEI.value, + ["/output/haptic", + "/output/haptic"]) + vr_defaults_haptic_actionbinding_add(ami, + VRDefaultActionbindings.INDEX.value, + VRDefaultActionprofiles.INDEX.value, + ["/output/haptic", + "/output/haptic"]) + vr_defaults_haptic_actionbinding_add(ami, + VRDefaultActionbindings.OCULUS.value, + VRDefaultActionprofiles.OCULUS.value, + ["/output/haptic", + "/output/haptic"]) + vr_defaults_haptic_actionbinding_add(ami, + VRDefaultActionbindings.REVERB_G2.value, + VRDefaultActionprofiles.REVERB_G2.value, + ["/output/haptic", + "/output/haptic"]) + vr_defaults_haptic_actionbinding_add(ami, + VRDefaultActionbindings.SIMPLE.value, + VRDefaultActionprofiles.SIMPLE.value, + ["/output/haptic", + "/output/haptic"]) + vr_defaults_haptic_actionbinding_add(ami, + VRDefaultActionbindings.VIVE.value, + VRDefaultActionprofiles.VIVE.value, + ["/output/haptic", + "/output/haptic"]) + vr_defaults_haptic_actionbinding_add(ami, + VRDefaultActionbindings.VIVE_COSMOS.value, + VRDefaultActionprofiles.VIVE_COSMOS.value, + ["/output/haptic", + "/output/haptic"]) + vr_defaults_haptic_actionbinding_add(ami, + VRDefaultActionbindings.VIVE_FOCUS.value, + VRDefaultActionprofiles.VIVE_FOCUS.value, + ["/output/haptic", + "/output/haptic"]) + vr_defaults_haptic_actionbinding_add(ami, + VRDefaultActionbindings.WMR.value, + VRDefaultActionprofiles.WMR.value, + ["/output/haptic", + "/output/haptic"]) + + +def vr_defaults_create_default_gamepad(session_state): + am = vr_defaults_actionmap_add(session_state, + VRDefaultActionmaps.GAMEPAD.value) + + ami = vr_defaults_action_add(am, + VRDefaultActions.TELEPORT.value, + ["/user/gamepad"], + "wm.xr_navigation_teleport", + 'MODAL', + False, + "", + False, + 0.0, + 0.0, + 0.0, + 'PRESS') + if ami: + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.GAMEPAD.value, + VRDefaultActionprofiles.GAMEPAD.value, + ["/input/trigger_right/value"], + 0.3, + 'ANY', + 'ANY') + + ami = vr_defaults_action_add(am, + VRDefaultActions.FLY.value, + ["/user/gamepad"], + "wm.xr_navigation_fly", + 'MODAL', + False, + "", + False, + 0.0, + 0.0, + 0.0, + 'PRESS') + if ami: + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.GAMEPAD.value, + VRDefaultActionprofiles.GAMEPAD.value, + ["/input/trigger_left/value"], + 0.3, + 'ANY', + 'ANY') + + ami = vr_defaults_action_add(am, + VRDefaultActions.FLY_FORWARD.value, + ["/user/gamepad"], + "wm.xr_navigation_fly", + 'MODAL', + False, + "", + False, + 
0.0, + 0.0, + 0.0, + 'PRESS') + if ami: + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.GAMEPAD.value, + VRDefaultActionprofiles.GAMEPAD.value, + ["/input/thumbstick_left/y"], + 0.3, + 'POSITIVE', + 'ANY') + + ami = vr_defaults_action_add(am, + VRDefaultActions.FLY_BACK.value, + ["/user/gamepad"], + "wm.xr_navigation_fly", + 'MODAL', + False, + "", + False, + 0.0, + 0.0, + 0.0, + 'PRESS') + if ami: + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.GAMEPAD.value, + VRDefaultActionprofiles.GAMEPAD.value, + ["/input/thumbstick_left/y"], + 0.3, + 'NEGATIVE', + 'ANY') + + ami = vr_defaults_action_add(am, + VRDefaultActions.FLY_LEFT.value, + ["/user/gamepad"], + "wm.xr_navigation_fly", + 'MODAL', + False, + "", + False, + 0.0, + 0.0, + 0.0, + 'PRESS') + if ami: + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.GAMEPAD.value, + VRDefaultActionprofiles.GAMEPAD.value, + ["/input/thumbstick_left/x"], + 0.3, + 'NEGATIVE', + 'ANY') + + ami = vr_defaults_action_add(am, + VRDefaultActions.FLY_RIGHT.value, + ["/user/gamepad"], + "wm.xr_navigation_fly", + 'MODAL', + False, + "", + False, + 0.0, + 0.0, + 0.0, + 'PRESS') + if ami: + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.GAMEPAD.value, + VRDefaultActionprofiles.GAMEPAD.value, + ["/input/thumbstick_left/x"], + 0.3, + 'POSITIVE', + 'ANY') + + ami = vr_defaults_action_add(am, + VRDefaultActions.FLY_UP.value, + ["/user/gamepad"], + "wm.xr_navigation_fly", + 'MODAL', + False, + "", + False, + 0.0, + 0.0, + 0.0, + 'PRESS') + if ami: + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.GAMEPAD.value, + VRDefaultActionprofiles.GAMEPAD.value, + ["/input/thumbstick_right/y"], + 0.3, + 'POSITIVE', + 'ANY') + + ami = vr_defaults_action_add(am, + VRDefaultActions.FLY_DOWN.value, + ["/user/gamepad"], + "wm.xr_navigation_fly", + 'MODAL', + False, + "", + False, + 0.0, + 0.0, + 0.0, + 'PRESS') + if ami: + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.GAMEPAD.value, + VRDefaultActionprofiles.GAMEPAD.value, + ["/input/thumbstick_right/y"], + 0.3, + 'NEGATIVE', + 'ANY') + + ami = vr_defaults_action_add(am, + VRDefaultActions.FLY_TURNLEFT.value, + ["/user/gamepad"], + "wm.xr_navigation_fly", + 'MODAL', + False, + "", + False, + 0.0, + 0.0, + 0.0, + 'PRESS') + if ami: + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.GAMEPAD.value, + VRDefaultActionprofiles.GAMEPAD.value, + ["/input/thumbstick_right/x"], + 0.3, + 'NEGATIVE', + 'ANY') + + ami = vr_defaults_action_add(am, + VRDefaultActions.FLY_TURNRIGHT.value, + ["/user/gamepad"], + "wm.xr_navigation_fly", + 'MODAL', + False, + "", + False, + 0.0, + 0.0, + 0.0, + 'PRESS') + if ami: + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.GAMEPAD.value, + VRDefaultActionprofiles.GAMEPAD.value, + ["/input/thumbstick_right/x"], + 0.3, + 'POSITIVE', + 'ANY') + + ami = vr_defaults_action_add(am, + VRDefaultActions.NAV_RESET.value, + ["/user/gamepad"], + "wm.xr_navigation_reset", + 'PRESS', + False, + "haptic_right", + True, + 0.3, + 3000.0, + 0.5, + 'PRESS') + if ami: + vr_defaults_actionbinding_add(ami, + VRDefaultActionbindings.GAMEPAD.value, + VRDefaultActionprofiles.GAMEPAD.value, + ["/input/a/click"], + 0.3, + 'ANY', + 'ANY') + + ami = vr_defaults_haptic_action_add(am, + VRDefaultActions.HAPTIC_LEFT.value, + ["/user/gamepad"]) + if ami: + vr_defaults_haptic_actionbinding_add(ami, + VRDefaultActionbindings.GAMEPAD.value, + VRDefaultActionprofiles.GAMEPAD.value, + ["/output/haptic_left"]) + + ami = 
vr_defaults_haptic_action_add(am, + VRDefaultActions.HAPTIC_RIGHT.value, + ["/user/gamepad"]) + if ami: + vr_defaults_haptic_actionbinding_add(ami, + VRDefaultActionbindings.GAMEPAD.value, + VRDefaultActionprofiles.GAMEPAD.value, + ["/output/haptic_right"]) + + ami = vr_defaults_haptic_action_add(am, + VRDefaultActions.HAPTIC_LEFTTRIGGER.value, + ["/user/gamepad"]) + if ami: + vr_defaults_haptic_actionbinding_add(ami, + VRDefaultActionbindings.GAMEPAD.value, + VRDefaultActionprofiles.GAMEPAD.value, + ["/output/haptic_left_trigger"]) + + ami = vr_defaults_haptic_action_add(am, + VRDefaultActions.HAPTIC_RIGHTTRIGGER.value, + ["/user/gamepad"]) + if ami: + vr_defaults_haptic_actionbinding_add(ami, + VRDefaultActionbindings.GAMEPAD.value, + VRDefaultActionprofiles.GAMEPAD.value, + ["/output/haptic_right_trigger"]) + + +def vr_get_default_config_path(): + filepath = os.path.join(os.path.dirname(os.path.abspath(__file__)), "configs") + return os.path.join(filepath, "default.py") + + +def vr_ensure_default_actionmaps(session_state): + loaded = True + + for name in VRDefaultActionmaps: + if not session_state.actionmaps.find(session_state, name.value): + loaded = False + break + + if loaded: + return loaded + + # Load default action maps. + filepath = vr_get_default_config_path() + + if not os.path.exists(filepath): + # Create and save default action maps. + vr_defaults_create_default(session_state) + vr_defaults_create_default_gamepad(session_state) + + action_map.vr_save_actionmaps(session_state, filepath, sort=False) + + loaded = action_map.vr_load_actionmaps(session_state, filepath) + + return loaded diff --git a/scripts/addons_core/viewport_vr_preview/gui.py b/scripts/addons_core/viewport_vr_preview/gui.py new file mode 100644 index 00000000000..43ebfefd8de --- /dev/null +++ b/scripts/addons_core/viewport_vr_preview/gui.py @@ -0,0 +1,284 @@ +# SPDX-FileCopyrightText: 2021-2023 Blender Foundation +# +# SPDX-License-Identifier: GPL-2.0-or-later + +if "bpy" in locals(): + import importlib + importlib.reload(properties) +else: + from . import properties + +import bpy +from bpy.app.translations import pgettext_iface as iface_ +from bpy.types import ( + Menu, + Panel, + UIList, +) +# Add space_view3d.py to module search path for VIEW3D_PT_object_type_visibility import. +import os.path +import sys +sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../../startup/bl_ui'))) +from space_view3d import VIEW3D_PT_object_type_visibility + + +# Session. +class VIEW3D_PT_vr_session(Panel): + bl_space_type = 'VIEW_3D' + bl_region_type = 'UI' + bl_category = "VR" + bl_label = "VR Session" + + def draw(self, context): + layout = self.layout + session_settings = context.window_manager.xr_session_settings + scene = context.scene + + layout.use_property_split = True + layout.use_property_decorate = False # No animation. + + is_session_running = bpy.types.XrSessionState.is_running(context) + + # Using SNAP_FACE because it looks like a stop icon -- I shouldn't + # have commit rights... 
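# The toggle below reuses a single operator for both states: the label/icon
# pair is picked from whether an XR session is currently running.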
+ toggle_info = ((iface_("Start VR Session"), 'PLAY') if not is_session_running + else (iface_("Stop VR Session"), 'SNAP_FACE')) + layout.operator("wm.xr_session_toggle", text=toggle_info[0], + translate=False, icon=toggle_info[1]) + + layout.separator() + + col = layout.column(align=True, heading="Tracking") + col.prop(session_settings, "use_positional_tracking", text="Positional") + col.prop(session_settings, "use_absolute_tracking", text="Absolute") + + col = layout.column(align=True, heading="Actions") + col.prop(scene, "vr_actions_enable") + + +# View. +class VIEW3D_PT_vr_session_view(Panel): + bl_space_type = 'VIEW_3D' + bl_region_type = 'UI' + bl_category = "VR" + bl_label = "View" + + def draw(self, context): + layout = self.layout + session_settings = context.window_manager.xr_session_settings + + layout.use_property_split = True + layout.use_property_decorate = False # No animation. + + col = layout.column(align=True, heading="Show") + col.prop(session_settings, "show_floor", text="Floor") + col.prop(session_settings, "show_annotation", text="Annotations") + + col.prop(session_settings, "show_selection", text="Selection") + col.prop(session_settings, "show_controllers", text="Controllers") + col.prop(session_settings, "show_custom_overlays", text="Custom Overlays") + col.prop(session_settings, "show_object_extras", text="Object Extras") + + col = col.row(align=True, heading=" ") + col.scale_x = 2.0 + col.popover( + panel="VIEW3D_PT_vr_session_view_object_type_visibility", + icon_value=session_settings.icon_from_show_object_viewport, + text="", + ) + + col = layout.column(align=True) + col.prop(session_settings, "controller_draw_style", text="Controller Style") + + col = layout.column(align=True) + col.prop(session_settings, "clip_start", text="Clip Start") + col.prop(session_settings, "clip_end", text="End") + + +class VIEW3D_PT_vr_session_view_object_type_visibility(VIEW3D_PT_object_type_visibility): + def draw(self, context): + session_settings = context.window_manager.xr_session_settings + self.draw_ex(context, session_settings, False) # Pass session settings instead of 3D view. + + +# Landmarks. +class VIEW3D_MT_vr_landmark_menu(Menu): + bl_label = "Landmark Controls" + + def draw(self, _context): + layout = self.layout + + layout.operator("view3d.vr_camera_landmark_from_session") + layout.operator("view3d.vr_landmark_from_camera") + layout.operator("view3d.update_vr_landmark") + layout.separator() + layout.operator("view3d.cursor_to_vr_landmark") + layout.operator("view3d.camera_to_vr_landmark") + layout.operator("view3d.add_camera_from_vr_landmark") + + +class VIEW3D_UL_vr_landmarks(UIList): + def draw_item(self, context, layout, _data, item, icon, _active_data, + _active_propname, index): + landmark = item + landmark_active_idx = context.scene.vr_landmarks_active + + layout.emboss = 'NONE' + + layout.prop(landmark, "name", text="") + + icon = ( + 'RADIOBUT_ON' if (index == landmark_active_idx) else 'RADIOBUT_OFF' + ) + props = layout.operator( + "view3d.vr_landmark_activate", text="", icon=icon) + props.index = index + + +class VIEW3D_PT_vr_landmarks(Panel): + bl_space_type = 'VIEW_3D' + bl_region_type = 'UI' + bl_category = "VR" + bl_label = "Landmarks" + bl_options = {'DEFAULT_CLOSED'} + + def draw(self, context): + layout = self.layout + scene = context.scene + landmark_selected = properties.VRLandmark.get_selected_landmark(context) + + layout.use_property_split = True + layout.use_property_decorate = False # No animation. 
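# Note: "vr_landmarks_selected" is only the list highlight used by the
# template_list below; "vr_landmarks_active" (set via view3d.vr_landmark_activate)
# is what actually drives the session's base pose.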
+ + row = layout.row() + + row.template_list("VIEW3D_UL_vr_landmarks", "", scene, "vr_landmarks", + scene, "vr_landmarks_selected", rows=3) + + col = row.column(align=True) + col.operator("view3d.vr_landmark_add", icon='ADD', text="") + col.operator("view3d.vr_landmark_remove", icon='REMOVE', text="") + col.operator("view3d.vr_landmark_from_session", icon='PLUS', text="") + + col.menu("VIEW3D_MT_vr_landmark_menu", icon='DOWNARROW_HLT', text="") + + if landmark_selected: + layout.prop(landmark_selected, "type") + + if landmark_selected.type == 'OBJECT': + layout.prop(landmark_selected, "base_pose_object") + layout.prop(landmark_selected, "base_scale", text="Scale") + elif landmark_selected.type == 'CUSTOM': + layout.prop(landmark_selected, + "base_pose_location", text="Location") + layout.prop(landmark_selected, + "base_pose_angle", text="Angle") + layout.prop(landmark_selected, + "base_scale", text="Scale") + + +# Actions. +class VIEW3D_PT_vr_actionmaps(Panel): + bl_space_type = 'VIEW_3D' + bl_region_type = 'UI' + bl_category = "VR" + bl_label = "Action Maps" + bl_options = {'DEFAULT_CLOSED'} + + def draw(self, context): + layout = self.layout + scene = context.scene + + layout.use_property_split = True + layout.use_property_decorate = False # No animation. + + col = layout.column(align=True) + col.prop(scene, "vr_actions_use_gamepad", text="Gamepad") + + col = layout.column(align=True, heading="Extensions") + col.prop(scene, "vr_actions_enable_reverb_g2", text="HP Reverb G2") + col.prop(scene, "vr_actions_enable_vive_cosmos", text="HTC Vive Cosmos") + col.prop(scene, "vr_actions_enable_vive_focus", text="HTC Vive Focus") + col.prop(scene, "vr_actions_enable_huawei", text="Huawei") + + +# Viewport feedback. +class VIEW3D_PT_vr_viewport_feedback(Panel): + bl_space_type = 'VIEW_3D' + bl_region_type = 'UI' + bl_category = "VR" + bl_label = "Viewport Feedback" + bl_options = {'DEFAULT_CLOSED'} + + def draw(self, context): + layout = self.layout + scene = context.scene + view3d = context.space_data + session_settings = context.window_manager.xr_session_settings + + col = layout.column(align=True) + col.label(icon='ERROR', text="Note:") + col.label(text="Settings here may have a significant") + col.label(text="performance impact!") + + layout.separator() + + layout.prop(view3d.shading, "vr_show_virtual_camera") + layout.prop(view3d.shading, "vr_show_controllers") + layout.prop(view3d.shading, "vr_show_landmarks") + layout.prop(view3d, "mirror_xr_session") + + +# Info. +class VIEW3D_PT_vr_info(bpy.types.Panel): + bl_space_type = 'VIEW_3D' + bl_region_type = 'UI' + bl_category = "VR" + bl_label = "VR Info" + + @classmethod + def poll(cls, context): + return not bpy.app.build_options.xr_openxr + + def draw(self, context): + layout = self.layout + layout.label(icon='ERROR', text="Built without VR/OpenXR features") + + +classes = ( + VIEW3D_PT_vr_session, + VIEW3D_PT_vr_session_view, + VIEW3D_PT_vr_session_view_object_type_visibility, + VIEW3D_PT_vr_landmarks, + VIEW3D_PT_vr_actionmaps, + VIEW3D_PT_vr_viewport_feedback, + + VIEW3D_UL_vr_landmarks, + VIEW3D_MT_vr_landmark_menu, +) + + +def register(): + for cls in classes: + bpy.utils.register_class(cls) + + # View3DShading is the only per 3D-View struct with custom property + # support, so "abusing" that to get a per 3D-View option. 
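# The three toggles below default to False, so the VR feedback overlays stay
# hidden until enabled in the sidebar's "Viewport Feedback" panel.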
+ bpy.types.View3DShading.vr_show_virtual_camera = bpy.props.BoolProperty( + name="Show VR Camera" + ) + bpy.types.View3DShading.vr_show_controllers = bpy.props.BoolProperty( + name="Show VR Controllers" + ) + bpy.types.View3DShading.vr_show_landmarks = bpy.props.BoolProperty( + name="Show Landmarks" + ) + + +def unregister(): + for cls in classes: + bpy.utils.unregister_class(cls) + + del bpy.types.View3DShading.vr_show_virtual_camera + del bpy.types.View3DShading.vr_show_controllers + del bpy.types.View3DShading.vr_show_landmarks diff --git a/scripts/addons_core/viewport_vr_preview/operators.py b/scripts/addons_core/viewport_vr_preview/operators.py new file mode 100644 index 00000000000..34c7ebd3758 --- /dev/null +++ b/scripts/addons_core/viewport_vr_preview/operators.py @@ -0,0 +1,540 @@ +# SPDX-FileCopyrightText: 2021-2023 Blender Foundation +# +# SPDX-License-Identifier: GPL-2.0-or-later + +if "bpy" in locals(): + import importlib + importlib.reload(properties) +else: + from . import properties + +import bpy +import gpu +from bpy.app.translations import pgettext_data as data_ +from bpy.types import ( + Gizmo, + GizmoGroup, + Operator, +) +import math +from math import radians +from mathutils import Euler, Matrix, Quaternion, Vector + + +# Landmarks. +class VIEW3D_OT_vr_landmark_add(Operator): + bl_idname = "view3d.vr_landmark_add" + bl_label = "Add VR Landmark" + bl_description = "Add a new VR landmark to the list and select it" + bl_options = {'UNDO', 'REGISTER'} + + def execute(self, context): + scene = context.scene + landmarks = scene.vr_landmarks + + landmarks.add() + + # select newly created set + scene.vr_landmarks_selected = len(landmarks) - 1 + + return {'FINISHED'} + + +class VIEW3D_OT_vr_landmark_from_camera(Operator): + bl_idname = "view3d.vr_landmark_from_camera" + bl_label = "Add VR Landmark from Camera" + bl_description = "Add a new VR landmark from the active camera object to the list and select it" + bl_options = {'UNDO', 'REGISTER'} + + @classmethod + def poll(cls, context): + cam_selected = False + + vl_objects = bpy.context.view_layer.objects + if vl_objects.active and vl_objects.active.type == 'CAMERA': + cam_selected = True + return cam_selected + + def execute(self, context): + scene = context.scene + landmarks = scene.vr_landmarks + cam = context.view_layer.objects.active + lm = landmarks.add() + lm.type = 'OBJECT' + lm.base_pose_object = cam + lm.name = "LM_" + cam.name + + # select newly created set + scene.vr_landmarks_selected = len(landmarks) - 1 + + return {'FINISHED'} + + +class VIEW3D_OT_vr_landmark_from_session(Operator): + bl_idname = "view3d.vr_landmark_from_session" + bl_label = "Add VR Landmark from Session" + bl_description = "Add VR landmark from the viewer pose of the running VR session to the list and select it" + bl_options = {'UNDO', 'REGISTER'} + + @classmethod + def poll(cls, context): + return bpy.types.XrSessionState.is_running(context) + + def execute(self, context): + scene = context.scene + landmarks = scene.vr_landmarks + wm = context.window_manager + + lm = landmarks.add() + lm.type = "CUSTOM" + scene.vr_landmarks_selected = len(landmarks) - 1 + + loc = wm.xr_session_state.viewer_pose_location + rot = wm.xr_session_state.viewer_pose_rotation.to_euler() + + lm.base_pose_location = loc + lm.base_pose_angle = rot[2] + + return {'FINISHED'} + + +class VIEW3D_OT_vr_camera_landmark_from_session(Operator): + bl_idname = "view3d.vr_camera_landmark_from_session" + bl_label = "Add Camera and VR Landmark from Session" + bl_description = "Create 
a new Camera and VR Landmark from the viewer pose of the running VR session and select it" + bl_options = {'UNDO', 'REGISTER'} + + @classmethod + def poll(cls, context): + return bpy.types.XrSessionState.is_running(context) + + def execute(self, context): + scene = context.scene + landmarks = scene.vr_landmarks + wm = context.window_manager + + lm = landmarks.add() + lm.type = 'OBJECT' + scene.vr_landmarks_selected = len(landmarks) - 1 + + loc = wm.xr_session_state.viewer_pose_location + rot = wm.xr_session_state.viewer_pose_rotation.to_euler() + + cam = bpy.data.cameras.new(data_("Camera") + "_" + lm.name) + new_cam = bpy.data.objects.new(data_("Camera") + "_" + lm.name, cam) + scene.collection.objects.link(new_cam) + new_cam.location = loc + new_cam.rotation_euler = rot + + lm.base_pose_object = new_cam + + return {'FINISHED'} + + +class VIEW3D_OT_update_vr_landmark(Operator): + bl_idname = "view3d.update_vr_landmark" + bl_label = "Update Custom VR Landmark" + bl_description = "Update the selected landmark from the current viewer pose in the VR session" + bl_options = {'UNDO', 'REGISTER'} + + @classmethod + def poll(cls, context): + selected_landmark = properties.VRLandmark.get_selected_landmark(context) + return bpy.types.XrSessionState.is_running(context) and selected_landmark.type == 'CUSTOM' + + def execute(self, context): + wm = context.window_manager + + lm = properties.VRLandmark.get_selected_landmark(context) + + loc = wm.xr_session_state.viewer_pose_location + rot = wm.xr_session_state.viewer_pose_rotation.to_euler() + + lm.base_pose_location = loc + lm.base_pose_angle = rot + + # Re-activate the landmark to trigger viewer reset and flush landmark settings to the session settings. + properties.vr_landmark_active_update(None, context) + + return {'FINISHED'} + + +class VIEW3D_OT_vr_landmark_remove(Operator): + bl_idname = "view3d.vr_landmark_remove" + bl_label = "Remove VR Landmark" + bl_description = "Delete the selected VR landmark from the list" + bl_options = {'UNDO', 'REGISTER'} + + def execute(self, context): + scene = context.scene + landmarks = scene.vr_landmarks + + if len(landmarks) > 1: + landmark_selected_idx = scene.vr_landmarks_selected + landmarks.remove(landmark_selected_idx) + + scene.vr_landmarks_selected -= 1 + + return {'FINISHED'} + + +class VIEW3D_OT_cursor_to_vr_landmark(Operator): + bl_idname = "view3d.cursor_to_vr_landmark" + bl_label = "Cursor to VR Landmark" + bl_description = "Move the 3D Cursor to the selected VR Landmark" + bl_options = {'UNDO', 'REGISTER'} + + @classmethod + def poll(cls, context): + lm = properties.VRLandmark.get_selected_landmark(context) + if lm.type == 'SCENE_CAMERA': + return context.scene.camera is not None + elif lm.type == 'OBJECT': + return lm.base_pose_object is not None + + return True + + def execute(self, context): + scene = context.scene + lm = properties.VRLandmark.get_selected_landmark(context) + if lm.type == 'SCENE_CAMERA': + lm_pos = scene.camera.location + elif lm.type == 'OBJECT': + lm_pos = lm.base_pose_object.location + else: + lm_pos = lm.base_pose_location + scene.cursor.location = lm_pos + + return{'FINISHED'} + + +class VIEW3D_OT_add_camera_from_vr_landmark(Operator): + bl_idname = "view3d.add_camera_from_vr_landmark" + bl_label = "New Camera from VR Landmark" + bl_description = "Create a new Camera from the selected VR Landmark" + bl_options = {'UNDO', 'REGISTER'} + + def execute(self, context): + scene = context.scene + lm = properties.VRLandmark.get_selected_landmark(context) + + cam = 
bpy.data.cameras.new(data_("Camera") + "_" + lm.name) + new_cam = bpy.data.objects.new(data_("Camera") + "_" + lm.name, cam) + scene.collection.objects.link(new_cam) + angle = lm.base_pose_angle + new_cam.location = lm.base_pose_location + new_cam.rotation_euler = (math.pi / 2, 0, angle) + + return {'FINISHED'} + + +class VIEW3D_OT_camera_to_vr_landmark(Operator): + bl_idname = "view3d.camera_to_vr_landmark" + bl_label = "Scene Camera to VR Landmark" + bl_description = "Position the scene camera at the selected landmark" + bl_options = {'UNDO', 'REGISTER'} + + @classmethod + def poll(cls, context): + return context.scene.camera is not None + + def execute(self, context): + scene = context.scene + lm = properties.VRLandmark.get_selected_landmark(context) + + cam = scene.camera + angle = lm.base_pose_angle + cam.location = lm.base_pose_location + cam.rotation_euler = (math.pi / 2, 0, angle) + + return {'FINISHED'} + + +class VIEW3D_OT_vr_landmark_activate(Operator): + bl_idname = "view3d.vr_landmark_activate" + bl_label = "Activate VR Landmark" + bl_description = "Change to the selected VR landmark from the list" + bl_options = {'UNDO', 'REGISTER'} + + index: bpy.props.IntProperty( + name="Index", + options={'HIDDEN'}, + ) + + def execute(self, context): + scene = context.scene + + if self.index >= len(scene.vr_landmarks): + return {'CANCELLED'} + + scene.vr_landmarks_active = ( + self.index if self.properties.is_property_set( + "index") else scene.vr_landmarks_selected + ) + + return {'FINISHED'} + + +# Gizmos. +class VIEW3D_GT_vr_camera_cone(Gizmo): + bl_idname = "VIEW_3D_GT_vr_camera_cone" + + aspect = 1.0, 1.0 + + def draw(self, context): + if not hasattr(self, "frame_shape"): + aspect = self.aspect + + frame_shape_verts = ( + (-aspect[0], -aspect[1], -1.0), + (aspect[0], -aspect[1], -1.0), + (aspect[0], aspect[1], -1.0), + (-aspect[0], aspect[1], -1.0), + ) + lines_shape_verts = ( + (0.0, 0.0, 0.0), + frame_shape_verts[0], + (0.0, 0.0, 0.0), + frame_shape_verts[1], + (0.0, 0.0, 0.0), + frame_shape_verts[2], + (0.0, 0.0, 0.0), + frame_shape_verts[3], + ) + + self.frame_shape = self.new_custom_shape( + 'LINE_LOOP', frame_shape_verts) + self.lines_shape = self.new_custom_shape( + 'LINES', lines_shape_verts) + + # Ensure correct GL state (otherwise other gizmos might mess that up) + gpu.state.line_width_set(1.0) + gpu.state.blend_set('ALPHA') + + self.draw_custom_shape(self.frame_shape) + self.draw_custom_shape(self.lines_shape) + + +class VIEW3D_GT_vr_controller_grip(Gizmo): + bl_idname = "VIEW_3D_GT_vr_controller_grip" + + def draw(self, context): + gpu.state.line_width_set(1.0) + gpu.state.blend_set('ALPHA') + + self.color = 0.422, 0.438, 0.446 + self.draw_preset_circle(self.matrix_basis, axis='POS_X') + self.draw_preset_circle(self.matrix_basis, axis='POS_Y') + self.draw_preset_circle(self.matrix_basis, axis='POS_Z') + + +class VIEW3D_GT_vr_controller_aim(Gizmo): + bl_idname = "VIEW_3D_GT_vr_controller_aim" + + def draw(self, context): + gpu.state.line_width_set(1.0) + gpu.state.blend_set('ALPHA') + + self.color = 1.0, 0.2, 0.322 + self.draw_preset_arrow(self.matrix_basis, axis='POS_X') + self.color = 0.545, 0.863, 0.0 + self.draw_preset_arrow(self.matrix_basis, axis='POS_Y') + self.color = 0.157, 0.565, 1.0 + self.draw_preset_arrow(self.matrix_basis, axis='POS_Z') + + +class VIEW3D_GGT_vr_viewer_pose(GizmoGroup): + bl_idname = "VIEW3D_GGT_vr_viewer_pose" + bl_label = "VR Viewer Pose Indicator" + bl_space_type = 'VIEW_3D' + bl_region_type = 'WINDOW' + bl_options = {'3D', 'PERSISTENT', 
'SCALE', 'VR_REDRAWS'} + + @classmethod + def poll(cls, context): + view3d = context.space_data + return ( + view3d.shading.vr_show_virtual_camera and + bpy.types.XrSessionState.is_running(context) and + not view3d.mirror_xr_session + ) + + @staticmethod + def _get_viewer_pose_matrix(context): + wm = context.window_manager + + loc = wm.xr_session_state.viewer_pose_location + rot = wm.xr_session_state.viewer_pose_rotation + + rotmat = Matrix.Identity(3) + rotmat.rotate(rot) + rotmat.resize_4x4() + transmat = Matrix.Translation(loc) + + return transmat @ rotmat + + def setup(self, context): + gizmo = self.gizmos.new(VIEW3D_GT_vr_camera_cone.bl_idname) + gizmo.aspect = 1 / 3, 1 / 4 + + gizmo.color = gizmo.color_highlight = 0.2, 0.6, 1.0 + gizmo.alpha = 1.0 + + self.gizmo = gizmo + + def draw_prepare(self, context): + self.gizmo.matrix_basis = self._get_viewer_pose_matrix(context) + + +class VIEW3D_GGT_vr_controller_poses(GizmoGroup): + bl_idname = "VIEW3D_GGT_vr_controller_poses" + bl_label = "VR Controller Poses Indicator" + bl_space_type = 'VIEW_3D' + bl_region_type = 'WINDOW' + bl_options = {'3D', 'PERSISTENT', 'SCALE', 'VR_REDRAWS'} + + @classmethod + def poll(cls, context): + view3d = context.space_data + return ( + view3d.shading.vr_show_controllers and + bpy.types.XrSessionState.is_running(context) and + not view3d.mirror_xr_session + ) + + @staticmethod + def _get_controller_pose_matrix(context, idx, is_grip, scale): + wm = context.window_manager + + loc = None + rot = None + if is_grip: + loc = wm.xr_session_state.controller_grip_location_get(context, idx) + rot = wm.xr_session_state.controller_grip_rotation_get(context, idx) + else: + loc = wm.xr_session_state.controller_aim_location_get(context, idx) + rot = wm.xr_session_state.controller_aim_rotation_get(context, idx) + + rotmat = Matrix.Identity(3) + rotmat.rotate(Quaternion(Vector(rot))) + rotmat.resize_4x4() + transmat = Matrix.Translation(loc) + scalemat = Matrix.Scale(scale, 4) + + return transmat @ rotmat @ scalemat + + def setup(self, context): + for idx in range(2): + self.gizmos.new(VIEW3D_GT_vr_controller_grip.bl_idname) + self.gizmos.new(VIEW3D_GT_vr_controller_aim.bl_idname) + + for gizmo in self.gizmos: + gizmo.aspect = 1 / 3, 1 / 4 + gizmo.color_highlight = 1.0, 1.0, 1.0 + gizmo.alpha = 1.0 + + def draw_prepare(self, context): + grip_idx = 0 + aim_idx = 0 + idx = 0 + scale = 1.0 + for gizmo in self.gizmos: + is_grip = (gizmo.bl_idname == VIEW3D_GT_vr_controller_grip.bl_idname) + if (is_grip): + idx = grip_idx + grip_idx += 1 + scale = 0.1 + else: + idx = aim_idx + aim_idx += 1 + scale = 0.5 + gizmo.matrix_basis = self._get_controller_pose_matrix(context, idx, is_grip, scale) + + +class VIEW3D_GGT_vr_landmarks(GizmoGroup): + bl_idname = "VIEW3D_GGT_vr_landmarks" + bl_label = "VR Landmark Indicators" + bl_space_type = 'VIEW_3D' + bl_region_type = 'WINDOW' + bl_options = {'3D', 'PERSISTENT', 'SCALE'} + + @classmethod + def poll(cls, context): + view3d = context.space_data + return ( + view3d.shading.vr_show_landmarks + ) + + def setup(self, context): + pass + + def draw_prepare(self, context): + # first delete the old gizmos + for g in self.gizmos: + self.gizmos.remove(g) + + scene = context.scene + landmarks = scene.vr_landmarks + + for lm in landmarks: + if ((lm.type == 'SCENE_CAMERA' and not scene.camera) or + (lm.type == 'OBJECT' and not lm.base_pose_object)): + continue + + gizmo = self.gizmos.new(VIEW3D_GT_vr_camera_cone.bl_idname) + gizmo.aspect = 1 / 3, 1 / 4 + + gizmo.color = gizmo.color_highlight = 0.2, 1.0, 
0.6 + gizmo.alpha = 1.0 + + self.gizmo = gizmo + + if lm.type == 'SCENE_CAMERA': + cam = scene.camera + lm_mat = cam.matrix_world if cam else Matrix.Identity(4) + elif lm.type == 'OBJECT': + lm_mat = lm.base_pose_object.matrix_world + else: + angle = lm.base_pose_angle + raw_rot = Euler((radians(90.0), 0, angle)) + + rotmat = Matrix.Identity(3) + rotmat.rotate(raw_rot) + rotmat.resize_4x4() + + transmat = Matrix.Translation(lm.base_pose_location) + + lm_mat = transmat @ rotmat + + self.gizmo.matrix_basis = lm_mat + + +classes = ( + VIEW3D_OT_vr_landmark_add, + VIEW3D_OT_vr_landmark_remove, + VIEW3D_OT_vr_landmark_activate, + VIEW3D_OT_vr_landmark_from_session, + VIEW3D_OT_vr_camera_landmark_from_session, + VIEW3D_OT_add_camera_from_vr_landmark, + VIEW3D_OT_camera_to_vr_landmark, + VIEW3D_OT_vr_landmark_from_camera, + VIEW3D_OT_cursor_to_vr_landmark, + VIEW3D_OT_update_vr_landmark, + + VIEW3D_GT_vr_camera_cone, + VIEW3D_GT_vr_controller_grip, + VIEW3D_GT_vr_controller_aim, + VIEW3D_GGT_vr_viewer_pose, + VIEW3D_GGT_vr_controller_poses, + VIEW3D_GGT_vr_landmarks, +) + + +def register(): + for cls in classes: + bpy.utils.register_class(cls) + + +def unregister(): + for cls in classes: + bpy.utils.unregister_class(cls) diff --git a/scripts/addons_core/viewport_vr_preview/properties.py b/scripts/addons_core/viewport_vr_preview/properties.py new file mode 100644 index 00000000000..a08b4f15395 --- /dev/null +++ b/scripts/addons_core/viewport_vr_preview/properties.py @@ -0,0 +1,228 @@ +# SPDX-FileCopyrightText: 2021-2022 Blender Foundation +# +# SPDX-License-Identifier: GPL-2.0-or-later + +import bpy +from bpy.types import ( + PropertyGroup, +) +from bpy.app.handlers import persistent + + +# Landmarks. +@persistent +def vr_ensure_default_landmark(context: bpy.context): + # Ensure there's a default landmark (scene camera by default). + landmarks = bpy.context.scene.vr_landmarks + if not landmarks: + landmarks.add() + landmarks[0].type = 'SCENE_CAMERA' + + +def vr_landmark_active_type_update(self, context): + wm = context.window_manager + session_settings = wm.xr_session_settings + landmark_active = VRLandmark.get_active_landmark(context) + + # Update session's base pose type to the matching type. + if landmark_active.type == 'SCENE_CAMERA': + session_settings.base_pose_type = 'SCENE_CAMERA' + elif landmark_active.type == 'OBJECT': + session_settings.base_pose_type = 'OBJECT' + elif landmark_active.type == 'CUSTOM': + session_settings.base_pose_type = 'CUSTOM' + + +def vr_landmark_active_base_pose_object_update(self, context): + session_settings = context.window_manager.xr_session_settings + landmark_active = VRLandmark.get_active_landmark(context) + + # Update the anchor object to the (new) camera of this landmark. 
+ session_settings.base_pose_object = landmark_active.base_pose_object + + +def vr_landmark_active_base_pose_location_update(self, context): + session_settings = context.window_manager.xr_session_settings + landmark_active = VRLandmark.get_active_landmark(context) + + session_settings.base_pose_location = landmark_active.base_pose_location + + +def vr_landmark_active_base_pose_angle_update(self, context): + session_settings = context.window_manager.xr_session_settings + landmark_active = VRLandmark.get_active_landmark(context) + + session_settings.base_pose_angle = landmark_active.base_pose_angle + + +def vr_landmark_active_base_scale_update(self, context): + session_settings = context.window_manager.xr_session_settings + landmark_active = VRLandmark.get_active_landmark(context) + + session_settings.base_scale = landmark_active.base_scale + + +def vr_landmark_type_update(self, context): + landmark_selected = VRLandmark.get_selected_landmark(context) + landmark_active = VRLandmark.get_active_landmark(context) + + # Don't allow non-trivial base scale for scene camera landmarks. + if landmark_selected.type == 'SCENE_CAMERA': + landmark_selected.base_scale = 1.0 + + # Only update session settings data if the changed landmark is actually + # the active one. + if landmark_active == landmark_selected: + vr_landmark_active_type_update(self, context) + + +def vr_landmark_base_pose_object_update(self, context): + landmark_selected = VRLandmark.get_selected_landmark(context) + landmark_active = VRLandmark.get_active_landmark(context) + + # Only update session settings data if the changed landmark is actually + # the active one. + if landmark_active == landmark_selected: + vr_landmark_active_base_pose_object_update(self, context) + + +def vr_landmark_base_pose_location_update(self, context): + landmark_selected = VRLandmark.get_selected_landmark(context) + landmark_active = VRLandmark.get_active_landmark(context) + + # Only update session settings data if the changed landmark is actually + # the active one. + if landmark_active == landmark_selected: + vr_landmark_active_base_pose_location_update(self, context) + + +def vr_landmark_base_pose_angle_update(self, context): + landmark_selected = VRLandmark.get_selected_landmark(context) + landmark_active = VRLandmark.get_active_landmark(context) + + # Only update session settings data if the changed landmark is actually + # the active one. + if landmark_active == landmark_selected: + vr_landmark_active_base_pose_angle_update(self, context) + + +def vr_landmark_base_scale_update(self, context): + landmark_selected = VRLandmark.get_selected_landmark(context) + landmark_active = VRLandmark.get_active_landmark(context) + + # Only update session settings data if the changed landmark is actually + # the active one. 
+ if landmark_active == landmark_selected: + vr_landmark_active_base_scale_update(self, context) + + +def vr_landmark_active_update(self, context): + wm = context.window_manager + + vr_landmark_active_type_update(self, context) + vr_landmark_active_base_pose_object_update(self, context) + vr_landmark_active_base_pose_location_update(self, context) + vr_landmark_active_base_pose_angle_update(self, context) + vr_landmark_active_base_scale_update(self, context) + + if wm.xr_session_state: + wm.xr_session_state.reset_to_base_pose(context) + + +class VRLandmark(PropertyGroup): + name: bpy.props.StringProperty( + name="VR Landmark", + default="Landmark" + ) + type: bpy.props.EnumProperty( + name="Type", + items=[ + ('SCENE_CAMERA', "Scene Camera", + "Use scene's currently active camera to define the VR view base " + "location and rotation"), + ('OBJECT', "Custom Object", + "Use an existing object to define the VR view base location and " + "rotation"), + ('CUSTOM', "Custom Pose", + "Allow a manually defined position and rotation to be used as " + "the VR view base pose"), + ], + default='SCENE_CAMERA', + update=vr_landmark_type_update, + ) + base_pose_object: bpy.props.PointerProperty( + name="Object", + type=bpy.types.Object, + update=vr_landmark_base_pose_object_update, + ) + base_pose_location: bpy.props.FloatVectorProperty( + name="Base Pose Location", + subtype='TRANSLATION', + update=vr_landmark_base_pose_location_update, + ) + base_pose_angle: bpy.props.FloatProperty( + name="Base Pose Angle", + subtype='ANGLE', + update=vr_landmark_base_pose_angle_update, + ) + base_scale: bpy.props.FloatProperty( + name="Base Scale", + description="Viewer reference scale associated with this landmark", + default=1.0, + min=0.000001, + update=vr_landmark_base_scale_update, + ) + + @staticmethod + def get_selected_landmark(context): + scene = context.scene + landmarks = scene.vr_landmarks + + return ( + None if (len(landmarks) < + 1) else landmarks[scene.vr_landmarks_selected] + ) + + @staticmethod + def get_active_landmark(context): + scene = context.scene + landmarks = scene.vr_landmarks + + return ( + None if (len(landmarks) < + 1) else landmarks[scene.vr_landmarks_active] + ) + + +classes = ( + VRLandmark, +) + + +def register(): + for cls in classes: + bpy.utils.register_class(cls) + + bpy.types.Scene.vr_landmarks = bpy.props.CollectionProperty( + name="Landmark", + type=VRLandmark, + ) + bpy.types.Scene.vr_landmarks_selected = bpy.props.IntProperty( + name="Selected Landmark" + ) + bpy.types.Scene.vr_landmarks_active = bpy.props.IntProperty( + update=vr_landmark_active_update, + ) + + bpy.app.handlers.load_post.append(vr_ensure_default_landmark) + + +def unregister(): + for cls in classes: + bpy.utils.unregister_class(cls) + + del bpy.types.Scene.vr_landmarks + del bpy.types.Scene.vr_landmarks_selected + del bpy.types.Scene.vr_landmarks_active + + bpy.app.handlers.load_post.remove(vr_ensure_default_landmark) diff --git a/scripts/addons_core/viewport_vr_preview/versioning.py b/scripts/addons_core/viewport_vr_preview/versioning.py new file mode 100644 index 00000000000..dadf247fa52 --- /dev/null +++ b/scripts/addons_core/viewport_vr_preview/versioning.py @@ -0,0 +1,38 @@ +# SPDX-FileCopyrightText: 2021-2022 Blender Foundation +# +# SPDX-License-Identifier: GPL-2.0-or-later + +# Update Blender version this action map was written in: +# +# When the version is ``(0, 0, 0)``, the action map being loaded didn't contain any versioning information. +# This will older than ``(3, 0, 0)``. 
+ +def actionconfig_update(actionconfig_data, actionconfig_version): + from bpy.app import version_file as blender_version + if actionconfig_version >= blender_version: + return actionconfig_data + +# Version the action map. +## import copy +## has_copy = False +## +# if actionconfig_version <= (3, 0, 0): +# Only copy once. +# if not has_copy: +## actionconfig_data = copy.deepcopy(actionconfig_data) +## has_copy = True +## +# for (am_name, am_content) in actionconfig_data: +# Apply action map updates. +## +## am_items = am_content["items"] +## +# for (ami_name, ami_args, ami_data, ami_content) in am_items +# Apply action map item updates. +## +## ami_bindings = ami_content["bindings"] +## +# for (amb_name, amb_args) in ami_bindings: +# Apply action map binding updates. + + return actionconfig_data
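For illustration only: the commented-out block above is a template for future action-map versioning steps. A filled-in step following the same copy-once pattern and data layout might look like the sketch below. The update itself is hypothetical (no real versioning step exists yet) and the function name is invented for the example; it is not part of the patch.

    import copy

    def actionconfig_update_example(actionconfig_data, actionconfig_version):
        # Hypothetical versioning step, mirroring the commented template above.
        has_copy = False

        if actionconfig_version <= (3, 0, 0):
            # Only copy once, so later steps keep mutating the same copy.
            if not has_copy:
                actionconfig_data = copy.deepcopy(actionconfig_data)
                has_copy = True

            for am_name, am_content in actionconfig_data:
                # Apply action map updates here.
                am_items = am_content["items"]
                for ami_name, ami_args, ami_data, ami_content in am_items:
                    # Apply action map item updates here.
                    ami_bindings = ami_content["bindings"]
                    for amb_name, amb_args in ami_bindings:
                        # Apply action map binding updates here, e.g. rewrite a
                        # renamed component path inside amb_args. This sketch
                        # leaves the data unchanged.
                        pass

        return actionconfig_data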