test/source/blender/editors/space_view3d/space_view3d.cc

/* SPDX-FileCopyrightText: 2008 Blender Authors
*
* SPDX-License-Identifier: GPL-2.0-or-later */
/** \file
* \ingroup spview3d
*/
/* Allow using deprecated functionality for .blend file I/O. */
#define DNA_DEPRECATED_ALLOW
#include <cstring>
#include "AS_asset_representation.hh"
#include "DNA_collection_types.h"
#include "DNA_defaults.h"
#include "DNA_gpencil_legacy_types.h"
#include "DNA_lightprobe_types.h"
#include "DNA_object_types.h"
#include "DNA_scene_types.h"
#include "DNA_view3d_types.h"
#include "MEM_guardedalloc.h"
#include "BLI_listbase.h"
#include "BLI_math_matrix.h"
#include "BLI_math_vector.h"
#include "BLI_math_vector.hh"
#include "BLI_string.h"
#include "BLI_utildefines.h"
#include "BLT_translation.hh"
#include "BKE_asset.hh"
#include "BKE_context.hh"
#include "BKE_global.hh"
#include "BKE_gpencil_legacy.h"
#include "BKE_idprop.hh"
#include "BKE_layer.hh"
#include "BKE_lib_id.hh"
#include "BKE_lib_query.hh"
#include "BKE_lib_remap.hh"
#include "BKE_library.hh"
#include "BKE_main.hh"
#include "BKE_object.hh"
#include "BKE_scene.hh"
#include "BKE_screen.hh"
#include "BKE_viewer_path.hh"
#include "ED_asset_shelf.hh"
#include "ED_geometry.hh"
#include "ED_object.hh"
#include "ED_outliner.hh"
#include "ED_render.hh"
#include "ED_screen.hh"
#include "ED_space_api.hh"
#include "ED_transform.hh"
#include "ED_undo.hh"
#include "GPU_matrix.hh"
#include "DRW_engine.hh"
#include "WM_api.hh"
#include "WM_message.hh"
#include "WM_toolsystem.hh"
#include "WM_types.hh"
#include "RNA_access.hh"
#include "UI_interface.hh"
#include "UI_resources.hh"
#include "BLO_read_write.hh"
#ifdef WITH_PYTHON
# include "BPY_extern.hh"
#endif
#include "DEG_depsgraph.hh"
#include "DEG_depsgraph_build.hh"
#include "view3d_intern.hh" /* own include */
#include "view3d_navigate.hh"
/* ******************** manage regions ********************* */
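/* Find a usable "user" region in this 3D view area: the first `RGN_TYPE_WINDOW` region whose
* rotation is not locked, preferring one in camera or user perspective.
* On success `r_region` is set and true is returned. */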
bool ED_view3d_area_user_region(const ScrArea *area, const View3D *v3d, ARegion **r_region)
{
RegionView3D *rv3d = nullptr;
ARegion *region_unlock_user = nullptr;
ARegion *region_unlock = nullptr;
const ListBase *region_list = (v3d == area->spacedata.first) ? &area->regionbase :
&v3d->regionbase;
BLI_assert(v3d->spacetype == SPACE_VIEW3D);
LISTBASE_FOREACH (ARegion *, region, region_list) {
/* find the first unlocked rv3d */
if (region->regiondata && region->regiontype == RGN_TYPE_WINDOW) {
rv3d = static_cast<RegionView3D *>(region->regiondata);
if ((rv3d->viewlock & RV3D_LOCK_ROTATION) == 0) {
region_unlock = region;
if (ELEM(rv3d->persp, RV3D_PERSP, RV3D_CAMOB)) {
region_unlock_user = region;
break;
}
}
}
}
/* Camera/perspective views get priority when the active region is locked. */
if (region_unlock_user) {
*r_region = region_unlock_user;
return true;
}
if (region_unlock) {
*r_region = region_unlock;
return true;
}
return false;
}
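/* Pre-compute the object-space view/projection matrices and local clipping planes,
* used to speed up projections and clip tests in `ob` space. */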
void ED_view3d_init_mats_rv3d(const Object *ob, RegionView3D *rv3d)
{
/* local viewmat and persmat, to calculate projections */
mul_m4_m4m4(rv3d->viewmatob, rv3d->viewmat, ob->object_to_world().ptr());
mul_m4_m4m4(rv3d->persmatob, rv3d->persmat, ob->object_to_world().ptr());
/* initializes object space clipping, speeds up clip tests */
ED_view3d_clipping_local(rv3d, ob->object_to_world().ptr());
}
void ED_view3d_init_mats_rv3d_gl(const Object *ob, RegionView3D *rv3d)
{
ED_view3d_init_mats_rv3d(ob, rv3d);
/* We have to multiply instead of loading `viewmatob` to make
* it work with duplis using display-lists, otherwise it will
* override the dupli-matrix. */
GPU_matrix_mul(ob->object_to_world().ptr());
}
#ifndef NDEBUG
void ED_view3d_clear_mats_rv3d(RegionView3D *rv3d)
{
zero_m4(rv3d->viewmatob);
zero_m4(rv3d->persmatob);
}
void ED_view3d_check_mats_rv3d(RegionView3D *rv3d)
{
BLI_ASSERT_ZERO_M4(rv3d->viewmatob);
BLI_ASSERT_ZERO_M4(rv3d->persmatob);
}
#endif
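/* If an external render engine is attached to this region, stop the render-preview job and
* free the engine data, then free the region's draw buffers so the viewport is fully reset. */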
void ED_view3d_stop_render_preview(wmWindowManager *wm, ARegion *region)
{
RegionView3D *rv3d = static_cast<RegionView3D *>(region->regiondata);
if (rv3d->view_render) {
#ifdef WITH_PYTHON
BPy_BEGIN_ALLOW_THREADS;
#endif
WM_jobs_kill_type(wm, nullptr, WM_JOB_TYPE_RENDER_PREVIEW);
#ifdef WITH_PYTHON
BPy_END_ALLOW_THREADS;
#endif
DRW_engine_external_free(rv3d);
}
/* A bit overkill, but this makes sure the viewport is reset completely. (fclem) */
WM_draw_region_free(region);
}
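/* When the viewport shading is no longer set to rendered, stop render previews
* in all window regions of the area. */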
void ED_view3d_shade_update(Main *bmain, View3D *v3d, ScrArea *area)
{
wmWindowManager *wm = static_cast<wmWindowManager *>(bmain->wm.first);
if (v3d->shading.type != OB_RENDER) {
LISTBASE_FOREACH (ARegion *, region, &area->regionbase) {
if ((region->regiontype == RGN_TYPE_WINDOW) && region->regiondata) {
ED_view3d_stop_render_preview(wm, region);
}
}
}
}
/* ******************** default callbacks for view3d space ***************** */
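/* Create a new 3D View space: allocate the #View3D from DNA defaults and add its regions
* (headers, asset shelf, tool shelf, sidebar) plus the main window region with a default
* #RegionView3D. */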
static SpaceLink *view3d_create(const ScrArea * /*area*/, const Scene *scene)
{
ARegion *region;
View3D *v3d;
RegionView3D *rv3d;
v3d = DNA_struct_default_alloc(View3D);
if (scene) {
v3d->camera = scene->camera;
}
/* header */
region = BKE_area_region_new();
BLI_addtail(&v3d->regionbase, region);
region->regiontype = RGN_TYPE_HEADER;
region->alignment = (U.uiflag & USER_HEADER_BOTTOM) ? RGN_ALIGN_BOTTOM : RGN_ALIGN_TOP;
/* tool header */
region = BKE_area_region_new();
BLI_addtail(&v3d->regionbase, region);
region->regiontype = RGN_TYPE_TOOL_HEADER;
region->alignment = (U.uiflag & USER_HEADER_BOTTOM) ? RGN_ALIGN_BOTTOM : RGN_ALIGN_TOP;
region->flag = RGN_FLAG_HIDDEN | RGN_FLAG_HIDDEN_BY_USER;
/* asset shelf */
region = BKE_area_region_new();
BLI_addtail(&v3d->regionbase, region);
region->regiontype = RGN_TYPE_ASSET_SHELF;
region->alignment = RGN_ALIGN_BOTTOM;
region->flag |= RGN_FLAG_HIDDEN;
/* asset shelf header */
region = BKE_area_region_new();
BLI_addtail(&v3d->regionbase, region);
region->regiontype = RGN_TYPE_ASSET_SHELF_HEADER;
region->alignment = RGN_ALIGN_BOTTOM | RGN_ALIGN_HIDE_WITH_PREV;
/* tool shelf */
region = BKE_area_region_new();
BLI_addtail(&v3d->regionbase, region);
region->regiontype = RGN_TYPE_TOOLS;
region->alignment = RGN_ALIGN_LEFT;
region->flag = RGN_FLAG_HIDDEN;
/* buttons/list view */
region = BKE_area_region_new();
BLI_addtail(&v3d->regionbase, region);
region->regiontype = RGN_TYPE_UI;
region->alignment = RGN_ALIGN_RIGHT;
region->flag = RGN_FLAG_HIDDEN;
/* main region */
region = BKE_area_region_new();
BLI_addtail(&v3d->regionbase, region);
region->regiontype = RGN_TYPE_WINDOW;
region->regiondata = MEM_cnew<RegionView3D>("region view3d");
rv3d = static_cast<RegionView3D *>(region->regiondata);
rv3d->viewquat[0] = 1.0f;
rv3d->persp = RV3D_PERSP;
rv3d->view = RV3D_VIEW_USER;
rv3d->dist = 10.0;
return (SpaceLink *)v3d;
}
/* Doesn't free the space-link itself. */
static void view3d_free(SpaceLink *sl)
{
View3D *vd = (View3D *)sl;
if (vd->localvd) {
MEM_freeN(vd->localvd);
}
/* Cannot use MEM_SAFE_FREE, as #SceneStats type is only forward-declared in `DNA_layer_types.h`
*/
if (vd->runtime.local_stats) {
MEM_freeN(static_cast<void *>(vd->runtime.local_stats));
vd->runtime.local_stats = nullptr;
}
if (vd->runtime.properties_storage_free) {
vd->runtime.properties_storage_free(vd->runtime.properties_storage);
vd->runtime.properties_storage_free = nullptr;
}
if (vd->shading.prop) {
IDP_FreeProperty(vd->shading.prop);
vd->shading.prop = nullptr;
}
BKE_viewer_path_clear(&vd->viewer_path);
}
/* spacetype; init callback */
static void view3d_init(wmWindowManager * /*wm*/, ScrArea * /*area*/) {}
static void view3d_exit(wmWindowManager * /*wm*/, ScrArea *area)
{
BLI_assert(area->spacetype == SPACE_VIEW3D);
View3D *v3d = static_cast<View3D *>(area->spacedata.first);
/* Cannot use MEM_SAFE_FREE, as #SceneStats type is only forward-declared in `DNA_layer_types.h`
*/
if (v3d->runtime.local_stats) {
MEM_freeN(static_cast<void *>(v3d->runtime.local_stats));
v3d->runtime.local_stats = nullptr;
}
}
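/* Duplicate the space-link for area copying: runtime data, local-view data and the XR mirror
* flag are cleared on the copy, while shading ID properties and the viewer path are copied. */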
static SpaceLink *view3d_duplicate(SpaceLink *sl)
{
View3D *v3do = (View3D *)sl;
View3D *v3dn = static_cast<View3D *>(MEM_dupallocN(sl));
memset(&v3dn->runtime, 0x0, sizeof(v3dn->runtime));
/* clear or remove stuff from old */
if (v3dn->localvd) {
v3dn->localvd = nullptr;
}
v3dn->local_collections_uid = 0;
v3dn->flag &= ~(V3D_LOCAL_COLLECTIONS | V3D_XR_SESSION_MIRROR);
if (v3dn->shading.type == OB_RENDER) {
v3dn->shading.type = OB_SOLID;
}
if (v3dn->shading.prop) {
v3dn->shading.prop = IDP_CopyProperty(v3do->shading.prop);
}
BKE_viewer_path_copy(&v3dn->viewer_path, &v3do->viewer_path);
/* copy or clear inside new stuff */
return (SpaceLink *)v3dn;
}
/* add handlers, stuff you only do once or on area/region changes */
static void view3d_main_region_init(wmWindowManager *wm, ARegion *region)
{
ListBase *lb;
wmKeyMap *keymap;
/* Object ops. */
/* Important to be before the 'Pose' keymap since both can be enabled at once. */
keymap = WM_keymap_ensure(
wm->defaultconf, "Paint Face Mask (Weight, Vertex, Texture)", SPACE_EMPTY, RGN_TYPE_WINDOW);
WM_event_add_keymap_handler(&region->runtime->handlers, keymap);
keymap = WM_keymap_ensure(
wm->defaultconf, "Paint Vertex Selection (Weight, Vertex)", SPACE_EMPTY, RGN_TYPE_WINDOW);
WM_event_add_keymap_handler(&region->runtime->handlers, keymap);
/* Before 'Weight/Vertex Paint' so adding curve points is not overridden. */
keymap = WM_keymap_ensure(wm->defaultconf, "Paint Curve", SPACE_EMPTY, RGN_TYPE_WINDOW);
WM_event_add_keymap_handler(&region->runtime->handlers, keymap);
/* Before 'Pose' so weight paint menus aren't overridden by pose menus. */
keymap = WM_keymap_ensure(wm->defaultconf, "Weight Paint", SPACE_EMPTY, RGN_TYPE_WINDOW);
WM_event_add_keymap_handler(&region->runtime->handlers, keymap);
keymap = WM_keymap_ensure(wm->defaultconf, "Vertex Paint", SPACE_EMPTY, RGN_TYPE_WINDOW);
WM_event_add_keymap_handler(&region->runtime->handlers, keymap);
/* Pose is not modal, operator polls check for this. */
keymap = WM_keymap_ensure(wm->defaultconf, "Pose", SPACE_EMPTY, RGN_TYPE_WINDOW);
WM_event_add_keymap_handler(&region->runtime->handlers, keymap);
keymap = WM_keymap_ensure(wm->defaultconf, "Object Mode", SPACE_EMPTY, RGN_TYPE_WINDOW);
WM_event_add_keymap_handler(&region->runtime->handlers, keymap);
keymap = WM_keymap_ensure(wm->defaultconf, "Curve", SPACE_EMPTY, RGN_TYPE_WINDOW);
WM_event_add_keymap_handler(&region->runtime->handlers, keymap);
keymap = WM_keymap_ensure(wm->defaultconf, "Curves", SPACE_EMPTY, RGN_TYPE_WINDOW);
WM_event_add_keymap_handler(&region->runtime->handlers, keymap);
keymap = WM_keymap_ensure(wm->defaultconf, "Image Paint", SPACE_EMPTY, RGN_TYPE_WINDOW);
WM_event_add_keymap_handler(&region->runtime->handlers, keymap);
keymap = WM_keymap_ensure(wm->defaultconf, "Sculpt", SPACE_EMPTY, RGN_TYPE_WINDOW);
WM_event_add_keymap_handler(&region->runtime->handlers, keymap);
keymap = WM_keymap_ensure(wm->defaultconf, "Mesh", SPACE_EMPTY, RGN_TYPE_WINDOW);
WM_event_add_keymap_handler(&region->runtime->handlers, keymap);
keymap = WM_keymap_ensure(wm->defaultconf, "Armature", SPACE_EMPTY, RGN_TYPE_WINDOW);
WM_event_add_keymap_handler(&region->runtime->handlers, keymap);
keymap = WM_keymap_ensure(wm->defaultconf, "Metaball", SPACE_EMPTY, RGN_TYPE_WINDOW);
WM_event_add_keymap_handler(&region->runtime->handlers, keymap);
keymap = WM_keymap_ensure(wm->defaultconf, "Lattice", SPACE_EMPTY, RGN_TYPE_WINDOW);
WM_event_add_keymap_handler(&region->runtime->handlers, keymap);
keymap = WM_keymap_ensure(wm->defaultconf, "Particle", SPACE_EMPTY, RGN_TYPE_WINDOW);
WM_event_add_keymap_handler(&region->runtime->handlers, keymap);
keymap = WM_keymap_ensure(wm->defaultconf, "Point Cloud", SPACE_EMPTY, RGN_TYPE_WINDOW);
WM_event_add_keymap_handler(&region->runtime->handlers, keymap);
keymap = WM_keymap_ensure(wm->defaultconf, "Sculpt Curves", SPACE_EMPTY, RGN_TYPE_WINDOW);
WM_event_add_keymap_handler(&region->runtime->handlers, keymap);
/* NOTE: Grease Pencil handlers used to be added via `ED_KEYMAP_GPENCIL` in
 * `ed_default_handlers` because they had to be registered for multiple editors (other
 * editors use annotations). For OB_GREASE_PENCIL the keymaps only need to be registered
 * for the 3D View. */
keymap = WM_keymap_ensure(
wm->defaultconf, "Grease Pencil Selection", SPACE_EMPTY, RGN_TYPE_WINDOW);
WM_event_add_keymap_handler(&region->runtime->handlers, keymap);
keymap = WM_keymap_ensure(
wm->defaultconf, "Grease Pencil Edit Mode", SPACE_EMPTY, RGN_TYPE_WINDOW);
WM_event_add_keymap_handler(&region->runtime->handlers, keymap);
keymap = WM_keymap_ensure(
wm->defaultconf, "Grease Pencil Paint Mode", SPACE_EMPTY, RGN_TYPE_WINDOW);
WM_event_add_keymap_handler(&region->runtime->handlers, keymap);
keymap = WM_keymap_ensure(
wm->defaultconf, "Grease Pencil Sculpt Mode", SPACE_EMPTY, RGN_TYPE_WINDOW);
WM_event_add_keymap_handler(&region->runtime->handlers, keymap);
keymap = WM_keymap_ensure(
wm->defaultconf, "Grease Pencil Weight Paint", SPACE_EMPTY, RGN_TYPE_WINDOW);
WM_event_add_keymap_handler(&region->runtime->handlers, keymap);
keymap = WM_keymap_ensure(
wm->defaultconf, "Grease Pencil Vertex Paint", SPACE_EMPTY, RGN_TYPE_WINDOW);
WM_event_add_keymap_handler(&region->runtime->handlers, keymap);
keymap = WM_keymap_ensure(
wm->defaultconf, "Grease Pencil Brush Stroke", SPACE_EMPTY, RGN_TYPE_WINDOW);
WM_event_add_keymap_handler(&region->runtime->handlers, keymap);
keymap = WM_keymap_ensure(
wm->defaultconf, "Grease Pencil Fill Tool", SPACE_EMPTY, RGN_TYPE_WINDOW);
WM_event_add_keymap_handler(&region->runtime->handlers, keymap);
/* The edit-font key-map swallows almost all events (because of text input). */
keymap = WM_keymap_ensure(wm->defaultconf, "Font", SPACE_EMPTY, RGN_TYPE_WINDOW);
WM_event_add_keymap_handler(&region->runtime->handlers, keymap);
keymap = WM_keymap_ensure(wm->defaultconf, "Object Non-modal", SPACE_EMPTY, RGN_TYPE_WINDOW);
WM_event_add_keymap_handler(&region->runtime->handlers, keymap);
keymap = WM_keymap_ensure(wm->defaultconf, "Frames", SPACE_EMPTY, RGN_TYPE_WINDOW);
WM_event_add_keymap_handler(&region->runtime->handlers, keymap);
/* Own keymap, added last so modes can override it. */
keymap = WM_keymap_ensure(wm->defaultconf, "3D View Generic", SPACE_VIEW3D, RGN_TYPE_WINDOW);
WM_event_add_keymap_handler(&region->runtime->handlers, keymap);
keymap = WM_keymap_ensure(wm->defaultconf, "3D View", SPACE_VIEW3D, RGN_TYPE_WINDOW);
WM_event_add_keymap_handler(&region->runtime->handlers, keymap);
/* Add drop boxes. */
lb = WM_dropboxmap_find("View3D", SPACE_VIEW3D, RGN_TYPE_WINDOW);
WM_event_add_dropbox_handler(&region->runtime->handlers, lb);
}
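/** Main region exit callback: stop any render preview that is still running for this region. */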
static void view3d_main_region_exit(wmWindowManager *wm, ARegion *region)
{
ED_view3d_stop_render_preview(wm, region);
}
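/** Only allow dropping into the main region when the cursor is not over any overlapping region. */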
static bool view3d_drop_in_main_region_poll(bContext *C, const wmEvent *event)
{
ScrArea *area = CTX_wm_area(C);
return ED_region_overlap_isect_any_xy(area, event->xy) == false;
}
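/**
 * Return the ID type carried by the drag (local ID or asset),
 * or ID_Type(0) when the drop is not over the main region or carries no ID.
 */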
static ID_Type view3d_drop_id_in_main_region_poll_get_id_type(bContext *C,
wmDrag *drag,
const wmEvent *event)
{
const ScrArea *area = CTX_wm_area(C);
if (ED_region_overlap_isect_any_xy(area, event->xy)) {
return ID_Type(0);
}
if (!view3d_drop_in_main_region_poll(C, event)) {
return ID_Type(0);
}
ID *local_id = WM_drag_get_local_ID(drag, 0);
if (local_id) {
return GS(local_id->name);
}
wmDragAsset *asset_drag = WM_drag_get_asset_data(drag, 0);
if (asset_drag) {
return asset_drag->asset->get_id_type();
}
return ID_Type(0);
}
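/** Poll for dropping an ID of the given type into the main region. */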
static bool view3d_drop_id_in_main_region_poll(bContext *C,
wmDrag *drag,
const wmEvent *event,
ID_Type id_type)
{
if (!view3d_drop_in_main_region_poll(C, event)) {
return false;
}
return WM_drag_is_ID_type(drag, id_type);
}
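/**
 * On drag-enter, create the snap cursor state used to preview the drop placement:
 * draw the placement plane and, when the dragged object's dimensions are known,
 * a bounding box. Skipped when the asset will be imported linked.
 */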
static void view3d_ob_drop_on_enter(wmDropBox *drop, wmDrag *drag)
{
V3DSnapCursorState *state = static_cast<V3DSnapCursorState *>(drop->draw_data);
if (state) {
return;
}
/* Don't use the snap cursor when linking the object. Object transform isn't editable then and
* would be reset on reload. */
if (WM_drag_asset_will_import_linked(drag)) {
return;
}
state = ED_view3d_cursor_snap_state_create();
drop->draw_data = state;
state->draw_plane = true;
float dimensions[3] = {0.0f};
if (drag->type == WM_DRAG_ID) {
Object *ob = (Object *)WM_drag_get_local_ID(drag, ID_OB);
BKE_object_dimensions_eval_cached_get(ob, dimensions);
}
else {
AssetMetaData *meta_data = WM_drag_get_asset_meta_data(drag, ID_OB);
IDProperty *dimensions_prop = BKE_asset_metadata_idprop_find(meta_data, "dimensions");
if (dimensions_prop) {
copy_v3_v3(dimensions, static_cast<float *>(IDP_Array(dimensions_prop)));
}
}
if (!is_zero_v3(dimensions)) {
mul_v3_v3fl(state->box_dimensions, dimensions, 0.5f);
UI_GetThemeColor4ubv(TH_GIZMO_PRIMARY, state->color_box);
state->draw_box = true;
}
}
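/** Free the snap cursor state created in #view3d_ob_drop_on_enter. */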
static void view3d_ob_drop_on_exit(wmDropBox *drop, wmDrag * /*drag*/)
{
V3DSnapCursorState *state = static_cast<V3DSnapCursorState *>(drop->draw_data);
if (state) {
ED_view3d_cursor_snap_state_free(state);
drop->draw_data = nullptr;
}
}
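/** Accept object drops (a local object ID or an object asset) over the main region. */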
static bool view3d_ob_drop_poll(bContext *C, wmDrag *drag, const wmEvent *event)
{
return view3d_drop_id_in_main_region_poll(C, drag, event, ID_OB);
}
static bool view3d_ob_drop_poll_external_asset(bContext *C, wmDrag *drag, const wmEvent *event)
{
if (!view3d_ob_drop_poll(C, drag, event) || (drag->type != WM_DRAG_ASSET)) {
return false;
}
return true;
}
/**
 * \note The term "local" here refers to the object not being an external asset;
 * the poll will succeed for linked library objects.
 */
static bool view3d_ob_drop_poll_local_id(bContext *C, wmDrag *drag, const wmEvent *event)
{
if (!view3d_ob_drop_poll(C, drag, event) || (drag->type != WM_DRAG_ID)) {
return false;
}
return true;
}
static bool view3d_collection_drop_poll(bContext *C, wmDrag *drag, const wmEvent *event)
{
return view3d_drop_id_in_main_region_poll(C, drag, event, ID_GR);
}
static bool view3d_collection_drop_poll_local_id(bContext *C, wmDrag *drag, const wmEvent *event)
{
if (!view3d_collection_drop_poll(C, drag, event) || (drag->type != WM_DRAG_ID)) {
return false;
}
return true;
}
static bool view3d_collection_drop_poll_external_asset(bContext *C,
wmDrag *drag,
const wmEvent *event)
{
if (!view3d_collection_drop_poll(C, drag, event) || (drag->type != WM_DRAG_ASSET)) {
return false;
}
return true;
}
static bool view3d_mat_drop_poll(bContext *C, wmDrag *drag, const wmEvent *event)
{
if (!view3d_drop_id_in_main_region_poll(C, drag, event, ID_MA)) {
return false;
}
Object *ob = ED_view3d_give_object_under_cursor(C, event->mval);
return (ob && ID_IS_EDITABLE(&ob->id) && !ID_IS_OVERRIDE_LIBRARY(&ob->id));
}
static std::string view3d_mat_drop_tooltip(bContext *C,
wmDrag *drag,
const int xy[2],
wmDropBox * /*drop*/)
{
const char *name = WM_drag_get_item_name(drag);
ARegion *region = CTX_wm_region(C);
const int mval[2] = {
xy[0] - region->winrct.xmin,
xy[1] - region->winrct.ymin,
};
return blender::ed::object::drop_named_material_tooltip(C, name, mval);
}
static bool view3d_world_drop_poll(bContext *C, wmDrag *drag, const wmEvent *event)
{
return view3d_drop_id_in_main_region_poll(C, drag, event, ID_WO);
}
static bool view3d_object_data_drop_poll(bContext *C, wmDrag *drag, const wmEvent *event)
{
ID_Type id_type = view3d_drop_id_in_main_region_poll_get_id_type(C, drag, event);
if (id_type && OB_DATA_SUPPORT_ID(id_type)) {
return true;
}
return false;
}
static std::string view3d_object_data_drop_tooltip(bContext * /*C*/,
wmDrag * /*drag*/,
const int /*xy*/[2],
wmDropBox * /*drop*/)
{
return TIP_("Create object instance from object-data");
}
static bool view3d_ima_drop_poll(bContext *C, wmDrag *drag, const wmEvent *event)
{
if (ED_region_overlap_isect_any_xy(CTX_wm_area(C), event->xy)) {
return false;
}
return WM_drag_is_ID_type(drag, ID_IM);
}
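/** True when the view is looking through a camera object, so an image can be used as a camera background. */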
static bool view3d_ima_bg_is_camera_view(bContext *C)
{
RegionView3D *rv3d = CTX_wm_region_view3d(C);
if (rv3d && (rv3d->persp == RV3D_CAMOB)) {
View3D *v3d = CTX_wm_view3d(C);
if (v3d && v3d->camera && v3d->camera->type == OB_CAMERA) {
return true;
}
}
return false;
}
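/** An image becomes a camera background only when no object is under the cursor and the view is a camera view. */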
static bool view3d_ima_bg_drop_poll(bContext *C, wmDrag *drag, const wmEvent *event)
{
if (!view3d_ima_drop_poll(C, drag, event)) {
return false;
}
if (ED_view3d_is_object_under_cursor(C, event->mval)) {
return false;
}
return view3d_ima_bg_is_camera_view(C);
}
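/** Allow the image-empty drop over empty space or over an existing image empty. */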
static bool view3d_ima_empty_drop_poll(bContext *C, wmDrag *drag, const wmEvent *event)
{
if (!view3d_ima_drop_poll(C, drag, event)) {
return false;
}
Object *ob = ED_view3d_give_object_under_cursor(C, event->mval);
if (ob == nullptr) {
return true;
}
if (ob->type == OB_EMPTY && ob->empty_drawtype == OB_EMPTY_IMAGE) {
return true;
}
return false;
}
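/** Accept node-group drops only when the dragged node tree (local ID or asset) is a geometry node tree. */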
static bool view3d_geometry_nodes_drop_poll(bContext *C, wmDrag *drag, const wmEvent *event)
{
if (!view3d_drop_id_in_main_region_poll(C, drag, event, ID_NT)) {
return false;
}
if (drag->type == WM_DRAG_ID) {
const bNodeTree *node_tree = reinterpret_cast<const bNodeTree *>(
WM_drag_get_local_ID(drag, ID_NT));
if (!node_tree) {
return false;
}
return node_tree->type == NTREE_GEOMETRY;
}
if (drag->type == WM_DRAG_ASSET) {
const wmDragAsset *asset_data = WM_drag_get_asset_data(drag, ID_NT);
if (!asset_data) {
return false;
}
const AssetMetaData *metadata = &asset_data->asset->get_metadata();
const IDProperty *tree_type = BKE_asset_metadata_idprop_find(metadata, "type");
if (!tree_type || IDP_Int(tree_type) != NTREE_GEOMETRY) {
return false;
}
if (wmDropBox *drop_box = drag->drop_state.active_dropbox) {
const uint32_t uid = RNA_int_get(drop_box->ptr, "session_uid");
const bNodeTree *node_tree = reinterpret_cast<const bNodeTree *>(
BKE_libblock_find_session_uid(CTX_data_main(C), ID_NT, uid));
if (node_tree) {
return node_tree->type == NTREE_GEOMETRY;
}
}
}
return true;
}
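/* Build the drop tooltip using the cursor position relative to the region. */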
static std::string view3d_geometry_nodes_drop_tooltip(bContext *C,
wmDrag * /*drag*/,
const int xy[2],
wmDropBox *drop)
{
ARegion *region = CTX_wm_region(C);
int mval[2] = {xy[0] - region->winrct.xmin, xy[1] - region->winrct.ymin};
return blender::ed::object::drop_geometry_nodes_tooltip(C, drop->ptr, mval);
}
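/* Compute the matrix for a dropped object from the snap cursor: orientation and location come
 * from the snap data, the object's own scale is preserved, and the bounds are offset so the
 * object rests on the snapped point. */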
static void view3d_ob_drop_matrix_from_snap(V3DSnapCursorState *snap_state,
Object *ob,
float obmat_final[4][4])
{
using namespace blender;
V3DSnapCursorData *snap_data = ED_view3d_cursor_snap_data_get();
BLI_assert(snap_state->draw_box || snap_state->draw_plane);
UNUSED_VARS_NDEBUG(snap_state);
copy_m4_m3(obmat_final, snap_data->plane_omat);
copy_v3_v3(obmat_final[3], snap_data->loc);
float scale[3];
mat4_to_size(scale, ob->object_to_world().ptr());
rescale_m4(obmat_final, scale);
if (const std::optional<Bounds<float3>> bb = BKE_object_boundbox_get(ob)) {
float3 offset = math::midpoint(bb->min, bb->max);
offset[2] = bb->min[2];
mul_mat3_m4_v3(obmat_final, offset);
sub_v3_v3(obmat_final[3], offset);
}
}
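/* Pass the dragged local object and its snapped placement matrix to the drop operator. */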
static void view3d_ob_drop_copy_local_id(bContext * /*C*/, wmDrag *drag, wmDropBox *drop)
{
ID *id = WM_drag_get_local_ID(drag, ID_OB);
RNA_int_set(drop->ptr, "session_uid", id->session_uid);
/* Don't duplicate IDs which were just imported. Only do that for existing, local IDs. */
BLI_assert(drag->type != WM_DRAG_ASSET);
V3DSnapCursorState *snap_state = ED_view3d_cursor_snap_state_active_get();
float obmat_final[4][4];
view3d_ob_drop_matrix_from_snap(snap_state, (Object *)id, obmat_final);
RNA_float_set_array(drop->ptr, "matrix", &obmat_final[0][0]);
}
/* Mostly the same logic as #view3d_collection_drop_copy_external_asset(), just different enough to
* make sharing code a bit difficult. */
static void view3d_ob_drop_copy_external_asset(bContext *C, wmDrag *drag, wmDropBox *drop)
{
/* NOTE(@ideasman42): Selection is handled here, de-selecting objects before append,
* using auto-select to ensure the new objects are selected.
* This is done so #OBJECT_OT_transform_to_mouse (which runs after this drop handler)
* can use the context setup here to place the objects. */
BLI_assert(drag->type == WM_DRAG_ASSET);
wmDragAsset *asset_drag = WM_drag_get_asset_data(drag, 0);
Scene *scene = CTX_data_scene(C);
ViewLayer *view_layer = CTX_data_view_layer(C);
BKE_view_layer_base_deselect_all(scene, view_layer);
ID *id = WM_drag_asset_id_import(C, asset_drag, FILE_AUTOSELECT);
/* TODO(sergey): Only update relations for the current scene. */
DEG_relations_tag_update(CTX_data_main(C));
WM_event_add_notifier(C, NC_SCENE | ND_LAYER_CONTENT, scene);
RNA_int_set(drop->ptr, "session_uid", id->session_uid);
BKE_view_layer_synced_ensure(scene, view_layer);
Base *base = BKE_view_layer_base_find(view_layer, (Object *)id);
if (base != nullptr) {
BKE_view_layer_base_select_and_set_active(view_layer, base);
WM_main_add_notifier(NC_SCENE | ND_OB_ACTIVE, scene);
}
DEG_id_tag_update(&scene->id, ID_RECALC_SELECT);
ED_outliner_select_sync_from_object_tag(C);
/* Make sure the depsgraph is evaluated so the new object's transforms are up-to-date.
* The evaluated #Object::object_to_world() will be copied back to the original object
* and used below. */
CTX_data_ensure_evaluated_depsgraph(C);
V3DSnapCursorState *snap_state = static_cast<V3DSnapCursorState *>(drop->draw_data);
if (snap_state) {
float obmat_final[4][4];
view3d_ob_drop_matrix_from_snap(snap_state, (Object *)id, obmat_final);
RNA_float_set_array(drop->ptr, "matrix", &obmat_final[0][0]);
}
}
static void view3d_collection_drop_copy_local_id(bContext * /*C*/, wmDrag *drag, wmDropBox *drop)
{
ID *id = WM_drag_get_local_ID(drag, ID_GR);
RNA_int_set(drop->ptr, "session_uid", int(id->session_uid));
}
/* Mostly the same logic as #view3d_ob_drop_copy_external_asset(), just different enough to make
* sharing code a bit difficult. */
static void view3d_collection_drop_copy_external_asset(bContext *C, wmDrag *drag, wmDropBox *drop)
{
BLI_assert(drag->type == WM_DRAG_ASSET);
wmDragAsset *asset_drag = WM_drag_get_asset_data(drag, 0);
Scene *scene = CTX_data_scene(C);
ViewLayer *view_layer = CTX_data_view_layer(C);
BKE_view_layer_base_deselect_all(scene, view_layer);
ID *id = WM_drag_asset_id_import(C, asset_drag, FILE_AUTOSELECT);
Collection *collection = (Collection *)id;
/* TODO(sergey): Only update relations for the current scene. */
DEG_relations_tag_update(CTX_data_main(C));
WM_event_add_notifier(C, NC_SCENE | ND_LAYER_CONTENT, scene);
RNA_int_set(drop->ptr, "session_uid", int(id->session_uid));
/* Make an object active, just use the first one in the collection. */
CollectionObject *cobject = static_cast<CollectionObject *>(collection->gobject.first);
BKE_view_layer_synced_ensure(scene, view_layer);
Base *base = cobject ? BKE_view_layer_base_find(view_layer, cobject->ob) : nullptr;
if (base) {
BLI_assert((base->flag & BASE_SELECTABLE) && (base->flag & BASE_ENABLED_VIEWPORT));
BKE_view_layer_base_select_and_set_active(view_layer, base);
WM_main_add_notifier(NC_SCENE | ND_OB_ACTIVE, scene);
}
DEG_id_tag_update(&scene->id, ID_RECALC_SELECT);
ED_outliner_select_sync_from_object_tag(C);
/* XXX Without an undo push here, there will be a crash when the user modifies operator
* properties. The stuff we do in these drop callbacks just isn't safe over undo/redo. */
ED_undo_push(C, "Collection_Drop");
}
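/* Generic ID drop: set the ID lookup properties from a local ID or a just-imported asset. */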
static void view3d_id_drop_copy(bContext *C, wmDrag *drag, wmDropBox *drop)
{
ID *id = WM_drag_get_local_ID_or_import_from_asset(C, drag, 0);
WM_operator_properties_id_lookup_set_from_id(drop->ptr, id);
RNA_boolean_set(drop->ptr, "show_datablock_in_modifier", (drag->type != WM_DRAG_ASSET));
}
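/* Geometry nodes variant of the generic ID drop. */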
static void view3d_geometry_nodes_drop_copy(bContext *C, wmDrag *drag, wmDropBox *drop)
{
view3d_id_drop_copy(C, drag, drop);
RNA_boolean_set(drop->ptr, "show_datablock_in_modifier", (drag->type != WM_DRAG_ASSET));
}
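/* As the generic ID drop, but also pass the ID type to the operator. */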
static void view3d_id_drop_copy_with_type(bContext *C, wmDrag *drag, wmDropBox *drop)
{
ID *id = WM_drag_get_local_ID_or_import_from_asset(C, drag, 0);
RNA_enum_set(drop->ptr, "type", GS(id->name));
WM_operator_properties_id_lookup_set_from_id(drop->ptr, id);
}
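/* When an ID is dragged (or imported from an asset), use it and clear the file path property. */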
static void view3d_id_path_drop_copy(bContext *C, wmDrag *drag, wmDropBox *drop)
{
ID *id = WM_drag_get_local_ID_or_import_from_asset(C, drag, 0);
if (id) {
WM_operator_properties_id_lookup_set_from_id(drop->ptr, id);
RNA_struct_property_unset(drop->ptr, "filepath");
return;
}
}
/* region dropbox definition */
static void view3d_dropboxes()
{
ListBase *lb = WM_dropboxmap_find("View3D", SPACE_VIEW3D, RGN_TYPE_WINDOW);
wmDropBox *drop;
drop = WM_dropbox_add(lb,
"OBJECT_OT_add_named",
view3d_ob_drop_poll_local_id,
view3d_ob_drop_copy_local_id,
WM_drag_free_imported_drag_ID,
nullptr);
drop->draw_droptip = WM_drag_draw_item_name_fn;
drop->on_enter = view3d_ob_drop_on_enter;
drop->on_exit = view3d_ob_drop_on_exit;
drop = WM_dropbox_add(lb,
"OBJECT_OT_transform_to_mouse",
view3d_ob_drop_poll_external_asset,
view3d_ob_drop_copy_external_asset,
WM_drag_free_imported_drag_ID,
nullptr);
drop->draw_droptip = WM_drag_draw_item_name_fn;
drop->on_enter = view3d_ob_drop_on_enter;
drop->on_exit = view3d_ob_drop_on_exit;
WM_dropbox_add(lb,
"OBJECT_OT_collection_external_asset_drop",
view3d_collection_drop_poll_external_asset,
view3d_collection_drop_copy_external_asset,
WM_drag_free_imported_drag_ID,
nullptr);
WM_dropbox_add(lb,
"OBJECT_OT_collection_instance_add",
view3d_collection_drop_poll_local_id,
view3d_collection_drop_copy_local_id,
WM_drag_free_imported_drag_ID,
nullptr);
WM_dropbox_add(lb,
"OBJECT_OT_drop_named_material",
view3d_mat_drop_poll,
view3d_id_drop_copy,
WM_drag_free_imported_drag_ID,
view3d_mat_drop_tooltip);
WM_dropbox_add(lb,
"OBJECT_OT_drop_geometry_nodes",
view3d_geometry_nodes_drop_poll,
view3d_geometry_nodes_drop_copy,
WM_drag_free_imported_drag_ID,
view3d_geometry_nodes_drop_tooltip);
WM_dropbox_add(lb,
"VIEW3D_OT_camera_background_image_add",
view3d_ima_bg_drop_poll,
view3d_id_path_drop_copy,
WM_drag_free_imported_drag_ID,
nullptr);
WM_dropbox_add(lb,
"OBJECT_OT_empty_image_add",
view3d_ima_empty_drop_poll,
view3d_id_path_drop_copy,
WM_drag_free_imported_drag_ID,
nullptr);
WM_dropbox_add(lb,
"OBJECT_OT_data_instance_add",
view3d_object_data_drop_poll,
view3d_id_drop_copy_with_type,
WM_drag_free_imported_drag_ID,
view3d_object_data_drop_tooltip);
WM_dropbox_add(lb,
"VIEW3D_OT_drop_world",
view3d_world_drop_poll,
view3d_id_drop_copy,
WM_drag_free_imported_drag_ID,
nullptr);
}
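/* Register gizmo types and gizmo group types used by the 3D viewport. */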
static void view3d_widgets()
{
wmGizmoMapType_Params params{SPACE_VIEW3D, RGN_TYPE_WINDOW};
wmGizmoMapType *gzmap_type = WM_gizmomaptype_ensure(&params);
WM_gizmogrouptype_append_and_link(gzmap_type,
blender::ed::transform::VIEW3D_GGT_xform_gizmo_context);
WM_gizmogrouptype_append_and_link(gzmap_type, VIEW3D_GGT_light_spot);
WM_gizmogrouptype_append_and_link(gzmap_type, VIEW3D_GGT_light_point);
WM_gizmogrouptype_append_and_link(gzmap_type, VIEW3D_GGT_light_area);
WM_gizmogrouptype_append_and_link(gzmap_type, VIEW3D_GGT_light_target);
WM_gizmogrouptype_append_and_link(gzmap_type, VIEW3D_GGT_force_field);
WM_gizmogrouptype_append_and_link(gzmap_type, VIEW3D_GGT_camera);
WM_gizmogrouptype_append_and_link(gzmap_type, VIEW3D_GGT_camera_view);
WM_gizmogrouptype_append_and_link(gzmap_type, VIEW3D_GGT_empty_image);
WM_gizmogrouptype_append_and_link(gzmap_type, VIEW3D_GGT_geometry_nodes);
/* TODO(@ideasman42): Not working well enough, disable for now. */
#if 0
WM_gizmogrouptype_append_and_link(gzmap_type, VIEW3D_GGT_armature_spline);
#endif
WM_gizmogrouptype_append(blender::ed::transform::VIEW3D_GGT_xform_gizmo);
WM_gizmogrouptype_append(blender::ed::transform::VIEW3D_GGT_xform_cage);
WM_gizmogrouptype_append(blender::ed::transform::VIEW3D_GGT_xform_shear);
WM_gizmogrouptype_append(blender::ed::transform::VIEW3D_GGT_xform_extrude);
WM_gizmogrouptype_append(VIEW3D_GGT_mesh_preselect_elem);
WM_gizmogrouptype_append(VIEW3D_GGT_mesh_preselect_edgering);
WM_gizmogrouptype_append(VIEW3D_GGT_tool_generic_handle_normal);
WM_gizmogrouptype_append(VIEW3D_GGT_tool_generic_handle_free);
WM_gizmogrouptype_append(VIEW3D_GGT_ruler);
WM_gizmotype_append(VIEW3D_GT_ruler_item);
WM_gizmogrouptype_append(VIEW3D_GGT_placement);
WM_gizmogrouptype_append_and_link(gzmap_type, VIEW3D_GGT_navigate);
WM_gizmotype_append(VIEW3D_GT_navigate_rotate);
}
/* type callback, not region itself */
static void view3d_main_region_free(ARegion *region)
{
RegionView3D *rv3d = static_cast<RegionView3D *>(region->regiondata);
if (rv3d) {
if (rv3d->localvd) {
MEM_freeN(rv3d->localvd);
}
if (rv3d->clipbb) {
MEM_freeN(rv3d->clipbb);
}
if (rv3d->view_render) {
DRW_engine_external_free(rv3d);
}
if (rv3d->sms) {
MEM_freeN(static_cast<void *>(rv3d->sms));
}
MEM_freeN(rv3d);
region->regiondata = nullptr;
}
}
/* copy regiondata */
static void *view3d_main_region_duplicate(void *poin)
{
if (poin) {
RegionView3D *rv3d = static_cast<RegionView3D *>(poin);
RegionView3D *new_rv3d;
new_rv3d = static_cast<RegionView3D *>(MEM_dupallocN(rv3d));
if (rv3d->localvd) {
new_rv3d->localvd = static_cast<RegionView3D *>(MEM_dupallocN(rv3d->localvd));
}
if (rv3d->clipbb) {
new_rv3d->clipbb = static_cast<BoundBox *>(MEM_dupallocN(rv3d->clipbb));
}
new_rv3d->view_render = nullptr;
new_rv3d->sms = nullptr;
new_rv3d->smooth_timer = nullptr;
return new_rv3d;
}
return nullptr;
}
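/* Notifier listener for the main 3D viewport region, tagging redraws and gizmo-map refreshes. */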
static void view3d_main_region_listener(const wmRegionListenerParams *params)
{
wmWindow *window = params->window;
ScrArea *area = params->area;
ARegion *region = params->region;
const wmNotifier *wmn = params->notifier;
const Scene *scene = params->scene;
View3D *v3d = static_cast<View3D *>(area->spacedata.first);
RegionView3D *rv3d = static_cast<RegionView3D *>(region->regiondata);
wmGizmoMap *gzmap = region->runtime->gizmo_map;
/* context changes */
switch (wmn->category) {
case NC_WM:
if (ELEM(wmn->data, ND_UNDO)) {
WM_gizmomap_tag_refresh(gzmap);
}
else if (ELEM(wmn->data, ND_XR_DATA_CHANGED)) {
/* Only cause a redraw if this is a VR session mirror. Should more features be added that
* require redraws, we could pass something to wmn->reference, e.g. the flag value. */
if (v3d->flag & V3D_XR_SESSION_MIRROR) {
ED_region_tag_redraw(region);
}
}
break;
case NC_ANIMATION:
switch (wmn->data) {
case ND_KEYFRAME_PROP:
case ND_NLA_ACTCHANGE:
ED_region_tag_redraw(region);
break;
case ND_NLA:
case ND_KEYFRAME:
if (ELEM(wmn->action, NA_EDITED, NA_ADDED, NA_REMOVED)) {
ED_region_tag_redraw(region);
}
break;
case ND_ANIMCHAN:
if (ELEM(wmn->action, NA_EDITED, NA_ADDED, NA_REMOVED, NA_SELECTED)) {
ED_region_tag_redraw(region);
}
break;
}
break;
case NC_SCENE:
switch (wmn->data) {
case ND_SCENEBROWSE:
case ND_LAYER_CONTENT:
ED_region_tag_redraw(region);
WM_gizmomap_tag_refresh(gzmap);
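          /* Content changes may have emptied local view; tag the area so the
           * refresh step can validate it (assumed behavior of the area refresh). */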
if (v3d->localvd && v3d->localvd->runtime.flag & V3D_RUNTIME_LOCAL_MAYBE_EMPTY) {
ED_area_tag_refresh(area);
}
break;
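        /* View layer changed: sync this 3D view with the scene that sent the
         * notifier before redrawing. */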
case ND_LAYER:
if (wmn->reference) {
BKE_screen_view3d_sync(v3d, static_cast<Scene *>(wmn->reference));
}
ED_region_tag_redraw(region);
WM_gizmomap_tag_refresh(gzmap);
break;
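        /* Generic scene-level changes: a redraw plus a gizmo refresh is enough. */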
case ND_OB_ACTIVE:
case ND_OB_SELECT:
case ND_FRAME:
case ND_TRANSFORM:
case ND_OB_VISIBLE:
case ND_RENDER_OPTIONS:
case ND_MARKERS:
case ND_MODE:
ED_region_tag_redraw(region);
WM_gizmomap_tag_refresh(gzmap);
break;
case ND_WORLD:
/* handled by space_view3d_listener() for v3d access */
break;
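        /* Only redraw when this region looks through the camera of the scene
         * that changed. */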
case ND_DRAW_RENDER_VIEWPORT: {
if (v3d->camera && (scene == wmn->reference)) {
if (rv3d->persp == RV3D_CAMOB) {
ED_region_tag_redraw(region);
}
}
break;
}
}
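      /* Any edit on scene data warrants a redraw, regardless of the data type
       * handled above. */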
if (wmn->action == NA_EDITED) {
ED_region_tag_redraw(region);
}
break;
case NC_OBJECT:
switch (wmn->data) {
case ND_BONE_ACTIVE:
case ND_BONE_SELECT:
case ND_BONE_COLLECTION:
case ND_TRANSFORM:
case ND_POSE:
case ND_DRAW:
case ND_MODIFIER:
case ND_SHADERFX:
case ND_CONSTRAINT:
case ND_KEYS:
case ND_PARTICLE:
case ND_POINTCACHE:
case ND_LOD:
ED_region_tag_redraw(region);
WM_gizmomap_tag_refresh(gzmap);
break;
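        /* Animation visualization (e.g. motion paths) only needs a redraw. */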
case ND_DRAW_ANIMVIZ:
ED_region_tag_redraw(region);
break;
}
switch (wmn->action) {
case NA_ADDED:
ED_region_tag_redraw(region);
break;
}
break;
case NC_GEOM:
switch (wmn->data) {
case ND_SELECT: {
WM_gizmomap_tag_refresh(gzmap);
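          /* Selection changes also need the redraw shared with ND_DATA below. */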
ATTR_FALLTHROUGH;
}
case ND_DATA:
ED_region_tag_redraw(region);
WM_gizmomap_tag_refresh(gzmap);
break;
case ND_VERTEX_GROUP:
ED_region_tag_redraw(region);
break;
}
switch (wmn->action) {
case NA_EDITED:
ED_region_tag_redraw(region);
break;
}
break;
case NC_CAMERA:
switch (wmn->data) {
case ND_DRAW_RENDER_VIEWPORT: {
if (v3d->camera && (v3d->camera->data == wmn->reference)) {
if (rv3d->persp == RV3D_CAMOB) {
ED_region_tag_redraw(region);
}
}
break;
}
}
break;
case NC_GROUP:
/* all group ops for now */
ED_region_tag_redraw(region);
break;
case NC_BRUSH:
switch (wmn->action) {
case NA_EDITED:
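          /* Brush edits only affect the paint cursor, so a cursor-only redraw
           * is enough. */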
ED_region_tag_redraw_cursor(region);
break;
case NA_SELECTED:
            /* Used on brush changes - needed because the 3D cursor
             * has to be drawn when the clone brush is selected. */
ED_region_tag_redraw(region);
break;
}
break;
case NC_MATERIAL:
switch (wmn->data) {
case ND_SHADING:
case ND_NODES:
          /* TODO(sergey): This is a bit too many updates, but it's needed to
           * get proper material driver updates in the viewport.
           *
           * How to solve?
           */
ED_region_tag_redraw(region);
break;
case ND_SHADING_DRAW:
case ND_SHADING_LINKS:
ED_region_tag_redraw(region);
break;
}
break;
case NC_NODE:
switch (wmn->data) {
case ND_NODE_GIZMO: {
WM_gizmomap_tag_refresh(gzmap);
break;
}
}
ED_region_tag_redraw(region);
break;
case NC_WORLD:
switch (wmn->data) {
case ND_WORLD_DRAW:
/* handled by space_view3d_listener() for v3d access */
break;
case ND_WORLD:
/* Needed for updating world materials */
ED_region_tag_redraw(region);
break;
}
break;
case NC_LAMP:
switch (wmn->data) {
case ND_LIGHTING:
          /* TODO(sergey): This is a bit too much, but it's needed to
           * handle updates from the new depsgraph.
           */
ED_region_tag_redraw(region);
break;
case ND_LIGHTING_DRAW:
ED_region_tag_redraw(region);
WM_gizmomap_tag_refresh(gzmap);
break;
}
break;
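    /* Light-probe changes need a full area refresh, not just a region redraw. */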
case NC_LIGHTPROBE:
ED_area_tag_refresh(area);
break;
case NC_IMAGE:
      /* This could use more fine-grained checks if we had
       * more context than just the region. */
ED_region_tag_redraw(region);
break;
case NC_TEXTURE:
/* Same as #NC_IMAGE. */
ED_region_tag_redraw(region);
break;
case NC_MOVIECLIP:
if (wmn->data == ND_DISPLAY || wmn->action == NA_EDITED) {
ED_region_tag_redraw(region);
}
break;
case NC_SPACE:
if (wmn->data == ND_SPACE_VIEW3D) {
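        /* Viewport GPU settings changed: flag the region so GPU lighting data
         * can be updated on the next draw. */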
if (wmn->subtype == NS_VIEW3D_GPU) {
rv3d->rflag |= RV3D_GPULIGHT_UPDATE;
}
else if (wmn->subtype == NS_VIEW3D_SHADING) {
#ifdef WITH_XR_OPENXR
ED_view3d_xr_shading_update(
static_cast<wmWindowManager *>(G_MAIN->wm.first), v3d, scene);
#endif
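          /* Shading changes may also require the viewport render engine to
           * update, so notify it through the active depsgraph. */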
ViewLayer *view_layer = WM_window_get_active_view_layer(window);
Depsgraph *depsgraph = BKE_scene_get_depsgraph(scene, view_layer);
if (depsgraph) {
ED_render_view3d_update(depsgraph, window, area, true);
}
}
ED_region_tag_redraw(region);
WM_gizmomap_tag_refresh(gzmap);
}
break;
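    /* ID management events (rename, edit, add, remove): redraw so displayed
     * names stay current, and refresh the area when local view may have been
     * emptied. */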
case NC_ID:
if (ELEM(wmn->action, NA_RENAME, NA_EDITED, NA_ADDED, NA_REMOVED)) {
if (ELEM(wmn->action, NA_EDITED, NA_REMOVED) && v3d->localvd &&
v3d->localvd->runtime.flag & V3D_RUNTIME_LOCAL_MAYBE_EMPTY)
{
ED_area_tag_refresh(area);
}
ED_region_tag_redraw(region);
WM_gizmomap_tag_refresh(gzmap);
}
break;
case NC_SCREEN:
switch (wmn->data) {
case ND_ANIMPLAY:
case ND_SKETCH:
ED_region_tag_redraw(region);
break;
case ND_LAYOUTBROWSE:
case ND_LAYOUTDELETE:
case ND_LAYOUTSET:
WM_gizmomap_tag_refresh(gzmap);
ED_region_tag_redraw(region);
break;
case ND_LAYER:
ED_region_tag_redraw(region);
break;
}
break;
case NC_GPENCIL:
if (wmn->data == ND_DATA || ELEM(wmn->action, NA_EDITED, NA_SELECTED)) {
ED_region_tag_redraw(region);
}
break;
case NC_WORKSPACE:
/* In case the region displays workspace settings. */
ED_region_tag_redraw(region);
break;
case NC_VIEWER_PATH: {
if (v3d->flag2 & V3D_SHOW_VIEWER) {
ViewLayer *view_layer = WM_window_get_active_view_layer(window);
if (Depsgraph *depsgraph = BKE_scene_get_depsgraph(scene, view_layer)) {
ED_render_view3d_update(depsgraph, window, area, true);
}
ED_region_tag_redraw(region);
}
break;
}
}
}
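/* Message bus callback: when the viewport uses solid (workbench) shading, fill a draw-manager
 * update context and notify it so the view data for this region is refreshed. */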
static void view3d_do_msg_notify_workbench_view_update(bContext *C,
wmMsgSubscribeKey * /*msg_key*/,
wmMsgSubscribeValue *msg_val)
{
Scene *scene = CTX_data_scene(C);
ScrArea *area = (ScrArea *)msg_val->user_data;
View3D *v3d = (View3D *)area->spacedata.first;
if (v3d->shading.type == OB_SOLID) {
RenderEngineType *engine_type = ED_view3d_engine_type(scene, v3d->shading.type);
DRWUpdateContext drw_context = {nullptr};
drw_context.bmain = CTX_data_main(C);
drw_context.depsgraph = CTX_data_depsgraph_pointer(C);
drw_context.scene = scene;
drw_context.view_layer = CTX_data_view_layer(C);
drw_context.region = (ARegion *)(msg_val->owner);
drw_context.v3d = v3d;
drw_context.engine_type = engine_type;
DRW_notify_view_update(&drw_context);
}
}
static void view3d_main_region_message_subscribe(const wmRegionMessageSubscribeParams *params)
{
wmMsgBus *mbus = params->message_bus;
const bContext *C = params->context;
ScrArea *area = params->area;
ARegion *region = params->region;
/* Developer NOTE: there are many properties that impact 3D view drawing,
* so instead of subscribing to individual properties, just subscribe to types,
* accepting some redundant redraws.
*
* For other space types we might try to avoid this, keep the 3D view as an exceptional case! */
wmMsgParams_RNA msg_key_params{};
/* Only subscribe to types. */
StructRNA *type_array[] = {
&RNA_Window,
/* These objects have properties that impact drawing. */
&RNA_AreaLight,
&RNA_Camera,
&RNA_Light,
&RNA_Speaker,
&RNA_SunLight,
/* General types the 3D view depends on. */
&RNA_Object,
&RNA_UnitSettings, /* grid-floor */
&RNA_View3DCursor,
&RNA_View3DOverlay,
&RNA_View3DShading,
&RNA_World,
};
wmMsgSubscribeValue msg_sub_value_region_tag_redraw{};
msg_sub_value_region_tag_redraw.owner = region;
msg_sub_value_region_tag_redraw.user_data = region;
msg_sub_value_region_tag_redraw.notify = ED_region_do_msg_notify_tag_redraw;
wmMsgSubscribeValue msg_sub_value_workbench_view_update{};
msg_sub_value_workbench_view_update.owner = region;
msg_sub_value_workbench_view_update.user_data = area;
msg_sub_value_workbench_view_update.notify = view3d_do_msg_notify_workbench_view_update;
for (int i = 0; i < ARRAY_SIZE(type_array); i++) {
msg_key_params.ptr.type = type_array[i];
WM_msg_subscribe_rna_params(mbus, &msg_key_params, &msg_sub_value_region_tag_redraw, __func__);
}
/* Subscribe to a handful of other properties. */
RegionView3D *rv3d = static_cast<RegionView3D *>(region->regiondata);
WM_msg_subscribe_rna_anon_prop(mbus, RenderSettings, engine, &msg_sub_value_region_tag_redraw);
WM_msg_subscribe_rna_anon_prop(
mbus, RenderSettings, resolution_x, &msg_sub_value_region_tag_redraw);
WM_msg_subscribe_rna_anon_prop(
mbus, RenderSettings, resolution_y, &msg_sub_value_region_tag_redraw);
WM_msg_subscribe_rna_anon_prop(
mbus, RenderSettings, pixel_aspect_x, &msg_sub_value_region_tag_redraw);
WM_msg_subscribe_rna_anon_prop(
mbus, RenderSettings, pixel_aspect_y, &msg_sub_value_region_tag_redraw);
if (rv3d->persp == RV3D_CAMOB) {
WM_msg_subscribe_rna_anon_prop(
mbus, RenderSettings, use_border, &msg_sub_value_region_tag_redraw);
}
WM_msg_subscribe_rna_anon_type(mbus, SceneEEVEE, &msg_sub_value_region_tag_redraw);
WM_msg_subscribe_rna_anon_type(mbus, SceneDisplay, &msg_sub_value_region_tag_redraw);
WM_msg_subscribe_rna_anon_type(mbus, ObjectDisplay, &msg_sub_value_region_tag_redraw);
const Scene *scene = CTX_data_scene(C);
ViewLayer *view_layer = CTX_data_view_layer(C);
BKE_view_layer_synced_ensure(scene, view_layer);
Object *obact = BKE_view_layer_active_object_get(view_layer);
if (obact != nullptr) {
switch (obact->mode) {
case OB_MODE_PARTICLE_EDIT:
WM_msg_subscribe_rna_anon_type(mbus, ParticleEdit, &msg_sub_value_region_tag_redraw);
break;
case OB_MODE_SCULPT:
WM_msg_subscribe_rna_anon_prop(
mbus, WorkSpace, tools, &msg_sub_value_workbench_view_update);
break;
default:
break;
}
}
{
wmMsgSubscribeValue msg_sub_value_region_tag_refresh{};
msg_sub_value_region_tag_refresh.owner = region;
msg_sub_value_region_tag_refresh.user_data = area;
msg_sub_value_region_tag_refresh.notify = WM_toolsystem_do_msg_notify_tag_refresh;
WM_msg_subscribe_rna_anon_prop(mbus, Object, mode, &msg_sub_value_region_tag_refresh);
WM_msg_subscribe_rna_anon_prop(mbus, LayerObjects, active, &msg_sub_value_region_tag_refresh);
}
}
/* The concept is to retrieve the cursor type without relying on context. */
static void view3d_main_region_cursor(wmWindow *win, ScrArea *area, ARegion *region)
{
if (WM_cursor_set_from_tool(win, area, region)) {
return;
}
Scene *scene = WM_window_get_active_scene(win);
ViewLayer *view_layer = WM_window_get_active_view_layer(win);
BKE_view_layer_synced_ensure(scene, view_layer);
Object *obedit = BKE_view_layer_edit_object_get(view_layer);
if (obedit) {
WM_cursor_set(win, WM_CURSOR_EDIT);
}
else {
WM_cursor_set(win, WM_CURSOR_DEFAULT);
}
}
/* add handlers, stuff you only do once or on area/region changes */
static void view3d_header_region_init(wmWindowManager *wm, ARegion *region)
{
wmKeyMap *keymap = WM_keymap_ensure(
wm->defaultconf, "3D View Generic", SPACE_VIEW3D, RGN_TYPE_WINDOW);
WM_event_add_keymap_handler(&region->runtime->handlers, keymap);
ED_region_header_init(region);
}
static void view3d_header_region_draw(const bContext *C, ARegion *region)
{
ED_region_header(C, region);
}
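/* Tag the header for redraw on notifiers that affect its contents; asset and node-tool
 * related changes also clear the cached operator asset trees. */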
static void view3d_header_region_listener(const wmRegionListenerParams *params)
{
ARegion *region = params->region;
const wmNotifier *wmn = params->notifier;
/* context changes */
switch (wmn->category) {
case NC_SCENE:
switch (wmn->data) {
case ND_FRAME:
case ND_OB_ACTIVE:
case ND_OB_SELECT:
case ND_OB_VISIBLE:
case ND_MODE:
case ND_LAYER:
case ND_TOOLSETTINGS:
case ND_LAYER_CONTENT:
case ND_RENDER_OPTIONS:
ED_region_tag_redraw(region);
break;
}
break;
case NC_SPACE:
switch (wmn->data) {
case ND_SPACE_VIEW3D:
ED_region_tag_redraw(region);
break;
case ND_SPACE_ASSET_PARAMS:
blender::ed::geometry::clear_operator_asset_trees();
ED_region_tag_redraw(region);
break;
}
break;
case NC_ASSET:
switch (wmn->data) {
case ND_ASSET_CATALOGS:
case ND_ASSET_LIST_READING:
blender::ed::geometry::clear_operator_asset_trees();
ED_region_tag_redraw(region);
break;
default:
if (ELEM(wmn->action, NA_ADDED, NA_REMOVED)) {
blender::ed::geometry::clear_operator_asset_trees();
ED_region_tag_redraw(region);
}
}
break;
case NC_NODE:
switch (wmn->data) {
case ND_NODE_ASSET_DATA:
blender::ed::geometry::clear_operator_asset_trees();
ED_region_tag_redraw(region);
break;
}
break;
case NC_GPENCIL:
if (wmn->data & ND_GPENCIL_EDITMODE) {
ED_region_tag_redraw(region);
}
else if (wmn->action == NA_EDITED) {
ED_region_tag_redraw(region);
}
Grease Pencil - Storyboarding Features (merge from GPencil_EditStrokes branch) This merge-commit brings in a number of new features and workflow/UI improvements for working with Grease Pencil. While these were originally targetted at improving the workflow for creating 3D storyboards in Blender using the Grease Pencil, many of these changes should also prove useful in other workflows too. The main highlights here are: 1) It is now possible to edit Grease Pencil strokes - Use D Tab, or toggle the "Enable Editing" toggles in the Toolbar/Properties regions to enter "Stroke Edit Mode". In this mode, many common editing tools will operate on Grease Pencil stroke points instead. - Tools implemented include Select, Select All/Border/Circle/Linked/More/Less, Grab, Rotate, Scale, Bend, Shear, To Sphere, Mirror, Duplicate, Delete. - Proportional Editing works when using the transform tools 2) Grease Pencil stroke settings can now be animated NOTE: Currently drivers don't work, but if time allows, this may still be added before the release. 3) Strokes can be drawn with "filled" interiors, using a separate set of colour/opacity settings to the ones used for the lines themselves. This makes use of OpenGL filled polys, which has the limitation of only being able to fill convex shapes. Some artifacts may be visible on concave shapes (e.g. pacman's mouth will be overdrawn) 4) "Volumetric Strokes" - An alternative drawing technique for stroke drawing has been added which draws strokes as a series of screen-aligned discs. While this was originally a partial experimental technique at getting better quality 3D lines, the effects possible using this technique were interesting enough to warrant making this a dedicated feature. Best results when partial opacity and large stroke widths are used. 5) Improved Onion Skinning Support - Different colours can be selected for the before/after ghosts. To do so, enable the "colour wheel" toggle beside the Onion Skinning toggle, and set the colours accordingly. - Different numbers of ghosts can be shown before/after the current frame 6) Grease Pencil datablocks are now attached to the scene by default instead of the active object. - For a long time, the object-attachment has proved to be quite problematic for users to keep track of. Now that this is done at scene level, it is easier for most users to use. - An exception for old files (and for any addons which may benefit from object attachment instead), is that if the active object has a Grease Pencil datablock, that will be used instead. - It is not currently possible to choose object-attachment from the UI, but it is simple to do this from the console instead, by doing: context.active_object.grease_pencil = bpy.data.grease_pencil["blah"] 7) Various UI Cleanups - The layers UI has been cleaned up to use a list instead of the nested-panels design. Apart from saving space, this is also much nicer to look at now. - The UI code is now all defined in Python. To support this, it has been necessary to add some new context properties to make it easier to access these settings. e.g. "gpencil_data" for the datablock "active_gpencil_layer" and "active_gpencil_frame" for active data, "editable_gpencil_strokes" for the strokes that can be edited - The "stroke placement/alignment" settings (previously "Drawing Settings" at the bottom of the Grease Pencil panel in the Properties Region) is now located in the toolbar. These were more toolsettings than properties for how GPencil got drawn. 
- "Use Sketching Sessions" has been renamed "Continuous Drawing", as per a suggestion for an earlier discussion on developer.blender.org - By default, the painting operator will wait for a mouse button to be pressed before it starts creating the stroke. This is to make it easier to include this operator in various toolbars/menus/etc. To get it immediately starting (as when you hold down DKEy to draw), set "wait_for_input" to False. - GPencil Layers can be rearranged in the "Grease Pencil" mode of the Action Editor - Toolbar panels have been added to all the other editors which support these. 8) Pie menus for quick-access to tools A set of experimental pie menus has been included for quick access to many tools and settings. It is not necessary to use these to get things done, but they have been designed to help make certain common tasks easier. - Ctrl-D = The main pie menu. Reveals tools in a context sensitive and spatially stable manner. - D Q = "Quick Settings" pie. This allows quick access to the active layer's settings. Notably, colours, thickness, and turning onion skinning on/off.
2014-12-01 01:52:06 +13:00
break;
case NC_BRUSH:
ED_region_tag_redraw(region);
break;
case NC_GEOM:
if (ELEM(wmn->data, ND_VERTEX_GROUP, ND_DATA)) {
ED_region_tag_redraw(region);
}
break;
case NC_MATERIAL:
/* For the canvas picker. */
if (wmn->data == ND_SHADING_LINKS) {
ED_region_tag_redraw(region);
}
break;
}
/* From top-bar, which ones are needed? split per header? */
/* Disable for now, re-enable if needed, or remove - campbell. */
#if 0
/* context changes */
switch (wmn->category) {
case NC_WM:
if (wmn->data == ND_HISTORY) {
ED_region_tag_redraw(region);
}
break;
case NC_SCENE:
if (wmn->data == ND_MODE) {
ED_region_tag_redraw(region);
}
break;
case NC_SPACE:
if (wmn->data == ND_SPACE_VIEW3D) {
ED_region_tag_redraw(region);
}
break;
case NC_GPENCIL:
if (wmn->data == ND_DATA) {
ED_region_tag_redraw(region);
}
break;
}
#endif
}
static void view3d_header_region_message_subscribe(const wmRegionMessageSubscribeParams *params)
{
wmMsgBus *mbus = params->message_bus;
ARegion *region = params->region;
wmMsgParams_RNA msg_key_params{};
/* Only subscribe to types. */
StructRNA *type_array[] = {
&RNA_View3DShading,
};
wmMsgSubscribeValue msg_sub_value_region_tag_redraw{};
msg_sub_value_region_tag_redraw.owner = region;
msg_sub_value_region_tag_redraw.user_data = region;
msg_sub_value_region_tag_redraw.notify = ED_region_do_msg_notify_tag_redraw;
for (int i = 0; i < ARRAY_SIZE(type_array); i++) {
msg_key_params.ptr.type = type_array[i];
WM_msg_subscribe_rna_params(mbus, &msg_key_params, &msg_sub_value_region_tag_redraw, __func__);
}
}
/* add handlers, stuff you only do once or on area/region changes */
static void view3d_buttons_region_init(wmWindowManager *wm, ARegion *region)
{
wmKeyMap *keymap;
ED_region_panels_init(wm, region);
keymap = WM_keymap_ensure(wm->defaultconf, "3D View Generic", SPACE_VIEW3D, RGN_TYPE_WINDOW);
WM_event_add_keymap_handler(&region->runtime->handlers, keymap);
}
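/* Build the list of panel context strings for the active object mode, then lay out the
 * sidebar panels. `category_override` allows drawing these panels from another space type. */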
void ED_view3d_buttons_region_layout_ex(const bContext *C,
ARegion *region,
const char *category_override)
{
const enum eContextObjectMode mode = CTX_data_mode_enum(C);
const char *contexts_base[4] = {nullptr};
contexts_base[0] = CTX_data_mode_string(C);
const char **contexts = &contexts_base[1];
switch (mode) {
case CTX_MODE_EDIT_MESH:
ARRAY_SET_ITEMS(contexts, ".mesh_edit");
break;
case CTX_MODE_EDIT_CURVE:
ARRAY_SET_ITEMS(contexts, ".curve_edit");
break;
case CTX_MODE_EDIT_CURVES:
ARRAY_SET_ITEMS(contexts, ".curves_edit");
break;
case CTX_MODE_EDIT_SURFACE:
ARRAY_SET_ITEMS(contexts, ".curve_edit");
break;
case CTX_MODE_EDIT_TEXT:
ARRAY_SET_ITEMS(contexts, ".text_edit");
break;
case CTX_MODE_EDIT_ARMATURE:
ARRAY_SET_ITEMS(contexts, ".armature_edit");
break;
case CTX_MODE_EDIT_METABALL:
ARRAY_SET_ITEMS(contexts, ".mball_edit");
break;
case CTX_MODE_EDIT_LATTICE:
ARRAY_SET_ITEMS(contexts, ".lattice_edit");
break;
case CTX_MODE_EDIT_GREASE_PENCIL:
ARRAY_SET_ITEMS(contexts, ".grease_pencil_edit");
break;
case CTX_MODE_PAINT_GREASE_PENCIL:
ARRAY_SET_ITEMS(contexts, ".grease_pencil_paint");
break;
case CTX_MODE_SCULPT_GREASE_PENCIL:
ARRAY_SET_ITEMS(contexts, ".paint_common", ".grease_pencil_sculpt");
break;
case CTX_MODE_WEIGHT_GREASE_PENCIL:
ARRAY_SET_ITEMS(contexts, ".greasepencil_weight");
break;
case CTX_MODE_VERTEX_GREASE_PENCIL:
ARRAY_SET_ITEMS(contexts, ".greasepencil_vertex");
break;
case CTX_MODE_EDIT_POINTCLOUD:
ARRAY_SET_ITEMS(contexts, ".pointcloud_edit");
break;
case CTX_MODE_POSE:
ARRAY_SET_ITEMS(contexts, ".posemode");
break;
case CTX_MODE_SCULPT:
ARRAY_SET_ITEMS(contexts, ".paint_common", ".sculpt_mode");
break;
case CTX_MODE_PAINT_WEIGHT:
ARRAY_SET_ITEMS(contexts, ".paint_common", ".weightpaint");
break;
case CTX_MODE_PAINT_VERTEX:
ARRAY_SET_ITEMS(contexts, ".paint_common", ".vertexpaint");
break;
case CTX_MODE_PAINT_TEXTURE:
ARRAY_SET_ITEMS(contexts, ".paint_common", ".imagepaint");
break;
case CTX_MODE_PARTICLE:
ARRAY_SET_ITEMS(contexts, ".paint_common", ".particlemode");
break;
case CTX_MODE_OBJECT:
ARRAY_SET_ITEMS(contexts, ".objectmode");
break;
case CTX_MODE_PAINT_GPENCIL_LEGACY:
ARRAY_SET_ITEMS(contexts, ".greasepencil_paint");
break;
case CTX_MODE_SCULPT_GPENCIL_LEGACY:
ARRAY_SET_ITEMS(contexts, ".greasepencil_sculpt");
break;
case CTX_MODE_WEIGHT_GPENCIL_LEGACY:
ARRAY_SET_ITEMS(contexts, ".greasepencil_weight");
break;
case CTX_MODE_VERTEX_GPENCIL_LEGACY:
ARRAY_SET_ITEMS(contexts, ".greasepencil_vertex");
break;
case CTX_MODE_SCULPT_CURVES:
ARRAY_SET_ITEMS(contexts, ".paint_common", ".curves_sculpt");
break;
default:
break;
}
switch (mode) {
case CTX_MODE_PAINT_GPENCIL_LEGACY:
ARRAY_SET_ITEMS(contexts, ".greasepencil_paint");
break;
case CTX_MODE_SCULPT_GPENCIL_LEGACY:
ARRAY_SET_ITEMS(contexts, ".greasepencil_sculpt");
break;
case CTX_MODE_WEIGHT_GPENCIL_LEGACY:
ARRAY_SET_ITEMS(contexts, ".greasepencil_weight");
break;
case CTX_MODE_EDIT_GPENCIL_LEGACY:
ARRAY_SET_ITEMS(contexts, ".greasepencil_edit");
break;
case CTX_MODE_VERTEX_GPENCIL_LEGACY:
ARRAY_SET_ITEMS(contexts, ".greasepencil_vertex");
break;
default:
break;
}
ListBase *paneltypes = &region->runtime->type->paneltypes;
/* Allow drawing 3D view toolbar from non 3D view space type. */
if (category_override != nullptr) {
SpaceType *st = BKE_spacetype_from_id(SPACE_VIEW3D);
ARegionType *art = BKE_regiontype_from_id(st, RGN_TYPE_UI);
paneltypes = &art->paneltypes;
}
ED_region_panels_layout_ex(
C, region, paneltypes, WM_OP_INVOKE_REGION_WIN, contexts_base, category_override);
}
static void view3d_buttons_region_layout(const bContext *C, ARegion *region)
{
ED_view3d_buttons_region_layout_ex(C, region, nullptr);
}
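/* Tag the sidebar for redraw when data shown in its panels may have changed
 * (animation, scene, object, geometry, textures, brushes, etc.). */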
static void view3d_buttons_region_listener(const wmRegionListenerParams *params)
{
ARegion *region = params->region;
const wmNotifier *wmn = params->notifier;
/* context changes */
switch (wmn->category) {
case NC_ANIMATION:
switch (wmn->data) {
case ND_KEYFRAME_PROP:
case ND_NLA_ACTCHANGE:
ED_region_tag_redraw(region);
break;
case ND_NLA:
case ND_KEYFRAME:
if (ELEM(wmn->action, NA_EDITED, NA_ADDED, NA_REMOVED)) {
ED_region_tag_redraw(region);
}
break;
}
break;
case NC_SCENE:
switch (wmn->data) {
case ND_FRAME:
case ND_OB_ACTIVE:
case ND_OB_SELECT:
case ND_OB_VISIBLE:
case ND_MODE:
case ND_LAYER:
case ND_LAYER_CONTENT:
case ND_TOOLSETTINGS:
case ND_TRANSFORM:
ED_region_tag_redraw(region);
break;
}
switch (wmn->action) {
case NA_EDITED:
ED_region_tag_redraw(region);
break;
}
break;
case NC_OBJECT:
switch (wmn->data) {
case ND_BONE_ACTIVE:
case ND_BONE_SELECT:
case ND_BONE_COLLECTION:
case ND_TRANSFORM:
case ND_POSE:
case ND_DRAW:
case ND_KEYS:
case ND_MODIFIER:
case ND_SHADERFX:
ED_region_tag_redraw(region);
break;
}
break;
case NC_GEOM:
switch (wmn->data) {
case ND_DATA:
case ND_VERTEX_GROUP:
case ND_SELECT:
ED_region_tag_redraw(region);
break;
}
if (wmn->action == NA_EDITED) {
ED_region_tag_redraw(region);
}
break;
case NC_TEXTURE:
case NC_MATERIAL:
/* for brush textures */
ED_region_tag_redraw(region);
break;
case NC_BRUSH:
/* NA_SELECTED is used on brush changes */
if (ELEM(wmn->action, NA_EDITED, NA_SELECTED)) {
ED_region_tag_redraw(region);
}
break;
case NC_SPACE:
if (wmn->data == ND_SPACE_VIEW3D) {
ED_region_tag_redraw(region);
}
break;
case NC_ID:
if (wmn->action == NA_RENAME) {
ED_region_tag_redraw(region);
}
break;
case NC_GPENCIL:
if ((wmn->data & (ND_DATA | ND_GPENCIL_EDITMODE)) || (wmn->action == NA_EDITED)) {
ED_region_tag_redraw(region);
}
break;
case NC_IMAGE:
/* Update for the image layers in texture paint. */
if (wmn->action == NA_EDITED) {
ED_region_tag_redraw(region);
}
break;
case NC_WM:
if (wmn->data == ND_XR_DATA_CHANGED) {
ED_region_tag_redraw(region);
}
break;
}
}
/* add handlers, stuff you only do once or on area/region changes */
static void view3d_tools_region_init(wmWindowManager *wm, ARegion *region)
{
wmKeyMap *keymap;
ED_region_panels_init(wm, region);
keymap = WM_keymap_ensure(wm->defaultconf, "3D View Generic", SPACE_VIEW3D, RGN_TYPE_WINDOW);
WM_event_add_keymap_handler(&region->runtime->handlers, keymap);
}
static void view3d_tools_region_draw(const bContext *C, ARegion *region)
{
const char *contexts[] = {CTX_data_mode_string(C), nullptr};
ED_region_panels_ex(C, region, WM_OP_INVOKE_REGION_WIN, contexts);
}
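/* Draw the tool header with button sections aligned to match the region placement
 * (top or bottom). */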
static void view3d_tools_header_region_draw(const bContext *C, ARegion *region)
{
ED_region_header_with_button_sections(
C,
region,
(RGN_ALIGN_ENUM_FROM_MASK(region->alignment) == RGN_ALIGN_TOP) ?
uiButtonSectionsAlign::Top :
uiButtonSectionsAlign::Bottom);
}
/* add handlers, stuff you only do once or on area/region changes */
static void view3d_asset_shelf_region_init(wmWindowManager *wm, ARegion *region)
{
using namespace blender::ed;
wmKeyMap *keymap = WM_keymap_ensure(
wm->defaultconf, "3D View Generic", SPACE_VIEW3D, RGN_TYPE_WINDOW);
WM_event_add_keymap_handler(&region->runtime->handlers, keymap);
asset::shelf::region_init(wm, region);
}
/* area (not region) level listener */
static void space_view3d_listener(const wmSpaceTypeListenerParams *params)
{
ScrArea *area = params->area;
const wmNotifier *wmn = params->notifier;
View3D *v3d = static_cast<View3D *>(area->spacedata.first);
/* context changes */
switch (wmn->category) {
case NC_SCENE:
switch (wmn->data) {
case ND_WORLD: {
const bool use_scene_world = V3D_USES_SCENE_WORLD(v3d);
if (v3d->flag2 & V3D_HIDE_OVERLAYS || use_scene_world) {
ED_area_tag_redraw_regiontype(area, RGN_TYPE_WINDOW);
}
break;
}
}
break;
case NC_WORLD:
switch (wmn->data) {
case ND_WORLD_DRAW:
case ND_WORLD:
if (v3d->shading.background_type == V3D_SHADING_BACKGROUND_WORLD) {
ED_area_tag_redraw_regiontype(area, RGN_TYPE_WINDOW);
}
break;
}
break;
case NC_MATERIAL:
switch (wmn->data) {
case ND_NODES:
if (v3d->shading.type == OB_TEXTURE) {
ED_area_tag_redraw_regiontype(area, RGN_TYPE_WINDOW);
}
break;
}
break;
}
}
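/* Area refresh: free the cached local-view statistics and exit local view when it may have
 * become empty. */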
static void space_view3d_refresh(const bContext *C, ScrArea *area)
{
View3D *v3d = (View3D *)area->spacedata.first;
/* Cannot use #MEM_SAFE_FREE, as the #SceneStats type is only forward-declared in
* `DNA_layer_types.h`. */
if (v3d->runtime.local_stats) {
MEM_freeN(static_cast<void *>(v3d->runtime.local_stats));
v3d->runtime.local_stats = nullptr;
}
if (v3d->localvd && v3d->localvd->runtime.flag & V3D_RUNTIME_LOCAL_MAYBE_EMPTY) {
ED_localview_exit_if_empty(CTX_data_ensure_evaluated_depsgraph(C),
CTX_data_scene(C),
CTX_data_view_layer(C),
CTX_wm_manager(C),
CTX_wm_window(C),
v3d,
CTX_wm_area(C),
true,
U.smooth_viewtx);
}
}
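/* Remap the rotation-center object pointer; when it becomes unassigned, the stored bone name
 * is cleared as well. */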
static void view3d_id_remap_v3d_ob_centers(View3D *v3d,
const blender::bke::id::IDRemapper &mappings)
{
if (mappings.apply(reinterpret_cast<ID **>(&v3d->ob_center), ID_REMAP_APPLY_DEFAULT) ==
ID_REMAP_RESULT_SOURCE_UNASSIGNED)
{
/* Otherwise, bone-name may remain valid...
* We could be smart and check this, too? */
v3d->ob_center_bone[0] = '\0';
}
}
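/* Remap ID references stored directly on a View3D (currently only the camera).
 * If the camera ends up unassigned, any window region still in camera perspective
 * is switched back to regular perspective; with `is_local` set, the local-view
 * data of each region is updated instead. */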
static void view3d_id_remap_v3d(ScrArea *area,
SpaceLink *slink,
View3D *v3d,
const blender::bke::id::IDRemapper &mappings,
const bool is_local)
{
if (mappings.apply(reinterpret_cast<ID **>(&v3d->camera), ID_REMAP_APPLY_DEFAULT) ==
ID_REMAP_RESULT_SOURCE_UNASSIGNED)
{
/* The 3D view might be inactive, in which case we need to use slink->regionbase. */
ListBase *regionbase = (slink == area->spacedata.first) ? &area->regionbase :
&slink->regionbase;
LISTBASE_FOREACH (ARegion *, region, regionbase) {
if (region->regiontype == RGN_TYPE_WINDOW) {
RegionView3D *rv3d = is_local ? ((RegionView3D *)region->regiondata)->localvd :
static_cast<RegionView3D *>(region->regiondata);
if (rv3d && (rv3d->persp == RV3D_CAMOB)) {
rv3d->persp = RV3D_PERSP;
ID-Remap - Step one: core work (cleanup and rework of generic ID datablock handling). This commit changes a lot of how IDs are handled internally, especially the unlinking/freeing processes. So far, this was very fuzy, to summarize cleanly deleting or replacing a datablock was pretty much impossible, except for a few special cases. Also, unlinking was handled by each datatype, in a rather messy and prone-to-errors way (quite a few ID usages were missed or wrongly handled that way). One of the main goal of id-remap branch was to cleanup this, and fatorize ID links handling by using library_query utils to allow generic handling of those, which is now the case (now, generic ID links handling is only "knwon" from readfile.c and library_query.c). This commit also adds backends to allow live replacement and deletion of datablocks in Blender (so-called 'remapping' process, where we replace all usages of a given ID pointer by a new one, or NULL one in case of unlinking). This will allow nice new features, like ability to easily reload or relocate libraries, real immediate deletion of datablocks in blender, replacement of one datablock by another, etc. Some of those are for next commits. A word of warning: this commit is highly risky, because it affects potentially a lot in Blender core. Though it was tested rather deeply, being totally impossible to check all possible ID usage cases, it's likely there are some remaining issues and bugs in new code... Please report them! ;) Review task: D2027 (https://developer.blender.org/D2027). Reviewed by campbellbarton, thanks a bunch.
2016-06-22 17:29:38 +02:00
}
}
}
}
}
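/* Space-type `id_remap` callback: remaps all ID references held by the 3D view,
 * including its local-view copy and the viewer path. Returns early when the
 * mapping involves none of the ID types a View3D can reference. */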
static void view3d_id_remap(ScrArea *area,
SpaceLink *slink,
const blender::bke::id::IDRemapper &mappings)
{
if (!mappings.contains_mappings_for_any(FILTER_ID_OB | FILTER_ID_MA | FILTER_ID_IM |
FILTER_ID_MC))
Performance: Remap multiple items in UI During sprite fright loading of complex scenes would spend a long time in remapping ID's The remapping process is done on a per ID instance that resulted in a very time consuming process that goes over every possible ID reference to find out if it needs to be updated. If there are N of references to ID blocks and there are M ID blocks that needed to be remapped it would take N*M checks. These checks are scattered around the place and memory. Each reference would only be updated at most once, but most of the time no update is needed at all. Idea: By grouping the changes together will reduce the number of checks resulting in improved performance. This would only require N checks. Additional benefits is improved data locality as data is only loaded once in the L2 cache. It has be implemented for the resyncing process and UI editors. On an Intel(R) Core(TM) i7-6700 CPU @ 3.40GHz 16Gig the resyncing process went from 170 seconds to 145 seconds (during hotspot recording). After this patch has been applied we could add similar approach to references (references between data blocks) and functionality (tagged deletion). In my understanding this could reduce the resyncing process to less than a second. Opening the village production file between 10 and 20 seconds. Flame graphs showing that UI remapping isn't visible anymore (`WM_main_remap_editor_id_reference`) * Master {F12769210 size=full} * This patch {F12769211 size=full} Reviewed By: mont29 Maniphest Tasks: T94185 Differential Revision: https://developer.blender.org/D13615
2022-01-25 14:51:35 +01:00
{
return;
}
View3D *view3d = (View3D *)slink;
view3d_id_remap_v3d(area, slink, view3d, mappings, false);
view3d_id_remap_v3d_ob_centers(view3d, mappings);
if (view3d->localvd != nullptr) {
/* Object centers in local-view aren't used, see: #52663 */
view3d_id_remap_v3d(area, slink, view3d->localvd, mappings, true);
/* Remapping is potentially modifying ID pointers, and there is a local View3D, mark it for a
* check for emptiness. */
view3d->localvd->runtime.flag |= V3D_RUNTIME_LOCAL_MAYBE_EMPTY;
}
BKE_viewer_path_id_remap(&view3d->viewer_path, mappings);
}
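/* Space-type `foreach_id` callback: reports every ID pointer owned by the 3D view
 * to the library-query system as a direct weak link. When the visitor may modify
 * pointers, the local-view copy is flagged so it can later be checked for being
 * empty. */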
static void view3d_foreach_id(SpaceLink *space_link, LibraryForeachIDData *data)
{
View3D *v3d = reinterpret_cast<View3D *>(space_link);
BKE_LIB_FOREACHID_PROCESS_IDSUPER(data, v3d->camera, IDWALK_CB_DIRECT_WEAK_LINK);
BKE_LIB_FOREACHID_PROCESS_IDSUPER(data, v3d->ob_center, IDWALK_CB_DIRECT_WEAK_LINK);
if (v3d->localvd) {
BKE_LIB_FOREACHID_PROCESS_IDSUPER(data, v3d->localvd->camera, IDWALK_CB_DIRECT_WEAK_LINK);
/* If potentially modifying ID pointers, and there is a local View3D, mark it for a check for
* emptiness. */
const int flags = BKE_lib_query_foreachid_process_flags_get(data);
if ((flags & IDWALK_READONLY) == 0) {
v3d->localvd->runtime.flag |= V3D_RUNTIME_LOCAL_MAYBE_EMPTY;
}
}
BKE_viewer_path_foreach_id(data, &v3d->viewer_path);
}
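/* Read View3D data from a .blend file: restore the legacy grease-pencil (annotation)
 * data and the local-view copy, clear runtime data, and fall back from rendered to
 * solid shading so files do not open straight into the heavy render preview. */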
static void view3d_space_blend_read_data(BlendDataReader *reader, SpaceLink *sl)
{
View3D *v3d = (View3D *)sl;
memset(&v3d->runtime, 0x0, sizeof(v3d->runtime));
if (v3d->gpd) {
BLO_read_struct(reader, bGPdata, &v3d->gpd);
BKE_gpencil_blend_read_data(reader, v3d->gpd);
}
BLO_read_struct(reader, View3D, &v3d->localvd);
/* render can be quite heavy, set to solid on load */
if (v3d->shading.type == OB_RENDER) {
v3d->shading.type = OB_SOLID;
}
v3d->shading.prev_type = OB_SOLID;
BKE_screen_view3d_shading_blend_read_data(reader, &v3d->shading);
BKE_screen_view3d_do_versions_250(v3d, &sl->regionbase);
BKE_viewer_path_blend_read_data(reader, &v3d->viewer_path);
}
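/* Write View3D data to a .blend file, including the local-view copy, the shading
 * settings and the viewer path. */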
static void view3d_space_blend_write(BlendWriter *writer, SpaceLink *sl)
{
View3D *v3d = (View3D *)sl;
BLO_write_struct(writer, View3D, v3d);
if (v3d->localvd) {
BLO_write_struct(writer, View3D, v3d->localvd);
}
BKE_screen_view3d_shading_blend_write(writer, &v3d->shading);
BKE_viewer_path_blend_write(writer, &v3d->viewer_path);
}
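/* Register the 3D viewport space type together with all of its region types
 * (main window, sidebar, toolbar, headers, asset shelf, HUD and XR). */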
void ED_spacetype_view3d()
{
using namespace blender::ed;
std::unique_ptr<SpaceType> st = std::make_unique<SpaceType>();
ARegionType *art;
st->spaceid = SPACE_VIEW3D;
STRNCPY(st->name, "View3D");
st->create = view3d_create;
st->free = view3d_free;
st->init = view3d_init;
st->exit = view3d_exit;
st->listener = space_view3d_listener;
st->refresh = space_view3d_refresh;
st->duplicate = view3d_duplicate;
st->operatortypes = view3d_operatortypes;
st->keymap = view3d_keymap;
st->dropboxes = view3d_dropboxes;
st->gizmos = view3d_widgets;
st->context = view3d_context;
st->id_remap = view3d_id_remap;
st->foreach_id = view3d_foreach_id;
st->blend_read_data = view3d_space_blend_read_data;
st->blend_read_after_liblink = nullptr;
st->blend_write = view3d_space_blend_write;
/* regions: main window */
art = MEM_cnew<ARegionType>("spacetype view3d main region");
art->regionid = RGN_TYPE_WINDOW;
art->keymapflag = ED_KEYMAP_GIZMO | ED_KEYMAP_TOOL | ED_KEYMAP_GPENCIL;
art->draw = view3d_main_region_draw;
art->init = view3d_main_region_init;
art->exit = view3d_main_region_exit;
art->free = view3d_main_region_free;
art->duplicate = view3d_main_region_duplicate;
art->listener = view3d_main_region_listener;
art->message_subscribe = view3d_main_region_message_subscribe;
art->cursor = view3d_main_region_cursor;
art->lock = 1; /* can become flag, see BKE_spacedata_draw_locks */
BLI_addhead(&st->regiontypes, art);
/* regions: list-view/buttons */
art = MEM_cnew<ARegionType>("spacetype view3d buttons region");
art->regionid = RGN_TYPE_UI;
art->prefsizex = UI_SIDEBAR_PANEL_WIDTH;
art->keymapflag = ED_KEYMAP_UI | ED_KEYMAP_FRAMES;
art->listener = view3d_buttons_region_listener;
art->message_subscribe = ED_area_do_mgs_subscribe_for_tool_ui;
art->init = view3d_buttons_region_init;
art->layout = view3d_buttons_region_layout;
art->draw = ED_region_panels_draw;
BLI_addhead(&st->regiontypes, art);
view3d_buttons_register(art);
/* regions: tool(bar) */
art = MEM_cnew<ARegionType>("spacetype view3d tools region");
art->regionid = RGN_TYPE_TOOLS;
art->prefsizex = int(UI_TOOLBAR_WIDTH);
art->prefsizey = 50; /* XXX */
art->keymapflag = ED_KEYMAP_UI | ED_KEYMAP_FRAMES;
art->listener = view3d_buttons_region_listener;
art->message_subscribe = ED_region_generic_tools_region_message_subscribe;
art->snap_size = ED_region_generic_tools_region_snap_size;
art->init = view3d_tools_region_init;
art->draw = view3d_tools_region_draw;
BLI_addhead(&st->regiontypes, art);
/* regions: tool header */
art = MEM_cnew<ARegionType>("spacetype view3d tool header region");
art->regionid = RGN_TYPE_TOOL_HEADER;
art->prefsizey = HEADERY;
art->keymapflag = ED_KEYMAP_UI | ED_KEYMAP_VIEW2D | ED_KEYMAP_FRAMES | ED_KEYMAP_HEADER;
art->listener = view3d_header_region_listener;
art->message_subscribe = ED_area_do_mgs_subscribe_for_tool_header;
art->init = view3d_header_region_init;
art->draw = view3d_tools_header_region_draw;
BLI_addhead(&st->regiontypes, art);
/* regions: header */
art = MEM_cnew<ARegionType>("spacetype view3d header region");
art->regionid = RGN_TYPE_HEADER;
art->prefsizey = HEADERY;
art->keymapflag = ED_KEYMAP_UI | ED_KEYMAP_VIEW2D | ED_KEYMAP_FRAMES | ED_KEYMAP_HEADER;
art->listener = view3d_header_region_listener;
art->message_subscribe = view3d_header_region_message_subscribe;
art->init = view3d_header_region_init;
art->draw = view3d_header_region_draw;
BLI_addhead(&st->regiontypes, art);
/* regions: asset shelf */
art = MEM_cnew<ARegionType>("spacetype view3d asset shelf region");
art->regionid = RGN_TYPE_ASSET_SHELF;
art->keymapflag = ED_KEYMAP_UI | ED_KEYMAP_ASSET_SHELF | ED_KEYMAP_FRAMES;
art->duplicate = asset::shelf::region_duplicate;
art->free = asset::shelf::region_free;
art->on_poll_success = asset::shelf::region_on_poll_success;
art->listener = asset::shelf::region_listen;
art->message_subscribe = asset::shelf::region_message_subscribe;
art->poll = asset::shelf::regions_poll;
art->snap_size = asset::shelf::region_snap;
art->on_user_resize = asset::shelf::region_on_user_resize;
art->context = asset::shelf::context;
art->init = view3d_asset_shelf_region_init;
art->layout = asset::shelf::region_layout;
art->draw = asset::shelf::region_draw;
BLI_addhead(&st->regiontypes, art);
/* regions: asset shelf header */
art = MEM_cnew<ARegionType>("spacetype view3d asset shelf header region");
art->regionid = RGN_TYPE_ASSET_SHELF_HEADER;
art->keymapflag = ED_KEYMAP_UI | ED_KEYMAP_ASSET_SHELF | ED_KEYMAP_VIEW2D | ED_KEYMAP_FOOTER;
art->init = asset::shelf::header_region_init;
art->poll = asset::shelf::regions_poll;
art->draw = asset::shelf::header_region;
art->listener = asset::shelf::header_region_listen;
art->context = asset::shelf::context;
BLI_addhead(&st->regiontypes, art);
asset::shelf::types_register(art, SPACE_VIEW3D);
/* regions: hud */
art = ED_area_type_hud(st->spaceid);
BLI_addhead(&st->regiontypes, art);
/* regions: xr */
art = MEM_cnew<ARegionType>("spacetype view3d xr region");
art->regionid = RGN_TYPE_XR;
BLI_addhead(&st->regiontypes, art);
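/* Menus listing the geometry node group operator assets available in the 3D viewport,
 * including the menu for assets not assigned to any catalog. */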
WM_menutype_add(
MEM_cnew<MenuType>(__func__, blender::ed::geometry::node_group_operator_assets_menu()));
WM_menutype_add(MEM_cnew<MenuType>(
__func__, blender::ed::geometry::node_group_operator_assets_menu_unassigned()));
BKE_spacetype_register(std::move(st));
}