diff --git a/GNUmakefile b/GNUmakefile
index 991fa37f07e..333afe353b6 100644
--- a/GNUmakefile
+++ b/GNUmakefile
@@ -84,6 +84,7 @@ Spell Checkers
    * check_spelling_c:       Check for spelling errors (C/C++ only),
    * check_spelling_py:      Check for spelling errors (Python only).
    * check_spelling_shaders: Check for spelling errors (GLSL,OSL & MSL only).
+   * check_spelling_cmake:   Check for spelling errors (CMake only).
 
    Note: an additional word-list is maintained at: 'tools/check_source/check_spelling_c_config.py'
 
@@ -520,6 +521,16 @@ check_spelling_shaders: .FORCE
 		"$(BLENDER_DIR)/intern/" \
 		"$(BLENDER_DIR)/source/"
 
+check_spelling_cmake: .FORCE
+	@PYTHONIOENCODING=utf_8 $(PYTHON) \
+		"$(BLENDER_DIR)/tools/check_source/check_spelling.py" \
+		--cache-file=$(CHECK_SPELLING_CACHE) \
+		--match=".*\.(cmake)$$" \
+		--match=".*\bCMakeLists\.(txt)$$" \
+		"$(BLENDER_DIR)/build_files/" \
+		"$(BLENDER_DIR)/intern/" \
+		"$(BLENDER_DIR)/source/"
+
 check_descriptions: .FORCE
 	@$(BLENDER_BIN) --background --factory-startup --python \
 		"$(BLENDER_DIR)/tools/check_source/check_descriptions.py"
diff --git a/tools/check_source/check_spelling.py b/tools/check_source/check_spelling.py
index f419b5597ce..8229eaa2640 100755
--- a/tools/check_source/check_spelling.py
+++ b/tools/check_source/check_spelling.py
@@ -84,6 +84,8 @@ SOURCE_EXT = (
     "glsl",
     "osl",
     "py",
+    "txt",  # for `CMakeLists.txt`.
+    "cmake",
 )
 
 BASEDIR = os.path.abspath(os.path.dirname(__file__))
@@ -319,6 +321,8 @@ def extract_code_strings(filepath: str) -> Tuple[List[Comment], Set[str]]:
     # return comments, code_words
     if filepath.endswith(".py"):
         lex = lexers.get_lexer_by_name("python")
+    elif filepath.endswith((".cmake", ".txt")):
+        lex = lexers.get_lexer_by_name("cmake")
     else:
         lex = lexers.get_lexer_by_name("c")
 
@@ -367,6 +371,35 @@ def extract_py_comments(filepath: str) -> Tuple[List[Comment], Set[str]]:
     return comments, code_words
 
 
+def extract_cmake_comments(filepath: str) -> Tuple[List[Comment], Set[str]]:
+    from pygments import lexers
+    from pygments.token import Token
+
+    lex = lexers.get_lexer_by_name("cmake")
+
+    with open(filepath, encoding='utf-8') as fh:
+        source = fh.read()
+
+    comments = []
+    code_words = set()
+
+    slineno = 0
+    for ty, ttext in lex.get_tokens(source):
+        if ty in {Token.Literal.String, Token.Literal.String.Double, Token.Literal.String.Single}:
+            # Disabled because most CMake strings are references to paths/code.
+            if False:
+                comments.append(Comment(filepath, ttext, slineno, 'STRING'))
+        elif ty in {Token.Comment, Token.Comment.Single}:
+            comments.append(Comment(filepath, ttext, slineno, 'COMMENT'))
+        else:
+            for match in re_vars.finditer(ttext):
+                code_words.add(match.group(0))
+        # Ugh - not nice or fast.
+        slineno += ttext.count("\n")
+
+    return comments, code_words
+
+
 def extract_c_comments(filepath: str) -> Tuple[List[Comment], Set[str]]:
     """
     Extracts comments like this:
@@ -520,6 +553,8 @@ def spell_check_file(
     if extract_type == 'COMMENTS':
         if filepath.endswith(".py"):
             comment_list, code_words = extract_py_comments(filepath)
+        elif filepath.endswith((".cmake", ".txt")):
+            comment_list, code_words = extract_cmake_comments(filepath)
         else:
             comment_list, code_words = extract_c_comments(filepath)
     elif extract_type == 'STRINGS':
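
For reviewers who want to see what the new extractor picks up, here is a small standalone sketch (not part of the patch; the script and its sample CMake snippet are made up for illustration). It walks pygments' `cmake` lexer tokens the same way `extract_cmake_comments()` does, prints only the comment tokens, and uses the same newline-counting line tracking:

from pygments import lexers
from pygments.token import Token

# Made-up CMake snippet: one full-line comment, one trailing comment, one string.
SOURCE = (
    "# Top-level comment describing the sources.\n"
    "set(SRC main.c util.c)  # trailing comment\n"
    'message(STATUS "strings are intentionally skipped")\n'
)

lex = lexers.get_lexer_by_name("cmake")

lineno = 0
for ty, text in lex.get_tokens(SOURCE):
    if ty in {Token.Comment, Token.Comment.Single}:
        # Only comments are reported, matching the patch's default behavior.
        print("line", lineno, repr(text.strip()))
    # Crude but sufficient line tracking: count newlines per token.
    lineno += text.count("\n")

Running this should report the two `#` comments and ignore the quoted string, which is the behavior the patch deliberately keeps disabled for CMake strings via the `if False:` branch.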