From 0001777892cade1525b0b0117c14029c23e44256 Mon Sep 17 00:00:00 2001 From: Pierre Gondois Date: Wed, 30 Apr 2025 12:57:01 +0200 Subject: [PATCH 01/15] BaseTools: Remove unused import Running the vulture tool gave the following report. Remove the unused import. - BPDG/BPDG.py:21: unused import 'encodings' (90% confidence) - build/build.py:61: unused import 'Manager' (90% confidence) - Ecc/CParser4/CLexer.py:4: unused import 'TextIO' (90% confidence) - Ecc/CParser4/CParser.py:5: unused import 'TextIO' (90% confidence) - Eot/CParser4/CLexer.py:4: unused import 'TextIO' (90% confidence) - Eot/CParser4/CParser.py:5: unused import 'TextIO' (90% confidence) - Eot/EotMain.py:28: unused import 'ConvertGuid' (90% confidence) - GenFds/FdfParser.py:16: unused import 'hexdigits' (90% confidence) - Table/TableEotReport.py:16: unused import 'EotToolError' (90% confidence) - UPT/Library/CommentGenerating.py:19: unused import 'USAGE_ITEM_NOTIFY' (90% confidence) - UPT/Library/ParserValidate.py:18: unused import 'COMPONENT_TYPE_LIST' (90% confidence) - UPT/Library/UniClassObject.py:25: unused import 'CheckUTF16FileHeader' (90% confidence) Signed-off-by: Pierre Gondois --- BaseTools/Source/Python/BPDG/BPDG.py | 1 - BaseTools/Source/Python/Ecc/CParser4/CLexer.py | 1 - BaseTools/Source/Python/Ecc/CParser4/CParser.py | 1 - BaseTools/Source/Python/Eot/CParser4/CLexer.py | 1 - BaseTools/Source/Python/Eot/CParser4/CParser.py | 1 - BaseTools/Source/Python/Eot/EotMain.py | 1 - BaseTools/Source/Python/GenFds/FdfParser.py | 1 - BaseTools/Source/Python/Table/TableEotReport.py | 1 - BaseTools/Source/Python/UPT/Library/CommentGenerating.py | 1 - BaseTools/Source/Python/UPT/Library/ParserValidate.py | 1 - BaseTools/Source/Python/UPT/Library/UniClassObject.py | 1 - BaseTools/Source/Python/build/build.py | 1 - 12 files changed, 12 deletions(-) diff --git a/BaseTools/Source/Python/BPDG/BPDG.py b/BaseTools/Source/Python/BPDG/BPDG.py index 283e08a37a0f..86f773cbf04f 100644 --- a/BaseTools/Source/Python/BPDG/BPDG.py +++ b/BaseTools/Source/Python/BPDG/BPDG.py @@ -18,7 +18,6 @@ from __future__ import absolute_import import Common.LongFilePathOs as os import sys -import encodings.ascii from optparse import OptionParser from Common import EdkLogger diff --git a/BaseTools/Source/Python/Ecc/CParser4/CLexer.py b/BaseTools/Source/Python/Ecc/CParser4/CLexer.py index a2cc5bf56e66..f0c6e66f0df5 100644 --- a/BaseTools/Source/Python/Ecc/CParser4/CLexer.py +++ b/BaseTools/Source/Python/Ecc/CParser4/CLexer.py @@ -1,7 +1,6 @@ # Generated from C.g4 by ANTLR 4.7.1 from antlr4 import * from io import StringIO -from typing.io import TextIO import sys diff --git a/BaseTools/Source/Python/Ecc/CParser4/CParser.py b/BaseTools/Source/Python/Ecc/CParser4/CParser.py index 31d23d55aa57..3946497f816b 100644 --- a/BaseTools/Source/Python/Ecc/CParser4/CParser.py +++ b/BaseTools/Source/Python/Ecc/CParser4/CParser.py @@ -2,7 +2,6 @@ # encoding: utf-8 from antlr4 import * from io import StringIO -from typing.io import TextIO import sys diff --git a/BaseTools/Source/Python/Eot/CParser4/CLexer.py b/BaseTools/Source/Python/Eot/CParser4/CLexer.py index 54374fd6e82d..6666664d6bb8 100644 --- a/BaseTools/Source/Python/Eot/CParser4/CLexer.py +++ b/BaseTools/Source/Python/Eot/CParser4/CLexer.py @@ -1,7 +1,6 @@ # Generated from C.g4 by ANTLR 4.7.1 from antlr4 import * from io import StringIO -from typing.io import TextIO import sys diff --git a/BaseTools/Source/Python/Eot/CParser4/CParser.py b/BaseTools/Source/Python/Eot/CParser4/CParser.py index 
31d23d55aa57..3946497f816b 100644 --- a/BaseTools/Source/Python/Eot/CParser4/CParser.py +++ b/BaseTools/Source/Python/Eot/CParser4/CParser.py @@ -2,7 +2,6 @@ # encoding: utf-8 from antlr4 import * from io import StringIO -from typing.io import TextIO import sys diff --git a/BaseTools/Source/Python/Eot/EotMain.py b/BaseTools/Source/Python/Eot/EotMain.py index 791fcdfeaed8..a5063782883d 100644 --- a/BaseTools/Source/Python/Eot/EotMain.py +++ b/BaseTools/Source/Python/Eot/EotMain.py @@ -25,7 +25,6 @@ from array import array from Eot.Report import Report from Common.BuildVersion import gBUILD_VERSION -from Eot.Parser import ConvertGuid from Common.LongFilePathSupport import OpenLongFilePath as open import struct import uuid diff --git a/BaseTools/Source/Python/GenFds/FdfParser.py b/BaseTools/Source/Python/GenFds/FdfParser.py index feb4c727794f..c821494d0d82 100644 --- a/BaseTools/Source/Python/GenFds/FdfParser.py +++ b/BaseTools/Source/Python/GenFds/FdfParser.py @@ -13,7 +13,6 @@ from __future__ import print_function from __future__ import absolute_import from re import compile, DOTALL -from string import hexdigits from uuid import UUID from Common.BuildToolError import * diff --git a/BaseTools/Source/Python/Table/TableEotReport.py b/BaseTools/Source/Python/Table/TableEotReport.py index 72bc11f6dbc2..abd433ec8ef1 100644 --- a/BaseTools/Source/Python/Table/TableEotReport.py +++ b/BaseTools/Source/Python/Table/TableEotReport.py @@ -13,7 +13,6 @@ import Common.LongFilePathOs as os, time from Table.Table import Table from Common.StringUtils import ConvertToSqlString2 -import Eot.EotToolError as EotToolError import Eot.EotGlobalData as EotGlobalData ## TableReport diff --git a/BaseTools/Source/Python/UPT/Library/CommentGenerating.py b/BaseTools/Source/Python/UPT/Library/CommentGenerating.py index bded508f565a..4c50adac6e50 100644 --- a/BaseTools/Source/Python/UPT/Library/CommentGenerating.py +++ b/BaseTools/Source/Python/UPT/Library/CommentGenerating.py @@ -16,7 +16,6 @@ from Library.StringUtils import GetSplitValueList from Library.DataType import TAB_SPACE_SPLIT from Library.DataType import TAB_INF_GUIDTYPE_VAR -from Library.DataType import USAGE_ITEM_NOTIFY from Library.DataType import ITEM_UNDEFINED from Library.DataType import TAB_HEADER_COMMENT from Library.DataType import TAB_BINARY_HEADER_COMMENT diff --git a/BaseTools/Source/Python/UPT/Library/ParserValidate.py b/BaseTools/Source/Python/UPT/Library/ParserValidate.py index 62f406141cc6..edb0c6275aef 100644 --- a/BaseTools/Source/Python/UPT/Library/ParserValidate.py +++ b/BaseTools/Source/Python/UPT/Library/ParserValidate.py @@ -15,7 +15,6 @@ import platform from Library.DataType import MODULE_LIST -from Library.DataType import COMPONENT_TYPE_LIST from Library.DataType import PCD_USAGE_TYPE_LIST_OF_MODULE from Library.DataType import TAB_SPACE_SPLIT from Library.StringUtils import GetSplitValueList diff --git a/BaseTools/Source/Python/UPT/Library/UniClassObject.py b/BaseTools/Source/Python/UPT/Library/UniClassObject.py index 8c44dc225277..381f978cfeb3 100644 --- a/BaseTools/Source/Python/UPT/Library/UniClassObject.py +++ b/BaseTools/Source/Python/UPT/Library/UniClassObject.py @@ -22,7 +22,6 @@ from Library.Misc import PathClass from Library.Misc import GetCharIndexOutStr from Library import DataType as DT -from Library.ParserValidate import CheckUTF16FileHeader ## # Static definitions diff --git a/BaseTools/Source/Python/build/build.py b/BaseTools/Source/Python/build/build.py index f471aea172ae..a78d4e95013a 100755 --- 
a/BaseTools/Source/Python/build/build.py +++ b/BaseTools/Source/Python/build/build.py @@ -58,7 +58,6 @@ import Common.GlobalData as GlobalData from GenFds.GenFds import GenFds, GenFdsApi import multiprocessing as mp -from multiprocessing import Manager from AutoGen.DataPipe import MemoryDataPipe from AutoGen.ModuleAutoGenHelper import WorkSpaceInfo, PlatformInfo from GenFds.FdfParser import FdfParser From 811e11d74ae902ba20ac79db17a16778a53de7c4 Mon Sep 17 00:00:00 2001 From: Pierre Gondois Date: Wed, 30 Apr 2025 10:06:21 +0200 Subject: [PATCH 02/15] BaseTools: Remove unreachable code Running the vulture tool gave the following report. Remove the unreachable code. - TargetTool/TargetTool.py:49: unreachable code after 'raise' (100% confidence) - UPT/Library/UniClassObject.py:137: unreachable code after 'return' (100% confidence) - UPT/Object/Parser/InfDefineObject.py:795: unreachable code after 'if' (100% confidence) - Ecc/Check.py:1504: unreachable code after 'return' (100% confidence) Signed-off-by: Pierre Gondois --- BaseTools/Source/Python/Ecc/Check.py | 1 - .../Source/Python/TargetTool/TargetTool.py | 1 - .../Python/UPT/Library/UniClassObject.py | 34 ------------------- .../UPT/Object/Parser/InfDefineObject.py | 1 - 4 files changed, 37 deletions(-) diff --git a/BaseTools/Source/Python/Ecc/Check.py b/BaseTools/Source/Python/Ecc/Check.py index 160e803764fe..c31ee703a7de 100644 --- a/BaseTools/Source/Python/Ecc/Check.py +++ b/BaseTools/Source/Python/Ecc/Check.py @@ -1501,7 +1501,6 @@ def FindPara(FilePath, Para, CallingLine): if Line.startswith('%s = ' % Para): Line = Line.strip() return Line - break return '' diff --git a/BaseTools/Source/Python/TargetTool/TargetTool.py b/BaseTools/Source/Python/TargetTool/TargetTool.py index 7f2479f0f0ac..8b344251fb6b 100644 --- a/BaseTools/Source/Python/TargetTool/TargetTool.py +++ b/BaseTools/Source/Python/TargetTool/TargetTool.py @@ -46,7 +46,6 @@ def LoadTargetTxtFile(self, filename): return self.ConvertTextFileToDict(filename, '#', '=') else: raise ParseError('LoadTargetTxtFile() : No Target.txt file exists.') - return 1 # # Convert a text file to a dictionary diff --git a/BaseTools/Source/Python/UPT/Library/UniClassObject.py b/BaseTools/Source/Python/UPT/Library/UniClassObject.py index 381f978cfeb3..80924a6f2a9e 100644 --- a/BaseTools/Source/Python/UPT/Library/UniClassObject.py +++ b/BaseTools/Source/Python/UPT/Library/UniClassObject.py @@ -133,40 +133,6 @@ def ConvertSpecialUnicodes(Uni): def GetLanguageCode1766(LangName, File=None): return LangName - length = len(LangName) - if length == 2: - if LangName.isalpha(): - for Key in gLANG_CONV_TABLE.keys(): - if gLANG_CONV_TABLE.get(Key) == LangName.lower(): - return Key - elif length == 3: - if LangName.isalpha() and gLANG_CONV_TABLE.get(LangName.lower()): - return LangName - else: - EdkLogger.Error("Unicode File Parser", - ToolError.FORMAT_INVALID, - "Invalid RFC 1766 language code : %s" % LangName, - File) - elif length == 5: - if LangName[0:2].isalpha() and LangName[2] == '-': - for Key in gLANG_CONV_TABLE.keys(): - if gLANG_CONV_TABLE.get(Key) == LangName[0:2].lower(): - return Key - elif length >= 6: - if LangName[0:2].isalpha() and LangName[2] == '-': - for Key in gLANG_CONV_TABLE.keys(): - if gLANG_CONV_TABLE.get(Key) == LangName[0:2].lower(): - return Key - if LangName[0:3].isalpha() and gLANG_CONV_TABLE.get(LangName.lower()) is None and LangName[3] == '-': - for Key in gLANG_CONV_TABLE.keys(): - if Key == LangName[0:3].lower(): - return Key - - EdkLogger.Error("Unicode File Parser", - 
ToolError.FORMAT_INVALID, - "Invalid RFC 4646 language code : %s" % LangName, - File) - ## GetLanguageCode # # Check the language code read from .UNI file and convert RFC 1766 codes to RFC 4646 codes if appropriate diff --git a/BaseTools/Source/Python/UPT/Object/Parser/InfDefineObject.py b/BaseTools/Source/Python/UPT/Object/Parser/InfDefineObject.py index a1b691ff0300..fda1fc9664a3 100644 --- a/BaseTools/Source/Python/UPT/Object/Parser/InfDefineObject.py +++ b/BaseTools/Source/Python/UPT/Object/Parser/InfDefineObject.py @@ -792,7 +792,6 @@ def SetSpecification(self, Specification, Comments): ErrorInInf(ST.ERR_INF_PARSER_DEFINE_FROMAT_INVALID%(Name), LineInfo=self.CurrentLine) return False - return True def GetSpecification(self): return self.Specification From 0fc2ffeb7b10ae2740b6dadbb16b38258b407e34 Mon Sep 17 00:00:00 2001 From: Pierre Gondois Date: Wed, 30 Apr 2025 14:29:25 +0200 Subject: [PATCH 03/15] BaseTools: AutoGen: Remove unnecessary code Running the vulture tool on the AutoGen folder gave the following report. Remove the unnecessary code. - AutoGen/BuildEngine.py:333: unused attribute 'SupportedToolChainFamilyList' (60% confidence) - AutoGen/BuildEngine.py:346: unused attribute '_RuleObjectList' (60% confidence) - AutoGen/GenMake.py:450: unused attribute 'FileBuildTargetList' (60% confidence) - AutoGen/GenMake.py:452: unused attribute 'PendingBuildTargetList' (60% confidence) - AutoGen/GenMake.py:458: unused attribute 'LibraryBuildCommandList' (60% confidence) - AutoGen/GenMake.py:459: unused attribute 'LibraryFileList' (60% confidence) - AutoGen/GenMake.py:462: unused attribute 'SystemLibraryList' (60% confidence) - AutoGen/GenMake.py:1168: unused method 'GetFileDependency' (60% confidence) - AutoGen/GenMake.py:1466: unused attribute 'ModuleBuildCommandList' (60% confidence) - AutoGen/GenPcdDb.py:442: unused class 'DbSkuHeadTableItemList' (60% confidence) - AutoGen/GenVar.py:31: unused attribute 'VpdRegionOffset' (60% confidence) - AutoGen/GenVar.py:43: unused attribute 'VpdRegionOffset' (60% confidence) - AutoGen/ModuleAutoGen.py:257: unused attribute 'FileDependCache' (60% confidence) - AutoGen/ModuleAutoGen.py:684: unused method 'BuildOptionIncPathList' (60% confidence) - AutoGen/PlatformAutoGen.py:994: unused method 'EdkBuildOption' (60% confidence) - AutoGen/UniClassObject.py:171: unused attribute 'StringNameByteList' (60% confidence) - AutoGen/UniClassObject.py:181: unused attribute 'StringNameByteList' (60% confidence) - AutoGen/UniClassObject.py:366: unused method 'GetIncludeFile' (60% confidence) - AutoGen/UniClassObject.py:606: unused method 'FindStringValue' (60% confidence) - AutoGen/UniClassObject.py:616: unused method 'FindByToken' (60% confidence) - AutoGen/ValidCheckingInfoObject.py:228: unused attribute 'ValidData' (60% confidence) - AutoGen/ValidCheckingInfoObject.py:231: unused attribute 'ValidData' (60% confidence) Signed-off-by: Pierre Gondois --- .../Source/Python/AutoGen/BuildEngine.py | 2 - BaseTools/Source/Python/AutoGen/GenMake.py | 21 --------- BaseTools/Source/Python/AutoGen/GenPcdDb.py | 17 ------- BaseTools/Source/Python/AutoGen/GenVar.py | 4 -- .../Source/Python/AutoGen/ModuleAutoGen.py | 45 ------------------- .../Source/Python/AutoGen/PlatformAutoGen.py | 6 --- .../Source/Python/AutoGen/UniClassObject.py | 29 ------------ .../Python/AutoGen/ValidCheckingInfoObject.py | 2 - 8 files changed, 126 deletions(-) diff --git a/BaseTools/Source/Python/AutoGen/BuildEngine.py b/BaseTools/Source/Python/AutoGen/BuildEngine.py index 45b39d7878d5..b829a2503cfa 
100644 --- a/BaseTools/Source/Python/AutoGen/BuildEngine.py +++ b/BaseTools/Source/Python/AutoGen/BuildEngine.py @@ -330,7 +330,6 @@ def __init__(self, File=None, Content=None, LineIndex=0, SupportedFamily=[TAB_CO else: EdkLogger.error("build", PARAMETER_MISSING, ExtraData="No rule file or string given") - self.SupportedToolChainFamilyList = SupportedFamily self.RuleDatabase = tdict(True, 4) # {FileExt, ModuleType, Arch, Family : FileBuildRule object} self.Ext2FileType = {} # {ext : file-type} self.FileTypeList = set() @@ -343,7 +342,6 @@ def __init__(self, File=None, Content=None, LineIndex=0, SupportedFamily=[TAB_CO self._ArchList = set() self._FamilyList = [] self._TotalToolChainFamilySet = set() - self._RuleObjectList = [] # FileBuildRule object list self._FileVersion = "" self.Parse() diff --git a/BaseTools/Source/Python/AutoGen/GenMake.py b/BaseTools/Source/Python/AutoGen/GenMake.py index 547c708fc7c3..e5f282c4acce 100755 --- a/BaseTools/Source/Python/AutoGen/GenMake.py +++ b/BaseTools/Source/Python/AutoGen/GenMake.py @@ -447,19 +447,14 @@ def __init__(self, ModuleAutoGen): self.ResultFileList = [] self.IntermediateDirectoryList = ["$(DEBUG_DIR)", "$(OUTPUT_DIR)"] - self.FileBuildTargetList = [] # [(src, target string)] self.BuildTargetList = [] # [target string] - self.PendingBuildTargetList = [] # [FileBuildRule objects] self.CommonFileDependency = [] self.FileListMacros = {} self.ListFileMacros = {} self.ObjTargetDict = OrderedDict() self.FileCache = {} - self.LibraryBuildCommandList = [] - self.LibraryFileList = [] self.LibraryMakefileList = [] self.LibraryBuildDirectoryList = [] - self.SystemLibraryList = [] self.Macros = OrderedDict() self.Macros["OUTPUT_DIR" ] = self._AutoGenObject.Macros["OUTPUT_DIR"] self.Macros["DEBUG_DIR" ] = self._AutoGenObject.Macros["DEBUG_DIR"] @@ -1157,21 +1152,6 @@ def ProcessDependentLibrary(self): if not LibraryAutoGen.IsBinaryModule: self.LibraryBuildDirectoryList.append(self.PlaceMacro(LibraryAutoGen.BuildDir, self.Macros)) - ## Return a list containing source file's dependencies - # - # @param FileList The list of source files - # @param ForceInculeList The list of files which will be included forcely - # @param SearchPathList The list of search path - # - # @retval dict The mapping between source file path and its dependencies - # - def GetFileDependency(self, FileList, ForceInculeList, SearchPathList): - Dependency = {} - for F in FileList: - Dependency[F] = GetDependencyList(self._AutoGenObject, self.FileCache, F, ForceInculeList, SearchPathList) - return Dependency - - ## CustomMakefile class # # This class encapsules makefie and its generation for module. 
It uses template to generate @@ -1463,7 +1443,6 @@ class PlatformMakefile(BuildFile): # def __init__(self, PlatformAutoGen): BuildFile.__init__(self, PlatformAutoGen) - self.ModuleBuildCommandList = [] self.ModuleMakefileList = [] self.IntermediateDirectoryList = [] self.ModuleBuildDirectoryList = [] diff --git a/BaseTools/Source/Python/AutoGen/GenPcdDb.py b/BaseTools/Source/Python/AutoGen/GenPcdDb.py index ad5dae0e5a2f..28df647bfe4e 100644 --- a/BaseTools/Source/Python/AutoGen/GenPcdDb.py +++ b/BaseTools/Source/Python/AutoGen/GenPcdDb.py @@ -435,23 +435,6 @@ def GetListSize(self): self.ListSize += self.ItemSize return self.ListSize -## DbSkuHeadTableItemList -# -# The class holds the Sku header value table -# -class DbSkuHeadTableItemList (DbItemList): - def __init__(self, ItemSize, DataList=None, RawDataList=None): - DbItemList.__init__(self, ItemSize, DataList, RawDataList) - - def PackData(self): - PackStr = "=LL" - Buffer = bytearray() - for Data in self.RawDataList: - Buffer += pack(PackStr, - GetIntegerValue(Data[0]), - GetIntegerValue(Data[1])) - return Buffer - ## DbSizeTableItemList # # The class holds the size table diff --git a/BaseTools/Source/Python/AutoGen/GenVar.py b/BaseTools/Source/Python/AutoGen/GenVar.py index f2ad54ba630e..54d786438077 100644 --- a/BaseTools/Source/Python/AutoGen/GenVar.py +++ b/BaseTools/Source/Python/AutoGen/GenVar.py @@ -28,7 +28,6 @@ def __init__(self, DefaultStoreMap, SkuIdMap): self.DefaultStoreMap = DefaultStoreMap self.SkuIdMap = SkuIdMap self.VpdRegionSize = 0 - self.VpdRegionOffset = 0 self.NVHeaderBuff = None self.VarDefaultBuff = None self.VarDeltaBuff = None @@ -39,9 +38,6 @@ def append_variable(self, uefi_var): def SetVpdRegionMaxSize(self, maxsize): self.VpdRegionSize = maxsize - def SetVpdRegionOffset(self, vpdoffset): - self.VpdRegionOffset = vpdoffset - def PatchNVStoreDefaultMaxSize(self, maxsize): if not self.NVHeaderBuff: return "" diff --git a/BaseTools/Source/Python/AutoGen/ModuleAutoGen.py b/BaseTools/Source/Python/AutoGen/ModuleAutoGen.py index 65a2176ca982..aa0b71632e8e 100755 --- a/BaseTools/Source/Python/AutoGen/ModuleAutoGen.py +++ b/BaseTools/Source/Python/AutoGen/ModuleAutoGen.py @@ -254,7 +254,6 @@ def _InitWorker(self, Workspace, ModuleFile, Target, Toolchain, Arch, PlatformFi self.AutoGenDepSet = set() self.ReferenceModules = [] self.ConstPcd = {} - self.FileDependCache = {} def __init_platform_info__(self): pinfo = self.DataPipe.Get("P_Info") @@ -677,50 +676,6 @@ def BuildOption(self): self.BuildRuleOrder = ['.%s' % Ext for Ext in self.BuildRuleOrder.split()] return RetVal - ## Get include path list from tool option for the module build - # - # @retval list The include path list - # - @cached_property - def BuildOptionIncPathList(self): - # - # Regular expression for finding Include Directories, the difference between MSFT and INTEL/GCC - # is the former use /I , the Latter used -I to specify include directories - # - if self.PlatformInfo.ToolChainFamily in (TAB_COMPILER_MSFT): - BuildOptIncludeRegEx = gBuildOptIncludePatternMsft - elif self.PlatformInfo.ToolChainFamily in ('INTEL', 'GCC'): - BuildOptIncludeRegEx = gBuildOptIncludePatternOther - else: - # - # New ToolChainFamily, don't known whether there is option to specify include directories - # - return [] - - RetVal = [] - for Tool in ('CC', 'PP', 'VFRPP', 'ASLPP', 'ASLCC', 'APP', 'ASM'): - try: - FlagOption = self.BuildOption[Tool]['FLAGS'] - except KeyError: - FlagOption = '' - - IncPathList = [NormPath(Path, self.Macros) for Path in 
BuildOptIncludeRegEx.findall(FlagOption)] - - # - # EDK II modules must not reference header files outside of the packages they depend on or - # within the module's directory tree. Report error if violation. - # - if GlobalData.gDisableIncludePathCheck == False: - for Path in IncPathList: - if (Path not in self.IncludePathList) and (CommonPath([Path, self.MetaFile.Dir]) != self.MetaFile.Dir): - ErrMsg = "The include directory for the EDK II module in this line is invalid %s specified in %s FLAGS '%s'" % (Path, Tool, FlagOption) - EdkLogger.error("build", - PARAMETER_INVALID, - ExtraData=ErrMsg, - File=str(self.MetaFile)) - RetVal += IncPathList - return RetVal - ## Return a list of files which can be built from source # # What kind of files can be built is determined by build rules in diff --git a/BaseTools/Source/Python/AutoGen/PlatformAutoGen.py b/BaseTools/Source/Python/AutoGen/PlatformAutoGen.py index e80203dce22e..68b9d89062b8 100644 --- a/BaseTools/Source/Python/AutoGen/PlatformAutoGen.py +++ b/BaseTools/Source/Python/AutoGen/PlatformAutoGen.py @@ -251,7 +251,6 @@ def CollectVariables(self, DynamicPcdSet): VariableInfo = VariableMgr(self.DscBuildDataObj._GetDefaultStores(), self.DscBuildDataObj.SkuIds) VariableInfo.SetVpdRegionMaxSize(VpdRegionSize) - VariableInfo.SetVpdRegionOffset(VpdRegionBase) Index = 0 for Pcd in sorted(DynamicPcdSet): pcdname = ".".join((Pcd.TokenSpaceGuidCName, Pcd.TokenCName)) @@ -990,11 +989,6 @@ def BuildOption(self): def _BuildOptionWithToolDef(self, ToolDef): return self._ExpandBuildOption(self.Platform.BuildOptions, ToolDef=ToolDef) - ## Return the build options specific for EDK modules in this platform - @cached_property - def EdkBuildOption(self): - return self._ExpandBuildOption(self.Platform.BuildOptions, EDK_NAME) - ## Return the build options specific for EDKII modules in this platform @cached_property def EdkIIBuildOption(self): diff --git a/BaseTools/Source/Python/AutoGen/UniClassObject.py b/BaseTools/Source/Python/AutoGen/UniClassObject.py index b16330e36825..6d62fa00c433 100644 --- a/BaseTools/Source/Python/AutoGen/UniClassObject.py +++ b/BaseTools/Source/Python/AutoGen/UniClassObject.py @@ -168,7 +168,6 @@ def Ucs2Search(name): class StringDefClassObject(object): def __init__(self, Name = None, Value = None, Referenced = False, Token = None, UseOtherLangDef = ''): self.StringName = '' - self.StringNameByteList = [] self.StringValue = '' self.StringValueByteList = '' self.Token = 0 @@ -178,7 +177,6 @@ def __init__(self, Name = None, Value = None, Referenced = False, Token = None, if Name is not None: self.StringName = Name - self.StringNameByteList = UniToHexList(Name) if Value is not None: self.StringValue = Value + u'\x00' # Add a NULL at string tail self.StringValueByteList = UniToHexList(self.StringValue) @@ -360,13 +358,6 @@ def GetStringObject(self, Item): Language = GetLanguageCode(Language, self.IsCompatibleMode, self.File) self.AddStringToList(Name, Language, Value) - # - # Get include file list and load them - # - def GetIncludeFile(self, Item, Dir): - FileName = Item[Item.find(u'#include ') + len(u'#include ') :Item.find(u' ', len(u'#include '))][1:-1] - self.LoadUniFile(FileName) - # # Pre-process before parse .uni file # @@ -600,26 +591,6 @@ def SetStringReferenced(self, Name): Item = self.OrderedStringList[Lang][ItemIndexInList] Item.Referenced = True - # - # Search the string in language definition by Name - # - def FindStringValue(self, Name, Lang): - if Name in self.OrderedStringDict[Lang]: - ItemIndexInList = 
self.OrderedStringDict[Lang][Name] - return self.OrderedStringList[Lang][ItemIndexInList] - - return None - - # - # Search the string in language definition by Token - # - def FindByToken(self, Token, Lang): - for Item in self.OrderedStringList[Lang]: - if Item.Token == Token: - return Item - - return None - # # Re-order strings and re-generate tokens # diff --git a/BaseTools/Source/Python/AutoGen/ValidCheckingInfoObject.py b/BaseTools/Source/Python/AutoGen/ValidCheckingInfoObject.py index ad8c9b598025..d4fc31a0b1fe 100644 --- a/BaseTools/Source/Python/AutoGen/ValidCheckingInfoObject.py +++ b/BaseTools/Source/Python/AutoGen/ValidCheckingInfoObject.py @@ -225,10 +225,8 @@ def __init__(self, VarOffset, data, PcdDataType): self.data = set() try: self.StorageWidth = MAX_SIZE_TYPE[self.PcdDataType] - self.ValidData = True except: self.StorageWidth = 0 - self.ValidData = False def __eq__(self, validObj): return validObj and self.VarOffset == validObj.VarOffset From f27b2c39d2cdab1a6db945082708731e328a30dd Mon Sep 17 00:00:00 2001 From: Pierre Gondois Date: Wed, 30 Apr 2025 14:31:24 +0200 Subject: [PATCH 04/15] BaseTools: build: Remove unnecessary code Running the vulture tool on the build folder gave the following report. Remove the unnecessary code. - build/build.py:79: unused function 'IsToolInPath' (60% confidence) - build/build.py:396: unused class 'PlatformMakeUnit' (60% confidence) - build/build.py:570: unused method 'GetErrorMessage' (60% confidence) - build/build.py:806: unused attribute 'HashSkipModules' (60% confidence) - build/build.py:807: unused attribute 'Db_Flag' (60% confidence) - build/build.py:1048: unused attribute 'Db_Flag' (60% confidence) - build/build.py:2500: unused method 'GetRealPathOfTool' (60% confidence) - build/BuildReport.py:1497: unused method 'StrtoHex' (60% confidence) Signed-off-by: Pierre Gondois --- BaseTools/Source/Python/build/BuildReport.py | 30 ----------- BaseTools/Source/Python/build/build.py | 55 -------------------- 2 files changed, 85 deletions(-) diff --git a/BaseTools/Source/Python/build/BuildReport.py b/BaseTools/Source/Python/build/BuildReport.py index 3c466596a2ac..1bf63a39b64c 100644 --- a/BaseTools/Source/Python/build/BuildReport.py +++ b/BaseTools/Source/Python/build/BuildReport.py @@ -1494,36 +1494,6 @@ def PrintStructureInfo(self, File, Struct): else: FileWrite(File, ' %-*s = %s' % (self.MaxLen + 4, '.' 
+ Key, Value[0])) - def StrtoHex(self, value): - try: - value = hex(int(value)) - return value - except: - if value.startswith("L\"") and value.endswith("\""): - valuelist = [] - for ch in value[2:-1]: - valuelist.append(hex(ord(ch))) - valuelist.append('0x00') - return valuelist - elif value.startswith("\"") and value.endswith("\""): - return hex(ord(value[1:-1])) - elif value.startswith("{") and value.endswith("}"): - valuelist = [] - if ',' not in value: - return value[1:-1] - for ch in value[1:-1].split(','): - ch = ch.strip() - if ch.startswith('0x') or ch.startswith('0X'): - valuelist.append(ch) - continue - try: - valuelist.append(hex(int(ch.strip()))) - except: - pass - return valuelist - else: - return value - def IsStructurePcd(self, PcdToken, PcdTokenSpaceGuid): if GlobalData.gStructurePcd and (self.Arch in GlobalData.gStructurePcd) and ((PcdToken, PcdTokenSpaceGuid) in GlobalData.gStructurePcd[self.Arch]): return True diff --git a/BaseTools/Source/Python/build/build.py b/BaseTools/Source/Python/build/build.py index a78d4e95013a..0ca71e5458c0 100755 --- a/BaseTools/Source/Python/build/build.py +++ b/BaseTools/Source/Python/build/build.py @@ -71,22 +71,6 @@ TemporaryTablePattern = re.compile(r'^_\d+_\d+_[a-fA-F0-9]+$') TmpTableDict = {} -## Check environment PATH variable to make sure the specified tool is found -# -# If the tool is found in the PATH, then True is returned -# Otherwise, False is returned -# -def IsToolInPath(tool): - if 'PATHEXT' in os.environ: - extns = os.environ['PATHEXT'].split(os.path.pathsep) - else: - extns = ('',) - for pathDir in os.environ['PATH'].split(os.path.pathsep): - for ext in extns: - if os.path.exists(os.path.join(pathDir, tool + ext)): - return True - return False - ## Check environment variables # # Check environment variables that must be set for build. Currently they are @@ -385,26 +369,6 @@ def __init__(self, Obj, BuildCommand,Target): if Target in [None, "", "all"]: self.Target = "tbuild" -## The smallest platform unit that can be built by nmake/make command in multi-thread build mode -# -# This class is for platform build by nmake/make build system. The "Obj" parameter -# must provide __str__(), __eq__() and __hash__() methods. Otherwise there could -# be make units missing build. -# -# Currently the "Obj" should be only PlatformAutoGen object. -# -class PlatformMakeUnit(BuildUnit): - ## The constructor - # - # @param self The object pointer - # @param Obj The PlatformAutoGen object the build is working on - # @param Target The build target name, one of gSupportedTarget - # - def __init__(self, Obj, BuildCommand, Target): - Dependency = [ModuleMakeUnit(Lib, BuildCommand, Target) for Lib in self.BuildObject.LibraryAutoGenList] - Dependency.extend([ModuleMakeUnit(Mod, BuildCommand,Target) for Mod in self.BuildObject.ModuleAutoGenList]) - BuildUnit.__init__(self, Obj, BuildCommand, Target, Dependency, Obj.MakeFileDir) - ## The class representing the task of a module build or platform build # # This class manages the build tasks in multi-thread build mode. Its jobs include @@ -562,15 +526,6 @@ def Abort(): def HasError(): return BuildTask._ErrorFlag.is_set() - ## Get error message in running thread - # - # Since the main thread cannot catch exceptions in other thread, we have to - # use a static variable to communicate this message to main thread. - # - @staticmethod - def GetErrorMessage(): - return BuildTask._ErrorMessage - ## Factory method to create a BuildTask object # # This method will check if a module is building or has been built. 
And if @@ -803,8 +758,6 @@ def __init__(self, Target, WorkspaceDir, BuildOptions,log_q): self.LoadFixAddress = 0 self.UniFlag = BuildOptions.Flag self.BuildModules = [] - self.HashSkipModules = [] - self.Db_Flag = False self.LaunchPrebuildFlag = False self.PlatformBuildPath = os.path.join(GlobalData.gConfDirectory, '.cache', '.PlatformBuild') if BuildOptions.CommandLength: @@ -1045,7 +998,6 @@ def InitPreBuild(self): if 'PREBUILD' in GlobalData.gCommandLineDefines: self.Prebuild = GlobalData.gCommandLineDefines.get('PREBUILD') else: - self.Db_Flag = True Platform = self.Db.MapPlatform(str(self.PlatformFile)) self.Prebuild = str(Platform.Prebuild) if self.Prebuild: @@ -2495,13 +2447,6 @@ def CreateGuidedSectionToolsFile(self,Wa): print(' '.join(guidedSectionTool), file=toolsFile) toolsFile.close() - ## Returns the real path of the tool. - # - def GetRealPathOfTool (self, tool): - if os.path.exists(tool): - return os.path.realpath(tool) - return tool - ## Launch the module or platform build # def Launch(self): From 4556467428ee3ea589bd177e29aca3ecef0d8212 Mon Sep 17 00:00:00 2001 From: Pierre Gondois Date: Wed, 30 Apr 2025 14:31:47 +0200 Subject: [PATCH 05/15] BaseTools: Capsule: Remove unnecessary code Running the vulture tool on the Capsule folder gave the following report. Remove the unnecessary code. - Capsule/WindowsCapsuleSupportHelper.py:26: unused method 'RegisterHelpers' (60% confidence) Signed-off-by: Pierre Gondois --- .../Source/Python/Capsule/WindowsCapsuleSupportHelper.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/BaseTools/Source/Python/Capsule/WindowsCapsuleSupportHelper.py b/BaseTools/Source/Python/Capsule/WindowsCapsuleSupportHelper.py index a29ac21ae890..b82c51853edd 100644 --- a/BaseTools/Source/Python/Capsule/WindowsCapsuleSupportHelper.py +++ b/BaseTools/Source/Python/Capsule/WindowsCapsuleSupportHelper.py @@ -23,11 +23,6 @@ class WindowsCapsuleSupportHelper(object): - def RegisterHelpers(self, obj): - fp = os.path.abspath(__file__) - obj.Register("PackageWindowsCapsuleFiles", WindowsCapsuleSupportHelper.PackageWindowsCapsuleFiles, fp) - - @staticmethod def PackageWindowsCapsuleFiles(OutputFolder, ProductName, ProductFmpGuid, CapsuleVersion_DotString, CapsuleVersion_HexString, ProductFwProvider, ProductFwMfgName, ProductFwDesc, CapsuleFileName, PfxFile=None, PfxPass=None, From 3de6e36934128326e9fc183851786d5721828362 Mon Sep 17 00:00:00 2001 From: Pierre Gondois Date: Wed, 30 Apr 2025 14:35:54 +0200 Subject: [PATCH 06/15] BaseTools: Ecc: Remove unnecessary code Running the vulture tool on the Ecc folder gave the following report. Remove the unnecessary code. 
- Ecc/c.py:52: unused function 'GetConfig' (60% confidence) - Ecc/c.py:484: unused function 'GetFileModificationTimeFromDB' (60% confidence) - Ecc/CodeFragmentCollector.py:77: unused attribute '__Token' (60% confidence) - Ecc/CodeFragmentCollector.py:78: unused attribute '__SkippedChars' (60% confidence) - Ecc/CodeFragmentCollector.py:109: unused method '__EndOfLine' (60% confidence) - Ecc/CodeFragmentCollector.py:134: unused method '__UndoOneChar' (60% confidence) - Ecc/CodeFragmentCollector.py:221: unused method '__InsertComma' (60% confidence) - Ecc/CodeFragmentCollector.py:539: unused method 'PrintFragments' (60% confidence) - Ecc/Database.py:81: unused attribute 'text_factory' (60% confidence) - Ecc/Database.py:214: unused method 'UpdateIdentifierBelongsToFunction_disabled' (60% confidence) - Ecc/MetaFileWorkspace/MetaDataTable.py:142: unused method 'IsIntegral' (60% confidence) - Ecc/MetaFileWorkspace/MetaDataTable.py:205: unused method 'GetCrossIndex' (60% confidence) - Ecc/Xml/XmlRoutines.py:145: unused function 'XmlElementData' (60% confidence) - Ecc/Xml/XmlRoutines.py:162: unused function 'XmlElementList' (60% confidence) - Ecc/Xml/XmlRoutines.py:192: unused function 'XmlNodeName' (60% confidence) Signed-off-by: Pierre Gondois --- .../Python/Ecc/CodeFragmentCollector.py | 62 ------------------- BaseTools/Source/Python/Ecc/Database.py | 54 ---------------- .../Ecc/MetaFileWorkspace/MetaDataTable.py | 22 ------- .../Source/Python/Ecc/Xml/XmlRoutines.py | 47 -------------- BaseTools/Source/Python/Ecc/c.py | 15 ----- 5 files changed, 200 deletions(-) diff --git a/BaseTools/Source/Python/Ecc/CodeFragmentCollector.py b/BaseTools/Source/Python/Ecc/CodeFragmentCollector.py index d8d6aff08a6e..5bd3eec040ff 100644 --- a/BaseTools/Source/Python/Ecc/CodeFragmentCollector.py +++ b/BaseTools/Source/Python/Ecc/CodeFragmentCollector.py @@ -74,8 +74,6 @@ def __init__(self, FileName): self.CurrentLineNumber = 1 self.CurrentOffsetWithinLine = 0 self.TokenReleaceList = [] - self.__Token = "" - self.__SkippedChars = "" ## __EndOfFile() method # @@ -98,21 +96,6 @@ def __EndOfFile(self): else: return False - ## __EndOfLine() method - # - # Judge current buffer pos is at line end - # - # @param self The object pointer - # @retval True Current File buffer position is at line end - # @retval False Current File buffer position is NOT at line end - # - def __EndOfLine(self): - SizeOfCurrentLine = len(self.Profile.FileLinesList[self.CurrentLineNumber - 1]) - if self.CurrentOffsetWithinLine >= SizeOfCurrentLine - 1: - return True - else: - return False - ## Rewind() method # # Reset file data buffer to the initial state @@ -123,25 +106,6 @@ def Rewind(self): self.CurrentLineNumber = 1 self.CurrentOffsetWithinLine = 0 - ## __UndoOneChar() method - # - # Go back one char in the file buffer - # - # @param self The object pointer - # @retval True Successfully go back one char - # @retval False Not able to go back one char as file beginning reached - # - def __UndoOneChar(self): - - if self.CurrentLineNumber == 1 and self.CurrentOffsetWithinLine == 0: - return False - elif self.CurrentOffsetWithinLine == 0: - self.CurrentLineNumber -= 1 - self.CurrentOffsetWithinLine = len(self.__CurrentLine()) - 1 - else: - self.CurrentOffsetWithinLine -= 1 - return True - ## __GetOneChar() method # # Move forward one char in the file buffer @@ -211,32 +175,6 @@ def __SetCharValue(self, Line, Offset, Value): def __CurrentLine(self): return self.Profile.FileLinesList[self.CurrentLineNumber - 1] - ## __InsertComma() method - # - # 
Insert ',' to replace PP - # - # @param self The object pointer - # @retval List current line contents - # - def __InsertComma(self, Line): - - - if self.Profile.FileLinesList[Line - 1][0] != T_CHAR_HASH: - BeforeHashPart = str(self.Profile.FileLinesList[Line - 1]).split(T_CHAR_HASH)[0] - if BeforeHashPart.rstrip().endswith(T_CHAR_COMMA) or BeforeHashPart.rstrip().endswith(';'): - return - - if Line - 2 >= 0 and str(self.Profile.FileLinesList[Line - 2]).rstrip().endswith(','): - return - - if Line - 2 >= 0 and str(self.Profile.FileLinesList[Line - 2]).rstrip().endswith(';'): - return - - if str(self.Profile.FileLinesList[Line]).lstrip().startswith(',') or str(self.Profile.FileLinesList[Line]).lstrip().startswith(';'): - return - - self.Profile.FileLinesList[Line - 1].insert(self.CurrentOffsetWithinLine, ',') - ## PreprocessFile() method # # Preprocess file contents, replace comments with spaces. diff --git a/BaseTools/Source/Python/Ecc/Database.py b/BaseTools/Source/Python/Ecc/Database.py index a5b70c52029b..f31dd933e89a 100644 --- a/BaseTools/Source/Python/Ecc/Database.py +++ b/BaseTools/Source/Python/Ecc/Database.py @@ -78,7 +78,6 @@ def InitDatabase(self, NewDatabase = True): self.Conn.execute("PRAGMA page_size=4096") self.Conn.execute("PRAGMA synchronous=OFF") # to avoid non-ascii character conversion error - self.Conn.text_factory = str self.Cur = self.Conn.cursor() self.TblDataModel = TableDataModel(self.Cur) @@ -206,59 +205,6 @@ def InsertOneFile(self, File): EdkLogger.verbose("Insert information from file %s ... DONE!" % File.FullPath) - ## UpdateIdentifierBelongsToFunction - # - # Update the field "BelongsToFunction" for each Identifier - # - # - def UpdateIdentifierBelongsToFunction_disabled(self): - EdkLogger.verbose("Update 'BelongsToFunction' for Identifiers started ...") - - SqlCommand = """select ID, BelongsToFile, StartLine, EndLine, Model from Identifier""" - EdkLogger.debug(4, "SqlCommand: %s" %SqlCommand) - self.Cur.execute(SqlCommand) - Records = self.Cur.fetchall() - for Record in Records: - IdentifierID = Record[0] - BelongsToFile = Record[1] - StartLine = Record[2] - EndLine = Record[3] - Model = Record[4] - - # - # Check whether an identifier belongs to a function - # - EdkLogger.debug(4, "For common identifiers ... ") - SqlCommand = """select ID from Function - where StartLine < %s and EndLine > %s - and BelongsToFile = %s""" % (StartLine, EndLine, BelongsToFile) - EdkLogger.debug(4, "SqlCommand: %s" %SqlCommand) - self.Cur.execute(SqlCommand) - IDs = self.Cur.fetchall() - for ID in IDs: - SqlCommand = """Update Identifier set BelongsToFunction = %s where ID = %s""" % (ID[0], IdentifierID) - EdkLogger.debug(4, "SqlCommand: %s" %SqlCommand) - self.Cur.execute(SqlCommand) - - # - # Check whether the identifier is a function header - # - EdkLogger.debug(4, "For function headers ... ") - if Model == DataClass.MODEL_IDENTIFIER_COMMENT: - SqlCommand = """select ID from Function - where StartLine = %s + 1 - and BelongsToFile = %s""" % (EndLine, BelongsToFile) - EdkLogger.debug(4, "SqlCommand: %s" %SqlCommand) - self.Cur.execute(SqlCommand) - IDs = self.Cur.fetchall() - for ID in IDs: - SqlCommand = """Update Identifier set BelongsToFunction = %s, Model = %s where ID = %s""" % (ID[0], DataClass.MODEL_IDENTIFIER_FUNCTION_HEADER, IdentifierID) - EdkLogger.debug(4, "SqlCommand: %s" %SqlCommand) - self.Cur.execute(SqlCommand) - - EdkLogger.verbose("Update 'BelongsToFunction' for Identifiers ... 
DONE") - - ## UpdateIdentifierBelongsToFunction # # Update the field "BelongsToFunction" for each Identifier diff --git a/BaseTools/Source/Python/Ecc/MetaFileWorkspace/MetaDataTable.py b/BaseTools/Source/Python/Ecc/MetaFileWorkspace/MetaDataTable.py index 1d7f6eb10434..552344e2c021 100644 --- a/BaseTools/Source/Python/Ecc/MetaFileWorkspace/MetaDataTable.py +++ b/BaseTools/Source/Python/Ecc/MetaFileWorkspace/MetaDataTable.py @@ -139,12 +139,6 @@ def Exec(self, SqlCommand): def SetEndFlag(self): pass - def IsIntegral(self): - Result = self.Exec("select min(ID) from %s" % (self.Table)) - if Result[0][0] != -1: - return False - return True - def GetAll(self): return self.Exec("select * from %s where ID > 0 order by ID" % (self.Table)) @@ -195,19 +189,3 @@ def InitTable(self): self.Insert(CrossIndex, Name, Description) EdkLogger.verbose("Initialize table DataModel ... DONE!") - ## Get CrossIndex - # - # Get a model's cross index from its name - # - # @param ModelName: Name of the model - # @retval CrossIndex: CrossIndex of the model - # - def GetCrossIndex(self, ModelName): - CrossIndex = -1 - SqlCommand = """select CrossIndex from DataModel where name = '""" + ModelName + """'""" - self.Cur.execute(SqlCommand) - for Item in self.Cur: - CrossIndex = Item[0] - - return CrossIndex - diff --git a/BaseTools/Source/Python/Ecc/Xml/XmlRoutines.py b/BaseTools/Source/Python/Ecc/Xml/XmlRoutines.py index b02f663b15a5..1428afef6d04 100644 --- a/BaseTools/Source/Python/Ecc/Xml/XmlRoutines.py +++ b/BaseTools/Source/Python/Ecc/Xml/XmlRoutines.py @@ -132,37 +132,6 @@ def XmlElement(Dom, String): except: return "" - -## Get a single XML element of the current node. -# -# Return a single XML element specified by the current root Dom. -# If the input Dom is not valid, then an empty string is returned. -# -# @param Dom The root XML DOM object. -# -# @revel Element An XML element in current root Dom. -# -def XmlElementData(Dom): - try: - return Dom.firstChild.data.strip() - except: - return "" - - -## Get a list of XML elements using XPath style syntax. -# -# Return a list of XML elements from the root Dom specified by XPath String. -# If the input Dom or String is not valid, then an empty list is returned. -# -# @param Dom The root XML DOM object. -# @param String A XPath style path. -# -# @revel Elements A list of XML elements matching XPath style Sting. -# -def XmlElementList(Dom, String): - return map(XmlElementData, XmlList(Dom, String)) - - ## Get the XML attribute of the current node. # # Return a single XML attribute named Attribute from the current root Dom. @@ -179,22 +148,6 @@ def XmlAttribute(Dom, Attribute): except: return '' - -## Get the XML node name of the current node. -# -# Return a single XML node name from the current root Dom. -# If the input Dom is not valid, then an empty string is returned. -# -# @param Dom The root XML DOM object. -# -# @revel Element A single XML element matching XPath style Sting. -# -def XmlNodeName(Dom): - try: - return Dom.nodeName.strip() - except: - return '' - ## Parse an XML file. # # Parse the input XML file named FileName and return a XML DOM it stands for. 
diff --git a/BaseTools/Source/Python/Ecc/c.py b/BaseTools/Source/Python/Ecc/c.py index 8e45c0730c46..a6b9076f9173 100644 --- a/BaseTools/Source/Python/Ecc/c.py +++ b/BaseTools/Source/Python/Ecc/c.py @@ -49,9 +49,6 @@ def GetTypedefFuncPointerPattern(): def GetDB(): return EccGlobalData.gDb -def GetConfig(): - return EccGlobalData.gConfig - def PrintErrorMsg(ErrorType, Msg, TableName, ItemId): Msg = Msg.replace('\n', '').replace('\r', '') MsgPartList = Msg.split() @@ -481,18 +478,6 @@ def GetFunctionList(): return FuncObjList -def GetFileModificationTimeFromDB(FullFileName): - TimeValue = 0.0 - Db = GetDB() - SqlStatement = """ select TimeStamp - from File - where FullPath = \'%s\' - """ % (FullFileName) - ResultSet = Db.TblFile.Exec(SqlStatement) - for Result in ResultSet: - TimeValue = Result[0] - return TimeValue - def CollectSourceCodeDataIntoDB(RootDir): FileObjList = [] tuple = os.walk(RootDir) From f2c824346886e40879d9ab97c79da863ea3a7e6a Mon Sep 17 00:00:00 2001 From: Pierre Gondois Date: Wed, 30 Apr 2025 14:40:15 +0200 Subject: [PATCH 07/15] BaseTools: Eot: Remove unnecessary code Running the vulture tool on the Eot folder gave the following report. Remove the unnecessary code. - Eot/CodeFragment.py:47: unused class 'AssignmentExpression' (60% confidence) - Eot/CodeFragmentCollector.py:75: unused attribute '__Token' (60% confidence) - Eot/CodeFragmentCollector.py:76: unused attribute '__SkippedChars' (60% confidence) - Eot/CodeFragmentCollector.py:104: unused method '__EndOfLine' (60% confidence) - Eot/CodeFragmentCollector.py:129: unused method '__UndoOneChar' (60% confidence) - Eot/CodeFragmentCollector.py:215: unused method '__InsertComma' (60% confidence) - Eot/Database.py:81: unused attribute 'text_factory' (60% confidence) - Eot/EotMain.py:1012: unused method 'SetFreeSpace' (60% confidence) - Eot/Identification.py:36: unused method 'GetFileFullPath' (60% confidence) - Eot/Identification.py:43: unused method 'GetFileRelativePath' (60% confidence) - Eot/Parser.py:119: unused function 'AddToGlobalMacro' (60% confidence) - Eot/Parser.py:238: unused function 'GetAllSourceFiles' (60% confidence) - Eot/Parser.py:257: unused function 'ParseConditionalStatementMacros' (60% confidence) - Eot/Parser.py:267: unused function 'GetAllFiles' (60% confidence) - Eot/Parser.py:291: unused function 'ParseConditionalStatement' (60% confidence) - Eot/Parser.py:367: unused function 'GetConditionalStatementStatus' (60% confidence) - Eot/Parser.py:722: unused function 'ConvertGuid' (60% confidence) - Eot/Parser.py:857: unused function 'ConvertGuid2' (60% confidence) - Eot/Report.py:161: unused method 'GeneratePpi' (60% confidence) - Eot/Report.py:173: unused method 'GenerateProtocol' (60% confidence) Signed-off-by: Pierre Gondois --- BaseTools/Source/Python/Eot/CodeFragment.py | 18 -- .../Python/Eot/CodeFragmentCollector.py | 63 ------ BaseTools/Source/Python/Eot/Database.py | 1 - BaseTools/Source/Python/Eot/EotMain.py | 3 - BaseTools/Source/Python/Eot/Identification.py | 14 -- BaseTools/Source/Python/Eot/Parser.py | 185 ------------------ BaseTools/Source/Python/Eot/Report.py | 24 --- 7 files changed, 308 deletions(-) diff --git a/BaseTools/Source/Python/Eot/CodeFragment.py b/BaseTools/Source/Python/Eot/CodeFragment.py index 94c3f52d5c20..ec73fc46ac7d 100644 --- a/BaseTools/Source/Python/Eot/CodeFragment.py +++ b/BaseTools/Source/Python/Eot/CodeFragment.py @@ -41,24 +41,6 @@ def __init__(self, Str, Begin, End): self.StartPos = Begin self.EndPos = End -## The description of assignment expression and 
start & end position -# -# -class AssignmentExpression : - ## The constructor - # - # @param self The object pointer - # @param Str The message to record - # @param Begin The start position tuple. - # @param End The end position tuple. - # - def __init__(self, Lvalue, Op, Exp, Begin, End): - self.Name = Lvalue - self.Operator = Op - self.Value = Exp - self.StartPos = Begin - self.EndPos = End - ## The description of predicate expression and start & end position # # diff --git a/BaseTools/Source/Python/Eot/CodeFragmentCollector.py b/BaseTools/Source/Python/Eot/CodeFragmentCollector.py index a5c1ceeaea32..a6827c0ce18e 100644 --- a/BaseTools/Source/Python/Eot/CodeFragmentCollector.py +++ b/BaseTools/Source/Python/Eot/CodeFragmentCollector.py @@ -72,9 +72,6 @@ def __init__(self, FileName): self.CurrentLineNumber = 1 self.CurrentOffsetWithinLine = 0 - self.__Token = "" - self.__SkippedChars = "" - ## __EndOfFile() method # # Judge current buffer pos is at file end @@ -93,21 +90,6 @@ def __EndOfFile(self): else: return False - ## __EndOfLine() method - # - # Judge current buffer pos is at line end - # - # @param self The object pointer - # @retval True Current File buffer position is at line end - # @retval False Current File buffer position is NOT at line end - # - def __EndOfLine(self): - SizeOfCurrentLine = len(self.Profile.FileLinesList[self.CurrentLineNumber - 1]) - if self.CurrentOffsetWithinLine >= SizeOfCurrentLine - 1: - return True - else: - return False - ## Rewind() method # # Reset file data buffer to the initial state @@ -118,25 +100,6 @@ def Rewind(self): self.CurrentLineNumber = 1 self.CurrentOffsetWithinLine = 0 - ## __UndoOneChar() method - # - # Go back one char in the file buffer - # - # @param self The object pointer - # @retval True Successfully go back one char - # @retval False Not able to go back one char as file beginning reached - # - def __UndoOneChar(self): - - if self.CurrentLineNumber == 1 and self.CurrentOffsetWithinLine == 0: - return False - elif self.CurrentOffsetWithinLine == 0: - self.CurrentLineNumber -= 1 - self.CurrentOffsetWithinLine = len(self.__CurrentLine()) - 1 - else: - self.CurrentOffsetWithinLine -= 1 - return True - ## __GetOneChar() method # # Move forward one char in the file buffer @@ -205,32 +168,6 @@ def __SetCharValue(self, Line, Offset, Value): def __CurrentLine(self): return self.Profile.FileLinesList[self.CurrentLineNumber - 1] - ## __InsertComma() method - # - # Insert ',' to replace PP - # - # @param self The object pointer - # @retval List current line contents - # - def __InsertComma(self, Line): - - - if self.Profile.FileLinesList[Line - 1][0] != T_CHAR_HASH: - BeforeHashPart = str(self.Profile.FileLinesList[Line - 1]).split(T_CHAR_HASH)[0] - if BeforeHashPart.rstrip().endswith(T_CHAR_COMMA) or BeforeHashPart.rstrip().endswith(';'): - return - - if Line - 2 >= 0 and str(self.Profile.FileLinesList[Line - 2]).rstrip().endswith(','): - return - - if Line - 2 >= 0 and str(self.Profile.FileLinesList[Line - 2]).rstrip().endswith(';'): - return - - if str(self.Profile.FileLinesList[Line]).lstrip().startswith(',') or str(self.Profile.FileLinesList[Line]).lstrip().startswith(';'): - return - - self.Profile.FileLinesList[Line - 1].insert(self.CurrentOffsetWithinLine, ',') - ## PreprocessFileWithClear() method # # Run a preprocess for the file to clean all comments diff --git a/BaseTools/Source/Python/Eot/Database.py b/BaseTools/Source/Python/Eot/Database.py index fca08b96bbdf..64a72287e0d3 100644 --- a/BaseTools/Source/Python/Eot/Database.py 
+++ b/BaseTools/Source/Python/Eot/Database.py @@ -78,7 +78,6 @@ def InitDatabase(self, NewDatabase = True): self.Conn.execute("PRAGMA page_size=8192") self.Conn.execute("PRAGMA synchronous=OFF") # to avoid non-ascii character conversion error - self.Conn.text_factory = str self.Cur = self.Conn.cursor() self.TblDataModel = TableDataModel(self.Cur) diff --git a/BaseTools/Source/Python/Eot/EotMain.py b/BaseTools/Source/Python/Eot/EotMain.py index a5063782883d..e0b2ed2e70d4 100644 --- a/BaseTools/Source/Python/Eot/EotMain.py +++ b/BaseTools/Source/Python/Eot/EotMain.py @@ -1009,9 +1009,6 @@ def _Unpack(self): def Pack(self): pass - def SetFreeSpace(self, Size): - self.FreeSpace = Size - def _GetGuid(self): return gGuidStringFormat % self.Name diff --git a/BaseTools/Source/Python/Eot/Identification.py b/BaseTools/Source/Python/Eot/Identification.py index 31d47602e519..168a8865ca69 100644 --- a/BaseTools/Source/Python/Eot/Identification.py +++ b/BaseTools/Source/Python/Eot/Identification.py @@ -29,20 +29,6 @@ def __init__(self): def GetFileName(self, FileFullPath, FileRelativePath): pass - ## GetFileName - # - # Reserved - # - def GetFileFullPath(self, FileName, FileRelativePath): - pass - - ## GetFileName - # - # Reserved - # - def GetFileRelativePath(self, FileName, FileFullPath): - pass - ## # # This acts like the main() function for the script, unless it is 'import'ed into another diff --git a/BaseTools/Source/Python/Eot/Parser.py b/BaseTools/Source/Python/Eot/Parser.py index f204051d01f7..b0b3d0b6e1b3 100644 --- a/BaseTools/Source/Python/Eot/Parser.py +++ b/BaseTools/Source/Python/Eot/Parser.py @@ -109,17 +109,6 @@ def PreProcess(Filename, MergeMultipleLines = True, LineNo = -1): return Lines -## AddToGlobalMacro() method -# -# Add a macro to EotGlobalData.gMACRO -# -# @param Name: Name of the macro -# @param Value: Value of the macro -# -def AddToGlobalMacro(Name, Value): - Value = ReplaceMacro(Value, EotGlobalData.gMACRO, True) - EotGlobalData.gMACRO[Name] = Value - ## AddToSelfMacro() method # # Parse a line of macro definition and add it to a macro set @@ -238,139 +227,6 @@ def GetAllIncludeFiles(Db): return IncludeFileList -## GetAllSourceFiles() method -# -# Find all source files -# -# @param Db: Eot database -# -# @return SourceFileList: A list of source files -# -def GetAllSourceFiles(Db): - SourceFileList = [] - SqlCommand = """select distinct Value1 from Inf where Model = %s order by Value1""" % MODEL_EFI_SOURCE_FILE - RecordSet = Db.TblInf.Exec(SqlCommand) - - for Record in RecordSet: - SourceFileList.append(Record[0]) - - return SourceFileList - -## GetAllFiles() method -# -# Find all files, both source files and include files -# -# @param Db: Eot database -# -# @return FileList: A list of files -# -def GetAllFiles(Db): - FileList = [] - IncludeFileList = GetAllIncludeFiles(Db) - SourceFileList = GetAllSourceFiles(Db) - for Item in IncludeFileList: - if os.path.isfile(Item) and Item not in FileList: - FileList.append(Item) - for Item in SourceFileList: - if os.path.isfile(Item) and Item not in FileList: - FileList.append(Item) - - return FileList - -## ParseConditionalStatement() method -# -# Parse conditional statement -# -# @param Line: One line to be parsed -# @param Macros: A set of all macro -# @param StatusSet: A set of all status -# -# @retval True: Find keyword of conditional statement -# @retval False: Not find keyword of conditional statement -# -def ParseConditionalStatement(Line, Macros, StatusSet): - NewLine = Line.upper() - if NewLine.find(TAB_IF_EXIST.upper()) > 
-1: - IfLine = Line[NewLine.find(TAB_IF_EXIST) + len(TAB_IF_EXIST) + 1:].strip() - IfLine = ReplaceMacro(IfLine, EotGlobalData.gMACRO, True) - IfLine = ReplaceMacro(IfLine, Macros, True) - IfLine = IfLine.replace("\"", '') - IfLine = IfLine.replace("(", '') - IfLine = IfLine.replace(")", '') - Status = os.path.exists(os.path.normpath(IfLine)) - StatusSet.append([Status]) - return True - if NewLine.find(TAB_IF_DEF.upper()) > -1: - IfLine = Line[NewLine.find(TAB_IF_DEF) + len(TAB_IF_DEF) + 1:].strip() - Status = False - if IfLine in Macros or IfLine in EotGlobalData.gMACRO: - Status = True - StatusSet.append([Status]) - return True - if NewLine.find(TAB_IF_N_DEF.upper()) > -1: - IfLine = Line[NewLine.find(TAB_IF_N_DEF) + len(TAB_IF_N_DEF) + 1:].strip() - Status = False - if IfLine not in Macros and IfLine not in EotGlobalData.gMACRO: - Status = True - StatusSet.append([Status]) - return True - if NewLine.find(TAB_IF.upper()) > -1: - IfLine = Line[NewLine.find(TAB_IF) + len(TAB_IF) + 1:].strip() - Status = ParseConditionalStatementMacros(IfLine, Macros) - StatusSet.append([Status]) - return True - if NewLine.find(TAB_ELSE_IF.upper()) > -1: - IfLine = Line[NewLine.find(TAB_ELSE_IF) + len(TAB_ELSE_IF) + 1:].strip() - Status = ParseConditionalStatementMacros(IfLine, Macros) - StatusSet[-1].append(Status) - return True - if NewLine.find(TAB_ELSE.upper()) > -1: - Status = False - for Item in StatusSet[-1]: - Status = Status or Item - StatusSet[-1].append(not Status) - return True - if NewLine.find(TAB_END_IF.upper()) > -1: - StatusSet.pop() - return True - - return False - -## ParseConditionalStatement() method -# -# Parse conditional statement with Macros -# -# @param Line: One line to be parsed -# @param Macros: A set of macros -# -# @return Line: New line after replacing macros -# -def ParseConditionalStatementMacros(Line, Macros): - if Line.upper().find('DEFINED(') > -1 or Line.upper().find('EXIST') > -1: - return False - Line = ReplaceMacro(Line, EotGlobalData.gMACRO, True) - Line = ReplaceMacro(Line, Macros, True) - Line = Line.replace("&&", "and") - Line = Line.replace("||", "or") - return eval(Line) - -## GetConditionalStatementStatus() method -# -# 1. Assume the latest status as True -# 2. Pop the top status of status set, previous status -# 3. Compare the latest one and the previous one and get new status -# -# @param StatusSet: A set of all status -# -# @return Status: The final status -# -def GetConditionalStatementStatus(StatusSet): - Status = True - for Item in StatusSet: - Status = Status and Item[-1] - - return Status - ## SearchBelongsToFunction() method # # Search all functions belong to the file @@ -819,47 +675,6 @@ def ParseMapFile(Files): return AllMaps -## ConvertGuid -# -# Convert a GUID to a GUID with all upper letters -# -# @param guid: The GUID to be converted -# -# @param newGuid: The GUID with all upper letters. 
-# -def ConvertGuid(guid): - numList = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'] - newGuid = '' - if guid.startswith('g'): - guid = guid[1:] - for i in guid: - if i.upper() == i and i not in numList: - newGuid = newGuid + ('_' + i) - else: - newGuid = newGuid + i.upper() - if newGuid.startswith('_'): - newGuid = newGuid[1:] - if newGuid.endswith('_'): - newGuid = newGuid[:-1] - - return newGuid - -## ConvertGuid2() method -# -# Convert a GUID to a GUID with new string instead of old string -# -# @param guid: The GUID to be converted -# @param old: Old string to be replaced -# @param new: New string to replace the old one -# -# @param newGuid: The GUID after replacement -# -def ConvertGuid2(guid, old, new): - newGuid = ConvertGuid(guid) - newGuid = newGuid.replace(old, new) - - return newGuid - ## # # This acts like the main() function for the script, unless it is 'import'ed into another diff --git a/BaseTools/Source/Python/Eot/Report.py b/BaseTools/Source/Python/Eot/Report.py index 9d99fe22a0f1..32af504f1342 100644 --- a/BaseTools/Source/Python/Eot/Report.py +++ b/BaseTools/Source/Python/Eot/Report.py @@ -149,30 +149,6 @@ def GenerateDepex(self, DepexString): """ % (DepexString) self.WriteLn(Content) - ## GeneratePpi() method - # - # Generate PPI information - # - # @param self: The object pointer - # @param Name: CName of a GUID - # @param Guid: Value of a GUID - # @param Type: Type of a GUID - # - def GeneratePpi(self, Name, Guid, Type): - self.GeneratePpiProtocol('Ppi', Name, Guid, Type, self.PpiIndex) - - ## GenerateProtocol() method - # - # Generate PROTOCOL information - # - # @param self: The object pointer - # @param Name: CName of a GUID - # @param Guid: Value of a GUID - # @param Type: Type of a GUID - # - def GenerateProtocol(self, Name, Guid, Type): - self.GeneratePpiProtocol('Protocol', Name, Guid, Type, self.ProtocolIndex) - ## GeneratePpiProtocol() method # # Generate PPI/PROTOCOL information From 98c2d61865562c29c63a4f8a6a1095cb19b7d845 Mon Sep 17 00:00:00 2001 From: Pierre Gondois Date: Wed, 30 Apr 2025 14:54:22 +0200 Subject: [PATCH 08/15] BaseTools: FMTT: Remove unnecessary code Running the vulture tool on the FMTT folder gave the following report. Remove the unnecessary code. 
- FMMT/core/BinaryFactoryProduct.py:132: unused attribute 'ROffset' (60% confidence) - FMMT/core/BinaryFactoryProduct.py:176: unused attribute 'ROffset' (60% confidence) - FMMT/core/BinaryFactoryProduct.py:229: unused attribute 'ROffset' (60% confidence) - FMMT/core/BinaryFactoryProduct.py:365: unused class 'ElfSectionProduct' (60% confidence) - FMMT/core/BinaryFactoryProduct.py:369: unused method 'ParserSectionData' (60% confidence) - FMMT/core/BinaryFactoryProduct.py:371: unused method 'ParserProgramData' (60% confidence) - FMMT/core/BiosTree.py:76: unused method 'insertRel' (60% confidence) - FMMT/core/BiosTree.py:84: unused method 'deleteNode' (60% confidence) - FMMT/core/BiosTreeNode.py:63: unused attribute 'ROffset' (60% confidence) - FMMT/core/BiosTreeNode.py:125: unused attribute 'ROffset' (60% confidence) - FMMT/core/BiosTreeNode.py:182: unused attribute 'ROffset' (60% confidence) - FMMT/core/BiosTreeNode.py:185: unused attribute 'SectionMaxAlignment' (60% confidence) - FMMT/core/BiosTreeNode.py:220: unused attribute 'ROffset' (60% confidence) - FMMT/core/BiosTreeNode.py:223: unused attribute 'OriHeader' (60% confidence) - FMMT/core/BiosTreeNode.py:226: unused attribute 'SectionMaxAlignment' (60% confidence) - FMMT/core/BiosTreeNode.py:247: unused attribute 'ROffset' (60% confidence) - FMMT/FMMT.py:52: unused attribute 'firmware_packet' (60% confidence) Signed-off-by: Pierre Gondois --- BaseTools/Source/Python/FMMT/FMMT.py | 2 +- .../Python/FMMT/core/BinaryFactoryProduct.py | 12 -------- BaseTools/Source/Python/FMMT/core/BiosTree.py | 30 ------------------- .../Source/Python/FMMT/core/BiosTreeNode.py | 8 ----- 4 files changed, 1 insertion(+), 51 deletions(-) diff --git a/BaseTools/Source/Python/FMMT/FMMT.py b/BaseTools/Source/Python/FMMT/FMMT.py index 7505b6c88a04..d4fa07a984ce 100644 --- a/BaseTools/Source/Python/FMMT/FMMT.py +++ b/BaseTools/Source/Python/FMMT/FMMT.py @@ -49,7 +49,7 @@ def print_banner(): class FMMT(): def __init__(self) -> None: - self.firmware_packet = {} + pass def SetConfigFilePath(self, configfilepath:str) -> str: os.environ['FmmtConfPath'] = os.path.abspath(configfilepath) diff --git a/BaseTools/Source/Python/FMMT/core/BinaryFactoryProduct.py b/BaseTools/Source/Python/FMMT/core/BinaryFactoryProduct.py index 7ebe11d00101..9dd717cfcf16 100644 --- a/BaseTools/Source/Python/FMMT/core/BinaryFactoryProduct.py +++ b/BaseTools/Source/Python/FMMT/core/BinaryFactoryProduct.py @@ -129,7 +129,6 @@ def ParserSection(self, ParTree, Whole_Data: bytes, Rel_Whole_Offset: int=0) -> Section_Info.Data = Whole_Data[Rel_Offset+Section_Info.HeaderLength: Rel_Offset+Section_Info.Size] Section_Info.DOffset = Section_Offset + Section_Info.HeaderLength + Rel_Whole_Offset Section_Info.HOffset = Section_Offset + Rel_Whole_Offset - Section_Info.ROffset = Rel_Offset if Section_Info.Header.Type == 0: break # The final Section in parent Section does not need to add padding, else must be 4-bytes align with parent Section start offset @@ -173,7 +172,6 @@ def ParserData(self, ParTree, Whole_Data: bytes, Rel_Whole_Offset: int=0) -> Non Section_Info.Data = Whole_Data[Rel_Offset+Section_Info.HeaderLength: Rel_Offset+Section_Info.Size] Section_Info.DOffset = Section_Offset + Section_Info.HeaderLength + Rel_Whole_Offset Section_Info.HOffset = Section_Offset + Rel_Whole_Offset - Section_Info.ROffset = Rel_Offset if Section_Info.Header.Type == 0: break # The final Section in Ffs does not need to add padding, else must be 4-bytes align with Ffs start offset @@ -226,7 +224,6 @@ def ParserData(self, 
ParTree, Whole_Data: bytes, Rel_Whole_Offset: int=0) -> Non Ffs_Tree = BIOSTREE(Ffs_Info.Name) Ffs_Info.HOffset = Ffs_Offset + Rel_Whole_Offset Ffs_Info.DOffset = Ffs_Offset + Ffs_Info.Header.HeaderLength + Rel_Whole_Offset - Ffs_Info.ROffset = Rel_Offset if Ffs_Info.Name == PADVECTOR: Ffs_Tree.type = FFS_PAD Ffs_Info.Data = Whole_Data[Rel_Offset+Ffs_Info.Header.HeaderLength: Rel_Offset+Ffs_Info.Size] @@ -362,15 +359,6 @@ def GetFvFromFd(self, whole_data: bytes=b'') -> list: tmp_index += 1 return Fd_Struct -class ElfSectionProduct(BinaryProduct): - ## Decompress the compressed section. - def ParserData(self, Section_Tree, whole_Data: bytes, Rel_Whole_Offset: int=0) -> None: - pass - def ParserSectionData(self, Section_Tree, whole_Data: bytes, Rel_Whole_Offset: int=0) -> None: - pass - def ParserProgramData(self, Section_Tree, whole_Data: bytes, Rel_Whole_Offset: int=0) -> None: - pass - class ElfProduct(BinaryProduct): def ParserData(self, ParTree, Whole_Data: bytes, Rel_Whole_Offset: int=0) -> None: diff --git a/BaseTools/Source/Python/FMMT/core/BiosTree.py b/BaseTools/Source/Python/FMMT/core/BiosTree.py index c5a7b017f4b1..d99f964e64c2 100644 --- a/BaseTools/Source/Python/FMMT/core/BiosTree.py +++ b/BaseTools/Source/Python/FMMT/core/BiosTree.py @@ -72,36 +72,6 @@ def insertChild(self, newNode, pos: int=None) -> None: self.Child.insert(pos, newNode) newNode.Parent = self - # lastNode.insertRel(newNode) - def insertRel(self, newNode) -> None: - if self.Parent: - parentTree = self.Parent - new_index = parentTree.Child.index(self) + 1 - parentTree.Child.insert(new_index, newNode) - self.NextRel = newNode - newNode.LastRel = self - - def deleteNode(self, deletekey: str) -> None: - FindStatus, DeleteTree = self.FindNode(deletekey) - if FindStatus: - parentTree = DeleteTree.Parent - lastTree = DeleteTree.LastRel - nextTree = DeleteTree.NextRel - if parentTree: - index = parentTree.Child.index(DeleteTree) - del parentTree.Child[index] - if lastTree and nextTree: - lastTree.NextRel = nextTree - nextTree.LastRel = lastTree - elif lastTree: - lastTree.NextRel = None - elif nextTree: - nextTree.LastRel = None - return DeleteTree - else: - logger.error('Could not find the target tree') - return None - def FindNode(self, key: str, Findlist: list) -> None: if self.key == key or (self.Data and self.Data.Name == key) or (self.type == FFS_TREE and self.Data.UiName == key): Findlist.append(self) diff --git a/BaseTools/Source/Python/FMMT/core/BiosTreeNode.py b/BaseTools/Source/Python/FMMT/core/BiosTreeNode.py index 5ca4c20dc67a..92611f8ce99d 100644 --- a/BaseTools/Source/Python/FMMT/core/BiosTreeNode.py +++ b/BaseTools/Source/Python/FMMT/core/BiosTreeNode.py @@ -60,7 +60,6 @@ def __init__(self, buffer: bytes) -> None: self.HeaderLength = len(struct2stream(self.Header)) self.HOffset = 0 self.DOffset = 0 - self.ROffset = 0 self.Data = b'' self.PadData = b'' self.Upld_Info_Align = False @@ -122,7 +121,6 @@ def __init__(self, name, buffer: bytes) -> None: self.HeaderLength = self.Header.HeaderLength self.HOffset = 0 self.DOffset = 0 - self.ROffset = 0 self.Data = b'' if self.Header.Signature != 1213613663: logger.error('Invalid Fv Header! 
Fv {} signature {} is not "_FVH".'.format(struct2stream(self.Header), self.Header.Signature)) @@ -179,10 +177,8 @@ def __init__(self, buffer: bytes) -> None: self.HeaderLength = self.Header.HeaderLength self.HOffset = 0 self.DOffset = 0 - self.ROffset = 0 self.Data = b'' self.PadData = b'' - self.SectionMaxAlignment = SECTION_COMMON_ALIGNMENT # 4-align def ModCheckSum(self) -> None: HeaderData = struct2stream(self.Header) @@ -217,13 +213,10 @@ def __init__(self, buffer: bytes) -> None: self.Type = self.Header.Type self.HOffset = 0 self.DOffset = 0 - self.ROffset = 0 self.Data = b'' self.OriData = b'' - self.OriHeader = b'' self.PadData = b'' self.IsPadSection = False - self.SectionMaxAlignment = SECTION_COMMON_ALIGNMENT # 4-align def GetExtHeader(self, Type: int, buffer: bytes, nums: int=0) -> None: if Type == 0x01: @@ -244,5 +237,4 @@ def __init__(self, buffer: bytes) -> None: self.Size = len(buffer) self.HOffset = 0 self.DOffset = 0 - self.ROffset = 0 self.PadData = b'' From 254cf0d0c9f2466573017289cdeaf6ede84a14e2 Mon Sep 17 00:00:00 2001 From: Pierre Gondois Date: Wed, 30 Apr 2025 14:53:32 +0200 Subject: [PATCH 09/15] BaseTools: GenFds: Remove unnecessary code Running the vulture tool on the GenFds folder gave the following report. Remove the unnecessary code. - GenFds/Fd.py:146: unused method 'GenFlashMap' (60% confidence) - GenFds/FdfParser.py:1539: unused attribute 'BaseAddressPcd' (60% confidence) - GenFds/FdfParser.py:1556: unused attribute 'SizePcd' (60% confidence) - GenFds/FdfParser.py:2394: unused attribute 'CurrentLineNum' (60% confidence) - GenFds/FdfParser.py:2395: unused attribute 'CurrentLineContent' (60% confidence) - GenFds/FdfParser.py:2653: unused attribute 'CurrentLineNum' (60% confidence) - GenFds/FdfParser.py:2654: unused attribute 'CurrentLineContent' (60% confidence) - GenFds/FdfParser.py:3229: unused attribute 'CreateFile' (60% confidence) - GenFds/FfsFileStatement.py:36: unused attribute 'CurrentLineNum' (60% confidence) - GenFds/FfsFileStatement.py:37: unused attribute 'CurrentLineContent' (60% confidence) - GenFds/FfsInfStatement.py:68: unused attribute 'CurrentLineNum' (60% confidence) - GenFds/FfsInfStatement.py:69: unused attribute 'CurrentLineContent' (60% confidence) - GenFds/GenFds.py:68: unused attribute 'LibDir' (60% confidence) - GenFds/GenFds.py:557: unused method 'GetFvBlockSize' (60% confidence) Signed-off-by: Pierre Gondois --- BaseTools/Source/Python/GenFds/Fd.py | 13 ------- BaseTools/Source/Python/GenFds/FdfParser.py | 9 ----- .../Source/Python/GenFds/FfsFileStatement.py | 2 -- .../Source/Python/GenFds/FfsInfStatement.py | 2 -- BaseTools/Source/Python/GenFds/GenFds.py | 36 ------------------- .../Python/GenFds/GenFdsGlobalVariable.py | 1 - 6 files changed, 63 deletions(-) diff --git a/BaseTools/Source/Python/GenFds/Fd.py b/BaseTools/Source/Python/GenFds/Fd.py index 973936b6f273..c21453adccf2 100644 --- a/BaseTools/Source/Python/GenFds/Fd.py +++ b/BaseTools/Source/Python/GenFds/Fd.py @@ -139,17 +139,4 @@ def GenFd (self, Flag = False): GenFdsGlobalVariable.ImageBinDict[self.FdUiName.upper() + 'fd'] = FdFileName return FdFileName - ## generate flash map file - # - # @param self The object pointer - # - def GenFlashMap (self): - pass - - - - - - - diff --git a/BaseTools/Source/Python/GenFds/FdfParser.py b/BaseTools/Source/Python/GenFds/FdfParser.py index c821494d0d82..63e73a78bfa6 100644 --- a/BaseTools/Source/Python/GenFds/FdfParser.py +++ b/BaseTools/Source/Python/GenFds/FdfParser.py @@ -1536,7 +1536,6 @@ def _GetTokenStatements(self, Obj): if 
self._IsToken(TAB_VALUE_SPLIT): pcdPair = self._GetNextPcdSettings() - Obj.BaseAddressPcd = pcdPair self.Profile.PcdDict[pcdPair] = Obj.BaseAddress self.SetPcdLocalation(pcdPair) FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber) @@ -1553,7 +1552,6 @@ def _GetTokenStatements(self, Obj): Size = self._Token if self._IsToken(TAB_VALUE_SPLIT): pcdPair = self._GetNextPcdSettings() - Obj.SizePcd = pcdPair self.Profile.PcdDict[pcdPair] = Size self.SetPcdLocalation(pcdPair) FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber) @@ -2391,9 +2389,6 @@ def _ParseInfStatement(self): if not ffsInf.InfFileName.endswith('.inf'): raise Warning.Expected(".inf file path", self.FileName, self.CurrentLineNumber) - ffsInf.CurrentLineNum = self.CurrentLineNumber - ffsInf.CurrentLineContent = self._CurrentLine() - #Replace $(SAPCE) with real space ffsInf.InfFileName = ffsInf.InfFileName.replace('$(SPACE)', ' ') @@ -2650,8 +2645,6 @@ def _GetFilePart(self, FfsFileObj): self._GetRAWData(FfsFileObj) else: - FfsFileObj.CurrentLineNum = self.CurrentLineNumber - FfsFileObj.CurrentLineContent = self._CurrentLine() FfsFileObj.FileName = self._Token.replace('$(SPACE)', ' ') self._VerifyFile(FfsFileObj.FileName) @@ -3226,8 +3219,6 @@ def _GetCapsule(self): if not self._GetNextToken(): raise Warning.Expected("file name", self.FileName, self.CurrentLineNumber) - CapsuleObj.CreateFile = self._Token - self._GetCapsuleStatements(CapsuleObj) self.Profile.CapsuleDict[CapsuleObj.UiCapsuleName] = CapsuleObj return True diff --git a/BaseTools/Source/Python/GenFds/FfsFileStatement.py b/BaseTools/Source/Python/GenFds/FfsFileStatement.py index 1c6e59bac75c..e1eb75d204b7 100644 --- a/BaseTools/Source/Python/GenFds/FfsFileStatement.py +++ b/BaseTools/Source/Python/GenFds/FfsFileStatement.py @@ -33,8 +33,6 @@ class FileStatement (FileStatementClassObject): # def __init__(self): FileStatementClassObject.__init__(self) - self.CurrentLineNum = None - self.CurrentLineContent = None self.FileName = None self.InfFileName = None self.SubAlignment = None diff --git a/BaseTools/Source/Python/GenFds/FfsInfStatement.py b/BaseTools/Source/Python/GenFds/FfsInfStatement.py index 4e26a5af9db7..6c837accee0a 100644 --- a/BaseTools/Source/Python/GenFds/FfsInfStatement.py +++ b/BaseTools/Source/Python/GenFds/FfsInfStatement.py @@ -65,8 +65,6 @@ def __init__(self): self.PiSpecVersion = '0x00000000' self.InfModule = None self.FinalTargetSuffixMap = {} - self.CurrentLineNum = None - self.CurrentLineContent = None self.FileName = None self.InfFileName = None self.OverrideGuid = None diff --git a/BaseTools/Source/Python/GenFds/GenFds.py b/BaseTools/Source/Python/GenFds/GenFds.py index 885e01e9cc9b..fe53d6b160e1 100644 --- a/BaseTools/Source/Python/GenFds/GenFds.py +++ b/BaseTools/Source/Python/GenFds/GenFds.py @@ -65,7 +65,6 @@ def resetFdsGlobalVariable(): # will be FvDir + os.sep + 'Ffs' GenFdsGlobalVariable.FfsDir = '' GenFdsGlobalVariable.FdfParser = None - GenFdsGlobalVariable.LibDir = '' GenFdsGlobalVariable.WorkSpace = None GenFdsGlobalVariable.WorkSpaceDir = '' GenFdsGlobalVariable.ConfDir = '' @@ -549,41 +548,6 @@ def GenFfsMakefile(OutputDir, FdfParserObject, WorkSpace, ArchList, GlobalData): return GenFdsGlobalVariable.FfsCmdDict - ## GetFvBlockSize() - # - # @param FvObj Whose block size to get - # @retval int Block size value - # - @staticmethod - def GetFvBlockSize(FvObj): - DefaultBlockSize = 0x1 - FdObj = None - if GenFds.OnlyGenerateThisFd is not None and GenFds.OnlyGenerateThisFd.upper() in 
GenFdsGlobalVariable.FdfParser.Profile.FdDict: - FdObj = GenFdsGlobalVariable.FdfParser.Profile.FdDict[GenFds.OnlyGenerateThisFd.upper()] - if FdObj is None: - for ElementFd in GenFdsGlobalVariable.FdfParser.Profile.FdDict.values(): - for ElementRegion in ElementFd.RegionList: - if ElementRegion.RegionType == BINARY_FILE_TYPE_FV: - for ElementRegionData in ElementRegion.RegionDataList: - if ElementRegionData is not None and ElementRegionData.upper() == FvObj.UiFvName: - if FvObj.BlockSizeList != []: - return FvObj.BlockSizeList[0][0] - else: - return ElementRegion.BlockSizeOfRegion(ElementFd.BlockSizeList) - if FvObj.BlockSizeList != []: - return FvObj.BlockSizeList[0][0] - return DefaultBlockSize - else: - for ElementRegion in FdObj.RegionList: - if ElementRegion.RegionType == BINARY_FILE_TYPE_FV: - for ElementRegionData in ElementRegion.RegionDataList: - if ElementRegionData is not None and ElementRegionData.upper() == FvObj.UiFvName: - if FvObj.BlockSizeList != []: - return FvObj.BlockSizeList[0][0] - else: - return ElementRegion.BlockSizeOfRegion(ElementFd.BlockSizeList) - return DefaultBlockSize - ## DisplayFvSpaceInfo() # # @param FvObj Whose block size to get diff --git a/BaseTools/Source/Python/GenFds/GenFdsGlobalVariable.py b/BaseTools/Source/Python/GenFds/GenFdsGlobalVariable.py index d7668ba681aa..4e014bf84b98 100644 --- a/BaseTools/Source/Python/GenFds/GenFdsGlobalVariable.py +++ b/BaseTools/Source/Python/GenFds/GenFdsGlobalVariable.py @@ -44,7 +44,6 @@ class GenFdsGlobalVariable: # will be FvDir + os.sep + 'Ffs' FfsDir = '' FdfParser = None - LibDir = '' WorkSpace = None WorkSpaceDir = '' ConfDir = '' From 11f873ef3ecd9ff095cde875a027a6505c32740f Mon Sep 17 00:00:00 2001 From: Pierre Gondois Date: Wed, 30 Apr 2025 14:59:09 +0200 Subject: [PATCH 10/15] BaseTools: Table: Remove unnecessary code Running the vulture tool on the Table folder gave the following report. Remove the unnecessary code. - Table/Table.py:88: unused method 'GenerateID' (60% confidence) - Table/TableDataModel.py:83: unused method 'GetCrossIndex' (60% confidence) - Table/TableEotReport.py:66: unused method 'GetMaxID' (60% confidence) Signed-off-by: Pierre Gondois --- BaseTools/Source/Python/Table/Table.py | 14 -------------- BaseTools/Source/Python/Table/TableDataModel.py | 16 ---------------- BaseTools/Source/Python/Table/TableEotReport.py | 6 ------ 3 files changed, 36 deletions(-) diff --git a/BaseTools/Source/Python/Table/Table.py b/BaseTools/Source/Python/Table/Table.py index 7a60313e9524..47ebea683523 100644 --- a/BaseTools/Source/Python/Table/Table.py +++ b/BaseTools/Source/Python/Table/Table.py @@ -77,20 +77,6 @@ def GetCount(self): for Item in self.Cur: return Item[0] - ## Generate ID - # - # Generate an ID if input ID is -1 - # - # @param ID: Input ID - # - # @retval ID: New generated ID - # - def GenerateID(self, ID): - if ID == -1: - self.ID = self.ID + 1 - - return self.ID - ## Init the ID of the table # # Init the ID of the table diff --git a/BaseTools/Source/Python/Table/TableDataModel.py b/BaseTools/Source/Python/Table/TableDataModel.py index 3855807452f7..7d1d7ed3db5b 100644 --- a/BaseTools/Source/Python/Table/TableDataModel.py +++ b/BaseTools/Source/Python/Table/TableDataModel.py @@ -72,19 +72,3 @@ def InitTable(self): Description = Item[0] self.Insert(CrossIndex, Name, Description) EdkLogger.verbose("Initialize table DataModel ... 
DONE!") - - ## Get CrossIndex - # - # Get a model's cross index from its name - # - # @param ModelName: Name of the model - # @retval CrossIndex: CrossIndex of the model - # - def GetCrossIndex(self, ModelName): - CrossIndex = -1 - SqlCommand = """select CrossIndex from DataModel where name = '""" + ModelName + """'""" - self.Cur.execute(SqlCommand) - for Item in self.Cur: - CrossIndex = Item[0] - - return CrossIndex diff --git a/BaseTools/Source/Python/Table/TableEotReport.py b/BaseTools/Source/Python/Table/TableEotReport.py index abd433ec8ef1..db6660f57afe 100644 --- a/BaseTools/Source/Python/Table/TableEotReport.py +++ b/BaseTools/Source/Python/Table/TableEotReport.py @@ -62,9 +62,3 @@ def Insert(self, ModuleID = -1, ModuleName = '', ModuleGuid = '', SourceFileID = % (self.Table, self.ID, ModuleID, ModuleName, ModuleGuid, SourceFileID, SourceFileFullPath, \ ItemName, ItemType, ItemMode, GuidName, GuidMacro, GuidValue, BelongsToFunction, Enabled) Table.Insert(self, SqlCommand) - - def GetMaxID(self): - SqlCommand = """select max(ID) from %s""" % self.Table - self.Cur.execute(SqlCommand) - for Item in self.Cur: - return Item[0] From 6431e475ed37588b0dfab69e60954b73078fe37f Mon Sep 17 00:00:00 2001 From: Pierre Gondois Date: Wed, 30 Apr 2025 14:48:04 +0200 Subject: [PATCH 11/15] BaseTools: UPT/Parser: Remove unnecessary code Running the vulture tool on the UPT/Parser folder gave the following report. Remove the unnecessary code. - UPT/Parser/InfAsBuiltProcess.py:223: unused function 'GetInfsFromWorkSpace' (60% confidence) - UPT/Parser/InfAsBuiltProcess.py:237: unused function 'GetGuidVerFormLibInstance' (60% confidence) - UPT/Parser/InfParserMisc.py:211: unused attribute 'InfPeiDepexSection' (60% confidence) - UPT/Parser/InfParserMisc.py:212: unused attribute 'InfDxeDepexSection' (60% confidence) - UPT/Parser/InfParserMisc.py:213: unused attribute 'InfSmmDepexSection' (60% confidence) - UPT/Parser/InfSectionParser.py:231: unused attribute 'InfPeiDepexSection' (60% confidence) - UPT/Parser/InfSectionParser.py:232: unused attribute 'InfDxeDepexSection' (60% confidence) - UPT/Parser/InfSectionParser.py:233: unused attribute 'InfSmmDepexSection' (60% confidence) Signed-off-by: Pierre Gondois --- .../Python/UPT/Parser/InfAsBuiltProcess.py | 66 ------------------- .../Source/Python/UPT/Parser/InfParserMisc.py | 3 - .../Python/UPT/Parser/InfSectionParser.py | 3 - 3 files changed, 72 deletions(-) diff --git a/BaseTools/Source/Python/UPT/Parser/InfAsBuiltProcess.py b/BaseTools/Source/Python/UPT/Parser/InfAsBuiltProcess.py index fb646191b186..fd0795e32661 100644 --- a/BaseTools/Source/Python/UPT/Parser/InfAsBuiltProcess.py +++ b/BaseTools/Source/Python/UPT/Parser/InfAsBuiltProcess.py @@ -215,69 +215,3 @@ def GetFileLineContent(FileName, WorkSpace, LineNo, OriginalString): FileLinesList = ProcessLineExtender(FileLinesList) return FileLinesList - -## -# Get all INF files from current workspace -# -# -def GetInfsFromWorkSpace(WorkSpace): - InfFiles = [] - for top, dirs, files in os.walk(WorkSpace): - dirs = dirs # just for pylint - for File in files: - if File.upper().endswith(".INF"): - InfFiles.append(os.path.join(top, File)) - - return InfFiles - -## -# Get GUID and version from library instance file -# -# -def GetGuidVerFormLibInstance(Guid, Version, WorkSpace, CurrentInfFileName): - for InfFile in GetInfsFromWorkSpace(WorkSpace): - try: - if InfFile.strip().upper() == CurrentInfFileName.strip().upper(): - continue - InfFile = InfFile.replace('\\', '/') - if InfFile not in 
GlobalData.gLIBINSTANCEDICT: - InfFileObj = open(InfFile, "r") - GlobalData.gLIBINSTANCEDICT[InfFile] = InfFileObj - else: - InfFileObj = GlobalData.gLIBINSTANCEDICT[InfFile] - - except BaseException: - Logger.Error("InfParser", - ToolError.FILE_READ_FAILURE, - ST.ERR_FILE_OPEN_FAILURE, - File=InfFile) - try: - FileLinesList = InfFileObj.readlines() - FileLinesList = ProcessLineExtender(FileLinesList) - - ReFindFileGuidPattern = re.compile(r"^\s*FILE_GUID\s*=.*$") - ReFindVerStringPattern = re.compile(r"^\s*VERSION_STRING\s*=.*$") - - for Line in FileLinesList: - if ReFindFileGuidPattern.match(Line): - FileGuidString = Line - if ReFindVerStringPattern.match(Line): - VerString = Line - - if FileGuidString: - FileGuidString = GetSplitValueList(FileGuidString, '=', 1)[1] - if VerString: - VerString = GetSplitValueList(VerString, '=', 1)[1] - - if FileGuidString.strip().upper() == Guid.upper() and \ - VerString.strip().upper() == Version.upper(): - return Guid, Version - - except BaseException: - Logger.Error("InfParser", ToolError.FILE_READ_FAILURE, ST.ERR_FILE_OPEN_FAILURE, File=InfFile) - finally: - InfFileObj.close() - - return '', '' - - diff --git a/BaseTools/Source/Python/UPT/Parser/InfParserMisc.py b/BaseTools/Source/Python/UPT/Parser/InfParserMisc.py index eb768b9a1241..fd0f819feb4e 100644 --- a/BaseTools/Source/Python/UPT/Parser/InfParserMisc.py +++ b/BaseTools/Source/Python/UPT/Parser/InfParserMisc.py @@ -208,9 +208,6 @@ def __init__(self): self.InfPpiSection = None self.InfGuidSection = None self.InfDepexSection = None - self.InfPeiDepexSection = None - self.InfDxeDepexSection = None - self.InfSmmDepexSection = None self.InfBinariesSection = None self.InfHeader = None self.InfSpecialCommentSection = None diff --git a/BaseTools/Source/Python/UPT/Parser/InfSectionParser.py b/BaseTools/Source/Python/UPT/Parser/InfSectionParser.py index 474d37379d2b..4899e7d6c58e 100644 --- a/BaseTools/Source/Python/UPT/Parser/InfSectionParser.py +++ b/BaseTools/Source/Python/UPT/Parser/InfSectionParser.py @@ -228,9 +228,6 @@ def __init__(self): self.InfPpiSection = InfPpiObject() self.InfGuidSection = InfGuidObject() self.InfDepexSection = InfDepexObject() - self.InfPeiDepexSection = InfDepexObject() - self.InfDxeDepexSection = InfDepexObject() - self.InfSmmDepexSection = InfDepexObject() self.InfBinariesSection = InfBinariesObject() self.InfHeader = InfHeaderObject() self.InfBinaryHeader = InfHeaderObject() From a962238ef54b20e89ad0299b091235b3584d53e8 Mon Sep 17 00:00:00 2001 From: Pierre Gondois Date: Wed, 30 Apr 2025 14:48:31 +0200 Subject: [PATCH 12/15] BaseTools: UPT/PomAdapter: Remove unnecessary code Running the vulture tool on the UPT/PomAdapter folder gave the following report. Remove the unnecessary code. 
- UPT/PomAdapter/DecPomAlignment.py:898: unused method 'ShowPackage' (60% confidence) Signed-off-by: Pierre Gondois --- .../Python/UPT/PomAdapter/DecPomAlignment.py | 47 ------------------- 1 file changed, 47 deletions(-) diff --git a/BaseTools/Source/Python/UPT/PomAdapter/DecPomAlignment.py b/BaseTools/Source/Python/UPT/PomAdapter/DecPomAlignment.py index b1f8135bc780..944fd2fa18d5 100644 --- a/BaseTools/Source/Python/UPT/PomAdapter/DecPomAlignment.py +++ b/BaseTools/Source/Python/UPT/PomAdapter/DecPomAlignment.py @@ -891,53 +891,6 @@ def GenModuleFileList(self, ContainerFile): self.SetModuleFileList(ModuleFileList) - ## Show detailed information of Package - # - # Print all members and their values of Package class - # - def ShowPackage(self): - print('\nName =', self.GetName()) - print('\nBaseName =', self.GetBaseName()) - print('\nVersion =', self.GetVersion()) - print('\nGuid =', self.GetGuid()) - - print('\nStandardIncludes = %d ' \ - % len(self.GetStandardIncludeFileList()), end=' ') - for Item in self.GetStandardIncludeFileList(): - print(Item.GetFilePath(), ' ', Item.GetSupArchList()) - print('\nPackageIncludes = %d \n' \ - % len(self.GetPackageIncludeFileList()), end=' ') - for Item in self.GetPackageIncludeFileList(): - print(Item.GetFilePath(), ' ', Item.GetSupArchList()) - - print('\nGuids =', self.GetGuidList()) - for Item in self.GetGuidList(): - print(Item.GetCName(), Item.GetGuid(), Item.GetSupArchList()) - print('\nProtocols =', self.GetProtocolList()) - for Item in self.GetProtocolList(): - print(Item.GetCName(), Item.GetGuid(), Item.GetSupArchList()) - print('\nPpis =', self.GetPpiList()) - for Item in self.GetPpiList(): - print(Item.GetCName(), Item.GetGuid(), Item.GetSupArchList()) - print('\nLibraryClasses =', self.GetLibraryClassList()) - for Item in self.GetLibraryClassList(): - print(Item.GetLibraryClass(), Item.GetRecommendedInstance(), \ - Item.GetSupArchList()) - print('\nPcds =', self.GetPcdList()) - for Item in self.GetPcdList(): - print('CName=', Item.GetCName(), 'TokenSpaceGuidCName=', \ - Item.GetTokenSpaceGuidCName(), \ - 'DefaultValue=', Item.GetDefaultValue(), \ - 'ValidUsage=', Item.GetValidUsage(), \ - 'SupArchList', Item.GetSupArchList(), \ - 'Token=', Item.GetToken(), 'DatumType=', Item.GetDatumType()) - - for Item in self.GetMiscFileList(): - print(Item.GetName()) - for FileObjectItem in Item.GetFileList(): - print(FileObjectItem.GetURI()) - print('****************\n') - ## GenPcdDeclaration # # @param ContainerFile: File name of the DEC file From 48c554fe3ce774fb278fae910cee6a12ac300a96 Mon Sep 17 00:00:00 2001 From: Pierre Gondois Date: Wed, 30 Apr 2025 14:45:49 +0200 Subject: [PATCH 13/15] BaseTools: UPT/Xml: Remove unnecessary code Running the vulture tool on the UPT/Xml folder gave the following report. Remove the unnecessary code. 
- UPT/Xml/CommonXml.py:585: unused attribute 'LangDefsList' (60% confidence) Signed-off-by: Pierre Gondois --- BaseTools/Source/Python/UPT/Xml/CommonXml.py | 1 - 1 file changed, 1 deletion(-) diff --git a/BaseTools/Source/Python/UPT/Xml/CommonXml.py b/BaseTools/Source/Python/UPT/Xml/CommonXml.py index cfadacf4aaaf..1c18d7f76323 100644 --- a/BaseTools/Source/Python/UPT/Xml/CommonXml.py +++ b/BaseTools/Source/Python/UPT/Xml/CommonXml.py @@ -582,7 +582,6 @@ def __init__(self): self.BinaryDescriptionList = [] self.BinaryCopyrightList = [] self.BinaryLicenseList = [] - self.LangDefsList = [] self.DefineDict = {} self.BuildOptionDict = {} self.IncludesDict = {} From 58a57df9a2c53ace84f587e2f2555f09ce60c286 Mon Sep 17 00:00:00 2001 From: Pierre Gondois Date: Wed, 30 Apr 2025 15:01:27 +0200 Subject: [PATCH 14/15] BaseTools: UPT/Library: Remove unnecessary code Running the vulture tool on the UPT/Library folder gave the following report. Remove the unnecessary code. - UPT/Library/CommentGenerating.py:50: unused function 'GenGenericComment' (60% confidence) - UPT/Library/CommentGenerating.py:172: unused function 'GenInfPcdTailComment' (60% confidence) - UPT/Library/CommentGenerating.py:185: unused function 'GenInfProtocolPPITailComment' (60% confidence) - UPT/Library/CommentGenerating.py:203: unused function 'GenInfGuidTailComment' (60% confidence) - UPT/Library/Misc.py:504: unused class 'MergeCommentDict' (60% confidence) - UPT/Library/Misc.py:527: unused function 'GenDummyHelpTextObj' (60% confidence) - UPT/Library/ParserValidate.py:110: unused function 'IsValidInfComponentType' (60% confidence) - UPT/Library/ParserValidate.py:122: unused function 'IsValidToolFamily' (60% confidence) - UPT/Library/ParserValidate.py:134: unused function 'IsValidToolTagName' (60% confidence) - UPT/Library/ParserValidate.py:465: unused function 'IsValidBuildNumber' (60% confidence) - UPT/Library/ParserValidate.py:478: unused function 'IsValidDepex' (60% confidence) - UPT/Library/ParserValidate.py:546: unused function 'IsValidVersionString' (60% confidence) - UPT/Library/ParserValidate.py:721: unused function 'CheckUTF16FileHeader' (60% confidence) - UPT/Library/Parsing.py:52: unused function 'GetBuildOption' (60% confidence) - UPT/Library/Parsing.py:100: unused function 'GetLibraryClassOfInf' (60% confidence) - UPT/Library/Parsing.py:150: unused function 'GetPcd' (60% confidence) - UPT/Library/Parsing.py:177: unused function 'GetFeatureFlagPcd' (60% confidence) - UPT/Library/Parsing.py:201: unused function 'GetDynamicDefaultPcd' (60% confidence) - UPT/Library/Parsing.py:227: unused function 'GetDynamicHiiPcd' (60% confidence) - UPT/Library/Parsing.py:254: unused function 'GetDynamicVpdPcd' (60% confidence) - UPT/Library/Parsing.py:277: unused function 'GetComponent' (60% confidence) - UPT/Library/Parsing.py:392: unused function 'GetExec' (60% confidence) - UPT/Library/Parsing.py:416: unused function 'GetComponents' (60% confidence) - UPT/Library/Parsing.py:532: unused function 'GetSource' (60% confidence) - UPT/Library/Parsing.py:581: unused function 'GetGuidsProtocolsPpisOfInf' (60% confidence) - UPT/Library/Parsing.py:595: unused function 'GetGuidsProtocolsPpisOfDec' (60% confidence) - UPT/Library/Parsing.py:645: unused function 'GetPcdOfInf' (60% confidence) - UPT/Library/Parsing.py:682: unused function 'GetPcdOfDec' (60% confidence) - UPT/Library/Parsing.py:739: unused function 'InsertSectionItems' (60% confidence) - UPT/Library/Parsing.py:776: unused function 'GenMetaDatSectionItem' (60% confidence) - 
UPT/Library/StringUtils.py:87: unused function 'GetLibraryClassesWithModuleType' (60% confidence) - UPT/Library/StringUtils.py:107: unused function 'GetDynamics' (60% confidence) - UPT/Library/StringUtils.py:350: unused function 'GetMultipleValuesOfKeyFromLines' (60% confidence) - UPT/Library/StringUtils.py:396: unused function 'GetSingleValueOfKeyFromLines' (60% confidence) - UPT/Library/StringUtils.py:463: unused function 'PreCheck' (60% confidence) - UPT/Library/StringUtils.py:675: unused function 'StringArrayLength' (60% confidence) - UPT/Library/StringUtils.py:718: unused function 'IsHexDigit' (60% confidence) - UPT/Library/UniClassObject.py:86: unused function 'UniToStr' (60% confidence) - UPT/Library/UniClassObject.py:227: unused attribute 'StringNameByteList' (60% confidence) - UPT/Library/UniClassObject.py:237: unused attribute 'StringNameByteList' (60% confidence) - UPT/Library/UniClassObject.py:377: unused method 'GetIncludeFile' (60% confidence) - UPT/Library/UniClassObject.py:947: unused method 'FindStringValue' (60% confidence) - UPT/Library/UniClassObject.py:957: unused method 'FindByToken' (60% confidence) - UPT/Library/UniClassObject.py:1022: unused method 'ReadIncludeUNIfile' (60% confidence) - UPT/Library/StringUtils.py:718: unused function 'IsHexDigit' (60% confidence) - UPT/Library/Xml/XmlRoutines.py:176: unused function 'XmlElementList' (60% confidence) - UPT/Library/Xml/XmlRoutines.py:202: unused function 'XmlNodeName' (60% confidence) Signed-off-by: Pierre Gondois --- .../Python/UPT/Library/CommentGenerating.py | 63 -- BaseTools/Source/Python/UPT/Library/Misc.py | 35 - .../Python/UPT/Library/ParserValidate.py | 97 --- .../Source/Python/UPT/Library/Parsing.py | 625 ------------------ .../Source/Python/UPT/Library/StringUtils.py | 229 ------- .../Python/UPT/Library/UniClassObject.py | 65 -- .../Python/UPT/Library/Xml/XmlRoutines.py | 41 -- 7 files changed, 1155 deletions(-) diff --git a/BaseTools/Source/Python/UPT/Library/CommentGenerating.py b/BaseTools/Source/Python/UPT/Library/CommentGenerating.py index 4c50adac6e50..ac09ab84c566 100644 --- a/BaseTools/Source/Python/UPT/Library/CommentGenerating.py +++ b/BaseTools/Source/Python/UPT/Library/CommentGenerating.py @@ -43,18 +43,6 @@ def GenTailCommentLines (TailCommentLines, LeadingSpaceNum = 0): return CommentStr -## GenGenericComment -# -# @param CommentLines: Generic comment Text, maybe Multiple Lines -# -def GenGenericComment (CommentLines): - if not CommentLines: - return '' - CommentLines = CommentLines.rstrip(END_OF_LINE) - CommentStr = TAB_SPECIAL_COMMENT + TAB_SPACE_SPLIT + (END_OF_LINE + TAB_COMMENT_SPLIT + TAB_SPACE_SPLIT).join\ - (GetSplitValueList(CommentLines, END_OF_LINE)) + END_OF_LINE - return CommentStr - ## GenGenericCommentF # # similar to GenGenericComment but will remove at end of comment once, @@ -162,57 +150,6 @@ def GenHeaderCommentSection(Abstract, Description, Copyright, License, IsBinaryH return Content - -## GenInfPcdTailComment -# Generate Pcd tail comment for Inf, this would be one line comment -# -# @param Usage: Usage type -# @param TailCommentText: Comment text for tail comment -# -def GenInfPcdTailComment (Usage, TailCommentText): - if (Usage == ITEM_UNDEFINED) and (not TailCommentText): - return '' - - CommentLine = TAB_SPACE_SPLIT.join([Usage, TailCommentText]) - return GenTailCommentLines(CommentLine) - -## GenInfProtocolPPITailComment -# Generate Protocol/PPI tail comment for Inf -# -# @param Usage: Usage type -# @param TailCommentText: Comment text for tail comment -# -def 
GenInfProtocolPPITailComment (Usage, Notify, TailCommentText): - if (not Notify) and (Usage == ITEM_UNDEFINED) and (not TailCommentText): - return '' - - if Notify: - CommentLine = USAGE_ITEM_NOTIFY + " ## " - else: - CommentLine = '' - - CommentLine += TAB_SPACE_SPLIT.join([Usage, TailCommentText]) - return GenTailCommentLines(CommentLine) - -## GenInfGuidTailComment -# Generate Guid tail comment for Inf -# -# @param Usage: Usage type -# @param TailCommentText: Comment text for tail comment -# -def GenInfGuidTailComment (Usage, GuidTypeList, VariableName, TailCommentText): - GuidType = GuidTypeList[0] - if (Usage == ITEM_UNDEFINED) and (GuidType == ITEM_UNDEFINED) and \ - (not TailCommentText): - return '' - - FirstLine = Usage + " ## " + GuidType - if GuidType == TAB_INF_GUIDTYPE_VAR: - FirstLine += ":" + VariableName - - CommentLine = TAB_SPACE_SPLIT.join([FirstLine, TailCommentText]) - return GenTailCommentLines(CommentLine) - ## GenDecGuidTailComment # # @param SupModuleList: Supported module type list diff --git a/BaseTools/Source/Python/UPT/Library/Misc.py b/BaseTools/Source/Python/UPT/Library/Misc.py index f3688de4b1b7..554d1ec350e2 100644 --- a/BaseTools/Source/Python/UPT/Library/Misc.py +++ b/BaseTools/Source/Python/UPT/Library/Misc.py @@ -495,41 +495,6 @@ def IsAllModuleList(ModuleList): else: return True -## Dictionary that use comment(GenericComment, TailComment) as value, -# if a new comment which key already in the dic is inserted, then the -# comment will be merged. -# Key is (Statement, SupArch), when TailComment is added, it will ident -# according to Statement -# -class MergeCommentDict(dict): - ## []= operator - # - def __setitem__(self, Key, CommentVal): - GenericComment, TailComment = CommentVal - if Key in self: - OrigVal1, OrigVal2 = dict.__getitem__(self, Key) - Statement = Key[0] - dict.__setitem__(self, Key, (OrigVal1 + GenericComment, OrigVal2 \ - + len(Statement) * ' ' + TailComment)) - else: - dict.__setitem__(self, Key, (GenericComment, TailComment)) - - ## =[] operator - # - def __getitem__(self, Key): - return dict.__getitem__(self, Key) - - -## GenDummyHelpTextObj -# -# @retval HelpTxt: Generated dummy help text object -# -def GenDummyHelpTextObj(): - HelpTxt = TextObject() - HelpTxt.SetLang(TAB_LANGUAGE_EN_US) - HelpTxt.SetString(' ') - return HelpTxt - ## ConvertVersionToDecimal, the minor version should be within 0 - 99 # ::= "0x" # ::= (a-fA-F0-9){4} diff --git a/BaseTools/Source/Python/UPT/Library/ParserValidate.py b/BaseTools/Source/Python/UPT/Library/ParserValidate.py index edb0c6275aef..cfa51e7a3c3d 100644 --- a/BaseTools/Source/Python/UPT/Library/ParserValidate.py +++ b/BaseTools/Source/Python/UPT/Library/ParserValidate.py @@ -103,43 +103,6 @@ def IsValidInfMoudleType(ModuleType): else: return False -## Is Valid Component Type or not -# -# @param ComponentType: A string contain ComponentType need to be judged. -# -def IsValidInfComponentType(ComponentType): - if ComponentType.upper() in COMPONENT_TYPE_LIST: - return True - else: - return False - - -## Is valid Tool Family or not -# -# @param ToolFamily: A string contain Tool Family need to be judged. -# Family := [A-Z]([a-zA-Z0-9])* -# -def IsValidToolFamily(ToolFamily): - ReIsValidFamily = re.compile(r"^[A-Z]+[A-Za-z0-9]{0,}$", re.DOTALL) - if ReIsValidFamily.match(ToolFamily) is None: - return False - return True - -## Is valid Tool TagName or not -# -# The TagName sample is MYTOOLS and VS2005. -# -# @param TagName: A string contain Tool TagName need to be judged. 
-# -def IsValidToolTagName(TagName): - if TagName.strip() == '': - return True - if TagName.strip() == '*': - return True - if not IsValidWord(TagName): - return False - return True - ## Is valid arch or not # # @param Arch The arch string need to be validated @@ -455,37 +418,6 @@ def IsValidHexVersion(Word): return True -## IsValidBuildNumber -# -# Check whether the BUILD_NUMBER is valid. -# ["BUILD_NUMBER" "=" {1,4} ] -# -# @param Word: The BUILD_NUMBER string need to be checked. -# -def IsValidBuildNumber(Word): - ReIsValieBuildNumber = re.compile(r"[0-9]{1,4}$", re.DOTALL) - if ReIsValieBuildNumber.match(Word) is None: - return False - - return True - -## IsValidDepex -# -# Check whether the Depex is valid. -# -# @param Word: The Depex string need to be checked. -# -def IsValidDepex(Word): - Index = Word.upper().find("PUSH") - if Index > -1: - return IsValidCFormatGuid(Word[Index+4:].strip()) - - ReIsValidCName = re.compile(r"^[A-Za-z_][0-9A-Za-z_\s\.]*$", re.DOTALL) - if ReIsValidCName.match(Word) is None: - return False - - return True - ## IsValidNormalizedString # # Check @@ -532,25 +464,6 @@ def IsValidIdString(String): return False -## IsValidVersionString -# -# Check whether the VersionString is valid. -# ::= [ []{0,} []{0,} ] {0,} -# ::= {} {} -# ::= 0x09 -# ::= 0x20 -# ::= (0x21 - 0x7E) -# -# @param VersionString: The VersionString need to be checked. -# -def IsValidVersionString(VersionString): - VersionString = VersionString.strip() - for Char in VersionString: - if not (Char >= 0x21 and Char <= 0x7E): - return False - - return True - ## IsValidPcdValue # # Check whether the PcdValue is valid. @@ -714,13 +627,3 @@ def IsValidUserId(UserId): if Char == '.' and not Quoted: return False return True - -# -# Check if a UTF16-LE file has a BOM header -# -def CheckUTF16FileHeader(File): - FileIn = open(File, 'rb').read(2) - if FileIn != b'\xff\xfe': - return False - - return True diff --git a/BaseTools/Source/Python/UPT/Library/Parsing.py b/BaseTools/Source/Python/UPT/Library/Parsing.py index 6fb133745e36..5f2a448adab9 100644 --- a/BaseTools/Source/Python/UPT/Library/Parsing.py +++ b/BaseTools/Source/Python/UPT/Library/Parsing.py @@ -41,29 +41,6 @@ gPKG_INFO_DICT = {} -## GetBuildOption -# -# Parse a string with format "[:]=Flag" -# Return (Family, ToolFlag, Flag) -# -# @param String: String with BuildOption statement -# @param File: The file which defines build option, used in error report -# -def GetBuildOption(String, File, LineNo= -1): - (Family, ToolChain, Flag) = ('', '', '') - if String.find(DataType.TAB_EQUAL_SPLIT) < 0: - RaiseParserError(String, 'BuildOptions', File, \ - '[:]=Flag', LineNo) - else: - List = GetSplitValueList(String, DataType.TAB_EQUAL_SPLIT, MaxSplit=1) - if List[0].find(':') > -1: - Family = List[0][ : List[0].find(':')].strip() - ToolChain = List[0][List[0].find(':') + 1 : ].strip() - else: - ToolChain = List[0].strip() - Flag = List[1].strip() - return (Family, ToolChain, Flag) - ## Get Library Class # # Get Library of Dsc as | @@ -88,37 +65,6 @@ def GetLibraryClass(Item, ContainerFile, WorkspaceDir, LineNo= -1): return (List[0], List[1], SupMod) -## Get Library Class -# -# Get Library of Dsc as [|] -# [|.] 
-# -# @param Item: String as | -# @param ContainerFile: The file which describes the library class, used for -# error report -# -def GetLibraryClassOfInf(Item, ContainerFile, WorkspaceDir, LineNo= -1): - ItemList = GetSplitValueList((Item[0] + DataType.TAB_VALUE_SPLIT * 2)) - SupMod = DataType.SUP_MODULE_LIST_STRING - - if len(ItemList) > 5: - RaiseParserError\ - (Item[0], 'LibraryClasses', ContainerFile, \ - '[|]\ - [|.]') - else: - CheckFileType(ItemList[1], '.Inf', ContainerFile, 'LibraryClasses', \ - Item[0], LineNo) - CheckFileExist(WorkspaceDir, ItemList[1], ContainerFile, \ - 'LibraryClasses', Item[0], LineNo) - if ItemList[2] != '': - CheckPcdTokenInfo(ItemList[2], 'LibraryClasses', \ - ContainerFile, LineNo) - if Item[1] != '': - SupMod = Item[1] - - return (ItemList[0], ItemList[1], ItemList[2], SupMod) - ## CheckPcdTokenInfo # # Check if PcdTokenInfo is following . @@ -136,414 +82,6 @@ def CheckPcdTokenInfo(TokenInfoString, Section, File, LineNo= -1): RaiseParserError(TokenInfoString, Section, File, Format, LineNo) -## Get Pcd -# -# Get Pcd of Dsc as .| -# [||] -# -# @param Item: String as .| -# [||] -# @param ContainerFile: The file which describes the pcd, used for error -# report - -# -def GetPcd(Item, Type, ContainerFile, LineNo= -1): - TokenGuid, TokenName, Value, MaximumDatumSize, Token = '', '', '', '', '' - List = GetSplitValueList(Item + DataType.TAB_VALUE_SPLIT * 2) - - if len(List) < 4 or len(List) > 6: - RaiseParserError(Item, 'Pcds' + Type, ContainerFile, \ - '.|\ - [||]', LineNo) - else: - Value = List[1] - MaximumDatumSize = List[2] - Token = List[3] - - if CheckPcdTokenInfo(List[0], 'Pcds' + Type, ContainerFile, LineNo): - (TokenGuid, TokenName) = GetSplitValueList(List[0], DataType.TAB_SPLIT) - - return (TokenName, TokenGuid, Value, MaximumDatumSize, Token, Type) - -## Get FeatureFlagPcd -# -# Get FeatureFlagPcd of Dsc as .|TRUE/FALSE -# -# @param Item: String as -# .|TRUE/FALSE -# @param ContainerFile: The file which describes the pcd, used for error -# report -# -def GetFeatureFlagPcd(Item, Type, ContainerFile, LineNo= -1): - TokenGuid, TokenName, Value = '', '', '' - List = GetSplitValueList(Item) - if len(List) != 2: - RaiseParserError(Item, 'Pcds' + Type, ContainerFile, \ - '.|TRUE/FALSE', \ - LineNo) - else: - Value = List[1] - if CheckPcdTokenInfo(List[0], 'Pcds' + Type, ContainerFile, LineNo): - (TokenGuid, TokenName) = GetSplitValueList(List[0], DataType.TAB_SPLIT) - - return (TokenName, TokenGuid, Value, Type) - -## Get DynamicDefaultPcd -# -# Get DynamicDefaultPcd of Dsc as . 
-# |[|[|]] -# -# @param Item: String as .| -# TRUE/FALSE -# @param ContainerFile: The file which describes the pcd, used for error -# report -# -def GetDynamicDefaultPcd(Item, Type, ContainerFile, LineNo= -1): - TokenGuid, TokenName, Value, DatumTyp, MaxDatumSize = '', '', '', '', '' - List = GetSplitValueList(Item + DataType.TAB_VALUE_SPLIT * 2) - if len(List) < 4 or len(List) > 8: - RaiseParserError(Item, 'Pcds' + Type, ContainerFile, \ - '.|\ - [|[|]]', LineNo) - else: - Value = List[1] - DatumTyp = List[2] - MaxDatumSize = List[3] - if CheckPcdTokenInfo(List[0], 'Pcds' + Type, ContainerFile, LineNo): - (TokenGuid, TokenName) = GetSplitValueList(List[0], DataType.TAB_SPLIT) - - return (TokenName, TokenGuid, Value, DatumTyp, MaxDatumSize, Type) - -## Get DynamicHiiPcd -# -# Get DynamicHiiPcd of Dsc as .|| -# |[|[|]] -# -# @param Item: String as .| -# TRUE/FALSE -# @param ContainerFile: The file which describes the pcd, used for error -# report -# -def GetDynamicHiiPcd(Item, Type, ContainerFile, LineNo= -1): - TokenGuid, TokenName, List1, List2, List3, List4, List5 = \ - '', '', '', '', '', '', '' - List = GetSplitValueList(Item + DataType.TAB_VALUE_SPLIT * 2) - if len(List) < 6 or len(List) > 8: - RaiseParserError(Item, 'Pcds' + Type, ContainerFile, \ - '.||\ - |[|\ - [|]]', LineNo) - else: - List1, List2, List3, List4, List5 = \ - List[1], List[2], List[3], List[4], List[5] - if CheckPcdTokenInfo(List[0], 'Pcds' + Type, ContainerFile, LineNo): - (TokenGuid, TokenName) = GetSplitValueList(List[0], DataType.TAB_SPLIT) - - return (TokenName, TokenGuid, List1, List2, List3, List4, List5, Type) - -## Get DynamicVpdPcd -# -# Get DynamicVpdPcd of Dsc as .| -# [|] -# -# @param Item: String as . -# |TRUE/FALSE -# @param ContainerFile: The file which describes the pcd, used for error -# report -# -def GetDynamicVpdPcd(Item, Type, ContainerFile, LineNo= -1): - TokenGuid, TokenName, List1, List2 = '', '', '', '' - List = GetSplitValueList(Item + DataType.TAB_VALUE_SPLIT) - if len(List) < 3 or len(List) > 4: - RaiseParserError(Item, 'Pcds' + Type, ContainerFile, \ - '.|\ - [|]', LineNo) - else: - List1, List2 = List[1], List[2] - if CheckPcdTokenInfo(List[0], 'Pcds' + Type, ContainerFile, LineNo): - (TokenGuid, TokenName) = GetSplitValueList(List[0], DataType.TAB_SPLIT) - - return (TokenName, TokenGuid, List1, List2, Type) - -## GetComponent -# -# Parse block of the components defined in dsc file -# Set KeyValues as [ ['component name', [lib1, lib2, lib3], -# [bo1, bo2, bo3], [pcd1, pcd2, pcd3]], ...] 
-# -# @param Lines: The content to be parsed -# @param KeyValues: To store data after parsing -# -def GetComponent(Lines, KeyValues): - (FindBlock, FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \ - FindPcdsPatchableInModule, FindPcdsFixedAtBuild, FindPcdsDynamic, \ - FindPcdsDynamicEx) = (False, False, False, False, False, False, False, \ - False) - ListItem = None - LibraryClassItem = [] - BuildOption = [] - Pcd = [] - - for Line in Lines: - Line = Line[0] - # - # Ignore !include statement - # - if Line.upper().find(DataType.TAB_INCLUDE.upper() + ' ') > -1 or \ - Line.upper().find(DataType.TAB_DEFINE + ' ') > -1: - continue - - if FindBlock == False: - ListItem = Line - # - # find '{' at line tail - # - if Line.endswith('{'): - FindBlock = True - ListItem = CleanString(Line.rsplit('{', 1)[0], \ - DataType.TAB_COMMENT_SPLIT) - - # - # Parse a block content - # - if FindBlock: - if Line.find('') != -1: - (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \ - FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \ - FindPcdsDynamic, FindPcdsDynamicEx) = \ - (True, False, False, False, False, False, False) - continue - if Line.find('') != -1: - (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \ - FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \ - FindPcdsDynamic, FindPcdsDynamicEx) = \ - (False, True, False, False, False, False, False) - continue - if Line.find('') != -1: - (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \ - FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \ - FindPcdsDynamic, FindPcdsDynamicEx) = \ - (False, False, True, False, False, False, False) - continue - if Line.find('') != -1: - (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \ - FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \ - FindPcdsDynamic, FindPcdsDynamicEx) = \ - (False, False, False, True, False, False, False) - continue - if Line.find('') != -1: - (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \ - FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \ - FindPcdsDynamic, FindPcdsDynamicEx) = \ - (False, False, False, False, True, False, False) - continue - if Line.find('') != -1: - (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \ - FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \ - FindPcdsDynamic, FindPcdsDynamicEx) = \ - (False, False, False, False, False, True, False) - continue - if Line.find('') != -1: - (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \ - FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \ - FindPcdsDynamic, FindPcdsDynamicEx) = \ - (False, False, False, False, False, False, True) - continue - if Line.endswith('}'): - # - # find '}' at line tail - # - KeyValues.append([ListItem, LibraryClassItem, \ - BuildOption, Pcd]) - (FindBlock, FindLibraryClass, FindBuildOption, \ - FindPcdsFeatureFlag, FindPcdsPatchableInModule, \ - FindPcdsFixedAtBuild, FindPcdsDynamic, FindPcdsDynamicEx) = \ - (False, False, False, False, False, False, False, False) - LibraryClassItem, BuildOption, Pcd = [], [], [] - continue - - if FindBlock: - if FindLibraryClass: - LibraryClassItem.append(Line) - elif FindBuildOption: - BuildOption.append(Line) - elif FindPcdsFeatureFlag: - Pcd.append((DataType.TAB_PCDS_FEATURE_FLAG_NULL, Line)) - elif FindPcdsPatchableInModule: - Pcd.append((DataType.TAB_PCDS_PATCHABLE_IN_MODULE_NULL, Line)) - elif FindPcdsFixedAtBuild: - Pcd.append((DataType.TAB_PCDS_FIXED_AT_BUILD_NULL, Line)) - elif FindPcdsDynamic: - Pcd.append((DataType.TAB_PCDS_DYNAMIC_DEFAULT_NULL, Line)) - elif FindPcdsDynamicEx: - 
Pcd.append((DataType.TAB_PCDS_DYNAMIC_EX_DEFAULT_NULL, Line)) - else: - KeyValues.append([ListItem, [], [], []]) - - return True - -## GetExec -# -# Parse a string with format "InfFilename [EXEC = ExecFilename]" -# Return (InfFilename, ExecFilename) -# -# @param String: String with EXEC statement -# -def GetExec(String): - InfFilename = '' - ExecFilename = '' - if String.find('EXEC') > -1: - InfFilename = String[ : String.find('EXEC')].strip() - ExecFilename = String[String.find('EXEC') + len('EXEC') : ].strip() - else: - InfFilename = String.strip() - - return (InfFilename, ExecFilename) - -## GetComponents -# -# Parse block of the components defined in dsc file -# Set KeyValues as [ ['component name', [lib1, lib2, lib3], [bo1, bo2, bo3], -# [pcd1, pcd2, pcd3]], ...] -# -# @param Lines: The content to be parsed -# @param Key: Reserved -# @param KeyValues: To store data after parsing -# @param CommentCharacter: Comment char, used to ignore comment content -# -# @retval True Get component successfully -# -def GetComponents(Lines, KeyValues, CommentCharacter): - if Lines.find(DataType.TAB_SECTION_END) > -1: - Lines = Lines.split(DataType.TAB_SECTION_END, 1)[1] - (FindBlock, FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \ - FindPcdsPatchableInModule, FindPcdsFixedAtBuild, FindPcdsDynamic, \ - FindPcdsDynamicEx) = \ - (False, False, False, False, False, False, False, False) - ListItem = None - LibraryClassItem = [] - BuildOption = [] - Pcd = [] - - LineList = Lines.split('\n') - for Line in LineList: - Line = CleanString(Line, CommentCharacter) - if Line is None or Line == '': - continue - - if FindBlock == False: - ListItem = Line - # - # find '{' at line tail - # - if Line.endswith('{'): - FindBlock = True - ListItem = CleanString(Line.rsplit('{', 1)[0], CommentCharacter) - - # - # Parse a block content - # - if FindBlock: - if Line.find('') != -1: - (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \ - FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \ - FindPcdsDynamic, FindPcdsDynamicEx) = \ - (True, False, False, False, False, False, False) - continue - if Line.find('') != -1: - (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \ - FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \ - FindPcdsDynamic, FindPcdsDynamicEx) = \ - (False, True, False, False, False, False, False) - continue - if Line.find('') != -1: - (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \ - FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \ - FindPcdsDynamic, FindPcdsDynamicEx) = \ - (False, False, True, False, False, False, False) - continue - if Line.find('') != -1: - (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \ - FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \ - FindPcdsDynamic, FindPcdsDynamicEx) = \ - (False, False, False, True, False, False, False) - continue - if Line.find('') != -1: - (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \ - FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \ - FindPcdsDynamic, FindPcdsDynamicEx) = \ - (False, False, False, False, True, False, False) - continue - if Line.find('') != -1: - (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \ - FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \ - FindPcdsDynamic, FindPcdsDynamicEx) = \ - (False, False, False, False, False, True, False) - continue - if Line.find('') != -1: - (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \ - FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \ - FindPcdsDynamic, FindPcdsDynamicEx) = \ - (False, False, False, False, False, False, 
True) - continue - if Line.endswith('}'): - # - # find '}' at line tail - # - KeyValues.append([ListItem, LibraryClassItem, BuildOption, \ - Pcd]) - (FindBlock, FindLibraryClass, FindBuildOption, \ - FindPcdsFeatureFlag, FindPcdsPatchableInModule, \ - FindPcdsFixedAtBuild, FindPcdsDynamic, FindPcdsDynamicEx) = \ - (False, False, False, False, False, False, False, False) - LibraryClassItem, BuildOption, Pcd = [], [], [] - continue - - if FindBlock: - if FindLibraryClass: - LibraryClassItem.append(Line) - elif FindBuildOption: - BuildOption.append(Line) - elif FindPcdsFeatureFlag: - Pcd.append((DataType.TAB_PCDS_FEATURE_FLAG, Line)) - elif FindPcdsPatchableInModule: - Pcd.append((DataType.TAB_PCDS_PATCHABLE_IN_MODULE, Line)) - elif FindPcdsFixedAtBuild: - Pcd.append((DataType.TAB_PCDS_FIXED_AT_BUILD, Line)) - elif FindPcdsDynamic: - Pcd.append((DataType.TAB_PCDS_DYNAMIC, Line)) - elif FindPcdsDynamicEx: - Pcd.append((DataType.TAB_PCDS_DYNAMIC_EX, Line)) - else: - KeyValues.append([ListItem, [], [], []]) - - return True - -## Get Source -# -# Get Source of Inf as [|[|[| -# [|]]]] -# -# @param Item: String as [|[|[| -# [|]]]] -# @param ContainerFile: The file which describes the library class, used -# for error report -# -def GetSource(Item, ContainerFile, FileRelativePath, LineNo= -1): - ItemNew = Item + DataType.TAB_VALUE_SPLIT * 4 - List = GetSplitValueList(ItemNew) - if len(List) < 5 or len(List) > 9: - RaiseParserError(Item, 'Sources', ContainerFile, \ - '[|[|[|\ - [|]]]]', LineNo) - List[0] = NormPath(List[0]) - CheckFileExist(FileRelativePath, List[0], ContainerFile, 'Sources', \ - Item, LineNo) - if List[4] != '': - CheckPcdTokenInfo(List[4], 'Sources', ContainerFile, LineNo) - - return (List[0], List[1], List[2], List[3], List[4]) - ## Get Binary # # Get Binary of Inf as [|[|[| @@ -569,51 +107,6 @@ def GetBinary(Item, ContainerFile, LineNo= -1): elif len(List) == 3: return (List[0], List[1], List[2], '') -## Get Guids/Protocols/Ppis -# -# Get Guids/Protocols/Ppis of Inf as [|] -# -# @param Item: String as [|] -# @param Type: Type of parsing string -# @param ContainerFile: The file which describes the library class, -# used for error report -# -def GetGuidsProtocolsPpisOfInf(Item): - ItemNew = Item + DataType.TAB_VALUE_SPLIT - List = GetSplitValueList(ItemNew) - return (List[0], List[1]) - -## Get Guids/Protocols/Ppis -# -# Get Guids/Protocols/Ppis of Dec as = -# -# @param Item: String as = -# @param Type: Type of parsing string -# @param ContainerFile: The file which describes the library class, -# used for error report -# -def GetGuidsProtocolsPpisOfDec(Item, Type, ContainerFile, LineNo= -1): - List = GetSplitValueList(Item, DataType.TAB_EQUAL_SPLIT) - if len(List) != 2: - RaiseParserError(Item, Type, ContainerFile, '=', \ - LineNo) - # - #convert C-Format Guid to Register Format - # - if List[1][0] == '{' and List[1][-1] == '}': - RegisterFormatGuid = GuidStructureStringToGuidString(List[1]) - if RegisterFormatGuid == '': - RaiseParserError(Item, Type, ContainerFile, \ - 'CFormat or RegisterFormat', LineNo) - else: - if CheckGuidRegFormat(List[1]): - RegisterFormatGuid = List[1] - else: - RaiseParserError(Item, Type, ContainerFile, \ - 'CFormat or RegisterFormat', LineNo) - - return (List[0], RegisterFormatGuid) - ## GetPackage # # Get Package of Inf as [|] @@ -634,70 +127,6 @@ def GetPackage(Item, ContainerFile, FileRelativePath, LineNo= -1): return (List[0], List[1]) -## Get Pcd Values of Inf -# -# Get Pcd of Inf as .[|] -# -# @param Item: The string describes pcd -# @param Type: 
The type of Pcd -# @param File: The file which describes the pcd, used for error report -# -def GetPcdOfInf(Item, Type, File, LineNo): - Format = '.[|]' - TokenGuid, TokenName, Value, InfType = '', '', '', '' - - if Type == DataType.TAB_PCDS_FIXED_AT_BUILD: - InfType = DataType.TAB_INF_FIXED_PCD - elif Type == DataType.TAB_PCDS_PATCHABLE_IN_MODULE: - InfType = DataType.TAB_INF_PATCH_PCD - elif Type == DataType.TAB_PCDS_FEATURE_FLAG: - InfType = DataType.TAB_INF_FEATURE_PCD - elif Type == DataType.TAB_PCDS_DYNAMIC_EX: - InfType = DataType.TAB_INF_PCD_EX - elif Type == DataType.TAB_PCDS_DYNAMIC: - InfType = DataType.TAB_INF_PCD - List = GetSplitValueList(Item, DataType.TAB_VALUE_SPLIT, 1) - TokenInfo = GetSplitValueList(List[0], DataType.TAB_SPLIT) - if len(TokenInfo) != 2: - RaiseParserError(Item, InfType, File, Format, LineNo) - else: - TokenGuid = TokenInfo[0] - TokenName = TokenInfo[1] - - if len(List) > 1: - Value = List[1] - else: - Value = None - return (TokenGuid, TokenName, Value, InfType) - - -## Get Pcd Values of Dec -# -# Get Pcd of Dec as .||| -# @param Item: Pcd item -# @param Type: Pcd type -# @param File: Dec file -# @param LineNo: Line number -# -def GetPcdOfDec(Item, Type, File, LineNo= -1): - Format = '.|||' - TokenGuid, TokenName, Value, DatumType, Token = '', '', '', '', '' - List = GetSplitValueList(Item) - if len(List) != 4: - RaiseParserError(Item, 'Pcds' + Type, File, Format, LineNo) - else: - Value = List[1] - DatumType = List[2] - Token = List[3] - TokenInfo = GetSplitValueList(List[0], DataType.TAB_SPLIT) - if len(TokenInfo) != 2: - RaiseParserError(Item, 'Pcds' + Type, File, Format, LineNo) - else: - TokenGuid = TokenInfo[0] - TokenName = TokenInfo[1] - - return (TokenGuid, TokenName, Value, DatumType, Token, Type) - ## Parse DEFINE statement # # Get DEFINE macros @@ -725,60 +154,6 @@ def ParseDefine(LineValue, StartLine, Table, FileID, SectionName, \ '', '', Arch, SectionModel, FileID, StartLine, -1, \ StartLine, -1, 0) -## InsertSectionItems -# -# Insert item data of a section to a dict -# -# @param Model: A model -# @param CurrentSection: Current section -# @param SectionItemList: Section item list -# @param ArchList: Arch list -# @param ThirdList: Third list -# @param RecordSet: Record set -# -def InsertSectionItems(Model, SectionItemList, ArchList, \ - ThirdList, RecordSet): - # - # Insert each item data of a section - # - for Index in range(0, len(ArchList)): - Arch = ArchList[Index] - Third = ThirdList[Index] - if Arch == '': - Arch = DataType.TAB_ARCH_COMMON - - Records = RecordSet[Model] - for SectionItem in SectionItemList: - LineValue, StartLine, Comment = SectionItem[0], \ - SectionItem[1], SectionItem[2] - - Logger.Debug(4, ST.MSG_PARSING % LineValue) - # - # And then parse DEFINE statement - # - if LineValue.upper().find(DataType.TAB_DEFINE.upper() + ' ') > -1: - continue - # - # At last parse other sections - # - IdNum = -1 - Records.append([LineValue, Arch, StartLine, IdNum, Third, Comment]) - - if RecordSet != {}: - RecordSet[Model] = Records - -## GenMetaDatSectionItem -# -# @param Key: A key -# @param Value: A value -# @param List: A list -# -def GenMetaDatSectionItem(Key, Value, List): - if Key not in List: - List[Key] = [Value] - else: - List[Key].append(Value) - ## GetPkgInfoFromDec # # get package name, guid, version info from dec files diff --git a/BaseTools/Source/Python/UPT/Library/StringUtils.py b/BaseTools/Source/Python/UPT/Library/StringUtils.py index a6f47d0dd79b..fa6c12149811 100644 --- 
a/BaseTools/Source/Python/UPT/Library/StringUtils.py +++ b/BaseTools/Source/Python/UPT/Library/StringUtils.py @@ -75,50 +75,6 @@ def GenDefines(String, Arch, Defines): return -1 return 1 -## GetLibraryClassesWithModuleType -# -# Get Library Class definition when no module type defined -# -# @param Lines: The content to be parsed -# @param Key: Reserved -# @param KeyValues: To store data after parsing -# @param CommentCharacter: Comment char, used to ignore comment content -# -def GetLibraryClassesWithModuleType(Lines, Key, KeyValues, CommentCharacter): - NewKey = SplitModuleType(Key) - Lines = Lines.split(DataType.TAB_SECTION_END, 1)[1] - LineList = Lines.splitlines() - for Line in LineList: - Line = CleanString(Line, CommentCharacter) - if Line != '' and Line[0] != CommentCharacter: - KeyValues.append([CleanString(Line, CommentCharacter), NewKey[1]]) - - return True - -## GetDynamics -# -# Get Dynamic Pcds -# -# @param Lines: The content to be parsed -# @param Key: Reserved -# @param KeyValues: To store data after parsing -# @param CommentCharacter: Comment char, used to ignore comment content -# -def GetDynamics(Lines, Key, KeyValues, CommentCharacter): - # - # Get SkuId Name List - # - SkuIdNameList = SplitModuleType(Key) - - Lines = Lines.split(DataType.TAB_SECTION_END, 1)[1] - LineList = Lines.splitlines() - for Line in LineList: - Line = CleanString(Line, CommentCharacter) - if Line != '' and Line[0] != CommentCharacter: - KeyValues.append([CleanString(Line, CommentCharacter), SkuIdNameList[1]]) - - return True - ## SplitModuleType # # Split ModuleType out of section defien to get key @@ -337,29 +293,6 @@ def CleanString2(Line, CommentCharacter=DataType.TAB_COMMENT_SPLIT, AllowCppStyl return Line, Comment -## GetMultipleValuesOfKeyFromLines -# -# Parse multiple strings to clean comment and spaces -# The result is saved to KeyValues -# -# @param Lines: The content to be parsed -# @param Key: Reserved -# @param KeyValues: To store data after parsing -# @param CommentCharacter: Comment char, used to ignore comment content -# -def GetMultipleValuesOfKeyFromLines(Lines, Key, KeyValues, CommentCharacter): - if Key: - pass - if KeyValues: - pass - Lines = Lines.split(DataType.TAB_SECTION_END, 1)[1] - LineList = Lines.split('\n') - for Line in LineList: - Line = CleanString(Line, CommentCharacter) - if Line != '' and Line[0] != CommentCharacter: - KeyValues += [Line] - return True - ## GetDefineValue # # Parse a DEFINE statement to get defined value @@ -375,133 +308,6 @@ def GetDefineValue(String, Key, CommentCharacter): String = CleanString(String) return String[String.find(Key + ' ') + len(Key + ' ') : ] -## GetSingleValueOfKeyFromLines -# -# Parse multiple strings as below to get value of each definition line -# Key1 = Value1 -# Key2 = Value2 -# The result is saved to Dictionary -# -# @param Lines: The content to be parsed -# @param Dictionary: To store data after parsing -# @param CommentCharacter: Comment char, be used to ignore comment content -# @param KeySplitCharacter: Key split char, between key name and key value. -# Key1 = Value1, '=' is the key split char -# @param ValueSplitFlag: Value split flag, be used to decide if has -# multiple values -# @param ValueSplitCharacter: Value split char, be used to split multiple -# values. 
Key1 = Value1|Value2, '|' is the value -# split char -# -def GetSingleValueOfKeyFromLines(Lines, Dictionary, CommentCharacter, KeySplitCharacter, \ - ValueSplitFlag, ValueSplitCharacter): - Lines = Lines.split('\n') - Keys = [] - Value = '' - DefineValues = [''] - SpecValues = [''] - - for Line in Lines: - # - # Handle DEFINE and SPEC - # - if Line.find(DataType.TAB_INF_DEFINES_DEFINE + ' ') > -1: - if '' in DefineValues: - DefineValues.remove('') - DefineValues.append(GetDefineValue(Line, DataType.TAB_INF_DEFINES_DEFINE, CommentCharacter)) - continue - if Line.find(DataType.TAB_INF_DEFINES_SPEC + ' ') > -1: - if '' in SpecValues: - SpecValues.remove('') - SpecValues.append(GetDefineValue(Line, DataType.TAB_INF_DEFINES_SPEC, CommentCharacter)) - continue - - # - # Handle Others - # - LineList = Line.split(KeySplitCharacter, 1) - if len(LineList) >= 2: - Key = LineList[0].split() - if len(Key) == 1 and Key[0][0] != CommentCharacter: - # - # Remove comments and white spaces - # - LineList[1] = CleanString(LineList[1], CommentCharacter) - if ValueSplitFlag: - Value = list(map(lambda x: x.strip(), LineList[1].split(ValueSplitCharacter))) - else: - Value = CleanString(LineList[1], CommentCharacter).splitlines() - - if Key[0] in Dictionary: - if Key[0] not in Keys: - Dictionary[Key[0]] = Value - Keys.append(Key[0]) - else: - Dictionary[Key[0]].extend(Value) - else: - Dictionary[DataType.TAB_INF_DEFINES_MACRO][Key[0]] = Value[0] - - if DefineValues == []: - DefineValues = [''] - if SpecValues == []: - SpecValues = [''] - Dictionary[DataType.TAB_INF_DEFINES_DEFINE] = DefineValues - Dictionary[DataType.TAB_INF_DEFINES_SPEC] = SpecValues - - return True - -## The content to be parsed -# -# Do pre-check for a file before it is parsed -# Check $() -# Check [] -# -# @param FileName: Used for error report -# @param FileContent: File content to be parsed -# @param SupSectionTag: Used for error report -# -def PreCheck(FileName, FileContent, SupSectionTag): - if SupSectionTag: - pass - LineNo = 0 - IsFailed = False - NewFileContent = '' - for Line in FileContent.splitlines(): - LineNo = LineNo + 1 - # - # Clean current line - # - Line = CleanString(Line) - # - # Remove commented line - # - if Line.find(DataType.TAB_COMMA_SPLIT) == 0: - Line = '' - # - # Check $() - # - if Line.find('$') > -1: - if Line.find('$(') < 0 or Line.find(')') < 0: - Logger.Error("Parser", FORMAT_INVALID, Line=LineNo, File=FileName, RaiseError=Logger.IS_RAISE_ERROR) - # - # Check [] - # - if Line.find('[') > -1 or Line.find(']') > -1: - # - # Only get one '[' or one ']' - # - if not (Line.find('[') > -1 and Line.find(']') > -1): - Logger.Error("Parser", FORMAT_INVALID, Line=LineNo, File=FileName, RaiseError=Logger.IS_RAISE_ERROR) - # - # Regenerate FileContent - # - NewFileContent = NewFileContent + Line + '\r\n' - - if IsFailed: - Logger.Error("Parser", FORMAT_INVALID, Line=LineNo, File=FileName, RaiseError=Logger.IS_RAISE_ERROR) - - return NewFileContent - ## CheckFileType # # Check if the Filename is including ExtName @@ -666,20 +472,6 @@ def GetHelpTextList(HelpTextClassList): List.extend(HelpText.String.split('\n')) return List -## Get String Array Length -# -# Get String Array Length -# -# @param String: the source string -# -def StringArrayLength(String): - if String.startswith('L"'): - return (len(String) - 3 + 1) * 2 - elif String.startswith('"'): - return (len(String) - 2 + 1) - else: - return len(String.split()) + 1 - ## RemoveDupOption # # Remove Dup Option @@ -707,27 +499,6 @@ def RemoveDupOption(OptionString, 
Which="/I", Against=None): ValueList.append(Val) return " ".join(OptionList) -## Check if the string is HexDgit -# -# Return true if all characters in the string are digits and there is at -# least one character -# or valid Hexs (started with 0x, following by hexdigit letters) -# , false otherwise. -# @param string: input string -# -def IsHexDigit(Str): - try: - int(Str, 10) - return True - except ValueError: - if len(Str) > 2 and Str.upper().startswith('0X'): - try: - int(Str, 16) - return True - except ValueError: - return False - return False - ## Check if the string is HexDgit and its integer value within limit of UINT32 # # Return true if all characters in the string are digits and there is at diff --git a/BaseTools/Source/Python/UPT/Library/UniClassObject.py b/BaseTools/Source/Python/UPT/Library/UniClassObject.py index 80924a6f2a9e..1e7ca70a76e7 100644 --- a/BaseTools/Source/Python/UPT/Library/UniClassObject.py +++ b/BaseTools/Source/Python/UPT/Library/UniClassObject.py @@ -74,18 +74,6 @@ 'wln':'wa', 'wol':'wo', 'xho':'xh', 'yid':'yi', 'yor':'yo', 'zha':'za', \ 'zho':'zh', 'zul':'zu'} -## Convert a python unicode string to a normal string -# -# Convert a python unicode string to a normal string -# UniToStr(u'I am a string') is 'I am a string' -# -# @param Uni: The python unicode string -# -# @retval: The formatted normal string -# -def UniToStr(Uni): - return repr(Uni)[2:-1] - ## Convert a unicode string to a Hex list # # Convert a unicode string to a Hex list @@ -224,7 +212,6 @@ def FormatUniEntry(StrTokenName, TokenValueList, ContainerFile): class StringDefClassObject(object): def __init__(self, Name = None, Value = None, Referenced = False, Token = None, UseOtherLangDef = ''): self.StringName = '' - self.StringNameByteList = [] self.StringValue = '' self.StringValueByteList = '' self.Token = 0 @@ -234,7 +221,6 @@ def __init__(self, Name = None, Value = None, Referenced = False, Token = None, if Name is not None: self.StringName = Name - self.StringNameByteList = UniToHexList(Name) if Value is not None: self.StringValue = Value self.StringValueByteList = UniToHexList(self.StringValue) @@ -371,15 +357,6 @@ def GetStringObject(self, Item): Language = GetLanguageCode(Language, self.IsCompatibleMode, self.File) self.AddStringToList(Name, Language, Value) - # - # Get include file list and load them - # - def GetIncludeFile(self, Item, Dir = None): - if Dir: - pass - FileName = Item[Item.find(u'!include ') + len(u'!include ') :Item.find(u' ', len(u'!include '))][1:-1] - self.LoadUniFile(FileName) - # # Pre-process before parse .uni file # @@ -941,26 +918,6 @@ def SetStringReferenced(self, Name): Item = self.OrderedStringList[Lang][ItemIndexInList] Item.Referenced = True - # - # Search the string in language definition by Name - # - def FindStringValue(self, Name, Lang): - if Name in self.OrderedStringDict[Lang]: - ItemIndexInList = self.OrderedStringDict[Lang][Name] - return self.OrderedStringList[Lang][ItemIndexInList] - - return None - - # - # Search the string in language definition by Token - # - def FindByToken(self, Token, Lang): - for Item in self.OrderedStringList[Lang]: - if Item.Token == Token: - return Item - - return None - # # Re-order strings and re-generate tokens # @@ -1015,25 +972,3 @@ def ShowMe(self): print(Item) for Member in self.OrderedStringList[Item]: print(str(Member)) - - # - # Read content from '!include' UNI file - # - def ReadIncludeUNIfile(self, FilaPath): - if self.File: - pass - - if not os.path.exists(FilaPath) or not os.path.isfile(FilaPath): - 
EdkLogger.Error("Unicode File Parser", - ToolError.FILE_NOT_FOUND, - ExtraData=FilaPath) - try: - FileIn = codecs.open(FilaPath, mode='rb', encoding='utf_8').readlines() - except UnicodeError as Xstr: - FileIn = codecs.open(FilaPath, mode='rb', encoding='utf_16').readlines() - except UnicodeError: - FileIn = codecs.open(FilaPath, mode='rb', encoding='utf_16_le').readlines() - except: - EdkLogger.Error("Unicode File Parser", ToolError.FILE_OPEN_FAILURE, ExtraData=FilaPath) - return FileIn - diff --git a/BaseTools/Source/Python/UPT/Library/Xml/XmlRoutines.py b/BaseTools/Source/Python/UPT/Library/Xml/XmlRoutines.py index 94e97fa45c12..b12999a72d02 100644 --- a/BaseTools/Source/Python/UPT/Library/Xml/XmlRoutines.py +++ b/BaseTools/Source/Python/UPT/Library/Xml/XmlRoutines.py @@ -150,33 +150,6 @@ def XmlElement2(Dom, String): except BaseException: return "" - -## Get a single XML element of the current node. -# -# Return a single XML element specified by the current root Dom. -# If the input Dom is not valid, then an empty string is returned. -# -# @param Dom The root XML DOM object. -# -def XmlElementData(Dom): - try: - return Dom.firstChild.data.strip() - except BaseException: - return "" - - -## Get a list of XML elements using XPath style syntax. -# -# Return a list of XML elements from the root Dom specified by XPath String. -# If the input Dom or String is not valid, then an empty list is returned. -# -# @param Dom The root XML DOM object. -# @param String A XPath style path. -# -def XmlElementList(Dom, String): - return list(map(XmlElementData, XmlList(Dom, String))) - - ## Get the XML attribute of the current node. # # Return a single XML attribute named Attribute from the current root Dom. @@ -191,20 +164,6 @@ def XmlAttribute(Dom, Attribute): except BaseException: return '' - -## Get the XML node name of the current node. -# -# Return a single XML node name from the current root Dom. -# If the input Dom is not valid, then an empty string is returned. -# -# @param Dom The root XML DOM object. -# -def XmlNodeName(Dom): - try: - return Dom.nodeName.strip() - except BaseException: - return '' - ## Parse an XML file. # # Parse the input XML file named FileName and return a XML DOM it stands for. From 128c46ef82e00129243bc71e368a168f8679da29 Mon Sep 17 00:00:00 2001 From: Pierre Gondois Date: Wed, 30 Apr 2025 14:45:13 +0200 Subject: [PATCH 15/15] BaseTools: WorkSpace: Remove unnecessary code Running the vulture tool on the WorkSpace folder gave the following report. Remove the unnecessary code. 
- Workspace/BuildClassObject.py:148: unused method 'IsSimpleTypeArray' (60% confidence) - Workspace/BuildClassObject.py:337: unused method 'SetPcdMode' (60% confidence) - Workspace/BuildClassObject.py:612: unused attribute 'DscSpecification' (60% confidence) - Workspace/DscBuildData.py:451: unused property 'DscSpecification' (60% confidence) - Workspace/DscBuildData.py:1253: unused method 'GetBuildOptionsByPkg' (60% confidence) - Workspace/DscBuildData.py:2064: unused method 'GetStarNum' (60% confidence) - Workspace/DscBuildData.py:3613: unused method 'AddModule' (60% confidence) - Workspace/DscBuildData.py:3650: unused method 'AddPcd' (60% confidence) - Workspace/InfBuildData.py:117: unused attribute '_TailComments' (60% confidence) - Workspace/InfBuildData.py:126: unused attribute '_BinaryModule' (60% confidence) - Workspace/MetaDataTable.py:114: unused method 'IsIntegral' (60% confidence) - Workspace/MetaDataTable.py:218: unused method 'GetFileTimeStamp' (60% confidence) - Workspace/MetaDataTable.py:230: unused method 'SetFileTimeStamp' (60% confidence) - Workspace/MetaDataTable.py:298: unused method 'GetCrossIndex' (60% confidence) - Workspace/MetaFileParser.py:161: unused attribute '_FileDir' (60% confidence) - Workspace/MetaFileParser.py:1187: unused method '_DecodeCODEData' (60% confidence) - Workspace/MetaFileParser.py:1796: unused attribute '_RestofValue' (60% confidence) - Workspace/MetaFileTable.py:31: unused attribute '_NumpyTab' (60% confidence) - Workspace/WorkspaceDatabase.py:136: unused class 'TransformObjectFactory' (60% confidence) - Workspace/WorkspaceDatabase.py:159: unused attribute 'TransformObject' (60% confidence) Signed-off-by: Pierre Gondois --- .../Python/Workspace/BuildClassObject.py | 13 ---- .../Source/Python/Workspace/DscBuildData.py | 66 ------------------- .../Source/Python/Workspace/InfBuildData.py | 2 - .../Source/Python/Workspace/MetaDataTable.py | 46 ------------- .../Source/Python/Workspace/MetaFileParser.py | 6 -- .../Source/Python/Workspace/MetaFileTable.py | 1 - .../Python/Workspace/WorkspaceDatabase.py | 10 --- 7 files changed, 144 deletions(-) diff --git a/BaseTools/Source/Python/Workspace/BuildClassObject.py b/BaseTools/Source/Python/Workspace/BuildClassObject.py index ef873720f455..631e01914019 100644 --- a/BaseTools/Source/Python/Workspace/BuildClassObject.py +++ b/BaseTools/Source/Python/Workspace/BuildClassObject.py @@ -145,11 +145,6 @@ def IsAggregateDatumType(self): return True return False - def IsSimpleTypeArray(self): - if self.IsArray() and self.BaseDatumType in [TAB_UINT8, TAB_UINT16, TAB_UINT32, TAB_UINT64, "BOOLEAN"]: - return True - return False - @staticmethod def GetPcdMaxSizeWorker(PcdString, MaxSize): if PcdString.startswith("{") and PcdString.endswith("}"): @@ -290,7 +285,6 @@ def __init__(self, StructuredPcdIncludeFile=None, Packages=None, Name=None, Guid self.PackageDecs = Packages self.DefaultStoreName = [default_store] self.DefaultValues = OrderedDict() - self.PcdMode = None self.SkuOverrideValues = OrderedDict() self.StructName = None self.PcdDefineLineNo = 0 @@ -334,9 +328,6 @@ def AddComponentOverrideValue(self,FieldName, Value, ModuleGuid, FileName="", Li self.PcdFiledValueFromDscComponent[ModuleGuid][DimensionAttr][FieldName] = [Value.strip(), FileName, LineNo] return self.PcdFiledValueFromDscComponent[ModuleGuid][DimensionAttr][FieldName] - def SetPcdMode (self, PcdMode): - self.PcdMode = PcdMode - def copy(self, PcdObject): self.TokenCName = PcdObject.TokenCName if PcdObject.TokenCName else self.TokenCName 
self.TokenSpaceGuidCName = PcdObject.TokenSpaceGuidCName if PcdObject.TokenSpaceGuidCName else PcdObject.TokenSpaceGuidCName @@ -365,7 +356,6 @@ def copy(self, PcdObject): self.StructuredPcdIncludeFile = PcdObject.StructuredPcdIncludeFile if PcdObject.StructuredPcdIncludeFile else self.StructuredPcdIncludeFile self.PackageDecs = PcdObject.PackageDecs if PcdObject.PackageDecs else self.PackageDecs self.DefaultValues = PcdObject.DefaultValues if PcdObject.DefaultValues else self.DefaultValues - self.PcdMode = PcdObject.PcdMode if PcdObject.PcdMode else self.PcdMode self.DefaultValueFromDec = PcdObject.DefaultValueFromDec if PcdObject.DefaultValueFromDec else self.DefaultValueFromDec self.DefaultValueFromDecInfo = PcdObject.DefaultValueFromDecInfo if PcdObject.DefaultValueFromDecInfo else self.DefaultValueFromDecInfo self.SkuOverrideValues = PcdObject.SkuOverrideValues if PcdObject.SkuOverrideValues else self.SkuOverrideValues @@ -383,7 +373,6 @@ def __deepcopy__(self,memo): new_pcd.DefaultValueFromDec = self.DefaultValueFromDec new_pcd.DefaultValueFromDecInfo = self.DefaultValueFromDecInfo - new_pcd.PcdMode = self.PcdMode new_pcd.StructName = self.DatumType new_pcd.PcdDefineLineNo = self.PcdDefineLineNo new_pcd.PkgPath = self.PkgPath @@ -586,7 +575,6 @@ def __init__(self): # @var PlatformName: To store value for PlatformName # @var Guid: To store value for Guid # @var Version: To store value for Version -# @var DscSpecification: To store value for DscSpecification # @var OutputDirectory: To store value for OutputDirectory # @var FlashDefinition: To store value for FlashDefinition # @var BuildNumber: To store value for BuildNumber @@ -609,7 +597,6 @@ def __init__(self): self.PlatformName = '' self.Guid = '' self.Version = '' - self.DscSpecification = '' self.OutputDirectory = '' self.FlashDefinition = '' self.BuildNumber = '' diff --git a/BaseTools/Source/Python/Workspace/DscBuildData.py b/BaseTools/Source/Python/Workspace/DscBuildData.py index 5df184f9c883..50752acb408d 100644 --- a/BaseTools/Source/Python/Workspace/DscBuildData.py +++ b/BaseTools/Source/Python/Workspace/DscBuildData.py @@ -447,16 +447,6 @@ def Version(self): EdkLogger.error('build', ATTRIBUTE_NOT_AVAILABLE, "No PLATFORM_VERSION", File=self.MetaFile) return self._Version - ## Retrieve platform description file version - @property - def DscSpecification(self): - if self._DscSpecification is None: - if self._Header is None: - self._GetHeaderInfo() - if self._DscSpecification is None: - EdkLogger.error('build', ATTRIBUTE_NOT_AVAILABLE, "No DSC_SPECIFICATION", File=self.MetaFile) - return self._DscSpecification - ## Retrieve OUTPUT_DIRECTORY @property def OutputDirectory(self): @@ -1250,27 +1240,6 @@ def BuildOptions(self): if ' ' + Option not in self._BuildOptions[CurKey]: self._BuildOptions[CurKey] += ' ' + Option return self._BuildOptions - def GetBuildOptionsByPkg(self, Module, ModuleType): - - local_pkg = os.path.split(Module.LocalPkg())[0] - if self._ModuleTypeOptions is None: - self._ModuleTypeOptions = OrderedDict() - if ModuleType not in self._ModuleTypeOptions: - options = OrderedDict() - self._ModuleTypeOptions[ ModuleType] = options - RecordList = self._RawData[MODEL_META_DATA_BUILD_OPTION, self._Arch] - for ToolChainFamily, ToolChain, Option, Dummy1, Dummy2, Dummy3, Dummy4, Dummy5 in RecordList: - if Dummy2 not in (TAB_COMMON,local_pkg.upper(),"EDKII"): - continue - Type = Dummy3 - if Type.upper() == ModuleType.upper(): - Key = (ToolChainFamily, ToolChain) - if Key not in options or not ToolChain.endswith('_FLAGS') 
or Option.startswith('='): - options[Key] = Option - else: - if ' ' + Option not in options[Key]: - options[Key] += ' ' + Option - return self._ModuleTypeOptions[ModuleType] def GetBuildOptionsByModuleType(self, Edk, ModuleType): if self._ModuleTypeOptions is None: self._ModuleTypeOptions = OrderedDict() @@ -2061,13 +2030,6 @@ def cleanupindex(indexstr): indicator += "->" + FieldName return indicator - def GetStarNum(self,Pcd): - if not Pcd.IsArray(): - return 1 - elif Pcd.IsSimpleTypeArray(): - return len(Pcd.Capacity) - else: - return len(Pcd.Capacity) + 1 def GenerateDefaultValueAssignFunction(self, Pcd): CApp = "// Default value in Dec \n" CApp = CApp + "void Assign_%s_%s_Default_Value(%s *Pcd){\n" % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName, Pcd.BaseDatumType) @@ -3603,20 +3565,6 @@ def _GetDynamicVpdPcd(self, Type): list(map(self.FilterSkuSettings, Pcds.values())) return Pcds - ## Add external modules - # - # The external modules are mostly those listed in FDF file, which don't - # need "build". - # - # @param FilePath The path of module description file - # - def AddModule(self, FilePath): - FilePath = NormPath(FilePath) - if FilePath not in self.Modules: - Module = ModuleBuildClassObject() - Module.MetaFile = FilePath - self.Modules.append(Module) - @property def ToolChainFamily(self): self._ToolChainFamily = TAB_COMPILER_MSFT @@ -3638,20 +3586,6 @@ def ToolChainFamily(self): self._ToolChainFamily = ToolDefinition[TAB_TOD_DEFINES_FAMILY][self._Toolchain] return self._ToolChainFamily - ## Add external PCDs - # - # The external PCDs are mostly those listed in FDF file to specify address - # or offset information. - # - # @param Name Name of the PCD - # @param Guid Token space guid of the PCD - # @param Value Value of the PCD - # - def AddPcd(self, Name, Guid, Value): - if (Name, Guid) not in self.Pcds: - self.Pcds[Name, Guid] = PcdClassObject(Name, Guid, '', '', '', '', '', {}, False, None) - self.Pcds[Name, Guid].DefaultValue = Value - @property def DecPcds(self): if self._DecPcds is None: diff --git a/BaseTools/Source/Python/Workspace/InfBuildData.py b/BaseTools/Source/Python/Workspace/InfBuildData.py index 6339e494ca87..fa047a707d94 100644 --- a/BaseTools/Source/Python/Workspace/InfBuildData.py +++ b/BaseTools/Source/Python/Workspace/InfBuildData.py @@ -114,7 +114,6 @@ def __init__(self, FilePath, RawData, BuildDatabase, Arch=TAB_ARCH_COMMON, Targe self._Target = Target self._Toolchain = Toolchain self._Platform = TAB_COMMON - self._TailComments = None self._BaseName = None self._DxsFile = None self._ModuleType = None @@ -123,7 +122,6 @@ def __init__(self, FilePath, RawData, BuildDatabase, Arch=TAB_ARCH_COMMON, Targe self._Guid = None self._Version = None self._PcdIsDriver = None - self._BinaryModule = None self._Shadow = None self._MakefileName = None self._CustomMakefile = None diff --git a/BaseTools/Source/Python/Workspace/MetaDataTable.py b/BaseTools/Source/Python/Workspace/MetaDataTable.py index a20bd147846b..325d01ae04f9 100644 --- a/BaseTools/Source/Python/Workspace/MetaDataTable.py +++ b/BaseTools/Source/Python/Workspace/MetaDataTable.py @@ -110,14 +110,6 @@ def SetEndFlag(self): Tab = self.Db.GetTable(self.Table) Tab.append(self._DUMMY_) - - def IsIntegral(self): - tab = self.Db.GetTable(self.Table) - Id = min([int(item[0]) for item in tab]) - if Id != -1: - return False - return True - def GetAll(self): tab = self.Db.GetTable(self.Table) return tab @@ -209,27 +201,6 @@ def GetFileType(self, FileId): return None return RecordList[0][0] - ## Get file timestamp of a given 
file - # - # @param FileId ID of file - # - # @retval timestamp TimeStamp value of given file in the table - # - def GetFileTimeStamp(self, FileId): - QueryScript = "select TimeStamp from %s where ID = '%s'" % (self.Table, FileId) - RecordList = self.Exec(QueryScript) - if len(RecordList) == 0: - return None - return RecordList[0][0] - - ## Update the timestamp of a given file - # - # @param FileId ID of file - # @param TimeStamp Time stamp of file - # - def SetFileTimeStamp(self, FileId, TimeStamp): - self.Exec("update %s set TimeStamp=%s where ID='%s'" % (self.Table, TimeStamp, FileId)) - ## Get list of file with given type # # @param FileType Type value of file @@ -287,20 +258,3 @@ def InitTable(self): Description = Item[0] self.Insert(CrossIndex, Name, Description) EdkLogger.verbose("Initialize table DataModel ... DONE!") - - ## Get CrossIndex - # - # Get a model's cross index from its name - # - # @param ModelName: Name of the model - # @retval CrossIndex: CrossIndex of the model - # - def GetCrossIndex(self, ModelName): - CrossIndex = -1 - SqlCommand = """select CrossIndex from DataModel where name = '""" + ModelName + """'""" - self.Db.execute(SqlCommand) - for Item in self.Db: - CrossIndex = Item[0] - - return CrossIndex - diff --git a/BaseTools/Source/Python/Workspace/MetaFileParser.py b/BaseTools/Source/Python/Workspace/MetaFileParser.py index 73a1654edb30..bed783591ff5 100644 --- a/BaseTools/Source/Python/Workspace/MetaFileParser.py +++ b/BaseTools/Source/Python/Workspace/MetaFileParser.py @@ -158,7 +158,6 @@ def __init__(self, FilePath, FileType, Arch, Table, Owner= -1, From= -1): self._Arch = Arch self._FileType = FileType self.MetaFile = FilePath - self._FileDir = self.MetaFile.Dir self._Defines = {} self._Packages = [] self._FileLocalMacros = {} @@ -1183,9 +1182,6 @@ def _DefaultStoresParser(self): def _LibraryInstanceParser(self): self._ValueList[0] = self._CurrentLine - - def _DecodeCODEData(self): - pass ## PCD sections parser # # [PcdsFixedAtBuild] @@ -1793,8 +1789,6 @@ def __init__(self, FilePath, FileType, Arch, Table): self._include_flag = False self._package_flag = False - self._RestofValue = "" - ## Parser starter def Start(self): Content = '' diff --git a/BaseTools/Source/Python/Workspace/MetaFileTable.py b/BaseTools/Source/Python/Workspace/MetaFileTable.py index bebf9062e8e5..7ff5f2011d0a 100644 --- a/BaseTools/Source/Python/Workspace/MetaFileTable.py +++ b/BaseTools/Source/Python/Workspace/MetaFileTable.py @@ -28,7 +28,6 @@ def __init__(self, DB, MetaFile, FileType, Temporary, FromItem=None): self.MetaFile = MetaFile self.TableName = "" self.DB = DB - self._NumpyTab = None self.CurrentContent = [] DB.TblFile.append([MetaFile.Name, diff --git a/BaseTools/Source/Python/Workspace/WorkspaceDatabase.py b/BaseTools/Source/Python/Workspace/WorkspaceDatabase.py index d955c78b258f..553b149695c0 100644 --- a/BaseTools/Source/Python/Workspace/WorkspaceDatabase.py +++ b/BaseTools/Source/Python/Workspace/WorkspaceDatabase.py @@ -132,15 +132,6 @@ def CreateBuildObject(self,FilePath, Arch, Target, Toolchain): ) return BuildObject - # placeholder for file format conversion - class TransformObjectFactory: - def __init__(self, WorkspaceDb): - self.WorkspaceDb = WorkspaceDb - - # key = FilePath, Arch - def __getitem__(self, Key): - pass - ## Constructor of WorkspaceDatabase # # @param DbPath Path of database file @@ -156,7 +147,6 @@ def __init__(self): # conversion object for build or file format conversion purpose self.BuildObject = WorkspaceDatabase.BuildObjectFactory(self) - 
self.TransformObject = WorkspaceDatabase.TransformObjectFactory(self) ## Summarize all packages in the database
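
For anyone who wants to reproduce the report quoted in this patch, the scan can be approximated with a small Python wrapper around the vulture command line. This is a minimal sketch under stated assumptions, not the author's exact invocation: it assumes vulture is installed (pip install vulture), that the script is run from BaseTools/Source/Python so the reported paths match the ones quoted in the commit message, and that the target path and the 60% threshold are inferred from the confidence values shown above rather than recorded anywhere in the patch.

# Minimal sketch: approximate the vulture scan behind this patch.
# Assumptions: vulture is installed, and the working directory is
# BaseTools/Source/Python so that findings are printed like the ones
# quoted in the commit message (e.g. Workspace/MetaDataTable.py:114).
import subprocess

TARGET = "Workspace"  # illustrative target; this patch touches the Workspace folder

# vulture prints one finding per line, for example:
#   Workspace/MetaDataTable.py:114: unused method 'IsIntegral' (60% confidence)
result = subprocess.run(
    ["vulture", TARGET, "--min-confidence", "60"],
    capture_output=True,
    text=True,
)
print(result.stdout, end="")

Every finding listed in the report above is flagged at 60% confidence, so passing --min-confidence 60 keeps exactly those findings visible while filtering out anything vulture is even less certain about.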