X-Git-Url: http://g0dil.de/git?a=blobdiff_plain;f=senfscons%2FDoxygen.py;h=e5b7b79d2f153b09551c5ef5f6837aec74804af0;hb=05fab45b5f96c457d3c6b359f66ce1506c6ad510;hp=48cec6e40acde98489ce9330422f96ccef192dc8;hpb=93db392bf3739914fbf297f4aefa396a2d304d92;p=senf.git diff --git a/senfscons/Doxygen.py b/senfscons/Doxygen.py index 48cec6e..e5b7b79 100644 --- a/senfscons/Doxygen.py +++ b/senfscons/Doxygen.py @@ -1,3 +1,5 @@ +# The Doxygen builder is based on the Doxygen builder from: +# # Astxx, the Asterisk C++ API and Utility Library. # Copyright (C) 2005, 2006 Matthew A. Nicholson # Copyright (C) 2006 Tim Blechmann @@ -15,111 +17,281 @@ # License along with this library; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -import os, sys, traceback +# The Modifications are Copyright (C) 2006,2007 +# Fraunhofer Institut fuer offene Kommunikationssysteme (FOKUS) +# Kompetenzzentrum fuer Satelitenkommunikation (SatCom) +# Stefan Bund + +## \file +# \brief Doxygen builder + +## \package senfscons.Doxygen +# \brief Doxygen Documentation Builder +# +# This builder will invoke \c doxygen to build software +# documentation. The doxygen builder only takes the name of the +# doxyfile as it's source file. The builder parses that doxygen +# configuration file. +# +# The builder will automatically find all sources on which the +# documentation depends. This includes +# \li the source code files (as selected by the \c RECURSIVE, \c +# FILE_PATTERNS, \c INPUT and \c EXCLUDE_PATTERNS doxygen +# directives +# \li the \c HTML_HEADER and \c HTML_FOOTER +# \li all referenced \c TAGFILES +# \li the \c INPUT_FILTER +# \li all included doxyfiles (via \c @INCLUDE) +# +# The builder will emit a list of targets built by doxygen. This +# depends on the types of documentation built. +# +# The builder will also generate additional commands to resolve +# cross-references to other module documentations. This is based on +# the \c TAGFILES used. Tagfiles built in the same project in other +# modules are automatically found and the links will be resolved +# correctly. To resolve links from external tagfiles, you may specify +# tagfilename_DOXY_URL as a construction environment +# variable to specify the path to resolve references from the given +# tagfile to. tagfilename is the uppercased basename of the +# tagfile used. +# +# \par Construction Envrionment Variables: +# +# +# +#
+# \li \c DOXYGEN: doxygen command, defaults to \c doxygen
+# \li tag_DOXY_URL: external tagfile resolve URL
+# +# \ingroup builder + +# I (g0dil@berlios.de) have been fighting 4 problems in this +# implementation: +# - A Directory target will *not* call any source scanners +# - A Directory target will interpret the directory contents as +# sources not targets. This means, that if a command creates that +# directory plus contents, the target will never be up-to-date +# (since the directory contents will change with every call of +# scons) +# - Theres a bug in SCons which will produce an error message for +# directory targets if dir.sources is not set explicitly +# - the first argument to env.Clean() must be the command line target, +# with which the scons was invoked. This does not help to add +# aditional files or directories to be cleaned if you don't know +# that target (it's not possible to say 'if you clean this file, +# also clean that one' hich is, what I had expected env.Clean to +# do). +# +# Together, these problems have produced several difficulties. I have +# solved them by +# - Adding an (empty) stamp file as a (file) target. This target will +# cause source scanners to be invoked +# - Adding the documentation directory as a target (so it will be +# cleaned up which env.Clean doesn't help me to do), but *only* if +# scons is called to with the -c option +# - Setting dir.sources to the known source-list to silence the error +# message whenever a directory is added as a target +# +# You will find all this in the DoxyEmitter + +import os, sys, traceback, string import os.path -import glob +import glob, re +import SCons.Action from fnmatch import fnmatch -def DoxyfileParse(file_contents, dir, data = None): - """ - Parse a Doxygen source file and return a dictionary of all the values. - Values will be strings and lists of strings. - """ - try: - if data is None : data = {} - - import shlex - lex = shlex.shlex(instream = file_contents, posix = True) - lex.wordchars += "*+./-:" - lex.whitespace = lex.whitespace.replace("\n", "") - lex.escape = "" - - lineno = lex.lineno - token = lex.get_token() - key = None - last_token = "" - key_token = True - next_key = False - new_data = True - - def append_data(data, key, new_data, token): - if new_data or len(data[key]) == 0: - data[key].append(token) - else: - data[key][-1] += token - - while token: - if token in ['\n']: - if last_token not in ['\\']: - key_token = True - elif token in ['\\']: - pass - elif key_token: - if key == '@' : key += token - else : key = token - if token != '@' : key_token = False +class DoxyfileLexer: + + def __init__(self,stream): + self._stream = stream + self._buffer = "" + self.lineno = 0 + self._eof = False + self._fillbuffer() + + VARIABLE_RE = re.compile("[@A-Z_]+") + OPERATOR_RE = re.compile("\\+?=") + VALUE_RE = re.compile("\\S+") + + def _readline(self): + if self._eof: + self._buffer = "" + return + self._buffer = self._stream.readline() + if not self._buffer: + self._eof = True + return + self._buffer = self._buffer.strip() + self.lineno += 1 + + def _skip(self, nchars=0): + self._buffer = self._buffer[nchars:].strip() + while self._buffer[:1] == '\\' and not self.eof(): + self._readline() + if self._buffer[:1] == '#': + self._buffer="" + + def _fillbuffer(self): + while not self._buffer and not self.eof(): + self._readline() + self._skip() + + def _token(self, re, read=False): + if not self._buffer and read: + self._fillbuffer() + if not self._buffer: + return "" + m = re.match(self._buffer) + if m: + v = self._buffer[:m.end()] + self._skip(m.end()) + return v + else: + raise ValueError,"Invalid input" + + def 
var(self): return self._token(self.VARIABLE_RE, True) + def op(self): return self._token(self.OPERATOR_RE) + + def next(self): + if not self._buffer: + raise StopIteration + if self._buffer[0] == '"': + return self._qstr() + m = self.VALUE_RE.match(self._buffer) + if m: + v = self._buffer[:m.end()] + self._skip(m.end()) + return v + else: + raise ValueError + + def __iter__(self): + return self + + QSKIP_RE = re.compile("[^\\\"]+") + + def _qstr(self): + self._buffer = self._buffer[1:] + v = "" + while self._buffer: + m = self.QSKIP_RE.match(self._buffer) + if m: + v += self._buffer[:m.end()] + self._buffer = self._buffer[m.end():] + if self._buffer[:1] == '"': + self._skip(1) + return v + if self._buffer[:1] == '\\' and len(self._buffer)>1: + v += self._buffer[1] + self._buffer = self._buffer[2:] + else: + raise ValueError,"Unexpected charachter in string" + raise ValueError,"Unterminated string" + + def eof(self): + return self._eof + +class DoxyfileParser: + + ENVVAR_RE = re.compile(r"\$\(([0-9A-Za-z_-]+)\)") + + def __init__(self, path, env, include_path=None, items = None): + self._env = env + self._include_path = include_path or [] + self._lexer = DoxyfileLexer(file(path)) + self._dir = os.path.split(path)[0] + self._items = items or {} + + def parse(self): + while True: + var = self._lexer.var() + if not var: break; + op = self._lexer.op() + value = [ self._envsub(v) for v in self._lexer ] + if not value: + raise ValueError,"Missing value in assignment" + if var[0] == '@': + self._meta(var,op,value) + elif op == '=': + self._items[var] = value else: - if token == "+=" or (token == "=" and key == "@INCLUDE"): - if not data.has_key(key): - data[key] = list() - elif token == "=": - data[key] = list() - else: - append_data( data, key, new_data, token ) - new_data = True - if key == '@INCLUDE': - inc = os.path.join(dir,data['@INCLUDE'][-1]) - if os.path.exists(inc) : - DoxyfileParse(open(inc).read(),dir,data) - - last_token = token - token = lex.get_token() - - if last_token == '\\' and token != '\n': - new_data = False - append_data( data, key, new_data, '\\' ) - - # compress lists of len 1 into single strings - for (k, v) in data.items(): - if len(v) == 0: - data.pop(k) - - # items in the following list will be kept as lists and not converted to strings - if k in ["INPUT", "FILE_PATTERNS", "EXCLUDE_PATTERNS", "@INCLUDE", "TAGFILES"]: - continue - - if len(v) == 1: - data[k] = v[0] - - - return data - except: + self._items.setdefault(var,[]).extend(value) + + def _envsub(self,value): + return self.ENVVAR_RE.sub(lambda m, env=self._env : str(env.get(m.group(1),"")), value) + + def _meta(self, cmd, op, value): + m = '_'+cmd[1:] + try: + m = getattr(self,m) + except AttributeError: + raise ValueError,'Unknown meta command ' + cmd + m(op,value) + + def _INCLUDE(self, op, value): + if len(value) != 1: + raise ValueError,"Invalid argument to @INCLUDE" + + for d in [ self._dir ] + self._include_path: + p = os.path.join(d,value[0]) + if os.path.exists(p): + self._items.setdefault('@INCLDUE',[]).append(p) + parser = DoxyfileParser(p, self._env, self._include_path, self._items) + parser.parse() + return + + raise ValueError,"@INCLUDE file not found" + + def _INCLUDE_PATH(self, op, value): + self._include_path.extend(value) + + def items(self): + return self._items + +def DoxyfileParse(env,file): + # We don't parse source files which do not contain the word 'doxyfile'. SCons will + # pass other dependencies to DoxyfileParse which are not doxyfiles ... grmpf ... 
+ if not 'doxyfile' in file.lower(): return {} + ENV = {} + ENV.update(env.get("ENV",{})) + ENV['TOPDIR'] = env.Dir('#').abspath + parser = DoxyfileParser(file,ENV) + try: + parser.parse() + except ValueError, v: + print "WARNING: Error while parsing doxygen configuration '%s': %s" % (str(file),str(v)) + return {} + data = parser.items() + for k,v in data.items(): + if not v : del data[k] + elif k in ("INPUT", "FILE_PATTERNS", "EXCLUDE_PATTERNS", "@INCLUDE", "TAGFILES") : continue + elif len(v)==1 : data[k] = v[0] + return data def DoxySourceScan(node, env, path): """ Doxygen Doxyfile source scanner. This should scan the Doxygen file and add any files used to generate docs to the list of source files. """ - dep_add_keys = [ - '@INCLUDE', 'HTML_HEADER', 'HTML_FOOTER', 'TAGFILES' - ] - - default_file_patterns = [ + dep_add_keys = ( + '@INCLUDE', 'HTML_HEADER', 'HTML_FOOTER', 'TAGFILES', 'INPUT_FILTER' + ) + + default_file_patterns = ( '*.c', '*.cc', '*.cxx', '*.cpp', '*.c++', '*.java', '*.ii', '*.ixx', '*.ipp', '*.i++', '*.inl', '*.h', '*.hh ', '*.hxx', '*.hpp', '*.h++', '*.idl', '*.odl', '*.cs', '*.php', '*.php3', '*.inc', '*.m', '*.mm', '*.py', - ] + ) - default_exclude_patterns = [ + default_exclude_patterns = ( '*~', - ] + ) sources = [] - basedir = str(node.dir) - data = DoxyfileParse(node.get_contents(), basedir) - recursive = ( data.get("RECURSIVE", "NO") == "YES" ) + basedir = node.dir.abspath + data = DoxyfileParse(env, node.abspath) + recursive = data.get("RECURSIVE", "NO").upper()=="YES" file_patterns = data.get("FILE_PATTERNS", default_file_patterns) exclude_patterns = data.get("EXCLUDE_PATTERNS", default_exclude_patterns) @@ -133,9 +305,9 @@ def DoxySourceScan(node, env, path): for root, dirs, files in entries: for f in files: filename = os.path.normpath(os.path.join(root, f)) - if ( reduce(lambda x, y: x or fnmatch(filename, y), - file_patterns, False) - and not reduce(lambda x, y: x or fnmatch(filename, y), + if ( reduce(lambda x, y: x or fnmatch(f, y), + file_patterns, False) + and not reduce(lambda x, y: x or fnmatch(f, y), exclude_patterns, False) ): sources.append(filename) @@ -157,31 +329,40 @@ def DoxyEmitter(source, target, env): """Doxygen Doxyfile emitter""" # possible output formats and their default values and output locations output_formats = { - "HTML": ("YES", "html"), - "LATEX": ("YES", "latex"), - "RTF": ("NO", "rtf"), - "MAN": ("YES", "man"), - "XML": ("NO", "xml"), + "HTML" : ("YES", "html"), + "LATEX" : ("YES", "latex"), + "RTF" : ("NO", "rtf"), + "MAN" : ("YES", "man"), + "XML" : ("NO", "xml"), } - data = DoxyfileParse(source[0].get_contents(), str(source[0].dir)) + data = DoxyfileParse(env, source[0].abspath) targets = [] - out_dir = data.get("OUTPUT_DIRECTORY", ".") + if data.has_key("OUTPUT_DIRECTORY"): + out_dir = data["OUTPUT_DIRECTORY"] + dir = env.Dir( os.path.join(source[0].dir.abspath, out_dir) ) + dir.sources = source + if env.GetOption('clean'): targets.append(dir) + else: + out_dir = '.' # add our output locations - for (k, v) in output_formats.items(): - if data.get("GENERATE_" + k, v[0]) == "YES": - # Grmpf ... need to use a File object here. The problem is, that - # Dir.scan() is implemented to just return the directory entries - # and does *not* invoke the source-file scanners .. ARGH !! 
- dir = env.Dir( os.path.join(str(source[0].dir), out_dir, data.get(k + "_OUTPUT", v[1])) ) - node = env.File( os.path.join(str(dir), ".stamp" ) ) - env.Clean(node, dir) - targets.append( node ) - - if data.has_key("GENERATE_TAGFILE"): - targets.append(env.File( os.path.join(str(source[0].dir), data["GENERATE_TAGFILE"]) )) + html_dir = None + for (k, v) in output_formats.iteritems(): + if data.get("GENERATE_" + k, v[0]).upper() == "YES": + dir = env.Dir( os.path.join(source[0].dir.abspath, out_dir, data.get(k + "_OUTPUT", v[1])) ) + if k == "HTML" : html_dir = dir + dir.sources = source + node = env.File( os.path.join(dir.abspath, k.lower()+".stamp" ) ) + targets.append(node) + if env.GetOption('clean'): targets.append(dir) + + if data.has_key("GENERATE_TAGFILE") and html_dir: + targets.append(env.File( os.path.join(source[0].dir.abspath, data["GENERATE_TAGFILE"]) )) + + if data.get("SEARCHENGINE","NO").upper() == "YES": + targets.append(env.File( os.path.join(html_dir.abspath, "search.idx") )) # don't clobber targets for node in targets: @@ -189,11 +370,11 @@ def DoxyEmitter(source, target, env): return (targets, source) -def doxyNodeHtmlDir(node): +def doxyNodeHtmlDir(env,node): if not node.sources : return None - data = DoxyfileParse(node.sources[0].get_contents(), str(node.sources[0].dir)) - if data.get("GENERATE_HTML",'YES') != 'YES' : return None - return os.path.normpath(os.path.join( str(node.sources[0].dir), + data = DoxyfileParse(env, node.sources[0].abspath) + if data.get("GENERATE_HTML",'YES').upper() != 'YES' : return None + return os.path.normpath(os.path.join( node.sources[0].dir.abspath, data.get("OUTPUT_DIRECTORY","."), data.get("HTML_OUTPUT","html") )) @@ -211,9 +392,10 @@ def relpath(source, target): def DoxyGenerator(source, target, env, for_signature): - data = DoxyfileParse(source[0].get_contents(), str(source[0].dir)) + data = DoxyfileParse(env, source[0].abspath) - actions = [ env.Action("cd ${SOURCE.dir} && ${DOXYGEN} ${SOURCE.file}") ] + actions = [ SCons.Action.Action("cd ${SOURCE.dir} && TOPDIR=%s ${DOXYGEN} ${SOURCE.file}" + % (relpath(source[0].dir.abspath, env.Dir('#').abspath),)) ] # This will add automatic 'installdox' calls. # @@ -233,9 +415,9 @@ def DoxyGenerator(source, target, env, for_signature): # If for any referenced tagfile no url can be found, 'installdox' # will *not* be called and a warning about the missing url is # generated. 
- - if data.get('GENERATE_HTML','YES') == "YES": - output_dir = os.path.normpath(os.path.join( str(source[0].dir), + + if data.get('GENERATE_HTML','YES').upper() == "YES": + output_dir = os.path.normpath(os.path.join( source[0].dir.abspath, data.get("OUTPUT_DIRECTORY","."), data.get("HTML_OUTPUT","html") )) args = [] @@ -243,24 +425,25 @@ def DoxyGenerator(source, target, env, for_signature): url = env.get(os.path.splitext(os.path.basename(tagfile))[0].upper()+"_DOXY_URL", None) if not url: url = doxyNodeHtmlDir( - env.File(os.path.normpath(os.path.join( str(source[0].dir), tagfile )))) + env, + env.File(os.path.normpath(os.path.join(str(source[0].dir), tagfile)))) if url : url = relpath(output_dir, url) if not url: - print "WARNING:",str(node.sources[0]),": missing tagfile url for",tagfile + print "WARNING:",source[0].abspath, ": missing tagfile url for", tagfile args = None if args is not None and url: args.append("-l %s@%s" % ( os.path.basename(tagfile), url )) if args: - actions.append(env.Action('cd %s && ./installdox %s' % (output_dir, " ".join(args)))) - - actions.append(env.Action([ "touch $TARGETS" ])) + actions.append(SCons.Action.Action('cd %s && ./installdox %s' % (output_dir, " ".join(args)))) + + actions.append(SCons.Action.Action([ "touch $TARGETS" ])) return actions def generate(env): """ Add builders and construction variables for the - Doxygen tool. This is currently for Doxygen 1.4.6. + Doxygen tool. This is currently for Doxygen 1.4.6. """ doxyfile_scanner = env.Scanner( DoxySourceScan,