HowTos/NewPacket: More stuff
[senf.git] / senfscons / Doxygen.py
index 8cd35dd..6ef69d1 100644
@@ -18,8 +18,8 @@
 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 
 # The Modifications are Copyright (C) 2006,2007
-# Fraunhofer Institut fuer offene Kommunikationssysteme (FOKUS)
-# Kompetenzzentrum fuer Satelitenkommunikation (SatCom)
+# Fraunhofer Institute for Open Communication Systems (FOKUS) 
+# Competence Center NETwork research (NET), St. Augustin, GERMANY 
 #     Stefan Bund <g0dil@berlios.de>
 
 ## \file
 #
 # You will find all this in the DoxyEmitter
 
-import os, sys, traceback
+import os, sys, traceback, string
 import os.path
 import glob, re
+import SCons.Action
 from fnmatch import fnmatch
 
-EnvVar = re.compile(r"\$\(([0-9A-Za-z_-]+)\)")
+class DoxyfileLexer:
+
+   def __init__(self,stream):
+      self._stream = stream
+      self._buffer = ""
+      self.lineno = 0
+      self._eof = False
+      self._fillbuffer()
+      
+   VARIABLE_RE = re.compile("[@A-Z_]+")
+   OPERATOR_RE = re.compile("\\+?=")
+   VALUE_RE = re.compile("\\S+")
+
+   def _readline(self):
+      if self._eof:
+         self._buffer = ""
+         return
+      self._buffer = self._stream.readline()
+      if not self._buffer:
+         self._eof = True
+         return
+      self._buffer = self._buffer.strip()
+      self.lineno += 1
+
+   def _skip(self, nchars=0):
+      self._buffer = self._buffer[nchars:].strip()
+      while self._buffer[:1] == '\\' and not self.eof():
+         self._readline()
+      if self._buffer[:1] == '#':
+         self._buffer=""
+      
+   def _fillbuffer(self):
+      while not self._buffer and not self.eof():
+         self._readline()
+         self._skip()
+
+   def _token(self, re, read=False):
+      if not self._buffer and read:
+         self._fillbuffer()
+      if not self._buffer:
+         return ""
+      m = re.match(self._buffer)
+      if m:
+         v = self._buffer[:m.end()]
+         self._skip(m.end())
+         return v
+      else:
+         raise ValueError,"Invalid input"
+
+   def var(self): return self._token(self.VARIABLE_RE, True)
+   def op(self): return self._token(self.OPERATOR_RE)
+
+   def next(self):
+      if not self._buffer:
+         raise StopIteration
+      if self._buffer[0] == '"':
+         return self._qstr()
+      m = self.VALUE_RE.match(self._buffer)
+      if m:
+         v = self._buffer[:m.end()]
+         self._skip(m.end())
+         return v
+      else:
+         raise ValueError
+
+   def __iter__(self):
+      return self
+
+   QSKIP_RE = re.compile("[^\\\"]+")
+   
+   def _qstr(self):
+      self._buffer = self._buffer[1:]
+      v = ""
+      while self._buffer:
+          m = self.QSKIP_RE.match(self._buffer)
+          if m:
+             v += self._buffer[:m.end()]
+             self._buffer = self._buffer[m.end():]
+          if self._buffer[:1] == '"':
+             self._skip(1)
+             return v
+          if self._buffer[:1] == '\\' and len(self._buffer)>1:
+             v += self._buffer[1]
+             self._buffer = self._buffer[2:]
+          else:
+             raise ValueError,"Unexpected character in string"
+      raise ValueError,"Unterminated string"
+
+   def eof(self):
+      return self._eof
+
+class DoxyfileParser:
+
+   ENVVAR_RE = re.compile(r"\$\(([0-9A-Za-z_-]+)\)")
+
+   def __init__(self, path, env, include_path=None, items = None):
+      self._env = env
+      self._include_path = include_path or []
+      self._lexer = DoxyfileLexer(file(path))
+      self._dir = os.path.split(path)[0]
+      self._items = items or {}
+
+   def parse(self):
+      while True:
+         var = self._lexer.var()
+         if not var: break
+         op = self._lexer.op()
+         value = [ self._envsub(v) for v in self._lexer ]
+         if not value:
+            raise ValueError,"Missing value in assignment"
+         if var[0] == '@':
+            self._meta(var,op,value)
+         elif op == '=':
+            self._items[var] = value
+         else:
+            self._items.setdefault(var,[]).extend(value)
+
+   def _envsub(self,value):
+      return self.ENVVAR_RE.sub(lambda m, env=self._env : str(env.get(m.group(1),"")), value)
+
+   def _meta(self, cmd, op, value):
+      m = '_'+cmd[1:]
+      try:
+         m = getattr(self,m)
+      except AttributeError:
+         raise ValueError,'Unknown meta command ' + cmd
+      m(op,value)
+
+   def _INCLUDE(self, op, value):
+      if len(value) != 1:
+         raise ValueError,"Invalid argument to @INCLUDE"
+      
+      for d in [ self._dir ] + self._include_path:
+         p = os.path.join(d,value[0])
+         if os.path.exists(p):
+            self._items.setdefault('@INCLUDE',[]).append(p)
+            parser = DoxyfileParser(p, self._env, self._include_path, self._items)
+            parser.parse()
+            return
+
+      raise ValueError,"@INCLUDE file not found"
+
+   def _INCLUDE_PATH(self, op, value):
+      self._include_path.extend(value)
+
+   def items(self):
+      return self._items
 
 def DoxyfileParse(env,file):
+   # We don't parse source files which do not contain the word 'doxyfile'. SCons will
+   # pass other dependencies to DoxyfileParse which are not doxyfiles ... grmpf ...
+   if not 'doxyfile' in file.lower():
+      return {}
    ENV = {}
    ENV.update(env.get("ENV",{}))
    ENV['TOPDIR'] = env.Dir('#').abspath
-   data = DoxyfileParse_(file,{},ENV)
+   parser = DoxyfileParser(file,ENV)
+   try:
+      parser.parse()
+   except ValueError, v:
+      print "WARNING: Error while parsing doxygen configuration '%s': %s" % (str(file),str(v))
+      return {}
+   data = parser.items()
    for k,v in data.items():
       if not v : del data[k]
       elif k in ("INPUT", "FILE_PATTERNS", "EXCLUDE_PATTERNS", "@INCLUDE", "TAGFILES") : continue
       elif len(v)==1 : data[k] = v[0]
    return data
 
-def DoxyfileParse_(file, data, ENV):
-   """
-   Parse a Doxygen source file and return a dictionary of all the values.
-   Values will be strings and lists of strings.
-   """
-   try:
-      dir = os.path.dirname(file)
-
-      import shlex
-      lex = shlex.shlex(instream=open(file), posix=True)
-      lex.wordchars += "*+=./-:@~$()"
-      lex.whitespace = lex.whitespace.replace("\n", "")
-      lex.escape = "\\"
-
-      lineno = lex.lineno
-      token = lex.get_token()
-      key = None
-      last_token = ""
-      key_token = True
-      next_key = False
-      new_data = True
-
-      def append_data(data, key, new_data, token):
-         if new_data or len(data[key]) == 0:
-            data[key].append(token)
-         else:
-            data[key][-1] += token
-
-      while token:
-         if token=='\n':
-            if last_token!='\\':
-               key_token = True
-         elif token=='\\':
-            pass
-         elif key_token:
-            key = token
-            key_token = False
-         else:
-            if token=="+=" or (token=="=" and key=="@INCLUDE"):
-               if not data.has_key(key):
-                  data[key] = []
-            elif token == "=":
-               data[key] = []
-            else:
-               token = EnvVar.sub(lambda m,ENV=ENV: str(ENV.get(m.group(1),"")),token)
-               append_data(data, key, new_data, token)
-               new_data = True
-               if key=='@INCLUDE':
-                  inc = os.path.join(dir,data['@INCLUDE'][-1])
-                  if os.path.exists(inc) :
-                     DoxyfileParse_(inc,data,ENV)
-
-         last_token = token
-         token = lex.get_token()
-
-         if last_token=='\\' and token!='\n':
-            new_data = False
-            append_data(data, key, new_data, '\\')
-
-      return data
-   except:
-      return {}
-
 def DoxySourceScan(node, env, path):
    """
    Doxygen Doxyfile source scanner.  This should scan the Doxygen file and add
@@ -211,10 +305,8 @@ def DoxySourceScan(node, env, path):
          for root, dirs, files in entries:
             for f in files:
                filename = os.path.normpath(os.path.join(root, f))
-               if ( reduce(lambda x, y: x or fnmatch(f, y),
-                           file_patterns, False)
-                    and not reduce(lambda x, y: x or fnmatch(f, y),
-                                   exclude_patterns, False) ):
+               if (         reduce(lambda x, y: x or fnmatch(f, y), file_patterns,    False)
+                    and not reduce(lambda x, y: x or fnmatch(f, y), exclude_patterns, False) ):
                   sources.append(filename)
 
    for key in dep_add_keys:
@@ -249,22 +341,29 @@ def DoxyEmitter(source, target, env):
       out_dir = data["OUTPUT_DIRECTORY"]
       dir = env.Dir( os.path.join(source[0].dir.abspath, out_dir) )
       dir.sources = source
-      if env.GetOption('clean'): targets.append(dir)
+      if env.GetOption('clean'):
+         targets.append(dir)
+         return (targets, source)
    else:
       out_dir = '.'
 
    # add our output locations
+   html_dir = None
    for (k, v) in output_formats.iteritems():
       if data.get("GENERATE_" + k, v[0]).upper() == "YES":
          dir = env.Dir( os.path.join(source[0].dir.abspath, out_dir, data.get(k + "_OUTPUT", v[1])) )
+         if k == "HTML" : html_dir = dir
          dir.sources = source
          node = env.File( os.path.join(dir.abspath, k.lower()+".stamp" ) )
          targets.append(node)
          if env.GetOption('clean'): targets.append(dir)
 
-   if data.has_key("GENERATE_TAGFILE"):
+   if data.has_key("GENERATE_TAGFILE") and html_dir:
       targets.append(env.File( os.path.join(source[0].dir.abspath, data["GENERATE_TAGFILE"]) ))
 
+   if data.get("SEARCHENGINE","NO").upper() == "YES" and html_dir:
+      targets.append(env.File( os.path.join(html_dir.abspath, "search.idx") ))
+
    # don't clobber targets
    for node in targets:
       env.Precious(node)
@@ -295,8 +394,8 @@ def DoxyGenerator(source, target, env, for_signature):
 
    data = DoxyfileParse(env, source[0].abspath)
 
-   actions = [ env.Action("cd ${SOURCE.dir}  && TOPDIR=%s ${DOXYGEN} ${SOURCE.file}"
-                          % (relpath(source[0].dir.abspath, env.Dir('#').abspath),)) ]
+   actions = [ SCons.Action.Action("cd ${SOURCE.dir}  && TOPDIR=%s ${DOXYGEN} ${SOURCE.file}"
+                                   % (relpath(source[0].dir.abspath, env.Dir('#').abspath),)) ]
 
    # This will add automatic 'installdox' calls.
    #
@@ -335,9 +434,9 @@ def DoxyGenerator(source, target, env, for_signature):
          if args is not None and url:
             args.append("-l %s@%s" % ( os.path.basename(tagfile), url ))
       if args:
-         actions.append(env.Action('cd %s && ./installdox %s' % (output_dir, " ".join(args))))
+         actions.append(SCons.Action.Action('cd %s && ./installdox %s' % (output_dir, " ".join(args))))
 
-   actions.append(env.Action([ "touch $TARGETS" ]))
+   actions.append(SCons.Action.Action([ "touch $TARGETS" ]))
 
    return actions
 
@@ -365,7 +464,7 @@ def generate(env):
       'Doxygen': doxyfile_builder,
    })
 
-   env.AppendUnique(
+   env.SetDefault(
       DOXYGEN = 'doxygen',
    )