build system updated

git-svn-id: https://linkchecker.svn.sourceforge.net/svnroot/linkchecker/trunk/linkchecker@991 e7d03fd6-7b0d-0410-9947-9c21f3af8025
This commit is contained in:
calvin 2003-08-11 09:43:47 +00:00
parent 119d8e2ea7
commit 50285ac1d9
4 changed files with 124 additions and 69 deletions

View file

@ -1,2 +0,0 @@
pot-stamp
linkcheck.pot

View file

@ -1,61 +1,27 @@
# we use the scripts in Tools/i18n of the Python 2.3 distribution
PYTHON=python2.2
I18NTOOLS=.
GETTEXT=$(PYTHON) $(I18NTOOLS)/pygettext.py -k i18n._
MSGFMT=$(PYTHON) $(I18NTOOLS)/msgfmt.py
#MSGFMT=msgfmt
MSGMERGE=msgmerge
SOURCES=\
../linkcheck/AnsiColor.py \
../linkcheck/Config.py \
../linkcheck/FileUrlData.py \
../linkcheck/FtpUrlData.py \
../linkcheck/GopherUrlData.py \
../linkcheck/HostCheckingUrlData.py \
../linkcheck/HttpUrlData.py \
../linkcheck/HttpsUrlData.py \
../linkcheck/IgnoredUrlData.py \
../linkcheck/log/__init__.py \
../linkcheck/log/Logger.py \
../linkcheck/log/BlacklistLogger.py \
../linkcheck/log/CSVLogger.py \
../linkcheck/log/ColoredLogger.py \
../linkcheck/log/GMLLogger.py \
../linkcheck/log/HtmlLogger.py \
../linkcheck/log/SQLLogger.py \
../linkcheck/log/StandardLogger.py \
../linkcheck/log/XMLLogger.py \
../linkcheck/MailtoUrlData.py \
../linkcheck/NntpUrlData.py \
../linkcheck/ProxyUrlData.py \
../linkcheck/StringUtil.py \
../linkcheck/TelnetUrlData.py \
../linkcheck/Threader.py \
../linkcheck/UrlData.py \
../linkcheck/__init__.py \
../linkcheck/debug.py \
../linkcheck/i18n.py \
../linkcheck/lc_cgi.py \
../linkcheck/linkname.py \
../linkcheck/linkparse.py \
PYTHON := python2.3
I18NTOOLS := .
GETTEXT := $(PYTHON) $(I18NTOOLS)/pygettext.py -k i18n._
#MSGFMT := $(PYTHON) $(I18NTOOLS)/msgfmt.py
MSGFMT=msgfmt -c
MSGMERGE := msgmerge
SOURCES = $(shell find ../linkcheck -name \*.py) \
../linkchecker
LDIR=../share/locale
LFILE=LC_MESSAGES/$(PACKAGE).mo
PACKAGE=linkcheck
LFILE=LC_MESSAGES/$(PACKAGE).mo
# defined language (add new languages here)
LANGUAGES=de fr nl
MOS=$(patsubst %, %.mo, $(LANGUAGES) )
all: $(MOS)
%.po: pot-stamp
%.po: $(PACKAGE).pot
$(MSGMERGE) $@ $(PACKAGE).pot -o $@
pot-stamp: $(SOURCES)
rm -f $(PACKAGE).pot
$(PACKAGE).pot: $(SOURCES)
$(GETTEXT) --default-domain=$(PACKAGE) --no-location $(SOURCES)
touch pot-stamp
%.mo: %.po
if [ ! -d $(LDIR)/$*/LC_MESSAGES ]; then \
@ -66,7 +32,4 @@ pot-stamp: $(SOURCES)
clean:
for f in $(LANGUAGES); do rm -f $(LDIR)/$$f/$(LFILE); done
distclean: clean
rm -f pot-stamp
.PHONY: all clean distclean
.PHONY: all clean

View file

@ -1,4 +1,4 @@
#!/usr/bin/python
#! /usr/bin/python2.3
# -*- coding: iso-8859-1 -*-
# Written by Martin v. Löwis <loewis@informatik.hu-berlin.de>
@ -82,8 +82,8 @@ def generate():
koffsets += [l1, o1+keystart]
voffsets += [l2, o2+valuestart]
offsets = koffsets + voffsets
output = struct.pack("iiiiiii",
0x950412de, # Magic
output = struct.pack("Iiiiiii",
0x950412deL, # Magic
0, # Version
len(keys), # # of entries
7*4, # start of key index

View file

@ -1,4 +1,4 @@
#!/usr/bin/python
#!/usr/bin/python2.3
# -*- coding: iso-8859-1 -*-
# Originally written by Barry Warsaw <barry@zope.com>
#
@ -12,6 +12,8 @@
# directory (including globbing chars, important for Win32).
# Made docstring fit in 80 chars wide displays using pydoc.
#
# 20030701 calvin@users.sf.net
# added html parser
# for selftesting
try:
@ -155,13 +157,7 @@ Options:
If `inputfile' is -, standard input is read.
""")
import os
import sys
import time
import getopt
import token
import tokenize
import operator
import os, sys, re, time, getopt, token, tokenize, operator, cgi
__version__ = '1.5'
@ -441,6 +437,9 @@ class TokenEater:
self.__curfile = filename
self.__freshmodule = 1
def has_entry (self, msg):
    # Membership test for an already-collected msgid, so the later HTML
    # pass can skip strings the Python tokenizer pass already emitted.
    return self.__messages.has_key(msg)
def write(self, fp):
options = self.__options
timestamp = time.ctime(time.time())
@ -499,6 +498,89 @@ class TokenEater:
print >> fp, 'msgstr ""\n'
from sets import Set
import sgmllib
class HtmlGettext (sgmllib.SGMLParser, object):
    """Collect translatable strings from HTML/SGML input.

    An element carrying an i18n:translate attribute contributes a msgid:
    either its (possibly nested) markup content when the attribute value
    is empty, or a literal "string:..." value.  i18n:attributes arguments
    contribute their expressions as msgids too.  All collected msgids end
    up in self.translations (a Set).
    """
    def __init__ (self, debug=0):
        # debug is accepted but never read here -- presumably kept for
        # parity with other parser constructors; TODO confirm
        super(HtmlGettext, self).__init__()
        # tag name of the currently open i18n:translate element, or None
        self.tag = None
        # set of collected msgid strings
        self.translations = Set()
        # text buffer accumulating the current element's msgid content
        self.data = ""
    def unknown_starttag (self, tag, attributes):
        # SGMLParser delivers attributes as (name, value) pairs
        attrs = {}
        for key,val in attributes:
            attrs[key] = val
        msgid = attrs.get('i18n:translate', None)
        if msgid == '':
            # empty value: the element's rendered content is the msgid
            if self.tag:
                raise Exception, "nested i18n:translate is unsupported"
            self.tag = tag
            self.data = ""
        elif msgid is not None:
            # explicit msgid; only literal "string:..." values supported
            if self.tag:
                raise Exception, "nested i18n:translate is unsupported"
            if msgid.startswith("string:"):
                # strip the "string:" prefix; ";;" escapes a semicolon
                self.translations.add(msgid[7:].replace(';;', ';'))
            else:
                print >>sys.stderr, "tag <%s> has unsupported dynamic msgid %s" % (tag, `msgid`)
        elif self.tag:
            # nested tag to translate: reproduce its markup in the msgid
            self.data += "<%s"%tag
            for key,val in attrs.items():
                self.data += " %s=\"%s\"" % (key, cgi.escape(val, True))
            self.data += ">"
        # i18n:attributes may appear on any tag, translated or not
        argument = attrs.get('i18n:attributes', None)
        if argument is not None:
            for name, msgid in get_attribute_list(argument):
                self.translations.add(msgid)
    def unknown_endtag (self, tag):
        if tag==self.tag:
            # translated element closed: buffered content is the msgid
            self.translations.add(self.data)
            self.tag = None
            self.data = ""
        elif self.tag:
            # closing a nested tag inside a translated element
            self.data += "</%s>"%tag
    def handle_data (self, data):
        # collect character data only while inside i18n:translate
        if self.tag:
            self.data += data
    def handle_charref (self, ref):
        # NOTE(review): unlike handle_data, this appends even when no
        # i18n:translate element is open -- confirm that is intended
        self.data += '&#%s;' % ref
    def handle_entityref (self, ref):
        # NOTE(review): appended unconditionally as well; see handle_charref
        self.data += '&%s;' % ref
def get_attribute_list (argument):
    """Parse an i18n:attributes argument string.

    The argument has the form 'name expression[;name expression]...'
    where a doubled semicolon ';;' stands for a literal ';'.  Returns a
    list of (attribute_name, expression) tuples; a statement without an
    expression is reported on stderr but still emitted with an empty
    expression.
    """
    # split only on semicolons that are not doubled (';;' is an escape)
    splitter = re.compile('(?<!;);(?!;)')
    parsed = []
    for stmt in splitter.split(argument):
        # drop leading whitespace, then un-escape the semicolons
        stmt = stmt.lstrip().replace(';;', ';')
        # first word is the attribute name, the rest is the expression
        parts = stmt.split(' ')
        if len(parts) < 2:
            # malformed statement: warn, but fall through and keep it
            sys.stderr.write("Badly formed attributes command '%s'. Attributes commands must be of the form: 'name expression[;name expression]'" % argument + "\n")
        parsed.append((parts[0], " ".join(parts[1:])))
    return parsed
def main():
global default_keywords
@ -618,6 +700,7 @@ def main():
expanded.extend(getFilesForName(arg))
args = expanded
html_translations = Set()
# slurp through all the files
eater = TokenEater(options)
for filename in args:
@ -629,15 +712,21 @@ def main():
else:
if options.verbose:
print _('Working on %s') % filename
fp = open(filename)
fp = file(filename)
closep = 1
try:
eater.set_filename(filename)
try:
tokenize.tokenize(fp.readline, eater)
except tokenize.TokenError, e:
print >> sys.stderr, '%s: %s, line %d, column %d' % (
e[0], filename, e[1][0], e[1][1])
if filename.endswith('.html'):
p = HtmlGettext()
p.feed(fp.read())
p.close()
html_translations.update(p.translations)
else:
eater.set_filename(filename)
try:
tokenize.tokenize(fp.readline, eater)
except tokenize.TokenError, e:
print >> sys.stderr, '%s: %s, line %d, column %d' % (
e[0], filename, e[1][0], e[1][1])
finally:
if closep:
fp.close()
@ -649,14 +738,19 @@ def main():
else:
if options.outpath:
options.outfile = os.path.join(options.outpath, options.outfile)
fp = open(options.outfile, 'w')
fp = file(options.outfile, 'w')
closep = 1
try:
eater.write(fp)
msgs = [msg for msg in html_translations if not eater.has_entry(msg)]
for msg in msgs:
print >> fp, 'msgid', normalize(msg)
print >> fp, 'msgstr ""\n'
finally:
if closep:
fp.close()
if __name__ == '__main__':
main()