##// END OF EJS Templates
move % out of translatable strings...
Martin Geisler -
r6913:580d5e6b default
parent child Browse files
Show More
@@ -1,337 +1,337 b''
1 # convcmd - convert extension commands definition
1 # convcmd - convert extension commands definition
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from common import NoRepo, MissingTool, SKIPREV, mapfile
8 from common import NoRepo, MissingTool, SKIPREV, mapfile
9 from cvs import convert_cvs
9 from cvs import convert_cvs
10 from darcs import darcs_source
10 from darcs import darcs_source
11 from git import convert_git
11 from git import convert_git
12 from hg import mercurial_source, mercurial_sink
12 from hg import mercurial_source, mercurial_sink
13 from subversion import debugsvnlog, svn_source, svn_sink
13 from subversion import debugsvnlog, svn_source, svn_sink
14 from monotone import monotone_source
14 from monotone import monotone_source
15 from gnuarch import gnuarch_source
15 from gnuarch import gnuarch_source
16 import filemap
16 import filemap
17
17
18 import os, shutil
18 import os, shutil
19 from mercurial import hg, util
19 from mercurial import hg, util
20 from mercurial.i18n import _
20 from mercurial.i18n import _
21
21
# Encoding of the user's environment; convert() records the real value
# before forcing util._encoding to UTF-8 for the duration of a conversion.
orig_encoding = 'ascii'
23
23
def recode(s):
    """Re-encode s (unicode or UTF-8 bytes) into the original local
    encoding, replacing characters it cannot represent."""
    if not isinstance(s, unicode):
        s = s.decode('utf-8')
    return s.encode(orig_encoding, 'replace')
29
29
# Source backends, tried in this order by convertsource() when no
# explicit source type is requested.
source_converters = [
    ('cvs', convert_cvs),
    ('git', convert_git),
    ('svn', svn_source),
    ('hg', mercurial_source),
    ('darcs', darcs_source),
    ('mtn', monotone_source),
    ('gnuarch', gnuarch_source),
]

# Backends that can act as a conversion target.
sink_converters = [
    ('hg', mercurial_sink),
    ('svn', svn_sink),
]
44
44
45 def convertsource(ui, path, type, rev):
45 def convertsource(ui, path, type, rev):
46 exceptions = []
46 exceptions = []
47 for name, source in source_converters:
47 for name, source in source_converters:
48 try:
48 try:
49 if not type or name == type:
49 if not type or name == type:
50 return source(ui, path, rev)
50 return source(ui, path, rev)
51 except (NoRepo, MissingTool), inst:
51 except (NoRepo, MissingTool), inst:
52 exceptions.append(inst)
52 exceptions.append(inst)
53 if not ui.quiet:
53 if not ui.quiet:
54 for inst in exceptions:
54 for inst in exceptions:
55 ui.write(_("%s\n") % inst)
55 ui.write("%s\n" % inst)
56 raise util.Abort('%s: unknown repository type' % path)
56 raise util.Abort(_('%s: unknown repository type') % path)
57
57
58 def convertsink(ui, path, type):
58 def convertsink(ui, path, type):
59 for name, sink in sink_converters:
59 for name, sink in sink_converters:
60 try:
60 try:
61 if not type or name == type:
61 if not type or name == type:
62 return sink(ui, path)
62 return sink(ui, path)
63 except NoRepo, inst:
63 except NoRepo, inst:
64 ui.note(_("convert: %s\n") % inst)
64 ui.note(_("convert: %s\n") % inst)
65 raise util.Abort('%s: unknown repository type' % path)
65 raise util.Abort(_('%s: unknown repository type') % path)
66
66
class converter(object):
    """Drive the conversion of every changeset from a source repository
    into a destination repository, maintaining the revision map."""

    def __init__(self, ui, source, dest, revmapfile, opts):

        self.source = source
        self.dest = dest
        self.ui = ui
        self.opts = opts
        self.commitcache = {}
        self.authors = {}
        self.authorfile = None

        # Mapping of source revision -> converted destination revision.
        self.map = mapfile(ui, revmapfile)

        # Read first the dst author map if any
        authorfile = self.dest.authorfile()
        if authorfile and os.path.exists(authorfile):
            self.readauthormap(authorfile)
        # Extend/Override with new author map if necessary
        if opts.get('authors'):
            self.readauthormap(opts.get('authors'))
            self.authorfile = self.dest.authorfile()

        self.splicemap = mapfile(ui, opts.get('splicemap'))

    def walktree(self, heads):
        '''Return a mapping that identifies the uncommitted parents of every
        uncommitted changeset.'''
        # Work on a copy: the original popped from the caller's list,
        # emptying e.g. a source's own parents structure as a side effect.
        visit = heads[:]
        known = {}
        parents = {}
        while visit:
            n = visit.pop(0)
            if n in known or n in self.map:
                continue
            known[n] = 1
            commit = self.cachecommit(n)
            parents[n] = []
            for p in commit.parents:
                parents[n].append(p)
                visit.append(p)

        return parents

    def toposort(self, parents):
        '''Return an ordering such that every uncommitted changeset is
        preceded by all its uncommitted ancestors.'''
        visit = list(parents)
        seen = {}
        children = {}
        actives = []

        while visit:
            n = visit.pop(0)
            if n in seen:
                continue
            seen[n] = 1
            # Ensure that nodes without parents are present in the
            # 'children' mapping.
            children.setdefault(n, [])
            hasparent = False
            for p in parents[n]:
                if p not in self.map:
                    visit.append(p)
                    hasparent = True
                children.setdefault(p, []).append(n)
            if not hasparent:
                actives.append(n)

        del seen
        del visit

        if self.opts.get('datesort'):
            dates = {}
            def getdate(n):
                # Cache parsed dates; parsing is not free.
                if n not in dates:
                    dates[n] = util.parsedate(self.commitcache[n].date)
                return dates[n]

            def picknext(nodes):
                return min([(getdate(n), n) for n in nodes])[1]
        else:
            prev = [None]
            def picknext(nodes):
                # Return the first eligible child of the previously
                # converted revision, or any of them.
                next = nodes[0]
                for n in nodes:
                    if prev[0] in parents[n]:
                        next = n
                        break
                prev[0] = next
                return next

        s = []
        pendings = {}
        while actives:
            n = picknext(actives)
            actives.remove(n)
            s.append(n)

            # Update dependents list
            for c in children.get(n, []):
                if c not in pendings:
                    pendings[c] = [p for p in parents[c]
                                   if p not in self.map]
                try:
                    pendings[c].remove(n)
                except ValueError:
                    raise util.Abort(_('cycle detected between %s and %s')
                                       % (recode(c), recode(n)))
                if not pendings[c]:
                    # Parents are converted, node is eligible
                    actives.insert(0, c)
                    pendings[c] = None

        if len(s) != len(parents):
            raise util.Abort(_("not all revisions were sorted"))

        return s

    def writeauthormap(self):
        """Persist the accumulated author map to the sink's author file."""
        authorfile = self.authorfile
        if authorfile:
            self.ui.status('Writing author map file %s\n' % authorfile)
            ofile = open(authorfile, 'w+')
            for author in self.authors:
                ofile.write("%s=%s\n" % (author, self.authors[author]))
            ofile.close()

    def readauthormap(self, authorfile):
        """Load 'src=dst' author mappings from authorfile into
        self.authors; malformed lines are reported and skipped."""
        afile = open(authorfile, 'r')
        for line in afile:
            if line.strip() == '':
                continue
            try:
                srcauthor, dstauthor = line.split('=', 1)
                srcauthor = srcauthor.strip()
                dstauthor = dstauthor.strip()
                if srcauthor in self.authors and dstauthor != self.authors[srcauthor]:
                    self.ui.status(
                        'Overriding mapping for author %s, was %s, will be %s\n'
                        % (srcauthor, self.authors[srcauthor], dstauthor))
                else:
                    self.ui.debug('Mapping author %s to %s\n'
                                  % (srcauthor, dstauthor))
                self.authors[srcauthor] = dstauthor
            except ValueError:
                # A line with no '=' makes split() return one element, so
                # the unpacking above raises ValueError.  (The original
                # caught IndexError, which never fires here, so bad lines
                # crashed the conversion instead of being skipped.)
                self.ui.warn(
                    'Ignoring bad line in author map file %s: %s\n'
                    % (authorfile, line.rstrip()))
        afile.close()

    def cachecommit(self, rev):
        """Fetch rev's commit object, apply the author map, cache it."""
        commit = self.source.getcommit(rev)
        commit.author = self.authors.get(commit.author, commit.author)
        self.commitcache[rev] = commit
        return commit

    def copy(self, rev):
        """Convert a single revision rev and record the resulting
        destination revision in the map."""
        commit = self.commitcache[rev]

        changes = self.source.getchanges(rev)
        if isinstance(changes, basestring):
            # A string result means "reuse this other revision's mapping"
            # (or skip entirely when it is the SKIPREV sentinel).
            if changes == SKIPREV:
                dest = SKIPREV
            else:
                dest = self.map[changes]
            self.map[rev] = dest
            return
        files, copies = changes
        pbranches = []
        if commit.parents:
            for prev in commit.parents:
                if prev not in self.commitcache:
                    self.cachecommit(prev)
                pbranches.append((self.map[prev],
                                  self.commitcache[prev].branch))
        self.dest.setbranch(commit.branch, pbranches)
        try:
            # Splicemap entries override the computed parents.
            parents = self.splicemap[rev].replace(',', ' ').split()
            self.ui.status('spliced in %s as parents of %s\n' %
                           (parents, rev))
            parents = [self.map.get(p, p) for p in parents]
        except KeyError:
            parents = [b[0] for b in pbranches]
        newnode = self.dest.putcommit(files, copies, parents, commit, self.source)
        self.source.converted(rev, newnode)
        self.map[rev] = newnode

    def convert(self):
        """Run the full conversion: scan, sort, convert each revision,
        then copy tags and write the author map."""
        try:
            self.source.before()
            self.dest.before()
            self.source.setrevmap(self.map)
            self.ui.status("scanning source...\n")
            heads = self.source.getheads()
            parents = self.walktree(heads)
            self.ui.status("sorting...\n")
            t = self.toposort(parents)
            num = len(t)
            c = None

            self.ui.status("converting...\n")
            for c in t:
                num -= 1
                desc = self.commitcache[c].desc
                if "\n" in desc:
                    desc = desc.splitlines()[0]
                # Convert the log message to the local encoding without
                # using tolocal(): util._encoding is forced to 'UTF-8'
                # for the whole conversion (see convert() below).
                self.ui.status("%d %s\n" % (num, recode(desc)))
                self.ui.note(_("source: %s\n") % recode(c))
                self.copy(c)

            tags = self.source.gettags()
            ctags = {}
            for k in tags:
                v = tags[k]
                # Only keep tags whose target was actually converted.
                if self.map.get(v, SKIPREV) != SKIPREV:
                    ctags[k] = self.map[v]

            if c and ctags:
                nrev = self.dest.puttags(ctags)
                # write another hash correspondence to override the
                # previous one so we don't end up with extra tag heads
                if nrev:
                    self.map[c] = nrev

            self.writeauthormap()
        finally:
            self.cleanup()

    def cleanup(self):
        """Tear down sink then source, then flush the revision map."""
        try:
            self.dest.after()
        finally:
            self.source.after()
        self.map.close()
304
304
def convert(ui, src, dest=None, revmapfile=None, **opts):
    """Command entry point: convert repository src into dest."""
    global orig_encoding
    # Remember the user's encoding, then force UTF-8 while converting.
    orig_encoding = util._encoding
    util._encoding = 'UTF-8'

    if not dest:
        dest = hg.defaultdest(src) + "-hg"
        ui.status("assuming destination %s\n" % dest)

    destc = convertsink(ui, dest, opts.get('dest_type'))

    try:
        srcc = convertsource(ui, src, opts.get('source_type'),
                             opts.get('rev'))
    except Exception:
        # The sink may already have created directories; remove them
        # before propagating the failure.
        for path in destc.created:
            shutil.rmtree(path, True)
        raise

    fmap = opts.get('filemap')
    if fmap:
        srcc = filemap.filemap_source(ui, srcc, fmap)
        destc.setfilemapmode(True)

    if not revmapfile:
        try:
            revmapfile = destc.revmapfile()
        except:
            revmapfile = os.path.join(destc, "map")

    conv = converter(ui, srcc, destc, revmapfile, opts)
    conv.convert()
337
337
@@ -1,299 +1,299 b''
1 # GNU Arch support for the convert extension
1 # GNU Arch support for the convert extension
2
2
3 from common import NoRepo, commandline, commit, converter_source
3 from common import NoRepo, commandline, commit, converter_source
4 from mercurial.i18n import _
4 from mercurial.i18n import _
5 from mercurial import util
5 from mercurial import util
6 import os, shutil, tempfile, stat
6 import os, shutil, tempfile, stat
7
7
8 class gnuarch_source(converter_source, commandline):
8 class gnuarch_source(converter_source, commandline):
9
9
10 class gnuarch_rev:
10 class gnuarch_rev:
11 def __init__(self, rev):
11 def __init__(self, rev):
12 self.rev = rev
12 self.rev = rev
13 self.summary = ''
13 self.summary = ''
14 self.date = None
14 self.date = None
15 self.author = ''
15 self.author = ''
16 self.add_files = []
16 self.add_files = []
17 self.mod_files = []
17 self.mod_files = []
18 self.del_files = []
18 self.del_files = []
19 self.ren_files = {}
19 self.ren_files = {}
20 self.ren_dirs = {}
20 self.ren_dirs = {}
21
21
22 def __init__(self, ui, path, rev=None):
22 def __init__(self, ui, path, rev=None):
23 super(gnuarch_source, self).__init__(ui, path, rev=rev)
23 super(gnuarch_source, self).__init__(ui, path, rev=rev)
24
24
25 if not os.path.exists(os.path.join(path, '{arch}')):
25 if not os.path.exists(os.path.join(path, '{arch}')):
26 raise NoRepo(_("%s does not look like a GNU Arch repo" % path))
26 raise NoRepo(_("%s does not look like a GNU Arch repo") % path)
27
27
28 # Could use checktool, but we want to check for baz or tla.
28 # Could use checktool, but we want to check for baz or tla.
29 self.execmd = None
29 self.execmd = None
30 if util.find_exe('baz'):
30 if util.find_exe('baz'):
31 self.execmd = 'baz'
31 self.execmd = 'baz'
32 else:
32 else:
33 if util.find_exe('tla'):
33 if util.find_exe('tla'):
34 self.execmd = 'tla'
34 self.execmd = 'tla'
35 else:
35 else:
36 raise util.Abort(_('cannot find a GNU Arch tool'))
36 raise util.Abort(_('cannot find a GNU Arch tool'))
37
37
38 commandline.__init__(self, ui, self.execmd)
38 commandline.__init__(self, ui, self.execmd)
39
39
40 self.path = os.path.realpath(path)
40 self.path = os.path.realpath(path)
41 self.tmppath = None
41 self.tmppath = None
42
42
43 self.treeversion = None
43 self.treeversion = None
44 self.lastrev = None
44 self.lastrev = None
45 self.changes = {}
45 self.changes = {}
46 self.parents = {}
46 self.parents = {}
47 self.tags = {}
47 self.tags = {}
48 self.modecache = {}
48 self.modecache = {}
49
49
50 def before(self):
50 def before(self):
51 if self.execmd == 'tla':
51 if self.execmd == 'tla':
52 output = self.run0('tree-version', self.path)
52 output = self.run0('tree-version', self.path)
53 else:
53 else:
54 output = self.run0('tree-version', '-d', self.path)
54 output = self.run0('tree-version', '-d', self.path)
55 self.treeversion = output.strip()
55 self.treeversion = output.strip()
56
56
57 self.ui.status(_('analyzing tree version %s...\n' % self.treeversion))
57 self.ui.status(_('analyzing tree version %s...\n') % self.treeversion)
58
58
59 # Get name of temporary directory
59 # Get name of temporary directory
60 version = self.treeversion.split('/')
60 version = self.treeversion.split('/')
61 self.tmppath = os.path.join(tempfile.gettempdir(),
61 self.tmppath = os.path.join(tempfile.gettempdir(),
62 'hg-%s' % version[1])
62 'hg-%s' % version[1])
63
63
64 # Generate parents dictionary
64 # Generate parents dictionary
65 child = []
65 child = []
66 output, status = self.runlines('revisions', self.treeversion)
66 output, status = self.runlines('revisions', self.treeversion)
67 self.checkexit(status, 'archive registered?')
67 self.checkexit(status, 'archive registered?')
68 for l in output:
68 for l in output:
69 rev = l.strip()
69 rev = l.strip()
70 self.changes[rev] = self.gnuarch_rev(rev)
70 self.changes[rev] = self.gnuarch_rev(rev)
71
71
72 # Read author, date and summary
72 # Read author, date and summary
73 catlog = self.runlines0('cat-log', '-d', self.path, rev)
73 catlog = self.runlines0('cat-log', '-d', self.path, rev)
74 self._parsecatlog(catlog, rev)
74 self._parsecatlog(catlog, rev)
75
75
76 self.parents[rev] = child
76 self.parents[rev] = child
77 child = [rev]
77 child = [rev]
78 if rev == self.rev:
78 if rev == self.rev:
79 break
79 break
80 self.parents[None] = child
80 self.parents[None] = child
81
81
82 def after(self):
82 def after(self):
83 self.ui.debug(_('cleaning up %s\n' % self.tmppath))
83 self.ui.debug(_('cleaning up %s\n') % self.tmppath)
84 shutil.rmtree(self.tmppath, ignore_errors=True)
84 shutil.rmtree(self.tmppath, ignore_errors=True)
85
85
86 def getheads(self):
86 def getheads(self):
87 return self.parents[None]
87 return self.parents[None]
88
88
89 def getfile(self, name, rev):
89 def getfile(self, name, rev):
90 if rev != self.lastrev:
90 if rev != self.lastrev:
91 raise util.Abort(_('internal calling inconsistency'))
91 raise util.Abort(_('internal calling inconsistency'))
92
92
93 # Raise IOError if necessary (i.e. deleted files).
93 # Raise IOError if necessary (i.e. deleted files).
94 if not os.path.exists(os.path.join(self.tmppath, name)):
94 if not os.path.exists(os.path.join(self.tmppath, name)):
95 raise IOError
95 raise IOError
96
96
97 data, mode = self._getfile(name, rev)
97 data, mode = self._getfile(name, rev)
98 self.modecache[(name, rev)] = mode
98 self.modecache[(name, rev)] = mode
99
99
100 return data
100 return data
101
101
102 def getmode(self, name, rev):
102 def getmode(self, name, rev):
103 return self.modecache[(name, rev)]
103 return self.modecache[(name, rev)]
104
104
105 def getchanges(self, rev):
105 def getchanges(self, rev):
106 self.modecache = {}
106 self.modecache = {}
107 self._update(rev)
107 self._update(rev)
108 changes = []
108 changes = []
109 copies = {}
109 copies = {}
110
110
111 for f in self.changes[rev].add_files:
111 for f in self.changes[rev].add_files:
112 changes.append((f, rev))
112 changes.append((f, rev))
113
113
114 for f in self.changes[rev].mod_files:
114 for f in self.changes[rev].mod_files:
115 changes.append((f, rev))
115 changes.append((f, rev))
116
116
117 for f in self.changes[rev].del_files:
117 for f in self.changes[rev].del_files:
118 changes.append((f, rev))
118 changes.append((f, rev))
119
119
120 for src in self.changes[rev].ren_files:
120 for src in self.changes[rev].ren_files:
121 to = self.changes[rev].ren_files[src]
121 to = self.changes[rev].ren_files[src]
122 changes.append((src, rev))
122 changes.append((src, rev))
123 changes.append((to, rev))
123 changes.append((to, rev))
124 copies[src] = to
124 copies[src] = to
125
125
126 for src in self.changes[rev].ren_dirs:
126 for src in self.changes[rev].ren_dirs:
127 to = self.changes[rev].ren_dirs[src]
127 to = self.changes[rev].ren_dirs[src]
128 chgs, cps = self._rendirchanges(src, to);
128 chgs, cps = self._rendirchanges(src, to);
129 changes += [(f, rev) for f in chgs]
129 changes += [(f, rev) for f in chgs]
130 for c in cps:
130 for c in cps:
131 copies[c] = cps[c]
131 copies[c] = cps[c]
132
132
133 self.lastrev = rev
133 self.lastrev = rev
134 return util.sort(changes), copies
134 return util.sort(changes), copies
135
135
136 def getcommit(self, rev):
136 def getcommit(self, rev):
137 changes = self.changes[rev]
137 changes = self.changes[rev]
138 return commit(author = changes.author, date = changes.date,
138 return commit(author = changes.author, date = changes.date,
139 desc = changes.summary, parents = self.parents[rev])
139 desc = changes.summary, parents = self.parents[rev])
140
140
141 def gettags(self):
141 def gettags(self):
142 return self.tags
142 return self.tags
143
143
144 def _execute(self, cmd, *args, **kwargs):
144 def _execute(self, cmd, *args, **kwargs):
145 cmdline = [self.execmd, cmd]
145 cmdline = [self.execmd, cmd]
146 cmdline += args
146 cmdline += args
147 cmdline = [util.shellquote(arg) for arg in cmdline]
147 cmdline = [util.shellquote(arg) for arg in cmdline]
148 cmdline += ['>', util.nulldev, '2>', util.nulldev]
148 cmdline += ['>', util.nulldev, '2>', util.nulldev]
149 cmdline = util.quotecommand(' '.join(cmdline))
149 cmdline = util.quotecommand(' '.join(cmdline))
150 self.ui.debug(cmdline, '\n')
150 self.ui.debug(cmdline, '\n')
151 return os.system(cmdline)
151 return os.system(cmdline)
152
152
153 def _update(self, rev):
153 def _update(self, rev):
154 if rev == 'base-0':
154 if rev == 'base-0':
155 # Initialise 'base-0' revision
155 # Initialise 'base-0' revision
156 self._obtainrevision(rev)
156 self._obtainrevision(rev)
157 else:
157 else:
158 self.ui.debug(_('applying revision %s...\n' % rev))
158 self.ui.debug(_('applying revision %s...\n') % rev)
159 revision = '%s--%s' % (self.treeversion, rev)
159 revision = '%s--%s' % (self.treeversion, rev)
160 changeset, status = self.runlines('replay', '-d', self.tmppath,
160 changeset, status = self.runlines('replay', '-d', self.tmppath,
161 revision)
161 revision)
162 if status:
162 if status:
163 # Something went wrong while merging (baz or tla
163 # Something went wrong while merging (baz or tla
164 # issue?), get latest revision and try from there
164 # issue?), get latest revision and try from there
165 shutil.rmtree(self.tmppath, ignore_errors=True)
165 shutil.rmtree(self.tmppath, ignore_errors=True)
166 self._obtainrevision(rev)
166 self._obtainrevision(rev)
167 else:
167 else:
168 old_rev = self.parents[rev][0]
168 old_rev = self.parents[rev][0]
169 self.ui.debug(_('computing changeset between %s and %s...\n' \
169 self.ui.debug(_('computing changeset between %s and %s...\n')
170 % (old_rev, rev)))
170 % (old_rev, rev))
171 rev_a = '%s--%s' % (self.treeversion, old_rev)
171 rev_a = '%s--%s' % (self.treeversion, old_rev)
172 rev_b = '%s--%s' % (self.treeversion, rev)
172 rev_b = '%s--%s' % (self.treeversion, rev)
173 self._parsechangeset(changeset, rev)
173 self._parsechangeset(changeset, rev)
174
174
175 def _getfile(self, name, rev):
175 def _getfile(self, name, rev):
176 mode = os.lstat(os.path.join(self.tmppath, name)).st_mode
176 mode = os.lstat(os.path.join(self.tmppath, name)).st_mode
177 if stat.S_ISLNK(mode):
177 if stat.S_ISLNK(mode):
178 data = os.readlink(os.path.join(self.tmppath, name))
178 data = os.readlink(os.path.join(self.tmppath, name))
179 mode = mode and 'l' or ''
179 mode = mode and 'l' or ''
180 else:
180 else:
181 data = open(os.path.join(self.tmppath, name), 'rb').read()
181 data = open(os.path.join(self.tmppath, name), 'rb').read()
182 mode = (mode & 0111) and 'x' or ''
182 mode = (mode & 0111) and 'x' or ''
183 return data, mode
183 return data, mode
184
184
185 def _exclude(self, name):
185 def _exclude(self, name):
186 exclude = [ '{arch}', '.arch-ids', '.arch-inventory' ]
186 exclude = [ '{arch}', '.arch-ids', '.arch-inventory' ]
187 for exc in exclude:
187 for exc in exclude:
188 if name.find(exc) != -1:
188 if name.find(exc) != -1:
189 return True
189 return True
190 return False
190 return False
191
191
192 def _readcontents(self, path):
192 def _readcontents(self, path):
193 files = []
193 files = []
194 contents = os.listdir(path)
194 contents = os.listdir(path)
195 while len(contents) > 0:
195 while len(contents) > 0:
196 c = contents.pop()
196 c = contents.pop()
197 p = os.path.join(path, c)
197 p = os.path.join(path, c)
198 # os.walk could be used, but here we avoid internal GNU
198 # os.walk could be used, but here we avoid internal GNU
199 # Arch files and directories, thus saving a lot time.
199 # Arch files and directories, thus saving a lot time.
200 if not self._exclude(p):
200 if not self._exclude(p):
201 if os.path.isdir(p):
201 if os.path.isdir(p):
202 contents += [os.path.join(c, f) for f in os.listdir(p)]
202 contents += [os.path.join(c, f) for f in os.listdir(p)]
203 else:
203 else:
204 files.append(c)
204 files.append(c)
205 return files
205 return files
206
206
207 def _rendirchanges(self, src, dest):
207 def _rendirchanges(self, src, dest):
208 changes = []
208 changes = []
209 copies = {}
209 copies = {}
210 files = self._readcontents(os.path.join(self.tmppath, dest))
210 files = self._readcontents(os.path.join(self.tmppath, dest))
211 for f in files:
211 for f in files:
212 s = os.path.join(src, f)
212 s = os.path.join(src, f)
213 d = os.path.join(dest, f)
213 d = os.path.join(dest, f)
214 changes.append(s)
214 changes.append(s)
215 changes.append(d)
215 changes.append(d)
216 copies[s] = d
216 copies[s] = d
217 return changes, copies
217 return changes, copies
218
218
219 def _obtainrevision(self, rev):
219 def _obtainrevision(self, rev):
220 self.ui.debug(_('obtaining revision %s...\n' % rev))
220 self.ui.debug(_('obtaining revision %s...\n') % rev)
221 revision = '%s--%s' % (self.treeversion, rev)
221 revision = '%s--%s' % (self.treeversion, rev)
222 output = self._execute('get', revision, self.tmppath)
222 output = self._execute('get', revision, self.tmppath)
223 self.checkexit(output)
223 self.checkexit(output)
224 self.ui.debug(_('analysing revision %s...\n' % rev))
224 self.ui.debug(_('analysing revision %s...\n') % rev)
225 files = self._readcontents(self.tmppath)
225 files = self._readcontents(self.tmppath)
226 self.changes[rev].add_files += files
226 self.changes[rev].add_files += files
227
227
228 def _stripbasepath(self, path):
228 def _stripbasepath(self, path):
229 if path.startswith('./'):
229 if path.startswith('./'):
230 return path[2:]
230 return path[2:]
231 return path
231 return path
232
232
233 def _parsecatlog(self, data, rev):
233 def _parsecatlog(self, data, rev):
234 summary = []
234 summary = []
235 for l in data:
235 for l in data:
236 l = l.strip()
236 l = l.strip()
237 if summary:
237 if summary:
238 summary.append(l)
238 summary.append(l)
239 elif l.startswith('Summary:'):
239 elif l.startswith('Summary:'):
240 summary.append(l[len('Summary: '):])
240 summary.append(l[len('Summary: '):])
241 elif l.startswith('Standard-date:'):
241 elif l.startswith('Standard-date:'):
242 date = l[len('Standard-date: '):]
242 date = l[len('Standard-date: '):]
243 strdate = util.strdate(date, '%Y-%m-%d %H:%M:%S')
243 strdate = util.strdate(date, '%Y-%m-%d %H:%M:%S')
244 self.changes[rev].date = util.datestr(strdate)
244 self.changes[rev].date = util.datestr(strdate)
245 elif l.startswith('Creator:'):
245 elif l.startswith('Creator:'):
246 self.changes[rev].author = l[len('Creator: '):]
246 self.changes[rev].author = l[len('Creator: '):]
247 self.changes[rev].summary = '\n'.join(summary)
247 self.changes[rev].summary = '\n'.join(summary)
248
248
249 def _parsechangeset(self, data, rev):
249 def _parsechangeset(self, data, rev):
250 for l in data:
250 for l in data:
251 l = l.strip()
251 l = l.strip()
252 # Added file (ignore added directory)
252 # Added file (ignore added directory)
253 if l.startswith('A') and not l.startswith('A/'):
253 if l.startswith('A') and not l.startswith('A/'):
254 file = self._stripbasepath(l[1:].strip())
254 file = self._stripbasepath(l[1:].strip())
255 if not self._exclude(file):
255 if not self._exclude(file):
256 self.changes[rev].add_files.append(file)
256 self.changes[rev].add_files.append(file)
257 # Deleted file (ignore deleted directory)
257 # Deleted file (ignore deleted directory)
258 elif l.startswith('D') and not l.startswith('D/'):
258 elif l.startswith('D') and not l.startswith('D/'):
259 file = self._stripbasepath(l[1:].strip())
259 file = self._stripbasepath(l[1:].strip())
260 if not self._exclude(file):
260 if not self._exclude(file):
261 self.changes[rev].del_files.append(file)
261 self.changes[rev].del_files.append(file)
262 # Modified binary file
262 # Modified binary file
263 elif l.startswith('Mb'):
263 elif l.startswith('Mb'):
264 file = self._stripbasepath(l[2:].strip())
264 file = self._stripbasepath(l[2:].strip())
265 if not self._exclude(file):
265 if not self._exclude(file):
266 self.changes[rev].mod_files.append(file)
266 self.changes[rev].mod_files.append(file)
267 # Modified link
267 # Modified link
268 elif l.startswith('M->'):
268 elif l.startswith('M->'):
269 file = self._stripbasepath(l[3:].strip())
269 file = self._stripbasepath(l[3:].strip())
270 if not self._exclude(file):
270 if not self._exclude(file):
271 self.changes[rev].mod_files.append(file)
271 self.changes[rev].mod_files.append(file)
272 # Modified file
272 # Modified file
273 elif l.startswith('M'):
273 elif l.startswith('M'):
274 file = self._stripbasepath(l[1:].strip())
274 file = self._stripbasepath(l[1:].strip())
275 if not self._exclude(file):
275 if not self._exclude(file):
276 self.changes[rev].mod_files.append(file)
276 self.changes[rev].mod_files.append(file)
277 # Renamed file (or link)
277 # Renamed file (or link)
278 elif l.startswith('=>'):
278 elif l.startswith('=>'):
279 files = l[2:].strip().split(' ')
279 files = l[2:].strip().split(' ')
280 if len(files) == 1:
280 if len(files) == 1:
281 files = l[2:].strip().split('\t')
281 files = l[2:].strip().split('\t')
282 src = self._stripbasepath(files[0])
282 src = self._stripbasepath(files[0])
283 dst = self._stripbasepath(files[1])
283 dst = self._stripbasepath(files[1])
284 if not self._exclude(src) and not self._exclude(dst):
284 if not self._exclude(src) and not self._exclude(dst):
285 self.changes[rev].ren_files[src] = dst
285 self.changes[rev].ren_files[src] = dst
286 # Conversion from file to link or from link to file (modified)
286 # Conversion from file to link or from link to file (modified)
287 elif l.startswith('ch'):
287 elif l.startswith('ch'):
288 file = self._stripbasepath(l[2:].strip())
288 file = self._stripbasepath(l[2:].strip())
289 if not self._exclude(file):
289 if not self._exclude(file):
290 self.changes[rev].mod_files.append(file)
290 self.changes[rev].mod_files.append(file)
291 # Renamed directory
291 # Renamed directory
292 elif l.startswith('/>'):
292 elif l.startswith('/>'):
293 dirs = l[2:].strip().split(' ')
293 dirs = l[2:].strip().split(' ')
294 if len(dirs) == 1:
294 if len(dirs) == 1:
295 dirs = l[2:].strip().split('\t')
295 dirs = l[2:].strip().split('\t')
296 src = self._stripbasepath(dirs[0])
296 src = self._stripbasepath(dirs[0])
297 dst = self._stripbasepath(dirs[1])
297 dst = self._stripbasepath(dirs[1])
298 if not self._exclude(src) and not self._exclude(dst):
298 if not self._exclude(src) and not self._exclude(dst):
299 self.changes[rev].ren_dirs[src] = dst
299 self.changes[rev].ren_dirs[src] = dst
@@ -1,224 +1,224 b''
1 # archival.py - revision archival for mercurial
1 # archival.py - revision archival for mercurial
2 #
2 #
3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of
5 # This software may be used and distributed according to the terms of
6 # the GNU General Public License, incorporated herein by reference.
6 # the GNU General Public License, incorporated herein by reference.
7
7
8 from i18n import _
8 from i18n import _
9 from node import hex
9 from node import hex
10 import cStringIO, os, stat, tarfile, time, util, zipfile
10 import cStringIO, os, stat, tarfile, time, util, zipfile
11 import zlib, gzip
11 import zlib, gzip
12
12
13 def tidyprefix(dest, prefix, suffixes):
13 def tidyprefix(dest, prefix, suffixes):
14 '''choose prefix to use for names in archive. make sure prefix is
14 '''choose prefix to use for names in archive. make sure prefix is
15 safe for consumers.'''
15 safe for consumers.'''
16
16
17 if prefix:
17 if prefix:
18 prefix = util.normpath(prefix)
18 prefix = util.normpath(prefix)
19 else:
19 else:
20 if not isinstance(dest, str):
20 if not isinstance(dest, str):
21 raise ValueError('dest must be string if no prefix')
21 raise ValueError('dest must be string if no prefix')
22 prefix = os.path.basename(dest)
22 prefix = os.path.basename(dest)
23 lower = prefix.lower()
23 lower = prefix.lower()
24 for sfx in suffixes:
24 for sfx in suffixes:
25 if lower.endswith(sfx):
25 if lower.endswith(sfx):
26 prefix = prefix[:-len(sfx)]
26 prefix = prefix[:-len(sfx)]
27 break
27 break
28 lpfx = os.path.normpath(util.localpath(prefix))
28 lpfx = os.path.normpath(util.localpath(prefix))
29 prefix = util.pconvert(lpfx)
29 prefix = util.pconvert(lpfx)
30 if not prefix.endswith('/'):
30 if not prefix.endswith('/'):
31 prefix += '/'
31 prefix += '/'
32 if prefix.startswith('../') or os.path.isabs(lpfx) or '/../' in prefix:
32 if prefix.startswith('../') or os.path.isabs(lpfx) or '/../' in prefix:
33 raise util.Abort(_('archive prefix contains illegal components'))
33 raise util.Abort(_('archive prefix contains illegal components'))
34 return prefix
34 return prefix
35
35
36 class tarit:
36 class tarit:
37 '''write archive to tar file or stream. can write uncompressed,
37 '''write archive to tar file or stream. can write uncompressed,
38 or compress with gzip or bzip2.'''
38 or compress with gzip or bzip2.'''
39
39
40 class GzipFileWithTime(gzip.GzipFile):
40 class GzipFileWithTime(gzip.GzipFile):
41
41
42 def __init__(self, *args, **kw):
42 def __init__(self, *args, **kw):
43 timestamp = None
43 timestamp = None
44 if 'timestamp' in kw:
44 if 'timestamp' in kw:
45 timestamp = kw.pop('timestamp')
45 timestamp = kw.pop('timestamp')
46 if timestamp == None:
46 if timestamp == None:
47 self.timestamp = time.time()
47 self.timestamp = time.time()
48 else:
48 else:
49 self.timestamp = timestamp
49 self.timestamp = timestamp
50 gzip.GzipFile.__init__(self, *args, **kw)
50 gzip.GzipFile.__init__(self, *args, **kw)
51
51
52 def _write_gzip_header(self):
52 def _write_gzip_header(self):
53 self.fileobj.write('\037\213') # magic header
53 self.fileobj.write('\037\213') # magic header
54 self.fileobj.write('\010') # compression method
54 self.fileobj.write('\010') # compression method
55 # Python 2.6 deprecates self.filename
55 # Python 2.6 deprecates self.filename
56 fname = getattr(self, 'name', None) or self.filename
56 fname = getattr(self, 'name', None) or self.filename
57 flags = 0
57 flags = 0
58 if fname:
58 if fname:
59 flags = gzip.FNAME
59 flags = gzip.FNAME
60 self.fileobj.write(chr(flags))
60 self.fileobj.write(chr(flags))
61 gzip.write32u(self.fileobj, long(self.timestamp))
61 gzip.write32u(self.fileobj, long(self.timestamp))
62 self.fileobj.write('\002')
62 self.fileobj.write('\002')
63 self.fileobj.write('\377')
63 self.fileobj.write('\377')
64 if fname:
64 if fname:
65 self.fileobj.write(fname + '\000')
65 self.fileobj.write(fname + '\000')
66
66
67 def __init__(self, dest, prefix, mtime, kind=''):
67 def __init__(self, dest, prefix, mtime, kind=''):
68 self.prefix = tidyprefix(dest, prefix, ['.tar', '.tar.bz2', '.tar.gz',
68 self.prefix = tidyprefix(dest, prefix, ['.tar', '.tar.bz2', '.tar.gz',
69 '.tgz', '.tbz2'])
69 '.tgz', '.tbz2'])
70 self.mtime = mtime
70 self.mtime = mtime
71
71
72 def taropen(name, mode, fileobj=None):
72 def taropen(name, mode, fileobj=None):
73 if kind == 'gz':
73 if kind == 'gz':
74 mode = mode[0]
74 mode = mode[0]
75 if not fileobj:
75 if not fileobj:
76 fileobj = open(name, mode + 'b')
76 fileobj = open(name, mode + 'b')
77 gzfileobj = self.GzipFileWithTime(name, mode + 'b',
77 gzfileobj = self.GzipFileWithTime(name, mode + 'b',
78 zlib.Z_BEST_COMPRESSION,
78 zlib.Z_BEST_COMPRESSION,
79 fileobj, timestamp=mtime)
79 fileobj, timestamp=mtime)
80 return tarfile.TarFile.taropen(name, mode, gzfileobj)
80 return tarfile.TarFile.taropen(name, mode, gzfileobj)
81 else:
81 else:
82 return tarfile.open(name, mode + kind, fileobj)
82 return tarfile.open(name, mode + kind, fileobj)
83
83
84 if isinstance(dest, str):
84 if isinstance(dest, str):
85 self.z = taropen(dest, mode='w:')
85 self.z = taropen(dest, mode='w:')
86 else:
86 else:
87 # Python 2.5-2.5.1 have a regression that requires a name arg
87 # Python 2.5-2.5.1 have a regression that requires a name arg
88 self.z = taropen(name='', mode='w|', fileobj=dest)
88 self.z = taropen(name='', mode='w|', fileobj=dest)
89
89
90 def addfile(self, name, mode, islink, data):
90 def addfile(self, name, mode, islink, data):
91 i = tarfile.TarInfo(self.prefix + name)
91 i = tarfile.TarInfo(self.prefix + name)
92 i.mtime = self.mtime
92 i.mtime = self.mtime
93 i.size = len(data)
93 i.size = len(data)
94 if islink:
94 if islink:
95 i.type = tarfile.SYMTYPE
95 i.type = tarfile.SYMTYPE
96 i.mode = 0777
96 i.mode = 0777
97 i.linkname = data
97 i.linkname = data
98 data = None
98 data = None
99 else:
99 else:
100 i.mode = mode
100 i.mode = mode
101 data = cStringIO.StringIO(data)
101 data = cStringIO.StringIO(data)
102 self.z.addfile(i, data)
102 self.z.addfile(i, data)
103
103
104 def done(self):
104 def done(self):
105 self.z.close()
105 self.z.close()
106
106
107 class tellable:
107 class tellable:
108 '''provide tell method for zipfile.ZipFile when writing to http
108 '''provide tell method for zipfile.ZipFile when writing to http
109 response file object.'''
109 response file object.'''
110
110
111 def __init__(self, fp):
111 def __init__(self, fp):
112 self.fp = fp
112 self.fp = fp
113 self.offset = 0
113 self.offset = 0
114
114
115 def __getattr__(self, key):
115 def __getattr__(self, key):
116 return getattr(self.fp, key)
116 return getattr(self.fp, key)
117
117
118 def write(self, s):
118 def write(self, s):
119 self.fp.write(s)
119 self.fp.write(s)
120 self.offset += len(s)
120 self.offset += len(s)
121
121
122 def tell(self):
122 def tell(self):
123 return self.offset
123 return self.offset
124
124
125 class zipit:
125 class zipit:
126 '''write archive to zip file or stream. can write uncompressed,
126 '''write archive to zip file or stream. can write uncompressed,
127 or compressed with deflate.'''
127 or compressed with deflate.'''
128
128
129 def __init__(self, dest, prefix, mtime, compress=True):
129 def __init__(self, dest, prefix, mtime, compress=True):
130 self.prefix = tidyprefix(dest, prefix, ('.zip',))
130 self.prefix = tidyprefix(dest, prefix, ('.zip',))
131 if not isinstance(dest, str):
131 if not isinstance(dest, str):
132 try:
132 try:
133 dest.tell()
133 dest.tell()
134 except (AttributeError, IOError):
134 except (AttributeError, IOError):
135 dest = tellable(dest)
135 dest = tellable(dest)
136 self.z = zipfile.ZipFile(dest, 'w',
136 self.z = zipfile.ZipFile(dest, 'w',
137 compress and zipfile.ZIP_DEFLATED or
137 compress and zipfile.ZIP_DEFLATED or
138 zipfile.ZIP_STORED)
138 zipfile.ZIP_STORED)
139 self.date_time = time.gmtime(mtime)[:6]
139 self.date_time = time.gmtime(mtime)[:6]
140
140
141 def addfile(self, name, mode, islink, data):
141 def addfile(self, name, mode, islink, data):
142 i = zipfile.ZipInfo(self.prefix + name, self.date_time)
142 i = zipfile.ZipInfo(self.prefix + name, self.date_time)
143 i.compress_type = self.z.compression
143 i.compress_type = self.z.compression
144 # unzip will not honor unix file modes unless file creator is
144 # unzip will not honor unix file modes unless file creator is
145 # set to unix (id 3).
145 # set to unix (id 3).
146 i.create_system = 3
146 i.create_system = 3
147 ftype = stat.S_IFREG
147 ftype = stat.S_IFREG
148 if islink:
148 if islink:
149 mode = 0777
149 mode = 0777
150 ftype = stat.S_IFLNK
150 ftype = stat.S_IFLNK
151 i.external_attr = (mode | ftype) << 16L
151 i.external_attr = (mode | ftype) << 16L
152 self.z.writestr(i, data)
152 self.z.writestr(i, data)
153
153
154 def done(self):
154 def done(self):
155 self.z.close()
155 self.z.close()
156
156
157 class fileit:
157 class fileit:
158 '''write archive as files in directory.'''
158 '''write archive as files in directory.'''
159
159
160 def __init__(self, name, prefix, mtime):
160 def __init__(self, name, prefix, mtime):
161 if prefix:
161 if prefix:
162 raise util.Abort(_('cannot give prefix when archiving to files'))
162 raise util.Abort(_('cannot give prefix when archiving to files'))
163 self.basedir = name
163 self.basedir = name
164 self.opener = util.opener(self.basedir)
164 self.opener = util.opener(self.basedir)
165
165
166 def addfile(self, name, mode, islink, data):
166 def addfile(self, name, mode, islink, data):
167 if islink:
167 if islink:
168 self.opener.symlink(data, name)
168 self.opener.symlink(data, name)
169 return
169 return
170 f = self.opener(name, "w", atomictemp=True)
170 f = self.opener(name, "w", atomictemp=True)
171 f.write(data)
171 f.write(data)
172 f.rename()
172 f.rename()
173 destfile = os.path.join(self.basedir, name)
173 destfile = os.path.join(self.basedir, name)
174 os.chmod(destfile, mode)
174 os.chmod(destfile, mode)
175
175
176 def done(self):
176 def done(self):
177 pass
177 pass
178
178
179 archivers = {
179 archivers = {
180 'files': fileit,
180 'files': fileit,
181 'tar': tarit,
181 'tar': tarit,
182 'tbz2': lambda name, prefix, mtime: tarit(name, prefix, mtime, 'bz2'),
182 'tbz2': lambda name, prefix, mtime: tarit(name, prefix, mtime, 'bz2'),
183 'tgz': lambda name, prefix, mtime: tarit(name, prefix, mtime, 'gz'),
183 'tgz': lambda name, prefix, mtime: tarit(name, prefix, mtime, 'gz'),
184 'uzip': lambda name, prefix, mtime: zipit(name, prefix, mtime, False),
184 'uzip': lambda name, prefix, mtime: zipit(name, prefix, mtime, False),
185 'zip': zipit,
185 'zip': zipit,
186 }
186 }
187
187
188 def archive(repo, dest, node, kind, decode=True, matchfn=None,
188 def archive(repo, dest, node, kind, decode=True, matchfn=None,
189 prefix=None, mtime=None):
189 prefix=None, mtime=None):
190 '''create archive of repo as it was at node.
190 '''create archive of repo as it was at node.
191
191
192 dest can be name of directory, name of archive file, or file
192 dest can be name of directory, name of archive file, or file
193 object to write archive to.
193 object to write archive to.
194
194
195 kind is type of archive to create.
195 kind is type of archive to create.
196
196
197 decode tells whether to put files through decode filters from
197 decode tells whether to put files through decode filters from
198 hgrc.
198 hgrc.
199
199
200 matchfn is function to filter names of files to write to archive.
200 matchfn is function to filter names of files to write to archive.
201
201
202 prefix is name of path to put before every archive member.'''
202 prefix is name of path to put before every archive member.'''
203
203
204 def write(name, mode, islink, getdata):
204 def write(name, mode, islink, getdata):
205 if matchfn and not matchfn(name): return
205 if matchfn and not matchfn(name): return
206 data = getdata()
206 data = getdata()
207 if decode:
207 if decode:
208 data = repo.wwritedata(name, data)
208 data = repo.wwritedata(name, data)
209 archiver.addfile(name, mode, islink, data)
209 archiver.addfile(name, mode, islink, data)
210
210
211 if kind not in archivers:
211 if kind not in archivers:
212 raise util.Abort(_("unknown archive type '%s'" % kind))
212 raise util.Abort(_("unknown archive type '%s'") % kind)
213
213
214 ctx = repo[node]
214 ctx = repo[node]
215 archiver = archivers[kind](dest, prefix, mtime or ctx.date()[0])
215 archiver = archivers[kind](dest, prefix, mtime or ctx.date()[0])
216
216
217 if repo.ui.configbool("ui", "archivemeta", True):
217 if repo.ui.configbool("ui", "archivemeta", True):
218 write('.hg_archival.txt', 0644, False,
218 write('.hg_archival.txt', 0644, False,
219 lambda: 'repo: %s\nnode: %s\n' % (
219 lambda: 'repo: %s\nnode: %s\n' % (
220 hex(repo.changelog.node(0)), hex(node)))
220 hex(repo.changelog.node(0)), hex(node)))
221 for f in ctx:
221 for f in ctx:
222 ff = ctx.flags(f)
222 ff = ctx.flags(f)
223 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, ctx[f].data)
223 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, ctx[f].data)
224 archiver.done()
224 archiver.done()
@@ -1,289 +1,289 b''
1 # hgweb/hgwebdir_mod.py - Web interface for a directory of repositories.
1 # hgweb/hgwebdir_mod.py - Web interface for a directory of repositories.
2 #
2 #
3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
4 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
5 #
5 #
6 # This software may be used and distributed according to the terms
6 # This software may be used and distributed according to the terms
7 # of the GNU General Public License, incorporated herein by reference.
7 # of the GNU General Public License, incorporated herein by reference.
8
8
9 import os
9 import os
10 from mercurial.i18n import gettext as _
10 from mercurial.i18n import gettext as _
11 from mercurial.repo import RepoError
11 from mercurial.repo import RepoError
12 from mercurial import ui, hg, util, templater, templatefilters
12 from mercurial import ui, hg, util, templater, templatefilters
13 from common import ErrorResponse, get_mtime, staticfile, style_map, paritygen,\
13 from common import ErrorResponse, get_mtime, staticfile, style_map, paritygen,\
14 get_contact, HTTP_OK, HTTP_NOT_FOUND, HTTP_SERVER_ERROR
14 get_contact, HTTP_OK, HTTP_NOT_FOUND, HTTP_SERVER_ERROR
15 from hgweb_mod import hgweb
15 from hgweb_mod import hgweb
16 from request import wsgirequest
16 from request import wsgirequest
17
17
18 # This is a stopgap
18 # This is a stopgap
19 class hgwebdir(object):
19 class hgwebdir(object):
20 def __init__(self, config, parentui=None):
20 def __init__(self, config, parentui=None):
21 def cleannames(items):
21 def cleannames(items):
22 return [(util.pconvert(name).strip('/'), path)
22 return [(util.pconvert(name).strip('/'), path)
23 for name, path in items]
23 for name, path in items]
24
24
25 self.parentui = parentui or ui.ui(report_untrusted=False,
25 self.parentui = parentui or ui.ui(report_untrusted=False,
26 interactive = False)
26 interactive = False)
27 self.motd = None
27 self.motd = None
28 self.style = None
28 self.style = None
29 self.stripecount = None
29 self.stripecount = None
30 self.repos_sorted = ('name', False)
30 self.repos_sorted = ('name', False)
31 self._baseurl = None
31 self._baseurl = None
32 if isinstance(config, (list, tuple)):
32 if isinstance(config, (list, tuple)):
33 self.repos = cleannames(config)
33 self.repos = cleannames(config)
34 self.repos_sorted = ('', False)
34 self.repos_sorted = ('', False)
35 elif isinstance(config, dict):
35 elif isinstance(config, dict):
36 self.repos = util.sort(cleannames(config.items()))
36 self.repos = util.sort(cleannames(config.items()))
37 else:
37 else:
38 if isinstance(config, util.configparser):
38 if isinstance(config, util.configparser):
39 cp = config
39 cp = config
40 else:
40 else:
41 cp = util.configparser()
41 cp = util.configparser()
42 cp.read(config)
42 cp.read(config)
43 self.repos = []
43 self.repos = []
44 if cp.has_section('web'):
44 if cp.has_section('web'):
45 if cp.has_option('web', 'motd'):
45 if cp.has_option('web', 'motd'):
46 self.motd = cp.get('web', 'motd')
46 self.motd = cp.get('web', 'motd')
47 if cp.has_option('web', 'style'):
47 if cp.has_option('web', 'style'):
48 self.style = cp.get('web', 'style')
48 self.style = cp.get('web', 'style')
49 if cp.has_option('web', 'stripes'):
49 if cp.has_option('web', 'stripes'):
50 self.stripecount = int(cp.get('web', 'stripes'))
50 self.stripecount = int(cp.get('web', 'stripes'))
51 if cp.has_option('web', 'baseurl'):
51 if cp.has_option('web', 'baseurl'):
52 self._baseurl = cp.get('web', 'baseurl')
52 self._baseurl = cp.get('web', 'baseurl')
53 if cp.has_section('paths'):
53 if cp.has_section('paths'):
54 self.repos.extend(cleannames(cp.items('paths')))
54 self.repos.extend(cleannames(cp.items('paths')))
55 if cp.has_section('collections'):
55 if cp.has_section('collections'):
56 for prefix, root in cp.items('collections'):
56 for prefix, root in cp.items('collections'):
57 for path in util.walkrepos(root, followsym=True):
57 for path in util.walkrepos(root, followsym=True):
58 repo = os.path.normpath(path)
58 repo = os.path.normpath(path)
59 name = repo
59 name = repo
60 if name.startswith(prefix):
60 if name.startswith(prefix):
61 name = name[len(prefix):]
61 name = name[len(prefix):]
62 self.repos.append((name.lstrip(os.sep), repo))
62 self.repos.append((name.lstrip(os.sep), repo))
63 self.repos.sort()
63 self.repos.sort()
64
64
65 def run(self):
65 def run(self):
66 if not os.environ.get('GATEWAY_INTERFACE', '').startswith("CGI/1."):
66 if not os.environ.get('GATEWAY_INTERFACE', '').startswith("CGI/1."):
67 raise RuntimeError("This function is only intended to be called while running as a CGI script.")
67 raise RuntimeError("This function is only intended to be called while running as a CGI script.")
68 import mercurial.hgweb.wsgicgi as wsgicgi
68 import mercurial.hgweb.wsgicgi as wsgicgi
69 wsgicgi.launch(self)
69 wsgicgi.launch(self)
70
70
71 def __call__(self, env, respond):
71 def __call__(self, env, respond):
72 req = wsgirequest(env, respond)
72 req = wsgirequest(env, respond)
73 return self.run_wsgi(req)
73 return self.run_wsgi(req)
74
74
75 def run_wsgi(self, req):
75 def run_wsgi(self, req):
76
76
77 try:
77 try:
78 try:
78 try:
79
79
80 virtual = req.env.get("PATH_INFO", "").strip('/')
80 virtual = req.env.get("PATH_INFO", "").strip('/')
81 tmpl = self.templater(req)
81 tmpl = self.templater(req)
82 ctype = tmpl('mimetype', encoding=util._encoding)
82 ctype = tmpl('mimetype', encoding=util._encoding)
83 ctype = templater.stringify(ctype)
83 ctype = templater.stringify(ctype)
84
84
85 # a static file
85 # a static file
86 if virtual.startswith('static/') or 'static' in req.form:
86 if virtual.startswith('static/') or 'static' in req.form:
87 static = os.path.join(templater.templatepath(), 'static')
87 static = os.path.join(templater.templatepath(), 'static')
88 if virtual.startswith('static/'):
88 if virtual.startswith('static/'):
89 fname = virtual[7:]
89 fname = virtual[7:]
90 else:
90 else:
91 fname = req.form['static'][0]
91 fname = req.form['static'][0]
92 req.write(staticfile(static, fname, req))
92 req.write(staticfile(static, fname, req))
93 return []
93 return []
94
94
95 # top-level index
95 # top-level index
96 elif not virtual:
96 elif not virtual:
97 req.respond(HTTP_OK, ctype)
97 req.respond(HTTP_OK, ctype)
98 req.write(self.makeindex(req, tmpl))
98 req.write(self.makeindex(req, tmpl))
99 return []
99 return []
100
100
101 # nested indexes and hgwebs
101 # nested indexes and hgwebs
102
102
103 repos = dict(self.repos)
103 repos = dict(self.repos)
104 while virtual:
104 while virtual:
105 real = repos.get(virtual)
105 real = repos.get(virtual)
106 if real:
106 if real:
107 req.env['REPO_NAME'] = virtual
107 req.env['REPO_NAME'] = virtual
108 try:
108 try:
109 repo = hg.repository(self.parentui, real)
109 repo = hg.repository(self.parentui, real)
110 return hgweb(repo).run_wsgi(req)
110 return hgweb(repo).run_wsgi(req)
111 except IOError, inst:
111 except IOError, inst:
112 msg = inst.strerror
112 msg = inst.strerror
113 raise ErrorResponse(HTTP_SERVER_ERROR, msg)
113 raise ErrorResponse(HTTP_SERVER_ERROR, msg)
114 except RepoError, inst:
114 except RepoError, inst:
115 raise ErrorResponse(HTTP_SERVER_ERROR, str(inst))
115 raise ErrorResponse(HTTP_SERVER_ERROR, str(inst))
116
116
117 # browse subdirectories
117 # browse subdirectories
118 subdir = virtual + '/'
118 subdir = virtual + '/'
119 if [r for r in repos if r.startswith(subdir)]:
119 if [r for r in repos if r.startswith(subdir)]:
120 req.respond(HTTP_OK, ctype)
120 req.respond(HTTP_OK, ctype)
121 req.write(self.makeindex(req, tmpl, subdir))
121 req.write(self.makeindex(req, tmpl, subdir))
122 return []
122 return []
123
123
124 up = virtual.rfind('/')
124 up = virtual.rfind('/')
125 if up < 0:
125 if up < 0:
126 break
126 break
127 virtual = virtual[:up]
127 virtual = virtual[:up]
128
128
129 # prefixes not found
129 # prefixes not found
130 req.respond(HTTP_NOT_FOUND, ctype)
130 req.respond(HTTP_NOT_FOUND, ctype)
131 req.write(tmpl("notfound", repo=virtual))
131 req.write(tmpl("notfound", repo=virtual))
132 return []
132 return []
133
133
134 except ErrorResponse, err:
134 except ErrorResponse, err:
135 req.respond(err.code, ctype)
135 req.respond(err.code, ctype)
136 req.write(tmpl('error', error=err.message or ''))
136 req.write(tmpl('error', error=err.message or ''))
137 return []
137 return []
138 finally:
138 finally:
139 tmpl = None
139 tmpl = None
140
140
141 def makeindex(self, req, tmpl, subdir=""):
141 def makeindex(self, req, tmpl, subdir=""):
142
142
143 def archivelist(ui, nodeid, url):
143 def archivelist(ui, nodeid, url):
144 allowed = ui.configlist("web", "allow_archive", untrusted=True)
144 allowed = ui.configlist("web", "allow_archive", untrusted=True)
145 for i in [('zip', '.zip'), ('gz', '.tar.gz'), ('bz2', '.tar.bz2')]:
145 for i in [('zip', '.zip'), ('gz', '.tar.gz'), ('bz2', '.tar.bz2')]:
146 if i[0] in allowed or ui.configbool("web", "allow" + i[0],
146 if i[0] in allowed or ui.configbool("web", "allow" + i[0],
147 untrusted=True):
147 untrusted=True):
148 yield {"type" : i[0], "extension": i[1],
148 yield {"type" : i[0], "extension": i[1],
149 "node": nodeid, "url": url}
149 "node": nodeid, "url": url}
150
150
151 def entries(sortcolumn="", descending=False, subdir="", **map):
151 def entries(sortcolumn="", descending=False, subdir="", **map):
152 def sessionvars(**map):
152 def sessionvars(**map):
153 fields = []
153 fields = []
154 if 'style' in req.form:
154 if 'style' in req.form:
155 style = req.form['style'][0]
155 style = req.form['style'][0]
156 if style != get('web', 'style', ''):
156 if style != get('web', 'style', ''):
157 fields.append(('style', style))
157 fields.append(('style', style))
158
158
159 separator = url[-1] == '?' and ';' or '?'
159 separator = url[-1] == '?' and ';' or '?'
160 for name, value in fields:
160 for name, value in fields:
161 yield dict(name=name, value=value, separator=separator)
161 yield dict(name=name, value=value, separator=separator)
162 separator = ';'
162 separator = ';'
163
163
164 rows = []
164 rows = []
165 parity = paritygen(self.stripecount)
165 parity = paritygen(self.stripecount)
166 for name, path in self.repos:
166 for name, path in self.repos:
167 if not name.startswith(subdir):
167 if not name.startswith(subdir):
168 continue
168 continue
169 name = name[len(subdir):]
169 name = name[len(subdir):]
170
170
171 u = ui.ui(parentui=self.parentui)
171 u = ui.ui(parentui=self.parentui)
172 try:
172 try:
173 u.readconfig(os.path.join(path, '.hg', 'hgrc'))
173 u.readconfig(os.path.join(path, '.hg', 'hgrc'))
174 except Exception, e:
174 except Exception, e:
175 u.warn(_('error reading %s/.hg/hgrc: %s\n' % (path, e)))
175 u.warn(_('error reading %s/.hg/hgrc: %s\n') % (path, e))
176 continue
176 continue
177 def get(section, name, default=None):
177 def get(section, name, default=None):
178 return u.config(section, name, default, untrusted=True)
178 return u.config(section, name, default, untrusted=True)
179
179
180 if u.configbool("web", "hidden", untrusted=True):
180 if u.configbool("web", "hidden", untrusted=True):
181 continue
181 continue
182
182
183 parts = [name]
183 parts = [name]
184 if 'PATH_INFO' in req.env:
184 if 'PATH_INFO' in req.env:
185 parts.insert(0, req.env['PATH_INFO'].rstrip('/'))
185 parts.insert(0, req.env['PATH_INFO'].rstrip('/'))
186 if req.env['SCRIPT_NAME']:
186 if req.env['SCRIPT_NAME']:
187 parts.insert(0, req.env['SCRIPT_NAME'])
187 parts.insert(0, req.env['SCRIPT_NAME'])
188 url = ('/'.join(parts).replace("//", "/")) + '/'
188 url = ('/'.join(parts).replace("//", "/")) + '/'
189
189
190 # update time with local timezone
190 # update time with local timezone
191 try:
191 try:
192 d = (get_mtime(path), util.makedate()[1])
192 d = (get_mtime(path), util.makedate()[1])
193 except OSError:
193 except OSError:
194 continue
194 continue
195
195
196 contact = get_contact(get)
196 contact = get_contact(get)
197 description = get("web", "description", "")
197 description = get("web", "description", "")
198 name = get("web", "name", name)
198 name = get("web", "name", name)
199 row = dict(contact=contact or "unknown",
199 row = dict(contact=contact or "unknown",
200 contact_sort=contact.upper() or "unknown",
200 contact_sort=contact.upper() or "unknown",
201 name=name,
201 name=name,
202 name_sort=name,
202 name_sort=name,
203 url=url,
203 url=url,
204 description=description or "unknown",
204 description=description or "unknown",
205 description_sort=description.upper() or "unknown",
205 description_sort=description.upper() or "unknown",
206 lastchange=d,
206 lastchange=d,
207 lastchange_sort=d[1]-d[0],
207 lastchange_sort=d[1]-d[0],
208 sessionvars=sessionvars,
208 sessionvars=sessionvars,
209 archives=archivelist(u, "tip", url))
209 archives=archivelist(u, "tip", url))
210 if (not sortcolumn
210 if (not sortcolumn
211 or (sortcolumn, descending) == self.repos_sorted):
211 or (sortcolumn, descending) == self.repos_sorted):
212 # fast path for unsorted output
212 # fast path for unsorted output
213 row['parity'] = parity.next()
213 row['parity'] = parity.next()
214 yield row
214 yield row
215 else:
215 else:
216 rows.append((row["%s_sort" % sortcolumn], row))
216 rows.append((row["%s_sort" % sortcolumn], row))
217 if rows:
217 if rows:
218 rows.sort()
218 rows.sort()
219 if descending:
219 if descending:
220 rows.reverse()
220 rows.reverse()
221 for key, row in rows:
221 for key, row in rows:
222 row['parity'] = parity.next()
222 row['parity'] = parity.next()
223 yield row
223 yield row
224
224
225 sortable = ["name", "description", "contact", "lastchange"]
225 sortable = ["name", "description", "contact", "lastchange"]
226 sortcolumn, descending = self.repos_sorted
226 sortcolumn, descending = self.repos_sorted
227 if 'sort' in req.form:
227 if 'sort' in req.form:
228 sortcolumn = req.form['sort'][0]
228 sortcolumn = req.form['sort'][0]
229 descending = sortcolumn.startswith('-')
229 descending = sortcolumn.startswith('-')
230 if descending:
230 if descending:
231 sortcolumn = sortcolumn[1:]
231 sortcolumn = sortcolumn[1:]
232 if sortcolumn not in sortable:
232 if sortcolumn not in sortable:
233 sortcolumn = ""
233 sortcolumn = ""
234
234
235 sort = [("sort_%s" % column,
235 sort = [("sort_%s" % column,
236 "%s%s" % ((not descending and column == sortcolumn)
236 "%s%s" % ((not descending and column == sortcolumn)
237 and "-" or "", column))
237 and "-" or "", column))
238 for column in sortable]
238 for column in sortable]
239
239
240 if self._baseurl is not None:
240 if self._baseurl is not None:
241 req.env['SCRIPT_NAME'] = self._baseurl
241 req.env['SCRIPT_NAME'] = self._baseurl
242
242
243 return tmpl("index", entries=entries, subdir=subdir,
243 return tmpl("index", entries=entries, subdir=subdir,
244 sortcolumn=sortcolumn, descending=descending,
244 sortcolumn=sortcolumn, descending=descending,
245 **dict(sort))
245 **dict(sort))
246
246
def templater(self, req):
    """Build and return the templater used to render hgwebdir pages.

    Resolution order for settings: explicit attributes on self
    (self.style, self.motd, self.stripecount) win over the parent ui's
    [web] configuration; the request's 'style' form parameter overrides
    both for the style.  When a base URL was configured, SCRIPT_NAME in
    the request environment is rewritten to it before URLs are derived.
    """

    def config(section, name, default=None, untrusted=True):
        # All config reads go through the parent ui, untrusted by default.
        return self.parentui.config(section, name, default, untrusted)

    # Template callables; they close over 'tmpl', which is bound below
    # before any of them can run.
    def header(**kwargs):
        yield tmpl('header', encoding=util._encoding, **kwargs)

    def footer(**kwargs):
        yield tmpl("footer", **kwargs)

    def motd(**kwargs):
        # An explicitly set motd takes precedence over web.motd.
        if self.motd is None:
            yield config('web', 'motd', '')
        else:
            yield self.motd

    if self._baseurl is not None:
        req.env['SCRIPT_NAME'] = self._baseurl

    # Base URL of this application, always with a trailing slash.
    url = req.env.get('SCRIPT_NAME', '')
    if not url.endswith('/'):
        url = url + '/'

    # Static files live under web.staticurl, or '<url>static/' by default;
    # normalize to a trailing slash as well.
    staticurl = config('web', 'staticurl') or (url + 'static/')
    if not staticurl.endswith('/'):
        staticurl = staticurl + '/'

    style = self.style
    if style is None:
        style = config('web', 'style', '')
    if 'style' in req.form:
        style = req.form['style'][0]

    if self.stripecount is None:
        self.stripecount = int(config('web', 'stripes', 1))

    # Map the chosen style to a template map file and build the templater.
    mapfile = style_map(templater.templatepath(), style)
    defaults = {"header": header,
                "footer": footer,
                "motd": motd,
                "url": url,
                "staticurl": staticurl}
    tmpl = templater.templater(mapfile, templatefilters.filters,
                               defaults=defaults)
    return tmpl
General Comments 0
You need to be logged in to leave comments. Login now